From a560b89a3c683a3dd97936a8c257f87eb72a3d6e Mon Sep 17 00:00:00 2001 From: cobalt77 <8194899+cobalt77@users.noreply.github.com> Date: Fri, 12 Mar 2021 21:59:55 -0500 Subject: [PATCH 01/12] work in progress --- api/v1alpha1/buildcluster_types.go | 17 +- api/v1alpha1/zz_generated.deepcopy.go | 29 ++ bake.hcl | 17 +- cmd/kcctl/commands/monitor.go | 2 +- cmd/kcctl/commands/root.go | 7 +- cmd/kcctl/commands/status.go | 27 +- cmd/kubecc/components/agent/agent.go | 19 +- cmd/kubecc/components/consumerd/consumerd.go | 5 +- cmd/kubecc/components/monitor/monitor.go | 26 +- cmd/kubecc/components/scheduler/scheduler.go | 2 +- cmd/kubecc/root.go | 5 +- config/crd/bases/kubecc.io_buildclusters.yaml | 271 ++++++++++- config/default/manager_config_patch.yaml | 7 +- config/manager/kustomization.yaml | 10 +- config/manager/manager.yaml | 5 +- config/samples/_v1alpha1_buildcluster.yaml | 12 +- controllers/buildcluster_controller.go | 6 + controllers/buildcluster_controller_test.go | 35 ++ images/kubecc/Dockerfile | 3 +- images/manager/Dockerfile | 18 - pkg/apps/agent/server.go | 63 +-- pkg/apps/agent/taskstream.go | 89 ++++ pkg/apps/cachesrv/server.go | 4 +- pkg/apps/consumerd/server.go | 6 +- pkg/apps/monitor/monitor_test.go | 13 +- pkg/apps/monitor/prometheus.go | 2 +- pkg/apps/monitor/server.go | 17 +- pkg/apps/scheduler/agent_dialer.go | 37 -- pkg/apps/scheduler/scheduler.go | 11 +- pkg/apps/scheduler/server.go | 6 +- pkg/apps/scheduler/types.go | 2 +- pkg/config/spec.go | 7 +- pkg/host/sysfs.go | 23 +- pkg/metrics/keyedbuffer.go | 2 +- pkg/metrics/listener.go | 6 +- pkg/metrics/provider.go | 4 +- pkg/rec/objects/agent_daemonset.yaml | 14 +- pkg/rec/objects/cachesrv_deployment.yaml | 58 +++ pkg/rec/objects/cachesrv_service.yaml | 12 + pkg/rec/objects/kubecc_configmap.yaml | 30 ++ pkg/rec/objects/monitor_deployment.yaml | 16 +- pkg/rec/objects/monitor_service.yaml | 5 +- pkg/rec/objects/scheduler_deployment.yaml | 16 +- pkg/resolvers/cachesrv.go | 75 ++++ pkg/resolvers/configmap.go | 37 ++ pkg/types/types.pb.go | 120 +++-- pkg/types/types_grpc.pb.go | 420 +++++++----------- pkg/ui/statusdisplay.go | 12 +- proto/types.proto | 11 +- test/integration/integration.go | 32 +- 50 files changed, 1036 insertions(+), 637 deletions(-) delete mode 100644 images/manager/Dockerfile create mode 100644 pkg/apps/agent/taskstream.go delete mode 100644 pkg/apps/scheduler/agent_dialer.go create mode 100644 pkg/rec/objects/cachesrv_deployment.yaml create mode 100644 pkg/rec/objects/cachesrv_service.yaml create mode 100644 pkg/rec/objects/kubecc_configmap.yaml create mode 100644 pkg/resolvers/cachesrv.go create mode 100644 pkg/resolvers/configmap.go diff --git a/api/v1alpha1/buildcluster_types.go b/api/v1alpha1/buildcluster_types.go index c732324..63097e6 100644 --- a/api/v1alpha1/buildcluster_types.go +++ b/api/v1alpha1/buildcluster_types.go @@ -16,6 +16,7 @@ type ComponentsSpec struct { Agent AgentSpec `json:"agent"` Scheduler SchedulerSpec `json:"scheduler,omitempty"` // +optional Monitor MonitorSpec `json:"monitor,omitempty"` // +optional + Cache CacheSpec `json:"cache,omitempty"` // +optional } type IngressSpec struct { @@ -52,8 +53,6 @@ type AgentSpec struct { // +kubebuilder:default:="gcr.io/kubecc/agent:latest" Image string `json:"image"` // +optional AdditionalLabels map[string]string `json:"additionalLabels,omitempty"` // +optional - // +kubebuilder:default:=debug - LogLevel string `json:"logLevel"` // +optional // +kubebuilder:default:=Always ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy"` // +optional } @@ 
-64,8 +63,6 @@ type SchedulerSpec struct { // +kubebuilder:default:="gcr.io/kubecc/scheduler:latest" Image string `json:"image"` // +optional AdditionalLabels map[string]string `json:"additionalLabels,omitempty"` // +optional - // +kubebuilder:default:=debug - LogLevel string `json:"logLevel"` // +optional // +kubebuilder:default:=Always ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy"` // +optional } @@ -76,8 +73,16 @@ type MonitorSpec struct { // +kubebuilder:default:="gcr.io/kubecc/monitor:latest" Image string `json:"image"` // +optional AdditionalLabels map[string]string `json:"additionalLabels,omitempty"` // +optional - // +kubebuilder:default:=debug - LogLevel string `json:"logLevel"` // +optional + // +kubebuilder:default:=Always + ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy"` // +optional +} + +type CacheSpec struct { + NodeAffinity *v1.NodeAffinity `json:"nodeAffinity,omitempty"` // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` // +optional + // +kubebuilder:default:="gcr.io/kubecc/monitor:latest" + Image string `json:"image"` // +optional + AdditionalLabels map[string]string `json:"additionalLabels,omitempty"` // +optional // +kubebuilder:default:=Always ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy"` // +optional } diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index e0aff00..5acdded 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -129,6 +129,34 @@ func (in *BuildClusterStatus) DeepCopy() *BuildClusterStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheSpec) DeepCopyInto(out *CacheSpec) { + *out = *in + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(v1.NodeAffinity) + (*in).DeepCopyInto(*out) + } + in.Resources.DeepCopyInto(&out.Resources) + if in.AdditionalLabels != nil { + in, out := &in.AdditionalLabels, &out.AdditionalLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheSpec. +func (in *CacheSpec) DeepCopy() *CacheSpec { + if in == nil { + return nil + } + out := new(CacheSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CollectorSpec) DeepCopyInto(out *CollectorSpec) { *out = *in @@ -150,6 +178,7 @@ func (in *ComponentsSpec) DeepCopyInto(out *ComponentsSpec) { in.Agent.DeepCopyInto(&out.Agent) in.Scheduler.DeepCopyInto(&out.Scheduler) in.Monitor.DeepCopyInto(&out.Monitor) + in.Cache.DeepCopyInto(&out.Cache) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentsSpec. diff --git a/bake.hcl b/bake.hcl index 5f603fc..543b837 100644 --- a/bake.hcl +++ b/bake.hcl @@ -1,29 +1,20 @@ group "default" { - targets = ["manager", "kubecc", "environment"] -} - -target "manager" { - dockerfile = "images/manager/Dockerfile" - tags = ["gcr.io/kubecc/manager"] - platforms = ["linux/amd64"] - context = "." 
- cache-from = ["type=local,src=build/cache/manager"] - cache-to = ["type=local,dest=build/cache/manager"] + targets = ["kubecc", "environment"] } target "kubecc" { dockerfile = "images/kubecc/Dockerfile" tags = ["gcr.io/kubecc/kubecc"] - platforms = ["linux/amd64"] + platforms = ["linux/amd64", "linux/arm64"] context = "." cache-from = ["type=local,src=build/cache/kubecc"] cache-to = ["type=local,dest=build/cache/kubecc"] } -target "kubecc" { +target "environment" { dockerfile = "images/environment/Dockerfile" tags = ["gcr.io/kubecc/environment"] - platforms = ["linux/amd64"] + platforms = ["linux/amd64", "linux/arm64"] context = "." cache-from = ["type=local,src=build/cache/environment"] cache-to = ["type=local,dest=build/cache/environment"] diff --git a/cmd/kcctl/commands/monitor.go b/cmd/kcctl/commands/monitor.go index f726dbb..3a50f67 100644 --- a/cmd/kcctl/commands/monitor.go +++ b/cmd/kcctl/commands/monitor.go @@ -56,7 +56,7 @@ var listenCmd = &cobra.Command{ if err != nil { cliLog.Fatal(err) } - client := types.NewExternalMonitorClient(cc) + client := types.NewMonitorClient(cc) listener := metrics.NewListener(cliContext, client) tb := &ui.TextBox{} diff --git a/cmd/kcctl/commands/root.go b/cmd/kcctl/commands/root.go index 86aaf9e..43398e5 100644 --- a/cmd/kcctl/commands/root.go +++ b/cmd/kcctl/commands/root.go @@ -6,7 +6,6 @@ import ( "os" "github.com/cobalt77/kubecc/internal/logkc" - "github.com/cobalt77/kubecc/internal/zapkc" "github.com/cobalt77/kubecc/pkg/config" "github.com/cobalt77/kubecc/pkg/identity" "github.com/cobalt77/kubecc/pkg/meta" @@ -24,10 +23,8 @@ var ( // rootCmd represents the base command when called without any subcommands. var rootCmd = &cobra.Command{ - Use: "kcctl", - Short: "A brief description of your application", - Long: fmt.Sprintf("%s\n%s", zapkc.Yellow.Add(logkc.BigAsciiTextColored), ` -The kubecc CLI utility`), + Use: "kcctl", + Long: fmt.Sprintf("%s\n%s", logkc.BigAsciiTextColored, `The kubecc CLI utility`), } // Execute adds all child commands to the root command and sets flags appropriately. diff --git a/cmd/kcctl/commands/status.go b/cmd/kcctl/commands/status.go index 3aeac57..5b430ec 100644 --- a/cmd/kcctl/commands/status.go +++ b/cmd/kcctl/commands/status.go @@ -2,10 +2,8 @@ package commands import ( "context" - "io" "github.com/cobalt77/kubecc/internal/logkc" - "github.com/cobalt77/kubecc/pkg/config" "github.com/cobalt77/kubecc/pkg/identity" "github.com/cobalt77/kubecc/pkg/meta" "github.com/cobalt77/kubecc/pkg/metrics" @@ -14,6 +12,7 @@ import ( "github.com/cobalt77/kubecc/pkg/types" "github.com/cobalt77/kubecc/pkg/ui" "github.com/spf13/cobra" + "go.uber.org/zap/zapcore" ) // statusCmd represents the status command. @@ -27,25 +26,35 @@ Cobra is a CLI library for Go that empowers applications. 
This application is a tool to generate the needed files to quickly create a Cobra application.`, Run: func(cmd *cobra.Command, args []string) { - conf := (&config.ConfigMapProvider{}).Load().Kcctl - - cc, err := servers.Dial(cliContext, conf.MonitorAddress) + cc, err := servers.Dial(cliContext, cliConfig.MonitorAddress, + servers.WithTLS(!cliConfig.DisableTLS)) if err != nil { cliLog.Fatal(err) } ctx := meta.NewContext( meta.WithProvider(identity.Component, meta.WithValue(types.CLI)), meta.WithProvider(identity.UUID), - meta.WithProvider(logkc.Logger, meta.WithValue(logkc.New(types.CLI, - logkc.WithWriter(io.Discard), + meta.WithProvider(logkc.Logger, meta.WithValue(logkc.New( + types.CLI, + logkc.WithLogLevel(zapcore.ErrorLevel), ))), ) - client := types.NewExternalMonitorClient(cc) + client := types.NewMonitorClient(cc) listener := metrics.NewListener(ctx, client) display := ui.NewStatusDisplay() listener.OnProviderAdded(func(pctx context.Context, uuid string) { - display.AddAgent(pctx, uuid) + info, err := client.Whois(ctx, &types.WhoisRequest{ + UUID: uuid, + }) + if err != nil { + return + } + if info.Component != types.Agent && info.Component != types.Consumerd { + return + } + + display.AddAgent(pctx, info) listener.OnValueChanged(uuid, func(qp *common.QueueParams) { display.Update(uuid, qp) }) diff --git a/cmd/kubecc/components/agent/agent.go b/cmd/kubecc/components/agent/agent.go index e4dd7bd..d0c4fde 100644 --- a/cmd/kubecc/components/agent/agent.go +++ b/cmd/kubecc/components/agent/agent.go @@ -1,8 +1,6 @@ package commands import ( - "net" - "github.com/cobalt77/kubecc/internal/logkc" "github.com/cobalt77/kubecc/internal/sleep" sleeptoolchain "github.com/cobalt77/kubecc/internal/sleep/toolchain" @@ -38,12 +36,6 @@ func run(cmd *cobra.Command, args []string) { ) lg := meta.Log(ctx) - srv := servers.NewServer(ctx) - listener, err := net.Listen("tcp", conf.ListenAddress) - if err != nil { - lg.With(zap.Error(err)).Fatalw("Error listening on socket") - } - schedulerCC, err := servers.Dial(ctx, conf.SchedulerAddress) lg.With("address", schedulerCC.Target()).Info("Dialing scheduler") if err != nil { @@ -57,7 +49,7 @@ func run(cmd *cobra.Command, args []string) { } schedulerClient := types.NewSchedulerClient(schedulerCC) - monitorClient := types.NewInternalMonitorClient(monitorCC) + monitorClient := types.NewMonitorClient(monitorCC) a := agent.NewAgentServer(ctx, agent.WithUsageLimits(&types.UsageLimits{ @@ -77,15 +69,10 @@ func run(cmd *cobra.Command, args []string) { agent.WithSchedulerClient(schedulerClient), agent.WithMonitorClient(monitorClient), ) - types.RegisterAgentServer(srv, a) + go a.StartMetricsProvider() mgr := servers.NewStreamManager(ctx, a) - go mgr.Run() - go a.StartMetricsProvider() - err = srv.Serve(listener) - if err != nil { - lg.With(zap.Error(err)).Error("GRPC error") - } + mgr.Run() } var Command = &cobra.Command{ diff --git a/cmd/kubecc/components/consumerd/consumerd.go b/cmd/kubecc/components/consumerd/consumerd.go index d92f7bf..6fd78c5 100644 --- a/cmd/kubecc/components/consumerd/consumerd.go +++ b/cmd/kubecc/components/consumerd/consumerd.go @@ -46,14 +46,15 @@ func run(cmd *cobra.Command, args []string) { lg.With(zap.Error(err)).Fatal("Error dialing scheduler") } - monitorCC, err := servers.Dial(ctx, conf.MonitorAddress) + monitorCC, err := servers.Dial(ctx, conf.MonitorAddress, + servers.WithTLS(!conf.DisableTLS)) lg.With("address", monitorCC.Target()).Info("Dialing monitor") if err != nil { lg.With(zap.Error(err)).Fatal("Error dialing monitor") } 
schedulerClient := types.NewSchedulerClient(schedulerCC) - monitorClient := types.NewInternalMonitorClient(monitorCC) + monitorClient := types.NewMonitorClient(monitorCC) d := consumerd.NewConsumerdServer(ctx, consumerd.WithUsageLimits(&types.UsageLimits{ diff --git a/cmd/kubecc/components/monitor/monitor.go b/cmd/kubecc/components/monitor/monitor.go index 0d82b84..5d39e68 100644 --- a/cmd/kubecc/components/monitor/monitor.go +++ b/cmd/kubecc/components/monitor/monitor.go @@ -30,31 +30,17 @@ func run(cmd *cobra.Command, args []string) { ) lg := meta.Log(ctx) - extListener, err := net.Listen("tcp", conf.ListenAddress.External) + listener, err := net.Listen("tcp", conf.ListenAddress) if err != nil { panic(err.Error()) } - lg.With("addr", extListener.Addr().String()).Info("External API listening") + lg.With("addr", listener.Addr().String()).Info("Metrics API listening") - intListener, err := net.Listen("tcp", conf.ListenAddress.Internal) - if err != nil { - panic(err.Error()) - } - lg.With("addr", intListener.Addr().String()).Info("Internal API listening") - - internal := servers.NewServer(ctx) - external := servers.NewServer(ctx) - srv := monitor.NewMonitorServer(ctx, monitor.InMemoryStoreCreator) - types.RegisterInternalMonitorServer(internal, srv) - types.RegisterExternalMonitorServer(external, srv) + srv := servers.NewServer(ctx) + monitorServer := monitor.NewMonitorServer(ctx, monitor.InMemoryStoreCreator) + types.RegisterMonitorServer(srv, monitorServer) - go func() { - err = external.Serve(extListener) - if err != nil { - lg.Error(err) - } - }() - err = internal.Serve(intListener) + err = srv.Serve(listener) if err != nil { lg.Error(err) } diff --git a/cmd/kubecc/components/scheduler/scheduler.go b/cmd/kubecc/components/scheduler/scheduler.go index 6207874..e2c625a 100644 --- a/cmd/kubecc/components/scheduler/scheduler.go +++ b/cmd/kubecc/components/scheduler/scheduler.go @@ -46,7 +46,7 @@ func run(cmd *cobra.Command, args []string) { } lg.With("address", monitorCC.Target()).Info("Dialing monitor") - monitorClient := types.NewInternalMonitorClient(monitorCC) + monitorClient := types.NewMonitorClient(monitorCC) sc := scheduler.NewSchedulerServer(ctx, scheduler.WithMonitorClient(monitorClient), diff --git a/cmd/kubecc/root.go b/cmd/kubecc/root.go index 52301f8..e70ba3c 100644 --- a/cmd/kubecc/root.go +++ b/cmd/kubecc/root.go @@ -12,13 +12,14 @@ import ( ctrlcmd "github.com/cobalt77/kubecc/cmd/kubecc/components/controller" moncmd "github.com/cobalt77/kubecc/cmd/kubecc/components/monitor" schedcmd "github.com/cobalt77/kubecc/cmd/kubecc/components/scheduler" + "github.com/cobalt77/kubecc/internal/logkc" "github.com/cobalt77/kubecc/pkg/cluster" ) // rootCmd represents the base command when called without any subcommands. var rootCmd = &cobra.Command{ - Use: "kubecc", - Short: "kubecc", + Use: "kubecc", + Long: logkc.BigAsciiTextColored, } // Execute adds all child commands to the root command and sets flags appropriately. diff --git a/config/crd/bases/kubecc.io_buildclusters.yaml b/config/crd/bases/kubecc.io_buildclusters.yaml index ad9e848..e57dd9a 100644 --- a/config/crd/bases/kubecc.io_buildclusters.yaml +++ b/config/crd/bases/kubecc.io_buildclusters.yaml @@ -51,9 +51,6 @@ spec: description: PullPolicy describes a policy for if/when to pull a container image type: string - logLevel: - default: debug - type: string nodeAffinity: description: Node affinity is a group of node affinity scheduling rules. 
@@ -298,10 +295,9 @@ spec: required: - image - imagePullPolicy - - logLevel - nodeAffinity type: object - monitor: + cache: properties: additionalLabels: additionalProperties: @@ -315,8 +311,264 @@ spec: description: PullPolicy describes a policy for if/when to pull a container image type: string - logLevel: - default: debug + nodeAffinity: + description: Node affinity is a group of node affinity scheduling + rules. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects + (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the affinity requirements + specified by this field cease to be met at some point + during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from + its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term + matches no objects. The requirements of them are + ANDed. The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + required: + - image + - imagePullPolicy + type: object + monitor: + properties: + additionalLabels: + additionalProperties: + type: string + type: object + image: + default: gcr.io/kubecc/monitor:latest + type: string + imagePullPolicy: + default: Always + description: PullPolicy describes a policy for if/when to + pull a container image type: string nodeAffinity: description: Node affinity is a group of node affinity scheduling @@ -562,7 +814,6 @@ spec: required: - image - imagePullPolicy - - logLevel type: object scheduler: properties: @@ -578,9 +829,6 @@ spec: description: PullPolicy describes a policy for if/when to pull a container image type: string - logLevel: - default: debug - type: string nodeAffinity: description: Node affinity is a group of node affinity scheduling rules. @@ -825,7 +1073,6 @@ spec: required: - image - imagePullPolicy - - logLevel type: object required: - agent diff --git a/config/default/manager_config_patch.yaml b/config/default/manager_config_patch.yaml index c3c7018..a912bc1 100644 --- a/config/default/manager_config_patch.yaml +++ b/config/default/manager_config_patch.yaml @@ -14,12 +14,7 @@ spec: - name: manager-config mountPath: /controller_manager_config.yaml subPath: controller_manager_config.yaml - - name: templates - mountPath: /templates volumes: - name: manager-config configMap: - name: manager-config - - name: templates - configMap: - name: templates + name: manager-config \ No newline at end of file diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index afa068d..508f8cc 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -7,12 +7,4 @@ generatorOptions: configMapGenerator: - files: - controller_manager_config.yaml - name: manager-config -- files: - - templates/agent_daemonset.yaml - - templates/agent_service.yaml - - templates/monitor_deployment.yaml - - templates/monitor_service.yaml - - templates/scheduler_deployment.yaml - - templates/scheduler_service.yaml - name: templates + name: manager-config \ No newline at end of file diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 0b83a36..f172b10 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -26,10 +26,11 @@ spec: runAsUser: 65532 containers: - command: - - /manager + - /kubecc + - controller args: - --leader-elect - image: gcr.io/kubecc/manager + image: gcr.io/kubecc/kubecc imagePullPolicy: Always name: manager securityContext: diff --git a/config/samples/_v1alpha1_buildcluster.yaml b/config/samples/_v1alpha1_buildcluster.yaml index d90ef8b..e5251ae 100644 --- a/config/samples/_v1alpha1_buildcluster.yaml +++ b/config/samples/_v1alpha1_buildcluster.yaml @@ -17,17 +17,13 @@ spec: resources: limits: memory: "16Gi" - image: gcr.io/kubecc/agent:latest - additionalLabels: {} 
- logLevel: debug + image: gcr.io/kubecc/kubecc:latest imagePullPolicy: Always scheduler: - image: gcr.io/kubecc/scheduler:latest - logLevel: debug + image: gcr.io/kubecc/kubecc:latest imagePullPolicy: Always monitor: - image: gcr.io/kubecc/monitor:latest - logLevel: debug + image: gcr.io/kubecc/kubecc:latest imagePullPolicy: Always tracing: jaeger: @@ -36,4 +32,4 @@ spec: internalEndpoint: http://collector.observability.svc.cluster.local:14268/api/traces sampling: type: probabilistic - param: 0.2 + param: "0.2" diff --git a/controllers/buildcluster_controller.go b/controllers/buildcluster_controller.go index 9ef6819..3fffea8 100644 --- a/controllers/buildcluster_controller.go +++ b/controllers/buildcluster_controller.go @@ -69,11 +69,17 @@ func (r *BuildClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { { Resolver: &resolvers.MonitorResolver{}, }, + { + Resolver: &resolvers.CacheSrvResolver{}, + }, }, }, { Resolver: &resolvers.TracingResolver{}, }, + { + Resolver: &resolvers.ConfigMapResolver{}, + }, }, }) diff --git a/controllers/buildcluster_controller_test.go b/controllers/buildcluster_controller_test.go index b70c85e..ff6c77e 100644 --- a/controllers/buildcluster_controller_test.go +++ b/controllers/buildcluster_controller_test.go @@ -77,6 +77,11 @@ var _ = Describe("BuildCluster Controller", func() { Image: "gcr.io/kubecc/monitor:latest", ImagePullPolicy: "Always", }, + Cache: v1alpha1.CacheSpec{ + Resources: resources, + Image: "gcr.io/kubecc/cache:latest", + ImagePullPolicy: "Always", + }, }, }, } @@ -111,6 +116,36 @@ var _ = Describe("BuildCluster Controller", func() { return err == nil }, timeout, interval).Should(BeTrue()) }) + It("Should create a monitor deployment", func() { + Eventually(func() bool { + agents := &appsv1.Deployment{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "kubecc-monitor", + Namespace: Namespace, + }, agents) + return err == nil + }, timeout, interval).Should(BeTrue()) + }) + It("Should create a cache server deployment", func() { + Eventually(func() bool { + agents := &appsv1.Deployment{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "kubecc-cache", + Namespace: Namespace, + }, agents) + return err == nil + }, timeout, interval).Should(BeTrue()) + }) + It("Should create a configmap", func() { + Eventually(func() bool { + agents := &v1.ConfigMap{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: "kubecc", + Namespace: Namespace, + }, agents) + return err == nil + }, timeout, interval).Should(BeTrue()) + }) It("Should resolve agent CRD updates", func() { cluster := &v1alpha1.BuildCluster{} err := k8sClient.Get(ctx, types.NamespacedName{ diff --git a/images/kubecc/Dockerfile b/images/kubecc/Dockerfile index 5deb322..9d3a65c 100644 --- a/images/kubecc/Dockerfile +++ b/images/kubecc/Dockerfile @@ -10,7 +10,8 @@ RUN make kubecc # Use distroless as minimal base image to package the binary # Refer to https://github.com/GoogleContainerTools/distroless for more details -FROM gcr.io/distroless/static:nonroot +#FROM gcr.io/distroless/static:nonroot +FROM busybox:latest WORKDIR / COPY --from=builder /workspace/build/bin/kubecc . USER 65532:65532 diff --git a/images/manager/Dockerfile b/images/manager/Dockerfile deleted file mode 100644 index 342d5c9..0000000 --- a/images/manager/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -FROM golang:1.16 as builder - -WORKDIR /workspace - -COPY . . 
- -RUN go mod download - -RUN make manager - -# Use distroless as minimal base image to package the binary -# Refer to https://github.com/GoogleContainerTools/distroless for more details -FROM gcr.io/distroless/static:nonroot -WORKDIR / -COPY --from=builder /workspace/build/bin/manager . -USER 65532:65532 - -ENTRYPOINT ["/manager"] diff --git a/pkg/apps/agent/server.go b/pkg/apps/agent/server.go index a56803b..2d4dc49 100644 --- a/pkg/apps/agent/server.go +++ b/pkg/apps/agent/server.go @@ -16,28 +16,24 @@ import ( "github.com/cobalt77/kubecc/pkg/util" "go.uber.org/zap" "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) type AgentServer struct { - types.UnimplementedAgentServer - AgentServerOptions srvContext context.Context executor run.Executor lg *zap.SugaredLogger tcStore *toolchains.Store - tcRunStore *run.ToolchainRunnerStore metricsProvider metrics.Provider + taskStreamMgr *TaskStreamManager } type AgentServerOptions struct { toolchainFinders []toolchains.FinderWithOptions toolchainRunners []run.StoreAddFunc schedulerClient types.SchedulerClient - monitorClient types.InternalMonitorClient + monitorClient types.MonitorClient usageLimits *types.UsageLimits } @@ -67,7 +63,7 @@ func WithSchedulerClient(client types.SchedulerClient) agentServerOption { } } -func WithMonitorClient(client types.InternalMonitorClient) agentServerOption { +func WithMonitorClient(client types.MonitorClient) agentServerOption { return func(o *AgentServerOptions) { o.monitorClient = client } @@ -96,53 +92,20 @@ func NewAgentServer( srvContext: ctx, lg: meta.Log(ctx), tcStore: toolchains.Aggregate(ctx, options.toolchainFinders...), - tcRunStore: runStore, executor: run.NewQueuedExecutor(run.WithUsageLimits(options.usageLimits)), } - return srv -} - -func (s *AgentServer) Compile( - ctx context.Context, - req *types.CompileRequest, -) (*types.CompileResponse, error) { - s.lg.Debug("Handling compile request") - if err := meta.CheckContext(ctx); err != nil { - return nil, err - } - - span, sctx, err := servers.StartSpanFromServer(ctx, "compile") - if err != nil { - s.lg.Error(err) - } else { - defer span.Finish() - } - - runner, err := s.tcRunStore.Get(req.GetToolchain().Kind) - if err != nil { - return nil, status.Error(codes.Unavailable, - "No toolchain runner available") + srv.taskStreamMgr = &TaskStreamManager{ + srvContext: ctx, + lg: meta.Log(ctx), + schedulerClient: options.schedulerClient, + tcStore: srv.tcStore, + tcRunStore: runStore, + executor: srv.executor, } + mgr := servers.NewStreamManager(ctx, srv.taskStreamMgr) + go mgr.Run() - tc, err := s.tcStore.TryMatch(req.GetToolchain()) - if err != nil { - return nil, status.Error(codes.Unavailable, - err.Error()) - } - - // Swap remote toolchain with the local toolchain in case the executable - // path is different locally - req.Toolchain = tc - resp, err := runner.RecvRemote().Run(run.Contexts{ - ServerContext: s.srvContext, - ClientContext: sctx, - }, s.executor, req) - if err != nil { - s.lg.With( - zap.Error(err), - ).Error("Error from remote runner") - } - return resp.(*types.CompileResponse), err + return srv } func (s *AgentServer) postQueueParams() { diff --git a/pkg/apps/agent/taskstream.go b/pkg/apps/agent/taskstream.go new file mode 100644 index 0000000..fac2fbd --- /dev/null +++ b/pkg/apps/agent/taskstream.go @@ -0,0 +1,89 @@ +package agent + +import ( + "context" + + "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/run" + "github.com/cobalt77/kubecc/pkg/servers" + 
"github.com/cobalt77/kubecc/pkg/toolchains" + "github.com/cobalt77/kubecc/pkg/types" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type TaskStreamManager struct { + srvContext context.Context + lg *zap.SugaredLogger + schedulerClient types.SchedulerClient + tcStore *toolchains.Store + tcRunStore *run.ToolchainRunnerStore + executor run.Executor +} + +func (s *TaskStreamManager) HandleStream(stream grpc.ClientStream) error { + s.lg.Info("Streaming tasks from scheduler") + defer s.lg.Warn("Task stream closed") + streamCtx := stream.Context() + for { + compileRequest := &types.CompileRequest{} + err := stream.RecvMsg(compileRequest) + if err != nil { + return err + } + go s.Compile(streamCtx, compileRequest) + } +} + +func (s *TaskStreamManager) TryConnect() (grpc.ClientStream, error) { + return s.schedulerClient.ConnectAgent(s.srvContext) +} + +func (s *TaskStreamManager) Target() string { + return "scheduler" +} + +func (s *TaskStreamManager) Compile( + ctx context.Context, + req *types.CompileRequest, +) (*types.CompileResponse, error) { + s.lg.Debug("Handling compile request") + if err := meta.CheckContext(ctx); err != nil { + return nil, err + } + + span, sctx, err := servers.StartSpanFromServer(ctx, "compile") + if err != nil { + s.lg.Error(err) + } else { + defer span.Finish() + } + + runner, err := s.tcRunStore.Get(req.GetToolchain().Kind) + if err != nil { + return nil, status.Error(codes.Unavailable, + "No toolchain runner available") + } + + tc, err := s.tcStore.TryMatch(req.GetToolchain()) + if err != nil { + return nil, status.Error(codes.Unavailable, + err.Error()) + } + + // Swap remote toolchain with the local toolchain in case the executable + // path is different locally + req.Toolchain = tc + resp, err := runner.RecvRemote().Run(run.Contexts{ + ServerContext: s.srvContext, + ClientContext: sctx, + }, s.executor, req) + if err != nil { + s.lg.With( + zap.Error(err), + ).Error("Error from remote runner") + } + return resp.(*types.CompileResponse), err +} diff --git a/pkg/apps/cachesrv/server.go b/pkg/apps/cachesrv/server.go index 83ea6f7..2294ff7 100644 --- a/pkg/apps/cachesrv/server.go +++ b/pkg/apps/cachesrv/server.go @@ -27,7 +27,7 @@ type CacheServer struct { type CacheServerOptions struct { storageProvider storage.StorageProvider - monitorClient types.InternalMonitorClient + monitorClient types.MonitorClient } type cacheServerOption func(*CacheServerOptions) @@ -45,7 +45,7 @@ func WithStorageProvider(sp storage.StorageProvider) cacheServerOption { } func WithMonitorClient( - client types.InternalMonitorClient, + client types.MonitorClient, ) cacheServerOption { return func(o *CacheServerOptions) { o.monitorClient = client diff --git a/pkg/apps/consumerd/server.go b/pkg/apps/consumerd/server.go index 0909bab..a7d58d7 100644 --- a/pkg/apps/consumerd/server.go +++ b/pkg/apps/consumerd/server.go @@ -48,7 +48,7 @@ type ConsumerdServerOptions struct { toolchainFinders []toolchains.FinderWithOptions toolchainRunners []run.StoreAddFunc schedulerClient types.SchedulerClient - monitorClient types.InternalMonitorClient + monitorClient types.MonitorClient schedulerConnection *grpc.ClientConn usageLimits *types.UsageLimits } @@ -83,10 +83,10 @@ func WithSchedulerClient( } } -// Note this accepts an InternalMonitorClient even though consumerd runs +// Note this accepts an MonitorClient even though consumerd runs // outside the cluster. 
func WithMonitorClient( - client types.InternalMonitorClient, + client types.MonitorClient, ) consumerdServerOption { return func(o *ConsumerdServerOptions) { o.monitorClient = client diff --git a/pkg/apps/monitor/monitor_test.go b/pkg/apps/monitor/monitor_test.go index e0f722f..17eb443 100644 --- a/pkg/apps/monitor/monitor_test.go +++ b/pkg/apps/monitor/monitor_test.go @@ -70,8 +70,7 @@ var _ = Describe("Monitor", func() { srv := servers.NewServer(monitorCtx, servers.WithServerOpts( grpc.NumStreamWorkers(12), )) - types.RegisterInternalMonitorServer(srv, mon) - types.RegisterExternalMonitorServer(srv, mon) + types.RegisterMonitorServer(srv, mon) go func() { Expect(srv.Serve(listener)).NotTo(HaveOccurred()) }() @@ -113,7 +112,7 @@ var _ = Describe("Monitor", func() { }), )) Expect(err).NotTo(HaveOccurred()) - client := types.NewExternalMonitorClient(cc) + client := types.NewMonitorClient(cc) listener := metrics.NewListener(ctx, client) listener.OnProviderAdded(func(pctx context.Context, uuid string) { listenerEvents["providerAdded"] <- uuid @@ -157,7 +156,7 @@ var _ = Describe("Monitor", func() { }), )) Expect(err).NotTo(HaveOccurred()) - client := types.NewInternalMonitorClient(cc) + client := types.NewMonitorClient(cc) provider = metrics.NewMonitorProvider(cctx, client, metrics.Buffered|metrics.Block) Expect(provider).NotTo(BeNil()) }) @@ -202,7 +201,7 @@ var _ = Describe("Monitor", func() { }), )) Expect(err).NotTo(HaveOccurred()) - client := types.NewExternalMonitorClient(cc) + client := types.NewMonitorClient(cc) listener := metrics.NewListener(ctx, client) listener.OnProviderAdded(func(pctx context.Context, uuid string) { lateJoinListenerEvents["providerAdded"] <- uuid @@ -315,7 +314,7 @@ var _ = Describe("Monitor", func() { return listener.Dial() }), )) - client := types.NewInternalMonitorClient(cc) + client := types.NewMonitorClient(cc) provider := metrics.NewMonitorProvider(ctx, client, metrics.Buffered) providers[i] = provider } @@ -339,7 +338,7 @@ var _ = Describe("Monitor", func() { return listener.Dial() }), )) - client := types.NewExternalMonitorClient(cc) + client := types.NewMonitorClient(cc) l := metrics.NewListener(ctx, client) listeners[sampleIdx] = l handler := handlers[sampleIdx%4] diff --git a/pkg/apps/monitor/prometheus.go b/pkg/apps/monitor/prometheus.go index 8015dcb..aa54158 100644 --- a/pkg/apps/monitor/prometheus.go +++ b/pkg/apps/monitor/prometheus.go @@ -202,7 +202,7 @@ func serveMetricsEndpoint(ctx context.Context, address string) { func servePrometheusMetrics( srvContext context.Context, - client types.ExternalMonitorClient, + client types.MonitorClient, ) { go serveMetricsEndpoint(srvContext, ":2112") lg := meta.Log(srvContext) diff --git a/pkg/apps/monitor/server.go b/pkg/apps/monitor/server.go index 13fcf6f..da42ccc 100644 --- a/pkg/apps/monitor/server.go +++ b/pkg/apps/monitor/server.go @@ -26,8 +26,7 @@ type Receiver interface { } type MonitorServer struct { - types.UnimplementedInternalMonitorServer - types.UnimplementedExternalMonitorServer + types.UnimplementedMonitorServer srvContext context.Context lg *zap.SugaredLogger @@ -65,7 +64,7 @@ func NewMonitorServer( func (m *MonitorServer) runPrometheusListener() { inMemoryListener := bufconn.Listen(1024 * 1024) inMemoryGrpcSrv := servers.NewServer(m.srvContext) - types.RegisterExternalMonitorServer(inMemoryGrpcSrv, m) + types.RegisterMonitorServer(inMemoryGrpcSrv, m) go func() { if err := inMemoryGrpcSrv.Serve(inMemoryListener); err != nil { @@ -89,7 +88,7 @@ func (m *MonitorServer) 
runPrometheusListener() { panic(err) } - client := types.NewExternalMonitorClient(cc) + client := types.NewMonitorClient(cc) servePrometheusMetrics(m.srvContext, client) } @@ -130,14 +129,20 @@ func providerIP(ctx context.Context) (string, error) { } func (m *MonitorServer) Stream( - srv types.InternalMonitor_StreamServer, + srv types.Monitor_StreamServer, ) (streamError error) { if err := meta.CheckContext(srv.Context()); err != nil { + m.lg.With( + zap.Error(err), + ).Error("Error handling provider stream") return err } ctx := srv.Context() addr, err := providerIP(srv.Context()) if err != nil { + m.lg.With( + zap.Error(err), + ).Error("Error handling provider stream") return err } uuid := meta.UUID(ctx) @@ -278,7 +283,7 @@ func (m *MonitorServer) post(metric *types.Metric) error { func (m *MonitorServer) Listen( key *types.Key, - srv types.ExternalMonitor_ListenServer, + srv types.Monitor_ListenServer, ) error { if err := meta.CheckContext(srv.Context()); err != nil { return err diff --git a/pkg/apps/scheduler/agent_dialer.go b/pkg/apps/scheduler/agent_dialer.go deleted file mode 100644 index 24d7ace..0000000 --- a/pkg/apps/scheduler/agent_dialer.go +++ /dev/null @@ -1,37 +0,0 @@ -package scheduler - -import ( - "context" - "fmt" - "net" - - "github.com/cobalt77/kubecc/pkg/servers" - "github.com/cobalt77/kubecc/pkg/types" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/status" -) - -type AgentDialer interface { - Dial(ctx context.Context) (types.AgentClient, error) -} - -type tcpDialer struct{} - -func (d *tcpDialer) Dial(ctx context.Context) (types.AgentClient, error) { - peer, ok := peer.FromContext(ctx) - if !ok { - return nil, status.Error(codes.Internal, - "Error identifying agent peer") - } - - cc, err := servers.Dial(ctx, - fmt.Sprintf("%s:9090", peer.Addr.(*net.TCPAddr).IP.String())) - if err != nil { - if !ok { - return nil, status.Error(codes.Internal, - "Error establishing connection to agent's server") - } - } - return types.NewAgentClient(cc), nil -} diff --git a/pkg/apps/scheduler/scheduler.go b/pkg/apps/scheduler/scheduler.go index 0106135..9e10331 100644 --- a/pkg/apps/scheduler/scheduler.go +++ b/pkg/apps/scheduler/scheduler.go @@ -54,7 +54,6 @@ type Scheduler struct { } type SchedulerOptions struct { - agentDialer AgentDialer } type schedulerOption func(*SchedulerOptions) @@ -65,16 +64,8 @@ func (o *SchedulerOptions) Apply(opts ...schedulerOption) { } } -func WithAgentDialer(d AgentDialer) schedulerOption { - return func(o *SchedulerOptions) { - o.agentDialer = d - } -} - func NewScheduler(ctx context.Context, opts ...schedulerOption) *Scheduler { - options := SchedulerOptions{ - agentDialer: &tcpDialer{}, - } + options := SchedulerOptions{} options.Apply(opts...) 
return &Scheduler{ diff --git a/pkg/apps/scheduler/server.go b/pkg/apps/scheduler/server.go index 26e892c..c1323dd 100644 --- a/pkg/apps/scheduler/server.go +++ b/pkg/apps/scheduler/server.go @@ -23,7 +23,7 @@ import ( type schedulerServer struct { types.UnimplementedSchedulerServer - monClient types.InternalMonitorClient + monClient types.MonitorClient cacheClient types.CacheClient srvContext context.Context @@ -38,7 +38,7 @@ type schedulerServer struct { type SchedulerServerOptions struct { schedulerOptions []schedulerOption - monClient types.InternalMonitorClient + monClient types.MonitorClient cacheClient types.CacheClient } @@ -56,7 +56,7 @@ func WithSchedulerOptions(opts ...schedulerOption) schedulerServerOption { } } -func WithMonitorClient(monClient types.InternalMonitorClient) schedulerServerOption { +func WithMonitorClient(monClient types.MonitorClient) schedulerServerOption { return func(o *SchedulerServerOptions) { o.monClient = monClient } diff --git a/pkg/apps/scheduler/types.go b/pkg/apps/scheduler/types.go index bc3c374..3c69e2b 100644 --- a/pkg/apps/scheduler/types.go +++ b/pkg/apps/scheduler/types.go @@ -28,7 +28,7 @@ type Agent struct { remoteInfo *sync.RWMutex - Client types.AgentClient + Client types.Scheduler_StreamTasksServer QueueStatus types.QueueStatus } diff --git a/pkg/config/spec.go b/pkg/config/spec.go index 82f4aa3..8cd0841 100644 --- a/pkg/config/spec.go +++ b/pkg/config/spec.go @@ -76,7 +76,7 @@ type SchedulerSpec struct { type MonitorSpec struct { GlobalSpec - ListenAddress MonitorListenAddressSpec `json:"listenAddress"` + ListenAddress string `json:"listenAddress"` } type CacheSpec struct { @@ -108,11 +108,6 @@ type StorageLimitsSpec struct { Disk string `json:"disk"` } -type MonitorListenAddressSpec struct { - Internal string `json:"internal"` - External string `json:"external"` -} - type KcctlSpec struct { GlobalSpec MonitorAddress string `json:"monitorAddress"` diff --git a/pkg/host/sysfs.go b/pkg/host/sysfs.go index 5330804..bdf18d6 100644 --- a/pkg/host/sysfs.go +++ b/pkg/host/sysfs.go @@ -1,8 +1,10 @@ package host import ( + "fmt" "os" "path/filepath" + "runtime" "strconv" "strings" ) @@ -11,23 +13,34 @@ const ( cgroupDir = "/sys/fs/cgroup" ) -func readInt64(path string) int64 { +func readInt64(path string) (int64, error) { data, err := os.ReadFile(filepath.Join(cgroupDir, path)) if err != nil { - panic("Could not read CFS quota from sysfs") + return 0, err } value, err := strconv.ParseInt( strings.TrimSpace(string(data)), 10, 64) if err != nil { panic(err) } - return value + return value, nil } func CfsQuota() int64 { - return readInt64("cpu/cpu.cfs_quota_us") + value, err := readInt64("cpu/cpu.cfs_quota_us") + if err != nil { + fmt.Printf("Warning: could not read CFS quota from %s. Your kernel may not be compiled with CFS Bandwidth support.\n", cgroupDir) + // Assuming CfsPeriod() will fail and return 1 + return int64(runtime.NumCPU()) + } + return value } func CfsPeriod() int64 { - return readInt64("cpu/cpu.cfs_period_us") + value, err := readInt64("cpu/cpu.cfs_period_us") + if err != nil { + fmt.Printf("Warning: could not read CFS period from %s. 
Your kernel may not be compiled with CFS Bandwidth support.\n", cgroupDir) + return 1 + } + return value } diff --git a/pkg/metrics/keyedbuffer.go b/pkg/metrics/keyedbuffer.go index 9afab14..8ac8845 100644 --- a/pkg/metrics/keyedbuffer.go +++ b/pkg/metrics/keyedbuffer.go @@ -47,7 +47,7 @@ func runWaitReceiver(postQueue chan KeyedMetric, enableQueue <-chan bool) { func NewKeyedBufferMonitorProvider( ctx context.Context, - client types.InternalMonitorClient, + client types.MonitorClient, ) Provider { provider := &keyedBufferMonitorProvider{ monitorProvider: monitorProvider{ diff --git a/pkg/metrics/listener.go b/pkg/metrics/listener.go index 453000c..3abb73a 100644 --- a/pkg/metrics/listener.go +++ b/pkg/metrics/listener.go @@ -21,7 +21,7 @@ import ( type monitorListener struct { ctx context.Context - monClient types.ExternalMonitorClient + monClient types.MonitorClient lg *zap.SugaredLogger streamOpts []servers.StreamManagerOption knownProviders map[string]context.CancelFunc @@ -30,7 +30,7 @@ type monitorListener struct { func NewListener( ctx context.Context, - client types.ExternalMonitorClient, + client types.MonitorClient, streamOpts ...servers.StreamManagerOption, ) Listener { listener := &monitorListener{ @@ -88,7 +88,7 @@ type changeListener struct { expiredHandler func() RetryOptions handler reflect.Value ehMutex *sync.Mutex - monClient types.ExternalMonitorClient + monClient types.MonitorClient key *types.Key argType reflect.Type } diff --git a/pkg/metrics/provider.go b/pkg/metrics/provider.go index 626e876..b325ca7 100644 --- a/pkg/metrics/provider.go +++ b/pkg/metrics/provider.go @@ -16,7 +16,7 @@ import ( type monitorProvider struct { ctx context.Context lg *zap.SugaredLogger - monClient types.InternalMonitorClient + monClient types.MonitorClient postQueue chan KeyedMetric queueStrategy QueueStrategy } @@ -31,7 +31,7 @@ const ( func NewMonitorProvider( ctx context.Context, - client types.InternalMonitorClient, + client types.MonitorClient, qs QueueStrategy, ) Provider { var postQueue chan KeyedMetric diff --git a/pkg/rec/objects/agent_daemonset.yaml b/pkg/rec/objects/agent_daemonset.yaml index ecd32b4..ac8a0fd 100644 --- a/pkg/rec/objects/agent_daemonset.yaml +++ b/pkg/rec/objects/agent_daemonset.yaml @@ -28,6 +28,10 @@ spec: mountPath: /tmp/kubecc-bin containers: - name: kubecc-agent + command: + - /usr/bin/kubecc + - run + - agent image: {{ .Spec.Image }} imagePullPolicy: {{ .Spec.ImagePullPolicy }} {{ if .Spec.Resources }} @@ -55,6 +59,14 @@ spec: - name: kubecc-binary mountPath: /usr/bin/kubecc subPath: kubecc + - name: config + mountPath: /etc/kubecc volumes: - name: kubecc-binary - emptyDir: {} \ No newline at end of file + emptyDir: {} + - name: config + configMap: + name: kubecc + items: + - key: config.yaml + path: config.yaml diff --git a/pkg/rec/objects/cachesrv_deployment.yaml b/pkg/rec/objects/cachesrv_deployment.yaml new file mode 100644 index 0000000..4c8096e --- /dev/null +++ b/pkg/rec/objects/cachesrv_deployment.yaml @@ -0,0 +1,58 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kubecc-cache +spec: + replicas: 1 + selector: + matchLabels: + app: kubecc-cache + template: + metadata: + labels: + app: kubecc-cache + spec: +{{ if .Spec.NodeAffinity }} + affinity: + nodeAffinity: +{{ .Spec.NodeAffinity | toYaml | indent 10 }} +{{ end }} + containers: + - name: kubecc-cache + command: + - /kubecc + - run + - cache + image: {{ .Spec.Image }} + imagePullPolicy: {{ .Spec.ImagePullPolicy }} +{{ if .Spec.Resources }} + resources: +{{ .Spec.Resources | toYaml | 
indent 12 }} +{{ end }} + env: + - name: KUBECC_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KUBECC_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: KUBECC_NODE + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - name: grpc + containerPort: 9090 + protocol: TCP + volumeMounts: + - name: config + mountPath: /etc/kubecc + volumes: + - name: config + configMap: + name: kubecc + items: + - key: config.yaml + path: config.yaml \ No newline at end of file diff --git a/pkg/rec/objects/cachesrv_service.yaml b/pkg/rec/objects/cachesrv_service.yaml new file mode 100644 index 0000000..946e7a7 --- /dev/null +++ b/pkg/rec/objects/cachesrv_service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: kubecc-cache +spec: + selector: + app: kubecc-cache + type: ClusterIP + ports: + - name: grpc + port: 9090 + protocol: TCP \ No newline at end of file diff --git a/pkg/rec/objects/kubecc_configmap.yaml b/pkg/rec/objects/kubecc_configmap.yaml new file mode 100644 index 0000000..667e0e8 --- /dev/null +++ b/pkg/rec/objects/kubecc_configmap.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kubecc +data: + config.yaml: | + global: + logLevel: info + agent: + usageLimits: + concurrentProcessLimit: -1 + queuePressureMultiplier: 1 + queueRejectMultiplier: 2 + schedulerAddress: kubecc-scheduler:9090 + monitorAddress: kubecc-monitor:9090 + listenAddress: :9090 + scheduler: + monitorAddress: kubecc-monitor:9090 + cacheAddress: kubecc-cache:9090 + listenAddress: :9090 + monitor: + listenAddress: :9090 + cache: + monitorAddress: kubecc-monitor:9090 + listenAddress: :9090 + localStorage: + path: /var/lib/kubecc/cache + limits: + disk: 50Gi + memory: 1Gi diff --git a/pkg/rec/objects/monitor_deployment.yaml b/pkg/rec/objects/monitor_deployment.yaml index 7f02281..3257ccd 100644 --- a/pkg/rec/objects/monitor_deployment.yaml +++ b/pkg/rec/objects/monitor_deployment.yaml @@ -19,6 +19,10 @@ spec: {{ end }} containers: - name: kubecc-monitor + command: + - /kubecc + - run + - monitor image: {{ .Spec.Image }} imagePullPolicy: {{ .Spec.ImagePullPolicy }} {{ if .Spec.Resources }} @@ -41,4 +45,14 @@ spec: ports: - name: grpc containerPort: 9090 - protocol: TCP \ No newline at end of file + protocol: TCP + volumeMounts: + - name: config + mountPath: /etc/kubecc + volumes: + - name: config + configMap: + name: kubecc + items: + - key: config.yaml + path: config.yaml \ No newline at end of file diff --git a/pkg/rec/objects/monitor_service.yaml b/pkg/rec/objects/monitor_service.yaml index f2ff196..4457dcf 100644 --- a/pkg/rec/objects/monitor_service.yaml +++ b/pkg/rec/objects/monitor_service.yaml @@ -7,9 +7,6 @@ spec: app: kubecc-monitor type: ClusterIP ports: - - name: external-grpc + - name: grpc port: 9090 protocol: TCP - - name: internal-grpc - port: 9091 - protocol: TCP diff --git a/pkg/rec/objects/scheduler_deployment.yaml b/pkg/rec/objects/scheduler_deployment.yaml index 38eb1ca..3890243 100644 --- a/pkg/rec/objects/scheduler_deployment.yaml +++ b/pkg/rec/objects/scheduler_deployment.yaml @@ -19,6 +19,10 @@ spec: {{ end }} containers: - name: kubecc-scheduler + command: + - /kubecc + - run + - scheduler image: {{ .Spec.Image }} imagePullPolicy: {{ .Spec.ImagePullPolicy }} {{ if .Spec.Resources }} @@ -41,4 +45,14 @@ spec: ports: - name: grpc containerPort: 9090 - protocol: TCP \ No newline at end of file + protocol: TCP + volumeMounts: + - name: config + mountPath: /etc/kubecc + volumes: + - name: config + configMap: + 
name: kubecc + items: + - key: config.yaml + path: config.yaml \ No newline at end of file diff --git a/pkg/resolvers/cachesrv.go b/pkg/resolvers/cachesrv.go new file mode 100644 index 0000000..017250f --- /dev/null +++ b/pkg/resolvers/cachesrv.go @@ -0,0 +1,75 @@ +package resolvers + +import ( + "github.com/cobalt77/kubecc/api/v1alpha1" + "github.com/cobalt77/kubecc/pkg/rec" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type CacheSrvResolver struct{} + +const ( + cacheSrvAppName = "kubecc-cache" +) + +func (r *CacheSrvResolver) Resolve( + rc rec.ResolveContext, +) (ctrl.Result, error) { + cacheSpec := rc.Object.(v1alpha1.CacheSpec) + deployment := &appsv1.Deployment{} + res, err := rec.Find(rc, types.NamespacedName{ + Namespace: rc.RootObject.GetNamespace(), + Name: cacheSrvAppName, + }, deployment, + rec.WithCreator(rec.FromTemplate("objects/cachesrv_deployment.yaml")), + rec.RecreateIfChanged(), + ) + if rec.ShouldRequeue(res, err) { + return rec.RequeueWith(res, err) + } + staticLabels := map[string]string{ + "app": cacheSrvAppName, + } + + res, err = rec.UpdateIfNeeded(rc, deployment, + []rec.Updater{ + rec.AffinityUpdater(cacheSpec.NodeAffinity, + &deployment.Spec.Template.Spec), + rec.ResourceUpdater(cacheSpec.Resources, + &deployment.Spec.Template.Spec, 0), + rec.ImageUpdater(cacheSpec.Image, + &deployment.Spec.Template.Spec, 0), + rec.PullPolicyUpdater(cacheSpec.ImagePullPolicy, + &deployment.Spec.Template.Spec, 0), + rec.LabelUpdater(cacheSpec.AdditionalLabels, + &deployment.Spec.Template, + staticLabels, + ), + }, + ) + if rec.ShouldRequeue(res, err) { + return rec.RequeueWith(res, err) + } + + svc := &v1.Service{} + res, err = rec.Find(rc, types.NamespacedName{ + Namespace: rc.RootObject.GetNamespace(), + Name: cacheSrvAppName, + }, svc, + rec.WithCreator(rec.FromTemplate("objects/cachesrv_service.yaml")), + rec.RecreateIfChanged(), + ) + if rec.ShouldRequeue(res, err) { + return rec.RequeueWith(res, err) + } + + return rec.DoNotRequeue() +} + +func (r *CacheSrvResolver) Find(root client.Object) interface{} { + return root.(*v1alpha1.BuildCluster).Spec.Components.Cache +} diff --git a/pkg/resolvers/configmap.go b/pkg/resolvers/configmap.go new file mode 100644 index 0000000..146be61 --- /dev/null +++ b/pkg/resolvers/configmap.go @@ -0,0 +1,37 @@ +package resolvers + +import ( + "github.com/cobalt77/kubecc/api/v1alpha1" + "github.com/cobalt77/kubecc/pkg/rec" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ConfigMapResolver struct{} + +const ( + configMapName = "kubecc" +) + +func (r *ConfigMapResolver) Resolve( + rc rec.ResolveContext, +) (ctrl.Result, error) { + configMap := &v1.ConfigMap{} + res, err := rec.Find(rc, types.NamespacedName{ + Namespace: rc.RootObject.GetNamespace(), + Name: configMapName, + }, configMap, + rec.WithCreator(rec.FromTemplate("objects/kubecc_configmap.yaml")), + ) + if rec.ShouldRequeue(res, err) { + return rec.RequeueWith(res, err) + } + + return rec.DoNotRequeue() +} + +func (r *ConfigMapResolver) Find(root client.Object) interface{} { + return root.(*v1alpha1.BuildCluster).Spec.Components +} diff --git a/pkg/types/types.pb.go b/pkg/types/types.pb.go index 09c7583..fa7c390 100644 --- a/pkg/types/types.pb.go +++ b/pkg/types/types.pb.go @@ -2291,41 +2291,37 @@ var file_proto_types_proto_rawDesc = []byte{ 0x6e, 
0x4c, 0x61, 0x6e, 0x67, 0x5f, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x10, 0x03, 0x32, 0x2d, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x64, 0x12, 0x20, 0x0a, 0x03, 0x52, 0x75, 0x6e, 0x12, 0x0b, 0x2e, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0c, - 0x2e, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x5d, 0x0a, 0x05, - 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x07, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, - 0x12, 0x0f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x10, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x53, 0x65, 0x74, 0x55, 0x73, 0x61, 0x67, 0x65, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x0c, 0x2e, 0x55, 0x73, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x73, 0x1a, 0x06, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0x8b, 0x01, 0x0a, 0x09, - 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x07, 0x43, 0x6f, 0x6d, - 0x70, 0x69, 0x6c, 0x65, 0x12, 0x0f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x09, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x1a, 0x06, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x28, 0x01, 0x30, 0x01, 0x12, 0x29, - 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, - 0x72, 0x64, 0x12, 0x09, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x06, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x28, 0x01, 0x30, 0x01, 0x32, 0x30, 0x0a, 0x0f, 0x49, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x06, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x07, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x1a, - 0x06, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x28, 0x01, 0x30, 0x01, 0x32, 0x53, 0x0a, 0x0f, 0x45, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x12, 0x18, - 0x0a, 0x06, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x12, 0x04, 0x2e, 0x4b, 0x65, 0x79, 0x1a, 0x06, - 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x30, 0x01, 0x12, 0x26, 0x0a, 0x05, 0x57, 0x68, 0x6f, 0x69, - 0x73, 0x12, 0x0d, 0x2e, 0x57, 0x68, 0x6f, 0x69, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x0e, 0x2e, 0x57, 0x68, 0x6f, 0x69, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x32, 0x97, 0x01, 0x0a, 0x05, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1c, 0x0a, 0x04, 0x50, 0x75, - 0x73, 0x68, 0x12, 0x0c, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x06, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x04, 0x50, 0x75, 0x6c, 0x6c, - 0x12, 0x0c, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0c, - 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x0a, 0x05, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x0d, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x0c, 0x2e, 0x53, - 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0c, 0x2e, 0x43, 0x61, 0x63, - 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 
0x63, 0x74, 0x30, 0x01, 0x42, 0x0b, 0x5a, 0x09, 0x70, 0x6b, - 0x67, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xc1, 0x01, 0x0a, + 0x09, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x07, 0x43, 0x6f, + 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x12, 0x0f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x09, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x1a, 0x06, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x28, 0x01, 0x30, 0x01, 0x12, + 0x29, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, + 0x65, 0x72, 0x64, 0x12, 0x09, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x06, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x28, 0x01, 0x30, 0x01, 0x12, 0x34, 0x0a, 0x0b, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x10, 0x2e, 0x43, 0x6f, 0x6d, 0x70, + 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x1a, 0x0f, 0x2e, 0x43, 0x6f, + 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x28, 0x01, 0x30, 0x01, + 0x32, 0x6a, 0x0a, 0x07, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x06, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x07, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x1a, 0x06, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x28, 0x01, 0x30, 0x01, 0x12, 0x18, 0x0a, 0x06, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x12, 0x04, 0x2e, 0x4b, 0x65, 0x79, 0x1a, 0x06, 0x2e, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x30, 0x01, 0x12, 0x26, 0x0a, 0x05, 0x57, 0x68, 0x6f, 0x69, 0x73, 0x12, 0x0d, 0x2e, + 0x57, 0x68, 0x6f, 0x69, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x57, + 0x68, 0x6f, 0x69, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x97, 0x01, 0x0a, + 0x05, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1c, 0x0a, 0x04, 0x50, 0x75, 0x73, 0x68, 0x12, 0x0c, + 0x2e, 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x06, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x04, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x0c, 0x2e, 0x50, + 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0c, 0x2e, 0x43, 0x61, 0x63, + 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x12, 0x0d, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x0e, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x24, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x0c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0c, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x30, 0x01, 0x42, 0x0b, 0x5a, 0x09, 0x70, 0x6b, 0x67, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2403,33 +2399,31 @@ var file_proto_types_proto_depIdxs = []int32{ 24, // 19: CompileRequest.Toolchain:type_name -> Toolchain 5, // 20: CompileResponse.CompileResult:type_name -> CompileResponse.Result 27, // 21: Consumerd.Run:input_type -> RunRequest - 33, // 22: Agent.Compile:input_type -> CompileRequest - 21, // 23: Agent.SetUsageLimits:input_type -> UsageLimits - 33, // 24: 
Scheduler.Compile:input_type -> CompileRequest - 23, // 25: Scheduler.ConnectAgent:input_type -> Metadata - 23, // 26: Scheduler.ConnectConsumerd:input_type -> Metadata - 18, // 27: InternalMonitor.Stream:input_type -> Metric - 19, // 28: ExternalMonitor.Listen:input_type -> Key - 16, // 29: ExternalMonitor.Whois:input_type -> WhoisRequest - 7, // 30: Cache.Push:input_type -> PushRequest - 8, // 31: Cache.Pull:input_type -> PullRequest - 9, // 32: Cache.Query:input_type -> QueryRequest - 11, // 33: Cache.Sync:input_type -> SyncRequest - 28, // 34: Consumerd.Run:output_type -> RunResponse - 34, // 35: Agent.Compile:output_type -> CompileResponse - 6, // 36: Agent.SetUsageLimits:output_type -> Empty - 34, // 37: Scheduler.Compile:output_type -> CompileResponse - 6, // 38: Scheduler.ConnectAgent:output_type -> Empty - 6, // 39: Scheduler.ConnectConsumerd:output_type -> Empty - 6, // 40: InternalMonitor.Stream:output_type -> Empty - 20, // 41: ExternalMonitor.Listen:output_type -> Value - 17, // 42: ExternalMonitor.Whois:output_type -> WhoisResponse - 6, // 43: Cache.Push:output_type -> Empty - 13, // 44: Cache.Pull:output_type -> CacheObject - 10, // 45: Cache.Query:output_type -> QueryResponse - 13, // 46: Cache.Sync:output_type -> CacheObject - 34, // [34:47] is the sub-list for method output_type - 21, // [21:34] is the sub-list for method input_type + 33, // 22: Scheduler.Compile:input_type -> CompileRequest + 23, // 23: Scheduler.ConnectAgent:input_type -> Metadata + 23, // 24: Scheduler.ConnectConsumerd:input_type -> Metadata + 34, // 25: Scheduler.StreamTasks:input_type -> CompileResponse + 18, // 26: Monitor.Stream:input_type -> Metric + 19, // 27: Monitor.Listen:input_type -> Key + 16, // 28: Monitor.Whois:input_type -> WhoisRequest + 7, // 29: Cache.Push:input_type -> PushRequest + 8, // 30: Cache.Pull:input_type -> PullRequest + 9, // 31: Cache.Query:input_type -> QueryRequest + 11, // 32: Cache.Sync:input_type -> SyncRequest + 28, // 33: Consumerd.Run:output_type -> RunResponse + 34, // 34: Scheduler.Compile:output_type -> CompileResponse + 6, // 35: Scheduler.ConnectAgent:output_type -> Empty + 6, // 36: Scheduler.ConnectConsumerd:output_type -> Empty + 33, // 37: Scheduler.StreamTasks:output_type -> CompileRequest + 6, // 38: Monitor.Stream:output_type -> Empty + 20, // 39: Monitor.Listen:output_type -> Value + 17, // 40: Monitor.Whois:output_type -> WhoisResponse + 6, // 41: Cache.Push:output_type -> Empty + 13, // 42: Cache.Pull:output_type -> CacheObject + 10, // 43: Cache.Query:output_type -> QueryResponse + 13, // 44: Cache.Sync:output_type -> CacheObject + 33, // [33:45] is the sub-list for method output_type + 21, // [21:33] is the sub-list for method input_type 21, // [21:21] is the sub-list for extension type_name 21, // [21:21] is the sub-list for extension extendee 0, // [0:21] is the sub-list for field type_name @@ -2806,7 +2800,7 @@ func file_proto_types_proto_init() { NumEnums: 6, NumMessages: 30, NumExtensions: 0, - NumServices: 6, + NumServices: 4, }, GoTypes: file_proto_types_proto_goTypes, DependencyIndexes: file_proto_types_proto_depIdxs, diff --git a/pkg/types/types_grpc.pb.go b/pkg/types/types_grpc.pb.go index 130cc62..9d5710b 100644 --- a/pkg/types/types_grpc.pb.go +++ b/pkg/types/types_grpc.pb.go @@ -100,128 +100,6 @@ var Consumerd_ServiceDesc = grpc.ServiceDesc{ Metadata: "proto/types.proto", } -// AgentClient is the client API for Agent service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type AgentClient interface { - Compile(ctx context.Context, in *CompileRequest, opts ...grpc.CallOption) (*CompileResponse, error) - SetUsageLimits(ctx context.Context, in *UsageLimits, opts ...grpc.CallOption) (*Empty, error) -} - -type agentClient struct { - cc grpc.ClientConnInterface -} - -func NewAgentClient(cc grpc.ClientConnInterface) AgentClient { - return &agentClient{cc} -} - -func (c *agentClient) Compile(ctx context.Context, in *CompileRequest, opts ...grpc.CallOption) (*CompileResponse, error) { - out := new(CompileResponse) - err := c.cc.Invoke(ctx, "/Agent/Compile", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *agentClient) SetUsageLimits(ctx context.Context, in *UsageLimits, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := c.cc.Invoke(ctx, "/Agent/SetUsageLimits", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// AgentServer is the server API for Agent service. -// All implementations must embed UnimplementedAgentServer -// for forward compatibility -type AgentServer interface { - Compile(context.Context, *CompileRequest) (*CompileResponse, error) - SetUsageLimits(context.Context, *UsageLimits) (*Empty, error) - mustEmbedUnimplementedAgentServer() -} - -// UnimplementedAgentServer must be embedded to have forward compatible implementations. -type UnimplementedAgentServer struct { -} - -func (UnimplementedAgentServer) Compile(context.Context, *CompileRequest) (*CompileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Compile not implemented") -} -func (UnimplementedAgentServer) SetUsageLimits(context.Context, *UsageLimits) (*Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetUsageLimits not implemented") -} -func (UnimplementedAgentServer) mustEmbedUnimplementedAgentServer() {} - -// UnsafeAgentServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to AgentServer will -// result in compilation errors. 
-type UnsafeAgentServer interface { - mustEmbedUnimplementedAgentServer() -} - -func RegisterAgentServer(s grpc.ServiceRegistrar, srv AgentServer) { - s.RegisterService(&Agent_ServiceDesc, srv) -} - -func _Agent_Compile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CompileRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AgentServer).Compile(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/Agent/Compile", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentServer).Compile(ctx, req.(*CompileRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Agent_SetUsageLimits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UsageLimits) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AgentServer).SetUsageLimits(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/Agent/SetUsageLimits", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentServer).SetUsageLimits(ctx, req.(*UsageLimits)) - } - return interceptor(ctx, in, info, handler) -} - -// Agent_ServiceDesc is the grpc.ServiceDesc for Agent service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Agent_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "Agent", - HandlerType: (*AgentServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Compile", - Handler: _Agent_Compile_Handler, - }, - { - MethodName: "SetUsageLimits", - Handler: _Agent_SetUsageLimits_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "proto/types.proto", -} - // SchedulerClient is the client API for Scheduler service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -229,6 +107,7 @@ type SchedulerClient interface { Compile(ctx context.Context, in *CompileRequest, opts ...grpc.CallOption) (*CompileResponse, error) ConnectAgent(ctx context.Context, opts ...grpc.CallOption) (Scheduler_ConnectAgentClient, error) ConnectConsumerd(ctx context.Context, opts ...grpc.CallOption) (Scheduler_ConnectConsumerdClient, error) + StreamTasks(ctx context.Context, opts ...grpc.CallOption) (Scheduler_StreamTasksClient, error) } type schedulerClient struct { @@ -310,6 +189,37 @@ func (x *schedulerConnectConsumerdClient) Recv() (*Empty, error) { return m, nil } +func (c *schedulerClient) StreamTasks(ctx context.Context, opts ...grpc.CallOption) (Scheduler_StreamTasksClient, error) { + stream, err := c.cc.NewStream(ctx, &Scheduler_ServiceDesc.Streams[2], "/Scheduler/StreamTasks", opts...) 
+ if err != nil { + return nil, err + } + x := &schedulerStreamTasksClient{stream} + return x, nil +} + +type Scheduler_StreamTasksClient interface { + Send(*CompileResponse) error + Recv() (*CompileRequest, error) + grpc.ClientStream +} + +type schedulerStreamTasksClient struct { + grpc.ClientStream +} + +func (x *schedulerStreamTasksClient) Send(m *CompileResponse) error { + return x.ClientStream.SendMsg(m) +} + +func (x *schedulerStreamTasksClient) Recv() (*CompileRequest, error) { + m := new(CompileRequest) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // SchedulerServer is the server API for Scheduler service. // All implementations must embed UnimplementedSchedulerServer // for forward compatibility @@ -317,6 +227,7 @@ type SchedulerServer interface { Compile(context.Context, *CompileRequest) (*CompileResponse, error) ConnectAgent(Scheduler_ConnectAgentServer) error ConnectConsumerd(Scheduler_ConnectConsumerdServer) error + StreamTasks(Scheduler_StreamTasksServer) error mustEmbedUnimplementedSchedulerServer() } @@ -333,6 +244,9 @@ func (UnimplementedSchedulerServer) ConnectAgent(Scheduler_ConnectAgentServer) e func (UnimplementedSchedulerServer) ConnectConsumerd(Scheduler_ConnectConsumerdServer) error { return status.Errorf(codes.Unimplemented, "method ConnectConsumerd not implemented") } +func (UnimplementedSchedulerServer) StreamTasks(Scheduler_StreamTasksServer) error { + return status.Errorf(codes.Unimplemented, "method StreamTasks not implemented") +} func (UnimplementedSchedulerServer) mustEmbedUnimplementedSchedulerServer() {} // UnsafeSchedulerServer may be embedded to opt out of forward compatibility for this service. @@ -416,6 +330,32 @@ func (x *schedulerConnectConsumerdServer) Recv() (*Metadata, error) { return m, nil } +func _Scheduler_StreamTasks_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SchedulerServer).StreamTasks(&schedulerStreamTasksServer{stream}) +} + +type Scheduler_StreamTasksServer interface { + Send(*CompileRequest) error + Recv() (*CompileResponse, error) + grpc.ServerStream +} + +type schedulerStreamTasksServer struct { + grpc.ServerStream +} + +func (x *schedulerStreamTasksServer) Send(m *CompileRequest) error { + return x.ServerStream.SendMsg(m) +} + +func (x *schedulerStreamTasksServer) Recv() (*CompileResponse, error) { + m := new(CompileResponse) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // Scheduler_ServiceDesc is the grpc.ServiceDesc for Scheduler service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -441,49 +381,57 @@ var Scheduler_ServiceDesc = grpc.ServiceDesc{ ServerStreams: true, ClientStreams: true, }, + { + StreamName: "StreamTasks", + Handler: _Scheduler_StreamTasks_Handler, + ServerStreams: true, + ClientStreams: true, + }, }, Metadata: "proto/types.proto", } -// InternalMonitorClient is the client API for InternalMonitor service. +// MonitorClient is the client API for Monitor service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
-type InternalMonitorClient interface { - Stream(ctx context.Context, opts ...grpc.CallOption) (InternalMonitor_StreamClient, error) +type MonitorClient interface { + Stream(ctx context.Context, opts ...grpc.CallOption) (Monitor_StreamClient, error) + Listen(ctx context.Context, in *Key, opts ...grpc.CallOption) (Monitor_ListenClient, error) + Whois(ctx context.Context, in *WhoisRequest, opts ...grpc.CallOption) (*WhoisResponse, error) } -type internalMonitorClient struct { +type monitorClient struct { cc grpc.ClientConnInterface } -func NewInternalMonitorClient(cc grpc.ClientConnInterface) InternalMonitorClient { - return &internalMonitorClient{cc} +func NewMonitorClient(cc grpc.ClientConnInterface) MonitorClient { + return &monitorClient{cc} } -func (c *internalMonitorClient) Stream(ctx context.Context, opts ...grpc.CallOption) (InternalMonitor_StreamClient, error) { - stream, err := c.cc.NewStream(ctx, &InternalMonitor_ServiceDesc.Streams[0], "/InternalMonitor/Stream", opts...) +func (c *monitorClient) Stream(ctx context.Context, opts ...grpc.CallOption) (Monitor_StreamClient, error) { + stream, err := c.cc.NewStream(ctx, &Monitor_ServiceDesc.Streams[0], "/Monitor/Stream", opts...) if err != nil { return nil, err } - x := &internalMonitorStreamClient{stream} + x := &monitorStreamClient{stream} return x, nil } -type InternalMonitor_StreamClient interface { +type Monitor_StreamClient interface { Send(*Metric) error Recv() (*Empty, error) grpc.ClientStream } -type internalMonitorStreamClient struct { +type monitorStreamClient struct { grpc.ClientStream } -func (x *internalMonitorStreamClient) Send(m *Metric) error { +func (x *monitorStreamClient) Send(m *Metric) error { return x.ClientStream.SendMsg(m) } -func (x *internalMonitorStreamClient) Recv() (*Empty, error) { +func (x *monitorStreamClient) Recv() (*Empty, error) { m := new(Empty) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err @@ -491,100 +439,12 @@ func (x *internalMonitorStreamClient) Recv() (*Empty, error) { return m, nil } -// InternalMonitorServer is the server API for InternalMonitor service. -// All implementations must embed UnimplementedInternalMonitorServer -// for forward compatibility -type InternalMonitorServer interface { - Stream(InternalMonitor_StreamServer) error - mustEmbedUnimplementedInternalMonitorServer() -} - -// UnimplementedInternalMonitorServer must be embedded to have forward compatible implementations. -type UnimplementedInternalMonitorServer struct { -} - -func (UnimplementedInternalMonitorServer) Stream(InternalMonitor_StreamServer) error { - return status.Errorf(codes.Unimplemented, "method Stream not implemented") -} -func (UnimplementedInternalMonitorServer) mustEmbedUnimplementedInternalMonitorServer() {} - -// UnsafeInternalMonitorServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to InternalMonitorServer will -// result in compilation errors. 
-type UnsafeInternalMonitorServer interface { - mustEmbedUnimplementedInternalMonitorServer() -} - -func RegisterInternalMonitorServer(s grpc.ServiceRegistrar, srv InternalMonitorServer) { - s.RegisterService(&InternalMonitor_ServiceDesc, srv) -} - -func _InternalMonitor_Stream_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(InternalMonitorServer).Stream(&internalMonitorStreamServer{stream}) -} - -type InternalMonitor_StreamServer interface { - Send(*Empty) error - Recv() (*Metric, error) - grpc.ServerStream -} - -type internalMonitorStreamServer struct { - grpc.ServerStream -} - -func (x *internalMonitorStreamServer) Send(m *Empty) error { - return x.ServerStream.SendMsg(m) -} - -func (x *internalMonitorStreamServer) Recv() (*Metric, error) { - m := new(Metric) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// InternalMonitor_ServiceDesc is the grpc.ServiceDesc for InternalMonitor service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var InternalMonitor_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "InternalMonitor", - HandlerType: (*InternalMonitorServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Stream", - Handler: _InternalMonitor_Stream_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "proto/types.proto", -} - -// ExternalMonitorClient is the client API for ExternalMonitor service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type ExternalMonitorClient interface { - Listen(ctx context.Context, in *Key, opts ...grpc.CallOption) (ExternalMonitor_ListenClient, error) - Whois(ctx context.Context, in *WhoisRequest, opts ...grpc.CallOption) (*WhoisResponse, error) -} - -type externalMonitorClient struct { - cc grpc.ClientConnInterface -} - -func NewExternalMonitorClient(cc grpc.ClientConnInterface) ExternalMonitorClient { - return &externalMonitorClient{cc} -} - -func (c *externalMonitorClient) Listen(ctx context.Context, in *Key, opts ...grpc.CallOption) (ExternalMonitor_ListenClient, error) { - stream, err := c.cc.NewStream(ctx, &ExternalMonitor_ServiceDesc.Streams[0], "/ExternalMonitor/Listen", opts...) +func (c *monitorClient) Listen(ctx context.Context, in *Key, opts ...grpc.CallOption) (Monitor_ListenClient, error) { + stream, err := c.cc.NewStream(ctx, &Monitor_ServiceDesc.Streams[1], "/Monitor/Listen", opts...) 
if err != nil { return nil, err } - x := &externalMonitorListenClient{stream} + x := &monitorListenClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -594,16 +454,16 @@ func (c *externalMonitorClient) Listen(ctx context.Context, in *Key, opts ...grp return x, nil } -type ExternalMonitor_ListenClient interface { +type Monitor_ListenClient interface { Recv() (*Value, error) grpc.ClientStream } -type externalMonitorListenClient struct { +type monitorListenClient struct { grpc.ClientStream } -func (x *externalMonitorListenClient) Recv() (*Value, error) { +func (x *monitorListenClient) Recv() (*Value, error) { m := new(Value) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err @@ -611,102 +471,138 @@ func (x *externalMonitorListenClient) Recv() (*Value, error) { return m, nil } -func (c *externalMonitorClient) Whois(ctx context.Context, in *WhoisRequest, opts ...grpc.CallOption) (*WhoisResponse, error) { +func (c *monitorClient) Whois(ctx context.Context, in *WhoisRequest, opts ...grpc.CallOption) (*WhoisResponse, error) { out := new(WhoisResponse) - err := c.cc.Invoke(ctx, "/ExternalMonitor/Whois", in, out, opts...) + err := c.cc.Invoke(ctx, "/Monitor/Whois", in, out, opts...) if err != nil { return nil, err } return out, nil } -// ExternalMonitorServer is the server API for ExternalMonitor service. -// All implementations must embed UnimplementedExternalMonitorServer +// MonitorServer is the server API for Monitor service. +// All implementations must embed UnimplementedMonitorServer // for forward compatibility -type ExternalMonitorServer interface { - Listen(*Key, ExternalMonitor_ListenServer) error +type MonitorServer interface { + Stream(Monitor_StreamServer) error + Listen(*Key, Monitor_ListenServer) error Whois(context.Context, *WhoisRequest) (*WhoisResponse, error) - mustEmbedUnimplementedExternalMonitorServer() + mustEmbedUnimplementedMonitorServer() } -// UnimplementedExternalMonitorServer must be embedded to have forward compatible implementations. -type UnimplementedExternalMonitorServer struct { +// UnimplementedMonitorServer must be embedded to have forward compatible implementations. +type UnimplementedMonitorServer struct { } -func (UnimplementedExternalMonitorServer) Listen(*Key, ExternalMonitor_ListenServer) error { +func (UnimplementedMonitorServer) Stream(Monitor_StreamServer) error { + return status.Errorf(codes.Unimplemented, "method Stream not implemented") +} +func (UnimplementedMonitorServer) Listen(*Key, Monitor_ListenServer) error { return status.Errorf(codes.Unimplemented, "method Listen not implemented") } -func (UnimplementedExternalMonitorServer) Whois(context.Context, *WhoisRequest) (*WhoisResponse, error) { +func (UnimplementedMonitorServer) Whois(context.Context, *WhoisRequest) (*WhoisResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Whois not implemented") } -func (UnimplementedExternalMonitorServer) mustEmbedUnimplementedExternalMonitorServer() {} +func (UnimplementedMonitorServer) mustEmbedUnimplementedMonitorServer() {} -// UnsafeExternalMonitorServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to ExternalMonitorServer will +// UnsafeMonitorServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to MonitorServer will // result in compilation errors. 
-type UnsafeExternalMonitorServer interface { - mustEmbedUnimplementedExternalMonitorServer() +type UnsafeMonitorServer interface { + mustEmbedUnimplementedMonitorServer() } -func RegisterExternalMonitorServer(s grpc.ServiceRegistrar, srv ExternalMonitorServer) { - s.RegisterService(&ExternalMonitor_ServiceDesc, srv) +func RegisterMonitorServer(s grpc.ServiceRegistrar, srv MonitorServer) { + s.RegisterService(&Monitor_ServiceDesc, srv) } -func _ExternalMonitor_Listen_Handler(srv interface{}, stream grpc.ServerStream) error { +func _Monitor_Stream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(MonitorServer).Stream(&monitorStreamServer{stream}) +} + +type Monitor_StreamServer interface { + Send(*Empty) error + Recv() (*Metric, error) + grpc.ServerStream +} + +type monitorStreamServer struct { + grpc.ServerStream +} + +func (x *monitorStreamServer) Send(m *Empty) error { + return x.ServerStream.SendMsg(m) +} + +func (x *monitorStreamServer) Recv() (*Metric, error) { + m := new(Metric) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Monitor_Listen_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(Key) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(ExternalMonitorServer).Listen(m, &externalMonitorListenServer{stream}) + return srv.(MonitorServer).Listen(m, &monitorListenServer{stream}) } -type ExternalMonitor_ListenServer interface { +type Monitor_ListenServer interface { Send(*Value) error grpc.ServerStream } -type externalMonitorListenServer struct { +type monitorListenServer struct { grpc.ServerStream } -func (x *externalMonitorListenServer) Send(m *Value) error { +func (x *monitorListenServer) Send(m *Value) error { return x.ServerStream.SendMsg(m) } -func _ExternalMonitor_Whois_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Monitor_Whois_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(WhoisRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ExternalMonitorServer).Whois(ctx, in) + return srv.(MonitorServer).Whois(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ExternalMonitor/Whois", + FullMethod: "/Monitor/Whois", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ExternalMonitorServer).Whois(ctx, req.(*WhoisRequest)) + return srv.(MonitorServer).Whois(ctx, req.(*WhoisRequest)) } return interceptor(ctx, in, info, handler) } -// ExternalMonitor_ServiceDesc is the grpc.ServiceDesc for ExternalMonitor service. +// Monitor_ServiceDesc is the grpc.ServiceDesc for Monitor service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) -var ExternalMonitor_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "ExternalMonitor", - HandlerType: (*ExternalMonitorServer)(nil), +var Monitor_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "Monitor", + HandlerType: (*MonitorServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Whois", - Handler: _ExternalMonitor_Whois_Handler, + Handler: _Monitor_Whois_Handler, }, }, Streams: []grpc.StreamDesc{ + { + StreamName: "Stream", + Handler: _Monitor_Stream_Handler, + ServerStreams: true, + ClientStreams: true, + }, { StreamName: "Listen", - Handler: _ExternalMonitor_Listen_Handler, + Handler: _Monitor_Listen_Handler, ServerStreams: true, }, }, diff --git a/pkg/ui/statusdisplay.go b/pkg/ui/statusdisplay.go index 01ed657..354e6ec 100644 --- a/pkg/ui/statusdisplay.go +++ b/pkg/ui/statusdisplay.go @@ -14,7 +14,7 @@ import ( type agent struct { ctx context.Context - uuid string + info *types.WhoisResponse queueParams *common.QueueParams taskStatus *common.TaskStatus queueStatus *common.QueueStatus @@ -42,7 +42,7 @@ func (s *StatusDisplay) makeRows() [][]string { rows = append(rows, header) for _, agent := range s.agents { row := []string{ - agent.uuid, + fmt.Sprintf("[%s] %s", agent.info.Component.Name(), agent.info.Address), fmt.Sprintf("%d/%d", agent.taskStatus.NumRunning, agent.queueParams.ConcurrentProcessLimit), fmt.Sprint(agent.taskStatus.NumQueued), types.QueueStatus(agent.queueStatus.QueueStatus).String(), @@ -52,11 +52,11 @@ func (s *StatusDisplay) makeRows() [][]string { return rows } -func (s *StatusDisplay) AddAgent(ctx context.Context, uuid string) { +func (s *StatusDisplay) AddAgent(ctx context.Context, info *types.WhoisResponse) { s.mutex.Lock() s.agents = append(s.agents, &agent{ ctx: ctx, - uuid: uuid, + info: info, queueParams: &common.QueueParams{}, taskStatus: &common.TaskStatus{}, queueStatus: &common.QueueStatus{}, @@ -68,7 +68,7 @@ func (s *StatusDisplay) AddAgent(ctx context.Context, uuid string) { <-ctx.Done() s.mutex.Lock() for i, a := range s.agents { - if a.uuid == uuid { + if a.info.UUID == info.UUID { s.agents = append(s.agents[:i], s.agents[i+1:]...) 
} } @@ -81,7 +81,7 @@ func (s *StatusDisplay) Update(uuid string, params interface{}) { s.mutex.Lock() var index int for i, a := range s.agents { - if a.uuid == uuid { + if a.info.UUID == uuid { index = i } } diff --git a/proto/types.proto b/proto/types.proto index 896b140..b1a39b2 100644 --- a/proto/types.proto +++ b/proto/types.proto @@ -7,22 +7,15 @@ service Consumerd { rpc Run(RunRequest) returns (RunResponse); } -service Agent { - rpc Compile(CompileRequest) returns (CompileResponse); - rpc SetUsageLimits (UsageLimits) returns (Empty); -} - service Scheduler { rpc Compile(CompileRequest) returns (CompileResponse); rpc ConnectAgent(stream Metadata) returns (stream Empty); rpc ConnectConsumerd(stream Metadata) returns (stream Empty); + rpc StreamTasks(stream CompileResponse) returns (stream CompileRequest); } -service InternalMonitor { +service Monitor { rpc Stream(stream Metric) returns (stream Empty); -} - -service ExternalMonitor { rpc Listen(Key) returns (stream Value); rpc Whois(WhoisRequest) returns (WhoisResponse); } diff --git a/test/integration/integration.go b/test/integration/integration.go index 41b91ff..6461333 100644 --- a/test/integration/integration.go +++ b/test/integration/integration.go @@ -100,10 +100,10 @@ func (tc *TestController) startAgent(cfg *types.UsageLimits) { cc := dial(ctx, tc.schedListener) schedClient := types.NewSchedulerClient(cc) cc = dial(ctx, tc.monListener) - internalMonClient := types.NewInternalMonitorClient(cc) + monClient := types.NewMonitorClient(cc) agentSrv := agent.NewAgentServer(ctx, agent.WithSchedulerClient(schedClient), - agent.WithMonitorClient(internalMonClient), + agent.WithMonitorClient(monClient), agent.WithUsageLimits(cfg), agent.WithToolchainFinders(toolchains.FinderWithOptions{ Finder: testutil.TestToolchainFinder{}, @@ -138,7 +138,7 @@ func (tc *TestController) startScheduler() { srv := servers.NewServer(ctx) cc := dial(ctx, tc.monListener) - internalMonClient := types.NewInternalMonitorClient(cc) + monClient := types.NewMonitorClient(cc) cc = dial(ctx, tc.cacheListener) cacheClient := types.NewCacheClient(cc) @@ -147,7 +147,7 @@ func (tc *TestController) startScheduler() { scheduler.WithSchedulerOptions( scheduler.WithAgentDialer(tc), ), - scheduler.WithMonitorClient(internalMonClient), + scheduler.WithMonitorClient(monClient), scheduler.WithCacheClient(cacheClient), ) types.RegisterSchedulerServer(srv, sc) @@ -173,25 +173,13 @@ func (tc *TestController) startMonitor() { lg := meta.Log(ctx) tc.monListener = bufconn.Listen(bufSize) - internalSrv := servers.NewServer(ctx) - externalSrv := servers.NewServer(ctx) - extListener, err := net.Listen("tcp", "127.0.0.1:9097") - if err != nil { - panic(err) - } + srv := servers.NewServer(ctx) mon := monitor.NewMonitorServer(ctx, monitor.InMemoryStoreCreator) - types.RegisterInternalMonitorServer(internalSrv, mon) - types.RegisterExternalMonitorServer(externalSrv, mon) - - go func() { - if err := internalSrv.Serve(tc.monListener); err != nil { - lg.Info(err) - } - }() + types.RegisterMonitorServer(srv, mon) go func() { - if err := externalSrv.Serve(extListener); err != nil { + if err := srv.Serve(tc.monListener); err != nil { lg.Info(err) } }() @@ -211,7 +199,7 @@ func (tc *TestController) startCache() { lg := meta.Log(ctx) cc := dial(ctx, tc.monListener) - internalMonClient := types.NewInternalMonitorClient(cc) + internalMonClient := types.NewMonitorClient(cc) tc.cacheListener = bufconn.Listen(bufSize) srv := servers.NewServer(ctx) @@ -268,7 +256,7 @@ func (tc *TestController) 
startConsumerd(cfg *types.UsageLimits) { cc := dial(ctx, tc.schedListener) schedulerClient := types.NewSchedulerClient(cc) cc = dial(ctx, tc.monListener) - monitorClient := types.NewInternalMonitorClient(cc) + monitorClient := types.NewMonitorClient(cc) d := consumerd.NewConsumerdServer(ctx, consumerd.WithToolchainFinders(toolchains.FinderWithOptions{ @@ -328,7 +316,7 @@ func (tc *TestController) Start(ops TestOptions) { len(ops.Clients) + 1 /*scheduler*/ + 1 /*cache*/) - extClient := types.NewExternalMonitorClient(cc) + extClient := types.NewMonitorClient(cc) listener := metrics.NewListener(tc.ctx, extClient) listener.OnProviderAdded(func(pctx context.Context, uuid string) { resp, _ := extClient.Whois(tc.ctx, &types.WhoisRequest{ From ded34467c645441d33108e22f321f06da0b8caae Mon Sep 17 00:00:00 2001 From: cobalt77 <8194899+cobalt77@users.noreply.github.com> Date: Sun, 14 Mar 2021 22:58:14 -0400 Subject: [PATCH 02/12] work in progress --- Makefile | 7 +- cmd/kcctl/commands/monitor.go | 20 +- cmd/kcctl/commands/status.go | 11 +- cmd/kubecc/components/agent/agent.go | 3 +- cmd/kubecc/components/consumerd/consumerd.go | 9 +- internal/testutil/testpb.pb.go | 143 -- internal/testutil/testutil.pb.go | 397 ++++ internal/testutil/testutil.proto | 30 + ...{testpb_grpc.pb.go => testutil_grpc.pb.go} | 14 +- pkg/apps/agent/server.go | 140 +- pkg/apps/agent/taskstream.go | 89 - pkg/apps/cachesrv/metrics/metrics.go | 40 - pkg/apps/cachesrv/metrics/metrics_gen.go | 544 ------ pkg/apps/cachesrv/metrics/metrics_gen_test.go | 462 ----- pkg/apps/cachesrv/server.go | 14 +- pkg/apps/consumerd/metrics/metrics.go | 11 - pkg/apps/consumerd/metrics/metrics_gen.go | 110 -- .../consumerd/metrics/metrics_gen_test.go | 123 -- pkg/apps/consumerd/server.go | 136 +- pkg/apps/monitor/metrics/meta.go | 34 - pkg/apps/monitor/metrics/meta_gen.go | 782 -------- pkg/apps/monitor/metrics/meta_gen_test.go | 462 ----- pkg/apps/monitor/metrics/metrics.go | 19 - pkg/apps/monitor/metrics/metrics_gen.go | 213 --- pkg/apps/monitor/metrics/metrics_gen_test.go | 236 --- pkg/apps/monitor/monitor_test.go | 73 +- pkg/apps/monitor/prometheus.go | 38 +- pkg/apps/monitor/server.go | 76 +- pkg/apps/monitor/store.go | 25 +- pkg/apps/monitor/store_test.go | 49 +- pkg/apps/monitor/test/keys.go | 34 - pkg/apps/monitor/test/keys_gen.go | 419 ----- pkg/apps/monitor/test/keys_gen_test.go | 462 ----- pkg/apps/monitor/test/types.go | 26 - pkg/apps/scheduler/broker.go | 255 +++ pkg/apps/scheduler/filter.go | 251 +++ pkg/apps/scheduler/metrics/metrics.go | 74 - pkg/apps/scheduler/metrics/metrics_gen.go | 1174 ------------ .../scheduler/metrics/metrics_gen_test.go | 1027 ---------- pkg/apps/scheduler/scheduler.go | 8 +- pkg/apps/scheduler/server.go | 168 +- pkg/apps/scheduler/types.go | 28 +- pkg/{metrics => clients}/keyedbuffer.go | 19 +- pkg/{metrics => clients}/listener.go | 53 +- pkg/{metrics => clients}/provider.go | 103 +- pkg/metrics/common/common.go | 50 - pkg/metrics/common/common_gen.go | 499 ----- pkg/metrics/common/common_gen_test.go | 462 ----- pkg/metrics/completers.go | 9 + pkg/metrics/metrics.pb.go | 1650 +++++++++++++++++ pkg/metrics/metrics.proto | 116 ++ pkg/metrics/noop.go | 5 +- pkg/metrics/types.go | 48 +- pkg/run/executor.go | 52 +- pkg/run/run_test.go | 3 +- pkg/run/workerpool.go | 2 + pkg/storage/chain.go | 4 +- pkg/storage/provider.go | 4 +- pkg/storage/s3.go | 6 +- pkg/storage/volatile.go | 6 +- pkg/toolchains/metadata.go | 45 + pkg/types/enum.go | 5 - pkg/types/marshal.go | 7 - pkg/types/types.pb.go | 1517 ++++++--------- 
{proto => pkg/types}/types.proto | 77 +- pkg/types/types_grpc.pb.go | 225 +-- pkg/ui/statusdisplay.go | 25 +- proto/testpb.proto | 12 - test/integration/integration.go | 21 +- test/integration/integration_test.go | 5 +- 70 files changed, 4024 insertions(+), 9242 deletions(-) delete mode 100644 internal/testutil/testpb.pb.go create mode 100644 internal/testutil/testutil.pb.go create mode 100644 internal/testutil/testutil.proto rename internal/testutil/{testpb_grpc.pb.go => testutil_grpc.pb.go} (95%) delete mode 100644 pkg/apps/agent/taskstream.go delete mode 100644 pkg/apps/cachesrv/metrics/metrics.go delete mode 100644 pkg/apps/cachesrv/metrics/metrics_gen.go delete mode 100644 pkg/apps/cachesrv/metrics/metrics_gen_test.go delete mode 100644 pkg/apps/consumerd/metrics/metrics.go delete mode 100644 pkg/apps/consumerd/metrics/metrics_gen.go delete mode 100644 pkg/apps/consumerd/metrics/metrics_gen_test.go delete mode 100644 pkg/apps/monitor/metrics/meta.go delete mode 100644 pkg/apps/monitor/metrics/meta_gen.go delete mode 100644 pkg/apps/monitor/metrics/meta_gen_test.go delete mode 100644 pkg/apps/monitor/metrics/metrics.go delete mode 100644 pkg/apps/monitor/metrics/metrics_gen.go delete mode 100644 pkg/apps/monitor/metrics/metrics_gen_test.go delete mode 100644 pkg/apps/monitor/test/keys.go delete mode 100644 pkg/apps/monitor/test/keys_gen.go delete mode 100644 pkg/apps/monitor/test/keys_gen_test.go delete mode 100644 pkg/apps/monitor/test/types.go create mode 100644 pkg/apps/scheduler/broker.go create mode 100644 pkg/apps/scheduler/filter.go delete mode 100644 pkg/apps/scheduler/metrics/metrics.go delete mode 100644 pkg/apps/scheduler/metrics/metrics_gen.go delete mode 100644 pkg/apps/scheduler/metrics/metrics_gen_test.go rename pkg/{metrics => clients}/keyedbuffer.go (75%) rename pkg/{metrics => clients}/listener.go (79%) rename pkg/{metrics => clients}/provider.go (54%) delete mode 100644 pkg/metrics/common/common.go delete mode 100644 pkg/metrics/common/common_gen.go delete mode 100644 pkg/metrics/common/common_gen_test.go create mode 100644 pkg/metrics/completers.go create mode 100644 pkg/metrics/metrics.pb.go create mode 100644 pkg/metrics/metrics.proto create mode 100644 pkg/toolchains/metadata.go rename {proto => pkg/types}/types.proto (77%) delete mode 100644 proto/testpb.proto diff --git a/Makefile b/Makefile index 507ed4f..d23af46 100644 --- a/Makefile +++ b/Makefile @@ -61,11 +61,14 @@ manifests: GOROOT=$(shell $(GO) env GOROOT) controller-gen $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases +module_opt = module=github.com/cobalt77/kubecc + # Protobuf code generators .PHONY: proto proto: - protoc proto/types.proto --go_out=. --go-grpc_out=. - protoc proto/testpb.proto --go_out=. --go-grpc_out=. + protoc pkg/types/types.proto -I. --go_out=. --go_opt=$(module_opt) --go-grpc_out=. --go-grpc_opt=$(module_opt) + protoc internal/testutil/testutil.proto -I. --go_out=. --go_opt=$(module_opt) --go-grpc_out=. --go-grpc_opt=$(module_opt) + protoc pkg/metrics/metrics.proto -I. --go_out=. 
--go_opt=$(module_opt) # Code generating, formatting, vetting .PHONY: fmt vet generate diff --git a/cmd/kcctl/commands/monitor.go b/cmd/kcctl/commands/monitor.go index 3a50f67..6efb74f 100644 --- a/cmd/kcctl/commands/monitor.go +++ b/cmd/kcctl/commands/monitor.go @@ -1,17 +1,15 @@ package commands import ( - "bytes" "encoding/json" "strings" - monitormetrics "github.com/cobalt77/kubecc/pkg/apps/monitor/metrics" + "github.com/cobalt77/kubecc/pkg/clients" "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/servers" "github.com/cobalt77/kubecc/pkg/types" "github.com/cobalt77/kubecc/pkg/ui" "github.com/spf13/cobra" - "github.com/tinylib/msgp/msgp" ) // monitorCmd represents the monitor command. @@ -20,19 +18,13 @@ var monitorCmd = &cobra.Command{ Short: "Commands to interact with the monitor", } -func onValueChanged(tb *ui.TextBox) func(*monitormetrics.StoreContents) { - return func(contents *monitormetrics.StoreContents) { +func onValueChanged(tb *ui.TextBox) func(*metrics.StoreContents) { + return func(contents *metrics.StoreContents) { printable := map[string]interface{}{} for _, bucket := range contents.Buckets { jsonContents := map[string]string{} for k, v := range bucket.Data { - buf := new(bytes.Buffer) - _, err := msgp.UnmarshalAsJSON(buf, v) - if err != nil { - jsonContents[k] = "" - } else { - jsonContents[k] = buf.String() - } + jsonContents[k] = v.String() } printable[bucket.Name] = jsonContents } @@ -57,10 +49,10 @@ var listenCmd = &cobra.Command{ cliLog.Fatal(err) } client := types.NewMonitorClient(cc) - listener := metrics.NewListener(cliContext, client) + listener := clients.NewListener(cliContext, client) tb := &ui.TextBox{} - listener.OnValueChanged(monitormetrics.MetaBucket, onValueChanged(tb)). + listener.OnValueChanged(metrics.MetaBucket, onValueChanged(tb)). 
OrExpired(func() metrics.RetryOptions { tb.SetText("-- KEY EXPIRED -- \n\n" + tb.Paragraph.Text) return metrics.NoRetry diff --git a/cmd/kcctl/commands/status.go b/cmd/kcctl/commands/status.go index 5b430ec..7650c91 100644 --- a/cmd/kcctl/commands/status.go +++ b/cmd/kcctl/commands/status.go @@ -4,10 +4,10 @@ import ( "context" "github.com/cobalt77/kubecc/internal/logkc" + "github.com/cobalt77/kubecc/pkg/clients" "github.com/cobalt77/kubecc/pkg/identity" "github.com/cobalt77/kubecc/pkg/meta" "github.com/cobalt77/kubecc/pkg/metrics" - "github.com/cobalt77/kubecc/pkg/metrics/common" "github.com/cobalt77/kubecc/pkg/servers" "github.com/cobalt77/kubecc/pkg/types" "github.com/cobalt77/kubecc/pkg/ui" @@ -40,7 +40,7 @@ to quickly create a Cobra application.`, ))), ) client := types.NewMonitorClient(cc) - listener := metrics.NewListener(ctx, client) + listener := clients.NewListener(ctx, client) display := ui.NewStatusDisplay() listener.OnProviderAdded(func(pctx context.Context, uuid string) { @@ -55,13 +55,10 @@ to quickly create a Cobra application.`, } display.AddAgent(pctx, info) - listener.OnValueChanged(uuid, func(qp *common.QueueParams) { + listener.OnValueChanged(uuid, func(qp *metrics.UsageLimits) { display.Update(uuid, qp) }) - listener.OnValueChanged(uuid, func(qs *common.QueueStatus) { - display.Update(uuid, qs) - }) - listener.OnValueChanged(uuid, func(ts *common.TaskStatus) { + listener.OnValueChanged(uuid, func(ts *metrics.TaskStatus) { display.Update(uuid, ts) }) <-pctx.Done() diff --git a/cmd/kubecc/components/agent/agent.go b/cmd/kubecc/components/agent/agent.go index d0c4fde..ea6cdbd 100644 --- a/cmd/kubecc/components/agent/agent.go +++ b/cmd/kubecc/components/agent/agent.go @@ -11,6 +11,7 @@ import ( "github.com/cobalt77/kubecc/pkg/host" "github.com/cobalt77/kubecc/pkg/identity" "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/servers" "github.com/cobalt77/kubecc/pkg/toolchains" "github.com/cobalt77/kubecc/pkg/tracing" @@ -52,7 +53,7 @@ func run(cmd *cobra.Command, args []string) { monitorClient := types.NewMonitorClient(monitorCC) a := agent.NewAgentServer(ctx, - agent.WithUsageLimits(&types.UsageLimits{ + agent.WithUsageLimits(&metrics.UsageLimits{ ConcurrentProcessLimit: int32(conf.UsageLimits.ConcurrentProcessLimit), QueuePressureMultiplier: conf.UsageLimits.QueuePressureMultiplier, QueueRejectMultiplier: conf.UsageLimits.QueueRejectMultiplier, diff --git a/cmd/kubecc/components/consumerd/consumerd.go b/cmd/kubecc/components/consumerd/consumerd.go index 6fd78c5..52ae58e 100644 --- a/cmd/kubecc/components/consumerd/consumerd.go +++ b/cmd/kubecc/components/consumerd/consumerd.go @@ -13,6 +13,7 @@ import ( "github.com/cobalt77/kubecc/pkg/host" "github.com/cobalt77/kubecc/pkg/identity" "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/servers" "github.com/cobalt77/kubecc/pkg/toolchains" "github.com/cobalt77/kubecc/pkg/tracing" @@ -57,7 +58,7 @@ func run(cmd *cobra.Command, args []string) { monitorClient := types.NewMonitorClient(monitorCC) d := consumerd.NewConsumerdServer(ctx, - consumerd.WithUsageLimits(&types.UsageLimits{ + consumerd.WithUsageLimits(&metrics.UsageLimits{ ConcurrentProcessLimit: int32(conf.UsageLimits.ConcurrentProcessLimit), QueuePressureMultiplier: conf.UsageLimits.QueuePressureMultiplier, QueueRejectMultiplier: conf.UsageLimits.QueueRejectMultiplier, @@ -75,9 +76,9 @@ func run(cmd *cobra.Command, args []string) { 
consumerd.WithMonitorClient(monitorClient), ) - mgr := servers.NewStreamManager(ctx, d) - go d.StartMetricsProvider() - go mgr.Run() + // mgr := servers.NewStreamManager(ctx, d) + // go d.StartMetricsProvider() + // go mgr.Run() listener, err := net.Listen("tcp", conf.ListenAddress) if err != nil { diff --git a/internal/testutil/testpb.pb.go b/internal/testutil/testpb.pb.go deleted file mode 100644 index cf991d7..0000000 --- a/internal/testutil/testpb.pb.go +++ /dev/null @@ -1,143 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.12.3 -// source: proto/testpb.proto - -package testutil - -import ( - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -type Baz struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *Baz) Reset() { - *x = Baz{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_testpb_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Baz) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Baz) ProtoMessage() {} - -func (x *Baz) ProtoReflect() protoreflect.Message { - mi := &file_proto_testpb_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Baz.ProtoReflect.Descriptor instead. 
-func (*Baz) Descriptor() ([]byte, []int) { - return file_proto_testpb_proto_rawDescGZIP(), []int{0} -} - -var File_proto_testpb_proto protoreflect.FileDescriptor - -var file_proto_testpb_proto_rawDesc = []byte{ - 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x70, 0x62, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x05, 0x0a, 0x03, 0x42, 0x61, 0x7a, 0x32, 0x18, 0x0a, 0x03, 0x46, - 0x6f, 0x6f, 0x12, 0x11, 0x0a, 0x03, 0x46, 0x6f, 0x6f, 0x12, 0x04, 0x2e, 0x42, 0x61, 0x7a, 0x1a, - 0x04, 0x2e, 0x42, 0x61, 0x7a, 0x32, 0x1c, 0x0a, 0x03, 0x42, 0x61, 0x72, 0x12, 0x15, 0x0a, 0x03, - 0x42, 0x61, 0x72, 0x12, 0x04, 0x2e, 0x42, 0x61, 0x7a, 0x1a, 0x04, 0x2e, 0x42, 0x61, 0x7a, 0x28, - 0x01, 0x30, 0x01, 0x42, 0x13, 0x5a, 0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, - 0x74, 0x65, 0x73, 0x74, 0x75, 0x74, 0x69, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_proto_testpb_proto_rawDescOnce sync.Once - file_proto_testpb_proto_rawDescData = file_proto_testpb_proto_rawDesc -) - -func file_proto_testpb_proto_rawDescGZIP() []byte { - file_proto_testpb_proto_rawDescOnce.Do(func() { - file_proto_testpb_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_testpb_proto_rawDescData) - }) - return file_proto_testpb_proto_rawDescData -} - -var file_proto_testpb_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_proto_testpb_proto_goTypes = []interface{}{ - (*Baz)(nil), // 0: Baz -} -var file_proto_testpb_proto_depIdxs = []int32{ - 0, // 0: Foo.Foo:input_type -> Baz - 0, // 1: Bar.Bar:input_type -> Baz - 0, // 2: Foo.Foo:output_type -> Baz - 0, // 3: Bar.Bar:output_type -> Baz - 2, // [2:4] is the sub-list for method output_type - 0, // [0:2] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_proto_testpb_proto_init() } -func file_proto_testpb_proto_init() { - if File_proto_testpb_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_proto_testpb_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Baz); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_proto_testpb_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 2, - }, - GoTypes: file_proto_testpb_proto_goTypes, - DependencyIndexes: file_proto_testpb_proto_depIdxs, - MessageInfos: file_proto_testpb_proto_msgTypes, - }.Build() - File_proto_testpb_proto = out.File - file_proto_testpb_proto_rawDesc = nil - file_proto_testpb_proto_goTypes = nil - file_proto_testpb_proto_depIdxs = nil -} diff --git a/internal/testutil/testutil.pb.go b/internal/testutil/testutil.pb.go new file mode 100644 index 0000000..d370613 --- /dev/null +++ b/internal/testutil/testutil.pb.go @@ -0,0 +1,397 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.25.0 +// protoc v3.12.3 +// source: internal/testutil/testutil.proto + +package testutil + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type Baz struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Baz) Reset() { + *x = Baz{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_testutil_testutil_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Baz) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Baz) ProtoMessage() {} + +func (x *Baz) ProtoReflect() protoreflect.Message { + mi := &file_internal_testutil_testutil_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Baz.ProtoReflect.Descriptor instead. +func (*Baz) Descriptor() ([]byte, []int) { + return file_internal_testutil_testutil_proto_rawDescGZIP(), []int{0} +} + +type Test1 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Counter int32 `protobuf:"varint,1,opt,name=Counter,proto3" json:"Counter,omitempty"` +} + +func (x *Test1) Reset() { + *x = Test1{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_testutil_testutil_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Test1) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Test1) ProtoMessage() {} + +func (x *Test1) ProtoReflect() protoreflect.Message { + mi := &file_internal_testutil_testutil_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Test1.ProtoReflect.Descriptor instead. 
+func (*Test1) Descriptor() ([]byte, []int) { + return file_internal_testutil_testutil_proto_rawDescGZIP(), []int{1} +} + +func (x *Test1) GetCounter() int32 { + if x != nil { + return x.Counter + } + return 0 +} + +type Test2 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value string `protobuf:"bytes,1,opt,name=Value,proto3" json:"Value,omitempty"` +} + +func (x *Test2) Reset() { + *x = Test2{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_testutil_testutil_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Test2) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Test2) ProtoMessage() {} + +func (x *Test2) ProtoReflect() protoreflect.Message { + mi := &file_internal_testutil_testutil_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Test2.ProtoReflect.Descriptor instead. +func (*Test2) Descriptor() ([]byte, []int) { + return file_internal_testutil_testutil_proto_rawDescGZIP(), []int{2} +} + +func (x *Test2) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +type Test3 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Counter int32 `protobuf:"varint,1,opt,name=counter,proto3" json:"counter,omitempty"` +} + +func (x *Test3) Reset() { + *x = Test3{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_testutil_testutil_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Test3) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Test3) ProtoMessage() {} + +func (x *Test3) ProtoReflect() protoreflect.Message { + mi := &file_internal_testutil_testutil_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Test3.ProtoReflect.Descriptor instead. +func (*Test3) Descriptor() ([]byte, []int) { + return file_internal_testutil_testutil_proto_rawDescGZIP(), []int{3} +} + +func (x *Test3) GetCounter() int32 { + if x != nil { + return x.Counter + } + return 0 +} + +type Test4 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value string `protobuf:"bytes,1,opt,name=Value,proto3" json:"Value,omitempty"` +} + +func (x *Test4) Reset() { + *x = Test4{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_testutil_testutil_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Test4) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Test4) ProtoMessage() {} + +func (x *Test4) ProtoReflect() protoreflect.Message { + mi := &file_internal_testutil_testutil_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Test4.ProtoReflect.Descriptor instead. 
+func (*Test4) Descriptor() ([]byte, []int) { + return file_internal_testutil_testutil_proto_rawDescGZIP(), []int{4} +} + +func (x *Test4) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +var File_internal_testutil_testutil_proto protoreflect.FileDescriptor + +var file_internal_testutil_testutil_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x75, + 0x74, 0x69, 0x6c, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x08, 0x74, 0x65, 0x73, 0x74, 0x75, 0x74, 0x69, 0x6c, 0x22, 0x05, 0x0a, 0x03, + 0x42, 0x61, 0x7a, 0x22, 0x21, 0x0a, 0x05, 0x54, 0x65, 0x73, 0x74, 0x31, 0x12, 0x18, 0x0a, 0x07, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x1d, 0x0a, 0x05, 0x54, 0x65, 0x73, 0x74, 0x32, 0x12, + 0x14, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x05, 0x54, 0x65, 0x73, 0x74, 0x33, 0x12, 0x18, + 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x1d, 0x0a, 0x05, 0x54, 0x65, 0x73, 0x74, + 0x34, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x32, 0x2a, 0x0a, 0x03, 0x46, 0x6f, 0x6f, 0x12, 0x23, + 0x0a, 0x03, 0x46, 0x6f, 0x6f, 0x12, 0x0d, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x75, 0x74, 0x69, 0x6c, + 0x2e, 0x42, 0x61, 0x7a, 0x1a, 0x0d, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x75, 0x74, 0x69, 0x6c, 0x2e, + 0x42, 0x61, 0x7a, 0x32, 0x2e, 0x0a, 0x03, 0x42, 0x61, 0x72, 0x12, 0x27, 0x0a, 0x03, 0x42, 0x61, + 0x72, 0x12, 0x0d, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x42, 0x61, 0x7a, + 0x1a, 0x0d, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x42, 0x61, 0x7a, 0x28, + 0x01, 0x30, 0x01, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x6f, 0x62, 0x61, 0x6c, 0x74, 0x37, 0x37, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x63, + 0x63, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x75, + 0x74, 0x69, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_testutil_testutil_proto_rawDescOnce sync.Once + file_internal_testutil_testutil_proto_rawDescData = file_internal_testutil_testutil_proto_rawDesc +) + +func file_internal_testutil_testutil_proto_rawDescGZIP() []byte { + file_internal_testutil_testutil_proto_rawDescOnce.Do(func() { + file_internal_testutil_testutil_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_testutil_testutil_proto_rawDescData) + }) + return file_internal_testutil_testutil_proto_rawDescData +} + +var file_internal_testutil_testutil_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_internal_testutil_testutil_proto_goTypes = []interface{}{ + (*Baz)(nil), // 0: testutil.Baz + (*Test1)(nil), // 1: testutil.Test1 + (*Test2)(nil), // 2: testutil.Test2 + (*Test3)(nil), // 3: testutil.Test3 + (*Test4)(nil), // 4: testutil.Test4 +} +var file_internal_testutil_testutil_proto_depIdxs = []int32{ + 0, // 0: testutil.Foo.Foo:input_type -> testutil.Baz + 0, // 1: testutil.Bar.Bar:input_type -> testutil.Baz + 0, // 2: testutil.Foo.Foo:output_type -> testutil.Baz + 0, // 3: testutil.Bar.Bar:output_type -> testutil.Baz + 2, // [2:4] is the sub-list for method 
output_type + 0, // [0:2] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_internal_testutil_testutil_proto_init() } +func file_internal_testutil_testutil_proto_init() { + if File_internal_testutil_testutil_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_testutil_testutil_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Baz); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_testutil_testutil_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Test1); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_testutil_testutil_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Test2); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_testutil_testutil_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Test3); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_testutil_testutil_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Test4); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_testutil_testutil_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_internal_testutil_testutil_proto_goTypes, + DependencyIndexes: file_internal_testutil_testutil_proto_depIdxs, + MessageInfos: file_internal_testutil_testutil_proto_msgTypes, + }.Build() + File_internal_testutil_testutil_proto = out.File + file_internal_testutil_testutil_proto_rawDesc = nil + file_internal_testutil_testutil_proto_goTypes = nil + file_internal_testutil_testutil_proto_depIdxs = nil +} diff --git a/internal/testutil/testutil.proto b/internal/testutil/testutil.proto new file mode 100644 index 0000000..3cf7d81 --- /dev/null +++ b/internal/testutil/testutil.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; +option go_package = "github.com/cobalt77/kubecc/internal/testutil"; + +package testutil; + +message Baz {} + +service Foo { + rpc Foo(Baz) returns (Baz); +} + +service Bar { + rpc Bar(stream Baz) returns (stream Baz); +} + +message Test1 { + int32 Counter = 1; +} + +message Test2 { + string Value = 1; +} + +message Test3 { + int32 counter = 1; +} + +message Test4 { + string Value = 1; +} \ No newline at end of file diff --git a/internal/testutil/testpb_grpc.pb.go b/internal/testutil/testutil_grpc.pb.go similarity index 95% rename from internal/testutil/testpb_grpc.pb.go rename to internal/testutil/testutil_grpc.pb.go index 2c72ed8..b759132 100644 --- a/internal/testutil/testpb_grpc.pb.go +++ b/internal/testutil/testutil_grpc.pb.go @@ -31,7 +31,7 @@ func NewFooClient(cc grpc.ClientConnInterface) FooClient { func (c *fooClient) Foo(ctx context.Context, in *Baz, opts ...grpc.CallOption) 
(*Baz, error) { out := new(Baz) - err := c.cc.Invoke(ctx, "/Foo/Foo", in, out, opts...) + err := c.cc.Invoke(ctx, "/testutil.Foo/Foo", in, out, opts...) if err != nil { return nil, err } @@ -76,7 +76,7 @@ func _Foo_Foo_Handler(srv interface{}, ctx context.Context, dec func(interface{} } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/Foo/Foo", + FullMethod: "/testutil.Foo/Foo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(FooServer).Foo(ctx, req.(*Baz)) @@ -88,7 +88,7 @@ func _Foo_Foo_Handler(srv interface{}, ctx context.Context, dec func(interface{} // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) var Foo_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "Foo", + ServiceName: "testutil.Foo", HandlerType: (*FooServer)(nil), Methods: []grpc.MethodDesc{ { @@ -97,7 +97,7 @@ var Foo_ServiceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "proto/testpb.proto", + Metadata: "internal/testutil/testutil.proto", } // BarClient is the client API for Bar service. @@ -116,7 +116,7 @@ func NewBarClient(cc grpc.ClientConnInterface) BarClient { } func (c *barClient) Bar(ctx context.Context, opts ...grpc.CallOption) (Bar_BarClient, error) { - stream, err := c.cc.NewStream(ctx, &Bar_ServiceDesc.Streams[0], "/Bar/Bar", opts...) + stream, err := c.cc.NewStream(ctx, &Bar_ServiceDesc.Streams[0], "/testutil.Bar/Bar", opts...) if err != nil { return nil, err } @@ -204,7 +204,7 @@ func (x *barBarServer) Recv() (*Baz, error) { // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) var Bar_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "Bar", + ServiceName: "testutil.Bar", HandlerType: (*BarServer)(nil), Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ @@ -215,5 +215,5 @@ var Bar_ServiceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, - Metadata: "proto/testpb.proto", + Metadata: "internal/testutil/testutil.proto", } diff --git a/pkg/apps/agent/server.go b/pkg/apps/agent/server.go index 2d4dc49..a7cd678 100644 --- a/pkg/apps/agent/server.go +++ b/pkg/apps/agent/server.go @@ -2,13 +2,11 @@ package agent import ( "context" - "errors" - "io" "time" + "github.com/cobalt77/kubecc/pkg/clients" "github.com/cobalt77/kubecc/pkg/meta" "github.com/cobalt77/kubecc/pkg/metrics" - "github.com/cobalt77/kubecc/pkg/metrics/common" "github.com/cobalt77/kubecc/pkg/run" "github.com/cobalt77/kubecc/pkg/servers" "github.com/cobalt77/kubecc/pkg/toolchains" @@ -16,6 +14,7 @@ import ( "github.com/cobalt77/kubecc/pkg/util" "go.uber.org/zap" "google.golang.org/grpc" + "google.golang.org/grpc/metadata" ) type AgentServer struct { @@ -25,8 +24,8 @@ type AgentServer struct { executor run.Executor lg *zap.SugaredLogger tcStore *toolchains.Store + tcRunStore *run.ToolchainRunnerStore metricsProvider metrics.Provider - taskStreamMgr *TaskStreamManager } type AgentServerOptions struct { @@ -34,7 +33,7 @@ type AgentServerOptions struct { toolchainRunners []run.StoreAddFunc schedulerClient types.SchedulerClient monitorClient types.MonitorClient - usageLimits *types.UsageLimits + usageLimits *metrics.UsageLimits } type agentServerOption func(*AgentServerOptions) @@ -69,7 +68,7 @@ func WithMonitorClient(client types.MonitorClient) agentServerOption { } } -func WithUsageLimits(usageLimits *types.UsageLimits) agentServerOption { +func WithUsageLimits(usageLimits *metrics.UsageLimits) agentServerOption { return func(o 
*AgentServerOptions) { o.usageLimits = usageLimits } @@ -93,51 +92,38 @@ func NewAgentServer( lg: meta.Log(ctx), tcStore: toolchains.Aggregate(ctx, options.toolchainFinders...), executor: run.NewQueuedExecutor(run.WithUsageLimits(options.usageLimits)), + tcRunStore: runStore, } - srv.taskStreamMgr = &TaskStreamManager{ - srvContext: ctx, - lg: meta.Log(ctx), - schedulerClient: options.schedulerClient, - tcStore: srv.tcStore, - tcRunStore: runStore, - executor: srv.executor, - } - mgr := servers.NewStreamManager(ctx, srv.taskStreamMgr) + + mgr := servers.NewStreamManager(ctx, srv) go mgr.Run() return srv } -func (s *AgentServer) postQueueParams() { - qp := &common.QueueParams{} - s.executor.CompleteQueueParams(qp) +func (s *AgentServer) postUsageLimits() { + qp := &metrics.UsageLimits{} + s.executor.CompleteUsageLimits(qp) s.metricsProvider.Post(qp) } func (s *AgentServer) postTaskStatus() { - ts := &common.TaskStatus{} + ts := &metrics.TaskStatus{} s.executor.CompleteTaskStatus(ts) s.metricsProvider.Post(ts) } -func (s *AgentServer) postQueueStatus() { - qs := &common.QueueStatus{} - s.executor.CompleteQueueStatus(qs) - s.metricsProvider.Post(qs) -} - func (s *AgentServer) StartMetricsProvider() { s.lg.Info("Starting metrics provider") - s.metricsProvider = metrics.NewMonitorProvider(s.srvContext, s.monitorClient, - metrics.Buffered|metrics.Discard) - s.postQueueParams() + s.metricsProvider = clients.NewMonitorProvider(s.srvContext, s.monitorClient, + clients.Buffered|clients.Discard) + s.postUsageLimits() fastTimer := util.NewJitteredTimer(time.Second/6, 2.0) go func() { for { <-fastTimer s.postTaskStatus() - s.postQueueStatus() } }() @@ -145,48 +131,100 @@ func (s *AgentServer) StartMetricsProvider() { go func() { for { <-slowTimer - s.postQueueParams() + s.postUsageLimits() } }() } func (s *AgentServer) SetUsageLimits( ctx context.Context, - usageLimits *types.UsageLimits, + usageLimits *metrics.UsageLimits, ) (*types.Empty, error) { s.executor.(*run.QueuedExecutor).SetUsageLimits(usageLimits) s.usageLimits = usageLimits - s.postQueueParams() + s.postUsageLimits() return &types.Empty{}, nil } func (s *AgentServer) HandleStream(stream grpc.ClientStream) error { - s.lg.Info("Streaming metadata to scheduler") - defer s.lg.Warn("Stream closed") - err := stream.SendMsg(&types.Metadata{ - Toolchains: &types.Toolchains{ - Items: s.tcStore.ItemsList(), - }, - }) - if err != nil { - if errors.Is(err, io.EOF) { - return stream.RecvMsg(nil) + s.lg.Info("Streaming tasks from scheduler") + defer s.lg.Warn("Task stream closed") + streamCtx := stream.Context() + for { + compileRequest := &types.CompileRequest{} + err := stream.RecvMsg(compileRequest) + if err != nil { + return err } - s.lg.Error(err) - return err - } - select { - case err := <-servers.EmptyServerStreamDone(s.srvContext, stream): - return err - case <-s.srvContext.Done(): - return nil + go func() { + err := stream.SendMsg(s.compile(streamCtx, compileRequest)) + if err != nil { + s.lg.With( + zap.Error(err), + ).Error("Error sending response to scheduler") + } + }() } } func (s *AgentServer) TryConnect() (grpc.ClientStream, error) { - return s.schedulerClient.ConnectAgent(s.srvContext) + tcs := s.tcStore.ItemsList() + md := toolchains.CreateMetadata(&metrics.Toolchains{ + Items: tcs, + }) + ctx := metadata.NewOutgoingContext(s.srvContext, md) + return s.schedulerClient.StreamIncomingTasks(ctx) } func (s *AgentServer) Target() string { return "scheduler" } + +func (s *AgentServer) compile( + ctx context.Context, + req 
*types.CompileRequest, +) *types.CompileResponse { + makeInternalErr := func(err string) *types.CompileResponse { + return &types.CompileResponse{ + RequestID: req.RequestID, + CompileResult: types.CompileResponse_InternalError, + Data: &types.CompileResponse_Error{ + Error: err, + }, + } + } + + s.lg.Debug("Handling compile request") + if err := meta.CheckContext(ctx); err != nil { + return makeInternalErr(err.Error()) + } + + span, sctx, err := servers.StartSpanFromServer(ctx, "compile") + if err != nil { + s.lg.Error(err) + } else { + defer span.Finish() + } + + runner, err := s.tcRunStore.Get(req.GetToolchain().Kind) + if err != nil { + return makeInternalErr("No toolchain runner available") + } + + tc, err := s.tcStore.TryMatch(req.GetToolchain()) + if err != nil { + return makeInternalErr(err.Error()) + } + + // Swap remote toolchain with the local toolchain in case the executable + // path is different locally + req.Toolchain = tc + resp, err := runner.RecvRemote().Run(run.Contexts{ + ServerContext: s.srvContext, + ClientContext: sctx, + }, s.executor, req) + if err != nil { + return makeInternalErr(err.Error()) + } + return resp.(*types.CompileResponse) +} diff --git a/pkg/apps/agent/taskstream.go b/pkg/apps/agent/taskstream.go deleted file mode 100644 index fac2fbd..0000000 --- a/pkg/apps/agent/taskstream.go +++ /dev/null @@ -1,89 +0,0 @@ -package agent - -import ( - "context" - - "github.com/cobalt77/kubecc/pkg/meta" - "github.com/cobalt77/kubecc/pkg/run" - "github.com/cobalt77/kubecc/pkg/servers" - "github.com/cobalt77/kubecc/pkg/toolchains" - "github.com/cobalt77/kubecc/pkg/types" - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type TaskStreamManager struct { - srvContext context.Context - lg *zap.SugaredLogger - schedulerClient types.SchedulerClient - tcStore *toolchains.Store - tcRunStore *run.ToolchainRunnerStore - executor run.Executor -} - -func (s *TaskStreamManager) HandleStream(stream grpc.ClientStream) error { - s.lg.Info("Streaming tasks from scheduler") - defer s.lg.Warn("Task stream closed") - streamCtx := stream.Context() - for { - compileRequest := &types.CompileRequest{} - err := stream.RecvMsg(compileRequest) - if err != nil { - return err - } - go s.Compile(streamCtx, compileRequest) - } -} - -func (s *TaskStreamManager) TryConnect() (grpc.ClientStream, error) { - return s.schedulerClient.ConnectAgent(s.srvContext) -} - -func (s *TaskStreamManager) Target() string { - return "scheduler" -} - -func (s *TaskStreamManager) Compile( - ctx context.Context, - req *types.CompileRequest, -) (*types.CompileResponse, error) { - s.lg.Debug("Handling compile request") - if err := meta.CheckContext(ctx); err != nil { - return nil, err - } - - span, sctx, err := servers.StartSpanFromServer(ctx, "compile") - if err != nil { - s.lg.Error(err) - } else { - defer span.Finish() - } - - runner, err := s.tcRunStore.Get(req.GetToolchain().Kind) - if err != nil { - return nil, status.Error(codes.Unavailable, - "No toolchain runner available") - } - - tc, err := s.tcStore.TryMatch(req.GetToolchain()) - if err != nil { - return nil, status.Error(codes.Unavailable, - err.Error()) - } - - // Swap remote toolchain with the local toolchain in case the executable - // path is different locally - req.Toolchain = tc - resp, err := runner.RecvRemote().Run(run.Contexts{ - ServerContext: s.srvContext, - ClientContext: sctx, - }, s.executor, req) - if err != nil { - s.lg.With( - zap.Error(err), - ).Error("Error from remote 
runner") - } - return resp.(*types.CompileResponse), err -} diff --git a/pkg/apps/cachesrv/metrics/metrics.go b/pkg/apps/cachesrv/metrics/metrics.go deleted file mode 100644 index d986b2c..0000000 --- a/pkg/apps/cachesrv/metrics/metrics.go +++ /dev/null @@ -1,40 +0,0 @@ -package metrics - -//go:generate msgp - -type StorageProvider struct { - Kind string `msg:"kind"` -} - -func (StorageProvider) Key() string { - return "StorageProvider" -} - -type UsageInfo struct { - ObjectCount int64 `msg:"objectCount"` - TotalSize int64 `msg:"totalSize"` - UsagePercent float64 `msg:"usagePercent"` -} - -func (UsageInfo) Key() string { - return "UsageInfo" -} - -type CacheHits struct { - CacheHitsTotal int64 `msg:"cacheHitsTotal"` - CacheMissesTotal int64 `msg:"cacheMissesTotal"` - CacheHitPercent float64 `msg:"cacheHitPercent"` -} - -func (CacheHits) Key() string { - return "CacheHits" -} - -type PerformanceInfo struct { - AveragePutTime int64 `msg:"averagePutTime"` - AverageGetTime int64 `msg:"averageGetTime"` -} - -func (PerformanceInfo) Key() string { - return "PerformanceInfo" -} diff --git a/pkg/apps/cachesrv/metrics/metrics_gen.go b/pkg/apps/cachesrv/metrics/metrics_gen.go deleted file mode 100644 index 73a7553..0000000 --- a/pkg/apps/cachesrv/metrics/metrics_gen.go +++ /dev/null @@ -1,544 +0,0 @@ -package metrics - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. - -import ( - "github.com/tinylib/msgp/msgp" -) - -// DecodeMsg implements msgp.Decodable -func (z *CacheHits) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "cacheHitsTotal": - z.CacheHitsTotal, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "CacheHitsTotal") - return - } - case "cacheMissesTotal": - z.CacheMissesTotal, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "CacheMissesTotal") - return - } - case "cacheHitPercent": - z.CacheHitPercent, err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "CacheHitPercent") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z CacheHits) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 3 - // write "cacheHitsTotal" - err = en.Append(0x83, 0xae, 0x63, 0x61, 0x63, 0x68, 0x65, 0x48, 0x69, 0x74, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c) - if err != nil { - return - } - err = en.WriteInt64(z.CacheHitsTotal) - if err != nil { - err = msgp.WrapError(err, "CacheHitsTotal") - return - } - // write "cacheMissesTotal" - err = en.Append(0xb0, 0x63, 0x61, 0x63, 0x68, 0x65, 0x4d, 0x69, 0x73, 0x73, 0x65, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c) - if err != nil { - return - } - err = en.WriteInt64(z.CacheMissesTotal) - if err != nil { - err = msgp.WrapError(err, "CacheMissesTotal") - return - } - // write "cacheHitPercent" - err = en.Append(0xaf, 0x63, 0x61, 0x63, 0x68, 0x65, 0x48, 0x69, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74) - if err != nil { - return - } - err = en.WriteFloat64(z.CacheHitPercent) - if err != nil { - err = msgp.WrapError(err, "CacheHitPercent") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z CacheHits) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, 
z.Msgsize()) - // map header, size 3 - // string "cacheHitsTotal" - o = append(o, 0x83, 0xae, 0x63, 0x61, 0x63, 0x68, 0x65, 0x48, 0x69, 0x74, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c) - o = msgp.AppendInt64(o, z.CacheHitsTotal) - // string "cacheMissesTotal" - o = append(o, 0xb0, 0x63, 0x61, 0x63, 0x68, 0x65, 0x4d, 0x69, 0x73, 0x73, 0x65, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c) - o = msgp.AppendInt64(o, z.CacheMissesTotal) - // string "cacheHitPercent" - o = append(o, 0xaf, 0x63, 0x61, 0x63, 0x68, 0x65, 0x48, 0x69, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74) - o = msgp.AppendFloat64(o, z.CacheHitPercent) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *CacheHits) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "cacheHitsTotal": - z.CacheHitsTotal, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "CacheHitsTotal") - return - } - case "cacheMissesTotal": - z.CacheMissesTotal, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "CacheMissesTotal") - return - } - case "cacheHitPercent": - z.CacheHitPercent, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "CacheHitPercent") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z CacheHits) Msgsize() (s int) { - s = 1 + 15 + msgp.Int64Size + 17 + msgp.Int64Size + 16 + msgp.Float64Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *PerformanceInfo) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "averagePutTime": - z.AveragePutTime, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "AveragePutTime") - return - } - case "averageGetTime": - z.AverageGetTime, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "AverageGetTime") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z PerformanceInfo) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "averagePutTime" - err = en.Append(0x82, 0xae, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x54, 0x69, 0x6d, 0x65) - if err != nil { - return - } - err = en.WriteInt64(z.AveragePutTime) - if err != nil { - err = msgp.WrapError(err, "AveragePutTime") - return - } - // write "averageGetTime" - err = en.Append(0xae, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65) - if err != nil { - return - } - err = en.WriteInt64(z.AverageGetTime) - if err != nil { - err = msgp.WrapError(err, "AverageGetTime") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z PerformanceInfo) MarshalMsg(b []byte) (o []byte, err error) { - o = 
msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "averagePutTime" - o = append(o, 0x82, 0xae, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x54, 0x69, 0x6d, 0x65) - o = msgp.AppendInt64(o, z.AveragePutTime) - // string "averageGetTime" - o = append(o, 0xae, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65) - o = msgp.AppendInt64(o, z.AverageGetTime) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *PerformanceInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "averagePutTime": - z.AveragePutTime, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "AveragePutTime") - return - } - case "averageGetTime": - z.AverageGetTime, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "AverageGetTime") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z PerformanceInfo) Msgsize() (s int) { - s = 1 + 15 + msgp.Int64Size + 15 + msgp.Int64Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *StorageProvider) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "kind": - z.Kind, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Kind") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z StorageProvider) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "kind" - err = en.Append(0x81, 0xa4, 0x6b, 0x69, 0x6e, 0x64) - if err != nil { - return - } - err = en.WriteString(z.Kind) - if err != nil { - err = msgp.WrapError(err, "Kind") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z StorageProvider) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "kind" - o = append(o, 0x81, 0xa4, 0x6b, 0x69, 0x6e, 0x64) - o = msgp.AppendString(o, z.Kind) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *StorageProvider) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "kind": - z.Kind, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Kind") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper 
bound estimate of the number of bytes occupied by the serialized message -func (z StorageProvider) Msgsize() (s int) { - s = 1 + 5 + msgp.StringPrefixSize + len(z.Kind) - return -} - -// DecodeMsg implements msgp.Decodable -func (z *UsageInfo) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "objectCount": - z.ObjectCount, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "ObjectCount") - return - } - case "totalSize": - z.TotalSize, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "TotalSize") - return - } - case "usagePercent": - z.UsagePercent, err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "UsagePercent") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z UsageInfo) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 3 - // write "objectCount" - err = en.Append(0x83, 0xab, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74) - if err != nil { - return - } - err = en.WriteInt64(z.ObjectCount) - if err != nil { - err = msgp.WrapError(err, "ObjectCount") - return - } - // write "totalSize" - err = en.Append(0xa9, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65) - if err != nil { - return - } - err = en.WriteInt64(z.TotalSize) - if err != nil { - err = msgp.WrapError(err, "TotalSize") - return - } - // write "usagePercent" - err = en.Append(0xac, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74) - if err != nil { - return - } - err = en.WriteFloat64(z.UsagePercent) - if err != nil { - err = msgp.WrapError(err, "UsagePercent") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z UsageInfo) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 3 - // string "objectCount" - o = append(o, 0x83, 0xab, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74) - o = msgp.AppendInt64(o, z.ObjectCount) - // string "totalSize" - o = append(o, 0xa9, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65) - o = msgp.AppendInt64(o, z.TotalSize) - // string "usagePercent" - o = append(o, 0xac, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74) - o = msgp.AppendFloat64(o, z.UsagePercent) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *UsageInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "objectCount": - z.ObjectCount, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ObjectCount") - return - } - case "totalSize": - z.TotalSize, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "TotalSize") - return - } - case "usagePercent": - z.UsagePercent, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "UsagePercent") - return - } - default: - 
bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z UsageInfo) Msgsize() (s int) { - s = 1 + 12 + msgp.Int64Size + 10 + msgp.Int64Size + 13 + msgp.Float64Size - return -} diff --git a/pkg/apps/cachesrv/metrics/metrics_gen_test.go b/pkg/apps/cachesrv/metrics/metrics_gen_test.go deleted file mode 100644 index 649bdb6..0000000 --- a/pkg/apps/cachesrv/metrics/metrics_gen_test.go +++ /dev/null @@ -1,462 +0,0 @@ -package metrics - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. - -import ( - "bytes" - "testing" - - "github.com/tinylib/msgp/msgp" -) - -func TestMarshalUnmarshalCacheHits(t *testing.T) { - v := CacheHits{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgCacheHits(b *testing.B) { - v := CacheHits{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgCacheHits(b *testing.B) { - v := CacheHits{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalCacheHits(b *testing.B) { - v := CacheHits{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeCacheHits(t *testing.T) { - v := CacheHits{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeCacheHits Msgsize() is inaccurate") - } - - vn := CacheHits{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeCacheHits(b *testing.B) { - v := CacheHits{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeCacheHits(b *testing.B) { - v := CacheHits{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalPerformanceInfo(t *testing.T) { - v := PerformanceInfo{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgPerformanceInfo(b *testing.B) { 
- v := PerformanceInfo{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgPerformanceInfo(b *testing.B) { - v := PerformanceInfo{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalPerformanceInfo(b *testing.B) { - v := PerformanceInfo{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodePerformanceInfo(t *testing.T) { - v := PerformanceInfo{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodePerformanceInfo Msgsize() is inaccurate") - } - - vn := PerformanceInfo{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodePerformanceInfo(b *testing.B) { - v := PerformanceInfo{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodePerformanceInfo(b *testing.B) { - v := PerformanceInfo{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalStorageProvider(t *testing.T) { - v := StorageProvider{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgStorageProvider(b *testing.B) { - v := StorageProvider{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgStorageProvider(b *testing.B) { - v := StorageProvider{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalStorageProvider(b *testing.B) { - v := StorageProvider{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeStorageProvider(t *testing.T) { - v := StorageProvider{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeStorageProvider Msgsize() is inaccurate") - } - - vn := StorageProvider{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func 
BenchmarkEncodeStorageProvider(b *testing.B) { - v := StorageProvider{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeStorageProvider(b *testing.B) { - v := StorageProvider{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalUsageInfo(t *testing.T) { - v := UsageInfo{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgUsageInfo(b *testing.B) { - v := UsageInfo{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgUsageInfo(b *testing.B) { - v := UsageInfo{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalUsageInfo(b *testing.B) { - v := UsageInfo{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeUsageInfo(t *testing.T) { - v := UsageInfo{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeUsageInfo Msgsize() is inaccurate") - } - - vn := UsageInfo{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeUsageInfo(b *testing.B) { - v := UsageInfo{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeUsageInfo(b *testing.B) { - v := UsageInfo{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} diff --git a/pkg/apps/cachesrv/server.go b/pkg/apps/cachesrv/server.go index 2294ff7..6f2e585 100644 --- a/pkg/apps/cachesrv/server.go +++ b/pkg/apps/cachesrv/server.go @@ -4,7 +4,7 @@ import ( "context" "time" - csrvmetrics "github.com/cobalt77/kubecc/pkg/apps/cachesrv/metrics" + "github.com/cobalt77/kubecc/pkg/clients" "github.com/cobalt77/kubecc/pkg/config" "github.com/cobalt77/kubecc/pkg/meta" "github.com/cobalt77/kubecc/pkg/metrics" @@ -68,8 +68,8 @@ func NewCacheServer( } srv.storageProvider = options.storageProvider if options.monitorClient != nil { - srv.metricsProvider = metrics.NewMonitorProvider( - ctx, 
options.monitorClient, metrics.Buffered|metrics.Block) + srv.metricsProvider = clients.NewMonitorProvider( + ctx, options.monitorClient, clients.Buffered|clients.Block) } else { srv.metricsProvider = metrics.NewNoopProvider() } @@ -110,12 +110,6 @@ func (s *CacheServer) Sync(*types.SyncRequest, types.Cache_SyncServer) error { return status.Errorf(codes.Unimplemented, "method Sync not implemented") } -func (s *CacheServer) postStorageProvider() { - s.metricsProvider.Post(&csrvmetrics.StorageProvider{ - Kind: s.storageProvider.Location().String(), - }) -} - func (s *CacheServer) postStorageInfo() { s.metricsProvider.Post(s.storageProvider.UsageInfo()) } @@ -126,7 +120,6 @@ func (s *CacheServer) postCacheHits() { func (s *CacheServer) StartMetricsProvider() { s.lg.Info("Starting metrics provider") - s.postStorageProvider() slowTimer := util.NewJitteredTimer(20*time.Second, 0.5) go func() { @@ -134,7 +127,6 @@ func (s *CacheServer) StartMetricsProvider() { <-slowTimer s.postStorageInfo() s.postCacheHits() - s.postStorageProvider() } }() } diff --git a/pkg/apps/consumerd/metrics/metrics.go b/pkg/apps/consumerd/metrics/metrics.go deleted file mode 100644 index eb6c8bf..0000000 --- a/pkg/apps/consumerd/metrics/metrics.go +++ /dev/null @@ -1,11 +0,0 @@ -package metrics - -//go:generate msgp - -type LocalTasksCompleted struct { - Total int64 `msg:"total"` -} - -func (LocalTasksCompleted) Key() string { - return "LocalTasksCompleted" -} diff --git a/pkg/apps/consumerd/metrics/metrics_gen.go b/pkg/apps/consumerd/metrics/metrics_gen.go deleted file mode 100644 index 051cf90..0000000 --- a/pkg/apps/consumerd/metrics/metrics_gen.go +++ /dev/null @@ -1,110 +0,0 @@ -package metrics - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. - -import ( - "github.com/tinylib/msgp/msgp" -) - -// DecodeMsg implements msgp.Decodable -func (z *LocalTasksCompleted) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "total": - z.Total, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z LocalTasksCompleted) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "total" - err = en.Append(0x81, 0xa5, 0x74, 0x6f, 0x74, 0x61, 0x6c) - if err != nil { - return - } - err = en.WriteInt64(z.Total) - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z LocalTasksCompleted) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "total" - o = append(o, 0x81, 0xa5, 0x74, 0x6f, 0x74, 0x61, 0x6c) - o = msgp.AppendInt64(o, z.Total) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *LocalTasksCompleted) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch 
msgp.UnsafeString(field) { - case "total": - z.Total, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z LocalTasksCompleted) Msgsize() (s int) { - s = 1 + 6 + msgp.Int64Size - return -} diff --git a/pkg/apps/consumerd/metrics/metrics_gen_test.go b/pkg/apps/consumerd/metrics/metrics_gen_test.go deleted file mode 100644 index e5cbbe1..0000000 --- a/pkg/apps/consumerd/metrics/metrics_gen_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package metrics - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. - -import ( - "bytes" - "testing" - - "github.com/tinylib/msgp/msgp" -) - -func TestMarshalUnmarshalLocalTasksCompleted(t *testing.T) { - v := LocalTasksCompleted{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgLocalTasksCompleted(b *testing.B) { - v := LocalTasksCompleted{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgLocalTasksCompleted(b *testing.B) { - v := LocalTasksCompleted{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalLocalTasksCompleted(b *testing.B) { - v := LocalTasksCompleted{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeLocalTasksCompleted(t *testing.T) { - v := LocalTasksCompleted{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeLocalTasksCompleted Msgsize() is inaccurate") - } - - vn := LocalTasksCompleted{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeLocalTasksCompleted(b *testing.B) { - v := LocalTasksCompleted{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeLocalTasksCompleted(b *testing.B) { - v := LocalTasksCompleted{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} diff --git a/pkg/apps/consumerd/server.go b/pkg/apps/consumerd/server.go index a7d58d7..b3b4ee6 100644 --- a/pkg/apps/consumerd/server.go +++ b/pkg/apps/consumerd/server.go @@ -5,10 +5,9 @@ import ( "io/fs" "time" - cdmetrics 
"github.com/cobalt77/kubecc/pkg/apps/consumerd/metrics" + "github.com/cobalt77/kubecc/pkg/clients" "github.com/cobalt77/kubecc/pkg/meta" "github.com/cobalt77/kubecc/pkg/metrics" - "github.com/cobalt77/kubecc/pkg/metrics/common" "github.com/cobalt77/kubecc/pkg/run" "github.com/cobalt77/kubecc/pkg/servers" "github.com/cobalt77/kubecc/pkg/toolchains" @@ -22,7 +21,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" ) type consumerdServer struct { @@ -50,7 +48,7 @@ type ConsumerdServerOptions struct { schedulerClient types.SchedulerClient monitorClient types.MonitorClient schedulerConnection *grpc.ClientConn - usageLimits *types.UsageLimits + usageLimits *metrics.UsageLimits } type consumerdServerOption func(*ConsumerdServerOptions) @@ -93,7 +91,7 @@ func WithMonitorClient( } } -func WithUsageLimits(cpuConfig *types.UsageLimits) consumerdServerOption { +func WithUsageLimits(cpuConfig *metrics.UsageLimits) consumerdServerOption { return func(o *ConsumerdServerOptions) { o.usageLimits = cpuConfig } @@ -126,8 +124,8 @@ func NewConsumerdServer( srv.connection = options.schedulerConnection } if options.monitorClient != nil { - srv.metricsProvider = metrics.NewMonitorProvider(ctx, options.monitorClient, - metrics.Buffered|metrics.Discard) + srv.metricsProvider = clients.NewMonitorProvider(ctx, options.monitorClient, + clients.Buffered|clients.Discard) } else { srv.metricsProvider = metrics.NewNoopProvider() } @@ -192,44 +190,41 @@ func (c *consumerdServer) applyToolchainToReq(req *types.RunRequest) error { return nil } -func (s *consumerdServer) postAlive() { - s.metricsProvider.Post(&common.Alive{}) -} - -func (s *consumerdServer) postQueueParams() { - qp := &common.QueueParams{} - s.localExecutor.CompleteQueueParams(qp) +func (s *consumerdServer) postUsageLimits() { + qp := &metrics.UsageLimits{} + s.localExecutor.CompleteUsageLimits(qp) s.metricsProvider.Post(qp) } func (s *consumerdServer) postTaskStatus() { - ts := &common.TaskStatus{} + ts := &metrics.TaskStatus{} s.localExecutor.CompleteTaskStatus(ts) // Complete Running and Queued s.remoteExecutor.CompleteTaskStatus(ts) // Complete Delegated s.metricsProvider.Post(ts) } -func (s *consumerdServer) postQueueStatus() { - qs := &common.QueueStatus{} - s.localExecutor.CompleteQueueStatus(qs) - s.metricsProvider.Post(qs) -} - func (s *consumerdServer) postTotals() { - s.metricsProvider.Post(&cdmetrics.LocalTasksCompleted{ + s.metricsProvider.Post(&metrics.LocalTasksCompleted{ Total: s.localTasksCompleted.Load(), }) } +func (s *consumerdServer) postToolchains() { + s.metricsProvider.Post(&metrics.Toolchains{ + Items: s.tcStore.ItemsList(), + }) +} + func (s *consumerdServer) StartMetricsProvider() { s.lg.Info("Starting metrics provider") - s.postQueueParams() + s.postUsageLimits() + s.postToolchains() slowTimer := util.NewJitteredTimer(5*time.Second, 0.25) go func() { for { <-slowTimer - s.postQueueParams() + s.postUsageLimits() s.postTotals() } }() @@ -239,7 +234,13 @@ func (s *consumerdServer) StartMetricsProvider() { for { <-fastTimer s.postTaskStatus() - s.postQueueStatus() + } + }() + + go func() { + for { + <-s.storeUpdateCh + s.postToolchains() } }() } @@ -298,10 +299,11 @@ func (c *consumerdServer) Run( c.lg.Info("Running local, scheduler disconnected") canRunRemote = false } - if !c.remoteOnly && c.localExecutor.Status() == types.Available { - c.lg.Info("Running local, not at capacity yet") - canRunRemote = false - } + // todo + // if 
!c.remoteOnly && c.localExecutor.Status() == types.Available { + // c.lg.Info("Running local, not at capacity yet") + // canRunRemote = false + // } if !canRunRemote { defer c.localTasksCompleted.Inc() @@ -325,41 +327,41 @@ func (c *consumerdServer) Run( } } -func (c *consumerdServer) HandleStream(stream grpc.ClientStream) error { - select { - case c.storeUpdateCh <- struct{}{}: - default: - } - for { - select { - case <-c.storeUpdateCh: - copiedItems := []*types.Toolchain{} - for tc := range c.tcStore.Items() { - copiedItems = append(copiedItems, proto.Clone(tc).(*types.Toolchain)) - } - err := stream.SendMsg(&types.Metadata{ - Toolchains: &types.Toolchains{ - Items: copiedItems, - }, - }) - if err != nil { - c.lg.With( - zap.Error(err), - ).Error("Error sending updated toolchains to scheduler") - return err - } - case err := <-servers.EmptyServerStreamDone(c.srvContext, stream): - return err - case <-c.srvContext.Done(): - return nil - } - } -} - -func (c *consumerdServer) TryConnect() (grpc.ClientStream, error) { - return c.schedulerClient.ConnectConsumerd(c.srvContext) -} - -func (c *consumerdServer) Target() string { - return "scheduler" -} +// func (c *consumerdServer) HandleStream(stream grpc.ClientStream) error { +// select { +// case c.storeUpdateCh <- struct{}{}: +// default: +// } +// for { +// select { +// case <-c.storeUpdateCh: +// copiedItems := []*types.Toolchain{} +// for tc := range c.tcStore.Items() { +// copiedItems = append(copiedItems, proto.Clone(tc).(*types.Toolchain)) +// } +// err := stream.SendMsg(&types.Metadata{ +// Toolchains: &metrics.Toolchains{ +// Items: copiedItems, +// }, +// }) +// if err != nil { +// c.lg.With( +// zap.Error(err), +// ).Error("Error sending updated toolchains to scheduler") +// return err +// } +// case err := <-servers.EmptyServerStreamDone(c.srvContext, stream): +// return err +// case <-c.srvContext.Done(): +// return nil +// } +// } +// } + +// func (c *consumerdServer) TryConnect() (grpc.ClientStream, error) { +// return c.schedulerClient.ConnectConsumerd(c.srvContext) +// } + +// func (c *consumerdServer) Target() string { +// return "scheduler" +// } diff --git a/pkg/apps/monitor/metrics/meta.go b/pkg/apps/monitor/metrics/meta.go deleted file mode 100644 index e4dba16..0000000 --- a/pkg/apps/monitor/metrics/meta.go +++ /dev/null @@ -1,34 +0,0 @@ -package metrics - -//go:generate msgp - -const ( - MetaBucket = "meta" -) - -type ProviderInfo struct { - UUID string `msg:"uuid"` - Component int32 `msg:"component"` - Address string `msg:"address"` -} - -type Providers struct { - Items map[string]ProviderInfo `msg:"items"` -} - -func (Providers) Key() string { - return "Providers" -} - -type StoreContents struct { - Buckets []BucketSpec `json:"buckets" msg:"buckets"` -} - -type BucketSpec struct { - Name string `json:"name" msg:"name"` - Data map[string][]byte `json:"data" msg:"data"` -} - -func (StoreContents) Key() string { - return "StoreContents" -} diff --git a/pkg/apps/monitor/metrics/meta_gen.go b/pkg/apps/monitor/metrics/meta_gen.go deleted file mode 100644 index 8b2ef79..0000000 --- a/pkg/apps/monitor/metrics/meta_gen.go +++ /dev/null @@ -1,782 +0,0 @@ -package metrics - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
- -import ( - "github.com/tinylib/msgp/msgp" -) - -// DecodeMsg implements msgp.Decodable -func (z *BucketSpec) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "name": - z.Name, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Name") - return - } - case "data": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "Data") - return - } - if z.Data == nil { - z.Data = make(map[string][]byte, zb0002) - } else if len(z.Data) > 0 { - for key := range z.Data { - delete(z.Data, key) - } - } - for zb0002 > 0 { - zb0002-- - var za0001 string - var za0002 []byte - za0001, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Data") - return - } - za0002, err = dc.ReadBytes(za0002) - if err != nil { - err = msgp.WrapError(err, "Data", za0001) - return - } - z.Data[za0001] = za0002 - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *BucketSpec) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "name" - err = en.Append(0x82, 0xa4, 0x6e, 0x61, 0x6d, 0x65) - if err != nil { - return - } - err = en.WriteString(z.Name) - if err != nil { - err = msgp.WrapError(err, "Name") - return - } - // write "data" - err = en.Append(0xa4, 0x64, 0x61, 0x74, 0x61) - if err != nil { - return - } - err = en.WriteMapHeader(uint32(len(z.Data))) - if err != nil { - err = msgp.WrapError(err, "Data") - return - } - for za0001, za0002 := range z.Data { - err = en.WriteString(za0001) - if err != nil { - err = msgp.WrapError(err, "Data") - return - } - err = en.WriteBytes(za0002) - if err != nil { - err = msgp.WrapError(err, "Data", za0001) - return - } - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *BucketSpec) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "name" - o = append(o, 0x82, 0xa4, 0x6e, 0x61, 0x6d, 0x65) - o = msgp.AppendString(o, z.Name) - // string "data" - o = append(o, 0xa4, 0x64, 0x61, 0x74, 0x61) - o = msgp.AppendMapHeader(o, uint32(len(z.Data))) - for za0001, za0002 := range z.Data { - o = msgp.AppendString(o, za0001) - o = msgp.AppendBytes(o, za0002) - } - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *BucketSpec) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "name": - z.Name, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Name") - return - } - case "data": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Data") - return - } - if z.Data == nil { - z.Data = make(map[string][]byte, zb0002) - } else if len(z.Data) > 0 { - for key := range z.Data { - delete(z.Data, key) - } - } - for zb0002 > 0 { - var za0001 string - var za0002 []byte - zb0002-- - za0001, 
bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Data") - return - } - za0002, bts, err = msgp.ReadBytesBytes(bts, za0002) - if err != nil { - err = msgp.WrapError(err, "Data", za0001) - return - } - z.Data[za0001] = za0002 - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *BucketSpec) Msgsize() (s int) { - s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 5 + msgp.MapHeaderSize - if z.Data != nil { - for za0001, za0002 := range z.Data { - _ = za0002 - s += msgp.StringPrefixSize + len(za0001) + msgp.BytesPrefixSize + len(za0002) - } - } - return -} - -// DecodeMsg implements msgp.Decodable -func (z *ProviderInfo) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "uuid": - z.UUID, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "UUID") - return - } - case "component": - z.Component, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "Component") - return - } - case "address": - z.Address, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Address") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z ProviderInfo) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 3 - // write "uuid" - err = en.Append(0x83, 0xa4, 0x75, 0x75, 0x69, 0x64) - if err != nil { - return - } - err = en.WriteString(z.UUID) - if err != nil { - err = msgp.WrapError(err, "UUID") - return - } - // write "component" - err = en.Append(0xa9, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74) - if err != nil { - return - } - err = en.WriteInt32(z.Component) - if err != nil { - err = msgp.WrapError(err, "Component") - return - } - // write "address" - err = en.Append(0xa7, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73) - if err != nil { - return - } - err = en.WriteString(z.Address) - if err != nil { - err = msgp.WrapError(err, "Address") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z ProviderInfo) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 3 - // string "uuid" - o = append(o, 0x83, 0xa4, 0x75, 0x75, 0x69, 0x64) - o = msgp.AppendString(o, z.UUID) - // string "component" - o = append(o, 0xa9, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74) - o = msgp.AppendInt32(o, z.Component) - // string "address" - o = append(o, 0xa7, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73) - o = msgp.AppendString(o, z.Address) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *ProviderInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "uuid": - z.UUID, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - 
err = msgp.WrapError(err, "UUID") - return - } - case "component": - z.Component, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Component") - return - } - case "address": - z.Address, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Address") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z ProviderInfo) Msgsize() (s int) { - s = 1 + 5 + msgp.StringPrefixSize + len(z.UUID) + 10 + msgp.Int32Size + 8 + msgp.StringPrefixSize + len(z.Address) - return -} - -// DecodeMsg implements msgp.Decodable -func (z *Providers) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "items": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "Items") - return - } - if z.Items == nil { - z.Items = make(map[string]ProviderInfo, zb0002) - } else if len(z.Items) > 0 { - for key := range z.Items { - delete(z.Items, key) - } - } - for zb0002 > 0 { - zb0002-- - var za0001 string - var za0002 ProviderInfo - za0001, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Items") - return - } - var zb0003 uint32 - zb0003, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "Items", za0001) - return - } - for zb0003 > 0 { - zb0003-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "Items", za0001) - return - } - switch msgp.UnsafeString(field) { - case "uuid": - za0002.UUID, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Items", za0001, "UUID") - return - } - case "component": - za0002.Component, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "Items", za0001, "Component") - return - } - case "address": - za0002.Address, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Items", za0001, "Address") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "Items", za0001) - return - } - } - } - z.Items[za0001] = za0002 - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *Providers) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "items" - err = en.Append(0x81, 0xa5, 0x69, 0x74, 0x65, 0x6d, 0x73) - if err != nil { - return - } - err = en.WriteMapHeader(uint32(len(z.Items))) - if err != nil { - err = msgp.WrapError(err, "Items") - return - } - for za0001, za0002 := range z.Items { - err = en.WriteString(za0001) - if err != nil { - err = msgp.WrapError(err, "Items") - return - } - // map header, size 3 - // write "uuid" - err = en.Append(0x83, 0xa4, 0x75, 0x75, 0x69, 0x64) - if err != nil { - return - } - err = en.WriteString(za0002.UUID) - if err != nil { - err = msgp.WrapError(err, "Items", za0001, "UUID") - return - } - // write "component" - err = en.Append(0xa9, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74) - if err != nil { - return - } - err = en.WriteInt32(za0002.Component) - if err != nil { - err = 
msgp.WrapError(err, "Items", za0001, "Component") - return - } - // write "address" - err = en.Append(0xa7, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73) - if err != nil { - return - } - err = en.WriteString(za0002.Address) - if err != nil { - err = msgp.WrapError(err, "Items", za0001, "Address") - return - } - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *Providers) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "items" - o = append(o, 0x81, 0xa5, 0x69, 0x74, 0x65, 0x6d, 0x73) - o = msgp.AppendMapHeader(o, uint32(len(z.Items))) - for za0001, za0002 := range z.Items { - o = msgp.AppendString(o, za0001) - // map header, size 3 - // string "uuid" - o = append(o, 0x83, 0xa4, 0x75, 0x75, 0x69, 0x64) - o = msgp.AppendString(o, za0002.UUID) - // string "component" - o = append(o, 0xa9, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74) - o = msgp.AppendInt32(o, za0002.Component) - // string "address" - o = append(o, 0xa7, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73) - o = msgp.AppendString(o, za0002.Address) - } - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *Providers) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "items": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Items") - return - } - if z.Items == nil { - z.Items = make(map[string]ProviderInfo, zb0002) - } else if len(z.Items) > 0 { - for key := range z.Items { - delete(z.Items, key) - } - } - for zb0002 > 0 { - var za0001 string - var za0002 ProviderInfo - zb0002-- - za0001, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Items") - return - } - var zb0003 uint32 - zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Items", za0001) - return - } - for zb0003 > 0 { - zb0003-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "Items", za0001) - return - } - switch msgp.UnsafeString(field) { - case "uuid": - za0002.UUID, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Items", za0001, "UUID") - return - } - case "component": - za0002.Component, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Items", za0001, "Component") - return - } - case "address": - za0002.Address, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Items", za0001, "Address") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "Items", za0001) - return - } - } - } - z.Items[za0001] = za0002 - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *Providers) Msgsize() (s int) { - s = 1 + 6 + msgp.MapHeaderSize - if z.Items != nil { - for za0001, za0002 := range z.Items { - _ = za0002 - s += msgp.StringPrefixSize + len(za0001) + 1 + 5 + msgp.StringPrefixSize + len(za0002.UUID) + 10 + msgp.Int32Size + 8 + 
msgp.StringPrefixSize + len(za0002.Address) - } - } - return -} - -// DecodeMsg implements msgp.Decodable -func (z *StoreContents) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "buckets": - var zb0002 uint32 - zb0002, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "Buckets") - return - } - if cap(z.Buckets) >= int(zb0002) { - z.Buckets = (z.Buckets)[:zb0002] - } else { - z.Buckets = make([]BucketSpec, zb0002) - } - for za0001 := range z.Buckets { - err = z.Buckets[za0001].DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "Buckets", za0001) - return - } - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *StoreContents) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "buckets" - err = en.Append(0x81, 0xa7, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.Buckets))) - if err != nil { - err = msgp.WrapError(err, "Buckets") - return - } - for za0001 := range z.Buckets { - err = z.Buckets[za0001].EncodeMsg(en) - if err != nil { - err = msgp.WrapError(err, "Buckets", za0001) - return - } - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *StoreContents) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "buckets" - o = append(o, 0x81, 0xa7, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.Buckets))) - for za0001 := range z.Buckets { - o, err = z.Buckets[za0001].MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Buckets", za0001) - return - } - } - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *StoreContents) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "buckets": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Buckets") - return - } - if cap(z.Buckets) >= int(zb0002) { - z.Buckets = (z.Buckets)[:zb0002] - } else { - z.Buckets = make([]BucketSpec, zb0002) - } - for za0001 := range z.Buckets { - bts, err = z.Buckets[za0001].UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Buckets", za0001) - return - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *StoreContents) Msgsize() (s int) { - s = 1 + 8 + msgp.ArrayHeaderSize - for za0001 := range z.Buckets { - s += z.Buckets[za0001].Msgsize() - } - return -} diff --git a/pkg/apps/monitor/metrics/meta_gen_test.go b/pkg/apps/monitor/metrics/meta_gen_test.go deleted file mode 100644 index 4e4fda8..0000000 --- a/pkg/apps/monitor/metrics/meta_gen_test.go +++ 
/dev/null @@ -1,462 +0,0 @@ -package metrics - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. - -import ( - "bytes" - "testing" - - "github.com/tinylib/msgp/msgp" -) - -func TestMarshalUnmarshalBucketSpec(t *testing.T) { - v := BucketSpec{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgBucketSpec(b *testing.B) { - v := BucketSpec{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgBucketSpec(b *testing.B) { - v := BucketSpec{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalBucketSpec(b *testing.B) { - v := BucketSpec{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeBucketSpec(t *testing.T) { - v := BucketSpec{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeBucketSpec Msgsize() is inaccurate") - } - - vn := BucketSpec{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeBucketSpec(b *testing.B) { - v := BucketSpec{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeBucketSpec(b *testing.B) { - v := BucketSpec{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalProviderInfo(t *testing.T) { - v := ProviderInfo{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgProviderInfo(b *testing.B) { - v := ProviderInfo{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgProviderInfo(b *testing.B) { - v := ProviderInfo{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalProviderInfo(b *testing.B) { - v := ProviderInfo{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - 
b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeProviderInfo(t *testing.T) { - v := ProviderInfo{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeProviderInfo Msgsize() is inaccurate") - } - - vn := ProviderInfo{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeProviderInfo(b *testing.B) { - v := ProviderInfo{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeProviderInfo(b *testing.B) { - v := ProviderInfo{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalProviders(t *testing.T) { - v := Providers{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgProviders(b *testing.B) { - v := Providers{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgProviders(b *testing.B) { - v := Providers{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalProviders(b *testing.B) { - v := Providers{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeProviders(t *testing.T) { - v := Providers{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeProviders Msgsize() is inaccurate") - } - - vn := Providers{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeProviders(b *testing.B) { - v := Providers{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeProviders(b *testing.B) { - v := Providers{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func 
TestMarshalUnmarshalStoreContents(t *testing.T) { - v := StoreContents{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgStoreContents(b *testing.B) { - v := StoreContents{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgStoreContents(b *testing.B) { - v := StoreContents{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalStoreContents(b *testing.B) { - v := StoreContents{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeStoreContents(t *testing.T) { - v := StoreContents{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeStoreContents Msgsize() is inaccurate") - } - - vn := StoreContents{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeStoreContents(b *testing.B) { - v := StoreContents{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeStoreContents(b *testing.B) { - v := StoreContents{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} diff --git a/pkg/apps/monitor/metrics/metrics.go b/pkg/apps/monitor/metrics/metrics.go deleted file mode 100644 index fe5a608..0000000 --- a/pkg/apps/monitor/metrics/metrics.go +++ /dev/null @@ -1,19 +0,0 @@ -package metrics - -//go:generate msgp - -type MetricsPostedTotal struct { - Total int32 `msg:"total"` -} - -func (MetricsPostedTotal) Key() string { - return "MetricsPostedTotal" -} - -type ListenerCount struct { - Value int32 `msg:"value"` -} - -func (ListenerCount) Key() string { - return "ListenerCount" -} diff --git a/pkg/apps/monitor/metrics/metrics_gen.go b/pkg/apps/monitor/metrics/metrics_gen.go deleted file mode 100644 index dcf8b5a..0000000 --- a/pkg/apps/monitor/metrics/metrics_gen.go +++ /dev/null @@ -1,213 +0,0 @@ -package metrics - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
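// Illustrative sketch (not a hunk from this patch): with these msgp counter types
// gone, metric values are held by the monitor as proto.Message and deduplicated with
// proto.Equal, as the pkg/apps/monitor/store.go hunk later in this patch shows. A
// self-contained compare-and-swap in that style; wrapperspb stands in for any
// generated metrics message:

package main

import (
	"fmt"
	"sync"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

type casStore struct {
	mu   sync.Mutex
	data map[string]proto.Message
}

// cas stores value only if it differs from the current entry, so unchanged posts
// do not trigger listener notifications.
func (s *casStore) cas(key string, value proto.Message) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	cur, ok := s.data[key]
	if !ok || !proto.Equal(cur, value) {
		s.data[key] = proto.Clone(value)
		return true
	}
	return false
}

func main() {
	s := &casStore{data: map[string]proto.Message{}}
	fmt.Println(s.cas("k", wrapperspb.Int32(1))) // true: new key
	fmt.Println(s.cas("k", wrapperspb.Int32(1))) // false: value unchanged
	fmt.Println(s.cas("k", wrapperspb.Int32(2))) // true: value changed
}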
- -import ( - "github.com/tinylib/msgp/msgp" -) - -// DecodeMsg implements msgp.Decodable -func (z *ListenerCount) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "value": - z.Value, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "Value") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z ListenerCount) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "value" - err = en.Append(0x81, 0xa5, 0x76, 0x61, 0x6c, 0x75, 0x65) - if err != nil { - return - } - err = en.WriteInt32(z.Value) - if err != nil { - err = msgp.WrapError(err, "Value") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z ListenerCount) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "value" - o = append(o, 0x81, 0xa5, 0x76, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendInt32(o, z.Value) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *ListenerCount) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "value": - z.Value, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Value") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z ListenerCount) Msgsize() (s int) { - s = 1 + 6 + msgp.Int32Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *MetricsPostedTotal) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "total": - z.Total, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z MetricsPostedTotal) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "total" - err = en.Append(0x81, 0xa5, 0x74, 0x6f, 0x74, 0x61, 0x6c) - if err != nil { - return - } - err = en.WriteInt32(z.Total) - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z MetricsPostedTotal) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "total" - o = append(o, 0x81, 0xa5, 0x74, 0x6f, 0x74, 0x61, 0x6c) - o = msgp.AppendInt32(o, z.Total) - return -} - -// UnmarshalMsg 
implements msgp.Unmarshaler -func (z *MetricsPostedTotal) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "total": - z.Total, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z MetricsPostedTotal) Msgsize() (s int) { - s = 1 + 6 + msgp.Int32Size - return -} diff --git a/pkg/apps/monitor/metrics/metrics_gen_test.go b/pkg/apps/monitor/metrics/metrics_gen_test.go deleted file mode 100644 index 4205f5b..0000000 --- a/pkg/apps/monitor/metrics/metrics_gen_test.go +++ /dev/null @@ -1,236 +0,0 @@ -package metrics - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. - -import ( - "bytes" - "testing" - - "github.com/tinylib/msgp/msgp" -) - -func TestMarshalUnmarshalListenerCount(t *testing.T) { - v := ListenerCount{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgListenerCount(b *testing.B) { - v := ListenerCount{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgListenerCount(b *testing.B) { - v := ListenerCount{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalListenerCount(b *testing.B) { - v := ListenerCount{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeListenerCount(t *testing.T) { - v := ListenerCount{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeListenerCount Msgsize() is inaccurate") - } - - vn := ListenerCount{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeListenerCount(b *testing.B) { - v := ListenerCount{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeListenerCount(b *testing.B) { - v := ListenerCount{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := 
v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalMetricsPostedTotal(t *testing.T) { - v := MetricsPostedTotal{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgMetricsPostedTotal(b *testing.B) { - v := MetricsPostedTotal{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgMetricsPostedTotal(b *testing.B) { - v := MetricsPostedTotal{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalMetricsPostedTotal(b *testing.B) { - v := MetricsPostedTotal{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeMetricsPostedTotal(t *testing.T) { - v := MetricsPostedTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeMetricsPostedTotal Msgsize() is inaccurate") - } - - vn := MetricsPostedTotal{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeMetricsPostedTotal(b *testing.B) { - v := MetricsPostedTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeMetricsPostedTotal(b *testing.B) { - v := MetricsPostedTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} diff --git a/pkg/apps/monitor/monitor_test.go b/pkg/apps/monitor/monitor_test.go index 17eb443..4f69a22 100644 --- a/pkg/apps/monitor/monitor_test.go +++ b/pkg/apps/monitor/monitor_test.go @@ -10,7 +10,7 @@ import ( "github.com/cobalt77/kubecc/internal/logkc" "github.com/cobalt77/kubecc/internal/testutil" "github.com/cobalt77/kubecc/pkg/apps/monitor" - "github.com/cobalt77/kubecc/pkg/apps/monitor/test" + "github.com/cobalt77/kubecc/pkg/clients" "github.com/cobalt77/kubecc/pkg/identity" "github.com/cobalt77/kubecc/pkg/meta" "github.com/cobalt77/kubecc/pkg/metrics" @@ -26,6 +26,23 @@ import ( "google.golang.org/grpc/test/bufconn" ) +type TestStoreCreator struct { + Count *atomic.Int32 + Stores sync.Map // map[string]monitor.KeyValueStore +} + +func (c *TestStoreCreator) NewStore(ctx context.Context) monitor.KeyValueStore { + store := monitor.InMemoryStoreCreator.NewStore(ctx) + c.Stores.Store(ctx, store) + i := int32(0) + c.Stores.Range(func(key, value interface{}) bool { + i++ + return true + }) + c.Count.Store(i) + return store +} + func drain(c chan interface{}) 
{ for { select { @@ -50,10 +67,10 @@ func recycle(c chan context.CancelFunc) { var _ = Describe("Monitor", func() { var listener *bufconn.Listener var monitorCtx context.Context - var storeCreator *test.TestStoreCreator + var storeCreator *TestStoreCreator Specify("Monitor server setup", func() { - storeCreator = &test.TestStoreCreator{ + storeCreator = &TestStoreCreator{ Stores: sync.Map{}, Count: atomic.NewInt32(0), } @@ -112,17 +129,17 @@ var _ = Describe("Monitor", func() { }), )) Expect(err).NotTo(HaveOccurred()) - client := types.NewMonitorClient(cc) - listener := metrics.NewListener(ctx, client) + mc := types.NewMonitorClient(cc) + listener := clients.NewListener(ctx, mc) listener.OnProviderAdded(func(pctx context.Context, uuid string) { listenerEvents["providerAdded"] <- uuid - listener.OnValueChanged(uuid, func(k1 *test.TestKey1) { + listener.OnValueChanged(uuid, func(k1 *testutil.Test1) { listenerEvents["testKey1Changed"] <- k1.Counter }).OrExpired(func() metrics.RetryOptions { listenerEvents["testKey1Expired"] <- struct{}{} return metrics.NoRetry }) - listener.OnValueChanged(uuid, func(k2 *test.TestKey2) { + listener.OnValueChanged(uuid, func(k2 *testutil.Test2) { listenerEvents["testKey2Changed"] <- k2.Value }).OrExpired(func() metrics.RetryOptions { listenerEvents["testKey2Expired"] <- struct{}{} @@ -156,8 +173,8 @@ var _ = Describe("Monitor", func() { }), )) Expect(err).NotTo(HaveOccurred()) - client := types.NewMonitorClient(cc) - provider = metrics.NewMonitorProvider(cctx, client, metrics.Buffered|metrics.Block) + mc := types.NewMonitorClient(cc) + provider = clients.NewMonitorProvider(cctx, mc, clients.Buffered|clients.Block) Expect(provider).NotTo(BeNil()) }) It("should create a store", func() { @@ -175,7 +192,7 @@ var _ = Describe("Monitor", func() { }) When("The provider updates a key", func() { It("should succeed", func() { - provider.Post(&test.TestKey1{ + provider.Post(&testutil.Test1{ Counter: 1, }) }) @@ -201,11 +218,11 @@ var _ = Describe("Monitor", func() { }), )) Expect(err).NotTo(HaveOccurred()) - client := types.NewMonitorClient(cc) - listener := metrics.NewListener(ctx, client) + mc := types.NewMonitorClient(cc) + listener := clients.NewListener(ctx, mc) listener.OnProviderAdded(func(pctx context.Context, uuid string) { lateJoinListenerEvents["providerAdded"] <- uuid - listener.OnValueChanged(uuid, func(k1 *test.TestKey1) { + listener.OnValueChanged(uuid, func(k1 *testutil.Test1) { lateJoinListenerEvents["testKey1Changed"] <- k1.Counter }).OrExpired(func() metrics.RetryOptions { lateJoinListenerEvents["testKey1Expired"] <- struct{}{} @@ -220,7 +237,7 @@ var _ = Describe("Monitor", func() { }) When("The provider updates a different key", func() { It("should succeed", func() { - provider.Post(&test.TestKey2{ + provider.Post(&testutil.Test2{ Value: "test", }) }) @@ -233,10 +250,10 @@ var _ = Describe("Monitor", func() { }) When("The provider posts a key with the same value", func() { It("should succeed", func() { - provider.Post(&test.TestKey2{ + provider.Post(&testutil.Test2{ Value: "test", }) - provider.Post(&test.TestKey1{ + provider.Post(&testutil.Test1{ Counter: 1, }) }) @@ -284,16 +301,16 @@ var _ = Describe("Monitor", func() { atomic.NewInt32(0), } handlers := []interface{}{ - func(k *test.TestKey1) { + func(k *testutil.Test1) { totals[0].Inc() }, - func(k *test.TestKey2) { + func(k *testutil.Test2) { totals[1].Inc() }, - func(k *test.TestKey3) { + func(k *testutil.Test3) { totals[2].Inc() }, - func(k *test.TestKey4) { + func(k *testutil.Test4) { 
totals[3].Inc() }, } @@ -314,8 +331,8 @@ var _ = Describe("Monitor", func() { return listener.Dial() }), )) - client := types.NewMonitorClient(cc) - provider := metrics.NewMonitorProvider(ctx, client, metrics.Buffered) + mc := types.NewMonitorClient(cc) + provider := clients.NewMonitorProvider(ctx, mc, clients.Buffered) providers[i] = provider } }) @@ -338,8 +355,8 @@ var _ = Describe("Monitor", func() { return listener.Dial() }), )) - client := types.NewMonitorClient(cc) - l := metrics.NewListener(ctx, client) + mc := types.NewMonitorClient(cc) + l := clients.NewListener(ctx, mc) listeners[sampleIdx] = l handler := handlers[sampleIdx%4] b.Time("Handling provider add callbacks", func() { @@ -363,7 +380,7 @@ var _ = Describe("Monitor", func() { defer GinkgoRecover() b.Time(fmt.Sprintf("%d Key 1 updates", numUpdatesPerKey), func() { for i := 0; i < numUpdatesPerKey; i++ { - providers[i%len(providers)].Post(&test.TestKey1{Counter: i}) + providers[i%len(providers)].Post(&testutil.Test1{Counter: int32(i)}) } }) }() @@ -371,7 +388,7 @@ var _ = Describe("Monitor", func() { defer GinkgoRecover() b.Time(fmt.Sprintf("%d Key 2 updates", numUpdatesPerKey), func() { for i := 0; i < numUpdatesPerKey; i++ { - providers[i%len(providers)].Post(&test.TestKey2{Value: fmt.Sprint(i)}) + providers[i%len(providers)].Post(&testutil.Test2{Value: fmt.Sprint(i)}) } }) }() @@ -379,7 +396,7 @@ var _ = Describe("Monitor", func() { defer GinkgoRecover() b.Time(fmt.Sprintf("%d Key 3 updates", numUpdatesPerKey), func() { for i := 0; i < numUpdatesPerKey; i++ { - providers[i%len(providers)].Post(&test.TestKey3{Counter: i}) + providers[i%len(providers)].Post(&testutil.Test3{Counter: int32(i)}) } }) }() @@ -387,7 +404,7 @@ var _ = Describe("Monitor", func() { defer GinkgoRecover() b.Time(fmt.Sprintf("%d Key 4 updates", numUpdatesPerKey), func() { for i := 0; i < numUpdatesPerKey; i++ { - providers[i%len(providers)].Post(&test.TestKey4{Value: fmt.Sprint(i)}) + providers[i%len(providers)].Post(&testutil.Test4{Value: fmt.Sprint(i)}) } }) }() diff --git a/pkg/apps/monitor/prometheus.go b/pkg/apps/monitor/prometheus.go index aa54158..b425f4f 100644 --- a/pkg/apps/monitor/prometheus.go +++ b/pkg/apps/monitor/prometheus.go @@ -36,11 +36,9 @@ import ( "net/http" "sync" - cdmetrics "github.com/cobalt77/kubecc/pkg/apps/consumerd/metrics" - scmetrics "github.com/cobalt77/kubecc/pkg/apps/scheduler/metrics" + "github.com/cobalt77/kubecc/pkg/clients" "github.com/cobalt77/kubecc/pkg/meta" "github.com/cobalt77/kubecc/pkg/metrics" - "github.com/cobalt77/kubecc/pkg/metrics/common" "github.com/cobalt77/kubecc/pkg/servers" "github.com/cobalt77/kubecc/pkg/types" "github.com/prometheus/client_golang/prometheus" @@ -206,7 +204,7 @@ func servePrometheusMetrics( ) { go serveMetricsEndpoint(srvContext, ":2112") lg := meta.Log(srvContext) - listener := metrics.NewListener(srvContext, client, + listener := clients.NewListener(srvContext, client, servers.WithLogEvents(servers.LogNone), ) listener.OnProviderAdded(func(ctx context.Context, uuid string) { @@ -246,12 +244,12 @@ func watchAgentKeys( labels := prometheus.Labels{ "agent": info.Address, } - listener.OnValueChanged(info.UUID, func(value *common.TaskStatus) { + listener.OnValueChanged(info.UUID, func(value *metrics.TaskStatus) { agentTasksActive.With(labels).Set(float64(value.NumRunning)) agentTasksActive.With(labels).Set(float64(value.NumRunning)) agentTasksQueued.With(labels).Set(float64(value.NumQueued)) }) - listener.OnValueChanged(info.UUID, func(value *common.QueueParams) { + 
listener.OnValueChanged(info.UUID, func(value *metrics.UsageLimits) { agentTasksMax.With(labels).Set(float64(value.ConcurrentProcessLimit)) }) } @@ -260,10 +258,10 @@ func watchSchedulerKeys( listener metrics.Listener, info *types.WhoisResponse, ) { - listener.OnValueChanged(info.UUID, func(value *scmetrics.AgentCount) { + listener.OnValueChanged(info.UUID, func(value *metrics.AgentCount) { agentCount.Set(float64(value.Count)) }) - listener.OnValueChanged(info.UUID, func(value *scmetrics.AgentTasksTotal) { + listener.OnValueChanged(info.UUID, func(value *metrics.AgentTasksTotal) { infoMutex.RLock() defer infoMutex.RUnlock() if info, ok := providerInfo[value.UUID]; ok { @@ -271,18 +269,10 @@ func watchSchedulerKeys( Set(float64(value.Total)) } }) - listener.OnValueChanged(info.UUID, func(value *scmetrics.AgentWeight) { - infoMutex.RLock() - defer infoMutex.RUnlock() - if info, ok := providerInfo[value.UUID]; ok { - agentWeight.WithLabelValues(info.Address). - Set(value.Value) - } - }) - listener.OnValueChanged(info.UUID, func(value *scmetrics.CdCount) { + listener.OnValueChanged(info.UUID, func(value *metrics.ConsumerdCount) { cdCount.Set(float64(value.Count)) }) - listener.OnValueChanged(info.UUID, func(value *scmetrics.CdTasksTotal) { + listener.OnValueChanged(info.UUID, func(value *metrics.ConsumerdTasksTotal) { infoMutex.RLock() defer infoMutex.RUnlock() if info, ok := providerInfo[value.UUID]; ok { @@ -290,13 +280,13 @@ func watchSchedulerKeys( Set(float64(value.Total)) } }) - listener.OnValueChanged(info.UUID, func(value *scmetrics.SchedulingRequestsTotal) { + listener.OnValueChanged(info.UUID, func(value *metrics.SchedulingRequestsTotal) { schedulingRequestsTotal.Set(float64(value.Total)) }) - listener.OnValueChanged(info.UUID, func(value *scmetrics.TasksCompletedTotal) { + listener.OnValueChanged(info.UUID, func(value *metrics.TasksCompletedTotal) { tasksCompletedTotal.Set(float64(value.Total)) }) - listener.OnValueChanged(info.UUID, func(value *scmetrics.TasksFailedTotal) { + listener.OnValueChanged(info.UUID, func(value *metrics.TasksFailedTotal) { tasksFailedTotal.Set(float64(value.Total)) }) } @@ -308,15 +298,15 @@ func watchConsumerdKeys( labels := prometheus.Labels{ "consumerd": info.Address, } - listener.OnValueChanged(info.UUID, func(value *common.TaskStatus) { + listener.OnValueChanged(info.UUID, func(value *metrics.TaskStatus) { cdLocalTasksActive.With(labels).Set(float64(value.NumRunning)) cdRemoteTasksActive.With(labels).Set(float64(value.NumDelegated)) cdLocalTasksQueued.With(labels).Set(float64(value.NumQueued)) }) - listener.OnValueChanged(info.UUID, func(value *common.QueueParams) { + listener.OnValueChanged(info.UUID, func(value *metrics.UsageLimits) { cdTasksMax.With(labels).Set(float64(value.ConcurrentProcessLimit)) }) - listener.OnValueChanged(info.UUID, func(value *cdmetrics.LocalTasksCompleted) { + listener.OnValueChanged(info.UUID, func(value *metrics.LocalTasksCompleted) { cdLocalTasksTotal.With(labels).Set(float64(value.Total)) }) } diff --git a/pkg/apps/monitor/server.go b/pkg/apps/monitor/server.go index da42ccc..0d66864 100644 --- a/pkg/apps/monitor/server.go +++ b/pkg/apps/monitor/server.go @@ -8,21 +8,21 @@ import ( "net" "sync" - "github.com/cobalt77/kubecc/pkg/apps/monitor/metrics" "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/servers" "github.com/cobalt77/kubecc/pkg/types" - "github.com/cobalt77/kubecc/pkg/util" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" 
"google.golang.org/grpc/peer" "google.golang.org/grpc/status" "google.golang.org/grpc/test/bufconn" + "google.golang.org/protobuf/types/known/anypb" ) type Receiver interface { - Send(*types.Value) error + Send(*anypb.Any) error } type MonitorServer struct { @@ -93,22 +93,18 @@ func (m *MonitorServer) runPrometheusListener() { servePrometheusMetrics(m.srvContext, client) } -func (m *MonitorServer) encodeProviders() []byte { - m.providerMutex.RLock() - defer m.providerMutex.RUnlock() - return util.EncodeMsgp(m.providers) -} - // bucketMutex must not be held by the same thread when calling this function. func (m *MonitorServer) providersUpdated() { - err := m.post(&types.Metric{ + any, err := anypb.New(m.providers) + if err != nil { + panic(err) + } + err = m.post(&types.Metric{ Key: &types.Key{ Bucket: metrics.MetaBucket, - Name: metrics.Providers{}.Key(), - }, - Value: &types.Value{ - Data: m.encodeProviders(), + Name: any.GetTypeUrl(), }, + Value: any, }) if err != nil { panic(err) @@ -157,11 +153,11 @@ func (m *MonitorServer) Stream( store := m.storeCreator.NewStore(bucketCtx) m.buckets[uuid] = store if m.providers.Items == nil { - m.providers.Items = make(map[string]metrics.ProviderInfo) + m.providers.Items = make(map[string]*metrics.ProviderInfo) } - m.providers.Items[uuid] = metrics.ProviderInfo{ + m.providers.Items[uuid] = &metrics.ProviderInfo{ UUID: uuid, - Component: int32(component), + Component: component, Address: addr, } providerCount.Inc() @@ -218,10 +214,16 @@ func (m *MonitorServer) notify(metric *types.Metric) { } } -var storeContentsKey = (&types.Key{ - Bucket: metrics.MetaBucket, - Name: metrics.StoreContents{}.Key(), -}).Canonical() +var storeContentsKey string + +func init() { + sc := &metrics.StoreContents{} + any, err := anypb.New(sc) + if err != nil { + panic(err) + } + storeContentsKey = any.GetTypeUrl() +} func (m *MonitorServer) notifyStoreMeta() { m.listenerMutex.RLock() @@ -229,25 +231,31 @@ func (m *MonitorServer) notifyStoreMeta() { if listeners, ok := m.listeners[storeContentsKey]; ok { contents := &metrics.StoreContents{ - Buckets: []metrics.BucketSpec{}, + Buckets: []*metrics.BucketSpec{}, } for k, v := range m.buckets { - copied := map[string][]byte{} + copied := map[string]*anypb.Any{} for _, key := range v.Keys() { if value, ok := v.Get(key); ok { - copied[key] = value + any, err := anypb.New(value) + if err != nil { + m.lg.Error(err) + continue + } + copied[key] = any } } - contents.Buckets = append(contents.Buckets, metrics.BucketSpec{ + contents.Buckets = append(contents.Buckets, &metrics.BucketSpec{ Name: k, Data: copied, }) } - encoded := util.EncodeMsgp(contents) for _, v := range listeners { - err := v.Send(&types.Value{ - Data: encoded, - }) + any, err := anypb.New(contents) + if err != nil { + panic(err) + } + err = v.Send(any) if err != nil { m.lg.With(zap.Error(err)).Error("Error sending data to listener") } @@ -264,7 +272,7 @@ func (m *MonitorServer) post(metric *types.Metric) error { store.Delete(metric.Key.Name) return nil } - if store.CAS(metric.Key.Name, metric.Value.Data) { + if store.CAS(metric.Key.Name, metric.Value) { m.lg.With( zap.String("key", metric.Key.ShortID()), ).Debug("Metric updated") @@ -317,9 +325,11 @@ func (m *MonitorServer) Listen( // late join if value, ok := bucket.Get(key.Name); ok { - err := srv.Send(&types.Value{ - Data: value, - }) + any, err := anypb.New(value) + if err != nil { + panic(err) + } + err = srv.Send(any) if err != nil { m.lg.With(zap.Error(err)).Error("Error sending data to listener") } diff --git 
a/pkg/apps/monitor/store.go b/pkg/apps/monitor/store.go index 8b7665e..c45d84f 100644 --- a/pkg/apps/monitor/store.go +++ b/pkg/apps/monitor/store.go @@ -1,17 +1,18 @@ package monitor import ( - "bytes" "context" "sync" + + "google.golang.org/protobuf/proto" ) type KeyValueStore interface { Context() context.Context - Set(key string, value []byte) + Set(key string, value proto.Message) Delete(key string) - Get(key string) ([]byte, bool) - CAS(key string, value []byte) bool + Get(key string) (proto.Message, bool) + CAS(key string, value proto.Message) bool Keys() []string Len() int } @@ -21,7 +22,7 @@ type StoreCreator interface { } type InMemoryStore struct { - data map[string][]byte + data map[string]proto.Message mutex *sync.RWMutex ctx context.Context } @@ -32,7 +33,7 @@ var InMemoryStoreCreator inMemoryStoreCreator func (inMemoryStoreCreator) NewStore(ctx context.Context) KeyValueStore { return &InMemoryStore{ - data: make(map[string][]byte), + data: make(map[string]proto.Message), mutex: &sync.RWMutex{}, ctx: ctx, } @@ -42,7 +43,7 @@ func (m *InMemoryStore) Context() context.Context { return m.ctx } -func (m *InMemoryStore) Set(key string, value []byte) { +func (m *InMemoryStore) Set(key string, value proto.Message) { m.mutex.Lock() defer m.mutex.Unlock() m.data[key] = value @@ -54,23 +55,21 @@ func (m *InMemoryStore) Delete(key string) { delete(m.data, key) } -func (m *InMemoryStore) Get(key string) ([]byte, bool) { +func (m *InMemoryStore) Get(key string) (proto.Message, bool) { m.mutex.RLock() defer m.mutex.RUnlock() data, ok := m.data[key] if ok { - buf := make([]byte, len(data)) - copy(buf, data) - return buf, true + return proto.Clone(data), true } return nil, false } -func (m *InMemoryStore) CAS(key string, value []byte) bool { +func (m *InMemoryStore) CAS(key string, value proto.Message) bool { m.mutex.Lock() defer m.mutex.Unlock() data, ok := m.data[key] - if !ok || !bytes.Equal(data, value) { + if !ok || !proto.Equal(data, value) { m.data[key] = value return true } diff --git a/pkg/apps/monitor/store_test.go b/pkg/apps/monitor/store_test.go index 90e0eae..0d4e8ee 100644 --- a/pkg/apps/monitor/store_test.go +++ b/pkg/apps/monitor/store_test.go @@ -1,13 +1,14 @@ package monitor_test import ( - "bytes" "context" + "strings" mapset "github.com/deckarep/golang-set" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + "github.com/cobalt77/kubecc/internal/testutil" "github.com/cobalt77/kubecc/pkg/apps/monitor" ) @@ -20,20 +21,20 @@ var _ = Describe("Store", func() { }) }) It("Should handle setting and retrieving keys", func() { - store.Set("key1", []byte("test")) + store.Set("key1", &testutil.Test1{Counter: 1}) value, ok := store.Get("key1") Expect(ok).To(BeTrue()) - Expect(value).To(BeEquivalentTo([]byte("test"))) + Expect(value).To(BeEquivalentTo(&testutil.Test1{Counter: 1})) Expect(store.Len()).To(Equal(1)) - store.Set("key2", []byte("test2")) + store.Set("key2", &testutil.Test2{Value: "1"}) value, ok = store.Get("key2") Expect(ok).To(BeTrue()) - Expect(value).To(BeEquivalentTo([]byte("test2"))) + Expect(value).To(BeEquivalentTo(&testutil.Test2{Value: "1"})) Expect(store.Len()).To(Equal(2)) - store.Set("key3", []byte("test3")) + store.Set("key3", &testutil.Test3{Counter: 2}) value, ok = store.Get("key3") Expect(ok).To(BeTrue()) - Expect(value).To(BeEquivalentTo([]byte("test3"))) + Expect(value).To(BeEquivalentTo(&testutil.Test3{Counter: 2})) Expect(store.Len()).To(Equal(3)) }) It("Should list the available keys", func() { @@ -65,65 +66,65 @@ var _ = Describe("Store", func() { Expect(store.Keys()).To(BeEmpty()) }) It("Should handle compare-and-swap", func() { - Expect(store.CAS("key1", []byte("a"))).To(BeTrue()) + Expect(store.CAS("key1", &testutil.Test1{Counter: 2})).To(BeTrue()) value, ok := store.Get("key1") Expect(ok).To(BeTrue()) - Expect(value).To(BeEquivalentTo([]byte("a"))) + Expect(value).To(BeEquivalentTo(&testutil.Test1{Counter: 2})) - Expect(store.CAS("key1", []byte("a"))).To(BeFalse()) + Expect(store.CAS("key1", &testutil.Test1{Counter: 2})).To(BeFalse()) value, ok = store.Get("key1") Expect(ok).To(BeTrue()) - Expect(value).To(BeEquivalentTo([]byte("a"))) + Expect(value).To(BeEquivalentTo(&testutil.Test1{Counter: 2})) - Expect(store.CAS("key1", []byte("b"))).To(BeTrue()) + Expect(store.CAS("key1", &testutil.Test1{Counter: 3})).To(BeTrue()) value, ok = store.Get("key1") Expect(ok).To(BeTrue()) - Expect(value).To(BeEquivalentTo([]byte("b"))) + Expect(value).To(BeEquivalentTo(&testutil.Test1{Counter: 3})) - Expect(store.CAS("key1", []byte("b"))).To(BeFalse()) + Expect(store.CAS("key1", &testutil.Test1{Counter: 3})).To(BeFalse()) value, ok = store.Get("key1") Expect(ok).To(BeTrue()) - Expect(value).To(BeEquivalentTo([]byte("b"))) + Expect(value).To(BeEquivalentTo(&testutil.Test1{Counter: 3})) - Expect(store.CAS("key1", []byte("a"))).To(BeTrue()) + Expect(store.CAS("key1", &testutil.Test1{Counter: 2})).To(BeTrue()) value, ok = store.Get("key1") Expect(ok).To(BeTrue()) - Expect(value).To(BeEquivalentTo([]byte("a"))) + Expect(value).To(BeEquivalentTo(&testutil.Test1{Counter: 2})) }) Measure("Performance", func(b Benchmarker) { store = monitor.InMemoryStoreCreator.NewStore(context.Background()) b.Time("10B payload Set/Get", func() { - store.Set("key1", []byte("0123456789")) + store.Set("key1", &testutil.Test2{Value: "0123456789"}) _, _ = store.Get("key1") }) store.Delete("key1") b.Time("100B payload Set/Get", func() { - store.Set("key1", bytes.Repeat([]byte("0123456789"), 10)) + store.Set("key1", &testutil.Test2{Value: strings.Repeat("0123456789", 10)}) _, _ = store.Get("key1") }) store.Delete("key1") b.Time("1KB payload Set/Get", func() { - store.Set("key1", bytes.Repeat([]byte("0123456789"), 100)) + store.Set("key1", &testutil.Test2{Value: strings.Repeat("0123456789", 100)}) _, _ = store.Get("key1") }) store.Delete("key1") b.Time("10KB payload Set/Get", func() { - 
store.Set("key1", bytes.Repeat([]byte("0123456789"), 1000)) + store.Set("key1", &testutil.Test2{Value: strings.Repeat("0123456789", 1000)}) _, _ = store.Get("key1") }) store.Delete("key1") b.Time("100KB payload Set/Get", func() { - store.Set("key1", bytes.Repeat([]byte("0123456789"), 10000)) + store.Set("key1", &testutil.Test2{Value: strings.Repeat("0123456789", 10000)}) _, _ = store.Get("key1") }) store.Delete("key1") b.Time("1MB payload Set/Get", func() { - store.Set("key1", bytes.Repeat([]byte("0123456789"), 1e5)) + store.Set("key1", &testutil.Test2{Value: strings.Repeat("0123456789", 1e5)}) _, _ = store.Get("key1") }) store.Delete("key1") b.Time("10MB payload Set/Get", func() { - store.Set("key1", bytes.Repeat([]byte("0123456789"), 1e6)) + store.Set("key1", &testutil.Test2{Value: strings.Repeat("0123456789", 1e6)}) _, _ = store.Get("key1") }) store.Delete("key1") diff --git a/pkg/apps/monitor/test/keys.go b/pkg/apps/monitor/test/keys.go deleted file mode 100644 index fb839d5..0000000 --- a/pkg/apps/monitor/test/keys.go +++ /dev/null @@ -1,34 +0,0 @@ -package test - -//go:generate msgp -type TestKey1 struct { - Counter int `msg:"counter"` -} - -func (k TestKey1) Key() string { - return "TestKey1" -} - -type TestKey2 struct { - Value string `msg:"value"` -} - -func (k TestKey2) Key() string { - return "TestKey2" -} - -type TestKey3 struct { - Counter int `msg:"counter"` -} - -func (k TestKey3) Key() string { - return "TestKey3" -} - -type TestKey4 struct { - Value string `msg:"value"` -} - -func (k TestKey4) Key() string { - return "TestKey4" -} diff --git a/pkg/apps/monitor/test/keys_gen.go b/pkg/apps/monitor/test/keys_gen.go deleted file mode 100644 index 31b1888..0000000 --- a/pkg/apps/monitor/test/keys_gen.go +++ /dev/null @@ -1,419 +0,0 @@ -package test - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
- -import ( - "github.com/tinylib/msgp/msgp" -) - -// DecodeMsg implements msgp.Decodable -func (z *TestKey1) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "counter": - z.Counter, err = dc.ReadInt() - if err != nil { - err = msgp.WrapError(err, "Counter") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z TestKey1) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "counter" - err = en.Append(0x81, 0xa7, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72) - if err != nil { - return - } - err = en.WriteInt(z.Counter) - if err != nil { - err = msgp.WrapError(err, "Counter") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z TestKey1) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "counter" - o = append(o, 0x81, 0xa7, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72) - o = msgp.AppendInt(o, z.Counter) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *TestKey1) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "counter": - z.Counter, bts, err = msgp.ReadIntBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Counter") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z TestKey1) Msgsize() (s int) { - s = 1 + 8 + msgp.IntSize - return -} - -// DecodeMsg implements msgp.Decodable -func (z *TestKey2) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "value": - z.Value, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Value") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z TestKey2) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "value" - err = en.Append(0x81, 0xa5, 0x76, 0x61, 0x6c, 0x75, 0x65) - if err != nil { - return - } - err = en.WriteString(z.Value) - if err != nil { - err = msgp.WrapError(err, "Value") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z TestKey2) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "value" - o = append(o, 0x81, 0xa5, 0x76, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendString(o, z.Value) - return -} - -// UnmarshalMsg implements 
msgp.Unmarshaler -func (z *TestKey2) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "value": - z.Value, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Value") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z TestKey2) Msgsize() (s int) { - s = 1 + 6 + msgp.StringPrefixSize + len(z.Value) - return -} - -// DecodeMsg implements msgp.Decodable -func (z *TestKey3) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "counter": - z.Counter, err = dc.ReadInt() - if err != nil { - err = msgp.WrapError(err, "Counter") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z TestKey3) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "counter" - err = en.Append(0x81, 0xa7, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72) - if err != nil { - return - } - err = en.WriteInt(z.Counter) - if err != nil { - err = msgp.WrapError(err, "Counter") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z TestKey3) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "counter" - o = append(o, 0x81, 0xa7, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72) - o = msgp.AppendInt(o, z.Counter) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *TestKey3) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "counter": - z.Counter, bts, err = msgp.ReadIntBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Counter") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z TestKey3) Msgsize() (s int) { - s = 1 + 8 + msgp.IntSize - return -} - -// DecodeMsg implements msgp.Decodable -func (z *TestKey4) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "value": - z.Value, err = dc.ReadString() - if err != nil { - err = 
msgp.WrapError(err, "Value") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z TestKey4) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "value" - err = en.Append(0x81, 0xa5, 0x76, 0x61, 0x6c, 0x75, 0x65) - if err != nil { - return - } - err = en.WriteString(z.Value) - if err != nil { - err = msgp.WrapError(err, "Value") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z TestKey4) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "value" - o = append(o, 0x81, 0xa5, 0x76, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendString(o, z.Value) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *TestKey4) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "value": - z.Value, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Value") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z TestKey4) Msgsize() (s int) { - s = 1 + 6 + msgp.StringPrefixSize + len(z.Value) - return -} diff --git a/pkg/apps/monitor/test/keys_gen_test.go b/pkg/apps/monitor/test/keys_gen_test.go deleted file mode 100644 index 581c7be..0000000 --- a/pkg/apps/monitor/test/keys_gen_test.go +++ /dev/null @@ -1,462 +0,0 @@ -package test - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
- -import ( - "bytes" - "testing" - - "github.com/tinylib/msgp/msgp" -) - -func TestMarshalUnmarshalTestKey1(t *testing.T) { - v := TestKey1{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgTestKey1(b *testing.B) { - v := TestKey1{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgTestKey1(b *testing.B) { - v := TestKey1{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalTestKey1(b *testing.B) { - v := TestKey1{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeTestKey1(t *testing.T) { - v := TestKey1{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeTestKey1 Msgsize() is inaccurate") - } - - vn := TestKey1{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeTestKey1(b *testing.B) { - v := TestKey1{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeTestKey1(b *testing.B) { - v := TestKey1{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalTestKey2(t *testing.T) { - v := TestKey2{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgTestKey2(b *testing.B) { - v := TestKey2{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgTestKey2(b *testing.B) { - v := TestKey2{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalTestKey2(b *testing.B) { - v := TestKey2{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeTestKey2(t 
*testing.T) { - v := TestKey2{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeTestKey2 Msgsize() is inaccurate") - } - - vn := TestKey2{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeTestKey2(b *testing.B) { - v := TestKey2{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeTestKey2(b *testing.B) { - v := TestKey2{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalTestKey3(t *testing.T) { - v := TestKey3{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgTestKey3(b *testing.B) { - v := TestKey3{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgTestKey3(b *testing.B) { - v := TestKey3{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalTestKey3(b *testing.B) { - v := TestKey3{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeTestKey3(t *testing.T) { - v := TestKey3{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeTestKey3 Msgsize() is inaccurate") - } - - vn := TestKey3{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeTestKey3(b *testing.B) { - v := TestKey3{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeTestKey3(b *testing.B) { - v := TestKey3{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalTestKey4(t *testing.T) { - v := TestKey4{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over 
after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgTestKey4(b *testing.B) { - v := TestKey4{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgTestKey4(b *testing.B) { - v := TestKey4{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalTestKey4(b *testing.B) { - v := TestKey4{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeTestKey4(t *testing.T) { - v := TestKey4{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeTestKey4 Msgsize() is inaccurate") - } - - vn := TestKey4{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeTestKey4(b *testing.B) { - v := TestKey4{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeTestKey4(b *testing.B) { - v := TestKey4{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} diff --git a/pkg/apps/monitor/test/types.go b/pkg/apps/monitor/test/types.go deleted file mode 100644 index 03216e5..0000000 --- a/pkg/apps/monitor/test/types.go +++ /dev/null @@ -1,26 +0,0 @@ -package test - -import ( - "context" - "sync" - - "github.com/cobalt77/kubecc/pkg/apps/monitor" - "go.uber.org/atomic" -) - -type TestStoreCreator struct { - Count *atomic.Int32 - Stores sync.Map // map[string]monitor.KeyValueStore -} - -func (c *TestStoreCreator) NewStore(ctx context.Context) monitor.KeyValueStore { - store := monitor.InMemoryStoreCreator.NewStore(ctx) - c.Stores.Store(ctx, store) - i := int32(0) - c.Stores.Range(func(key, value interface{}) bool { - i++ - return true - }) - c.Count.Store(i) - return store -} diff --git a/pkg/apps/scheduler/broker.go b/pkg/apps/scheduler/broker.go new file mode 100644 index 0000000..c1bc3af --- /dev/null +++ b/pkg/apps/scheduler/broker.go @@ -0,0 +1,255 @@ +package scheduler + +import ( + "context" + "errors" + "io" + "sync" + + "github.com/cobalt77/kubecc/pkg/clients" + "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/metrics" + "github.com/cobalt77/kubecc/pkg/types" + "go.uber.org/atomic" + "go.uber.org/zap" +) + +type worker struct { + agent *Agent + taskQueue <-chan *types.CompileRequest +} + +func (w *worker) stream() { + for { + select { + case req := <-w.taskQueue: + w.agent.Stream.Send(req) + case <-w.agent.Context.Done(): + return + } + } +} + +type Broker struct { + srvContext context.Context + lg *zap.SugaredLogger + completedTasks *atomic.Int64 + failedTasks 
*atomic.Int64 + requestCount *atomic.Int64 + requestQueue chan *types.CompileRequest + responseQueue chan *types.CompileResponse + agents map[string]*Agent + consumerds map[string]*Consumerd + agentsMutex *sync.RWMutex + consumerdsMutex *sync.RWMutex + filter *ToolchainFilter + monClient types.MonitorClient + pendingRequests sync.Map // map[uuid string]Scheduler_StreamOutgoingTasksServer +} + +func NewBroker(ctx context.Context, monClient types.MonitorClient) *Broker { + return &Broker{ + srvContext: ctx, + lg: meta.Log(ctx), + completedTasks: atomic.NewInt64(0), + failedTasks: atomic.NewInt64(0), + requestCount: atomic.NewInt64(0), + requestQueue: make(chan *types.CompileRequest), + responseQueue: make(chan *types.CompileResponse), + agents: make(map[string]*Agent), + consumerds: make(map[string]*Consumerd), + agentsMutex: &sync.RWMutex{}, + consumerdsMutex: &sync.RWMutex{}, + filter: NewToolchainFilter(ctx), + monClient: monClient, + } +} + +func (b *Broker) watchToolchains(uuid string) chan *metrics.Toolchains { + ch := make(chan *metrics.Toolchains) + listener := clients.NewListener(b.srvContext, b.monClient) + listener.OnProviderAdded(func(ctx context.Context, id string) { + if id != uuid { + return + } + listener.OnValueChanged(id, func(tc *metrics.Toolchains) { + ch <- tc + }) + <-ctx.Done() + }) + return ch +} + +func (b *Broker) handleAgentStream( + srv types.Scheduler_StreamIncomingTasksServer, + filterOutput <-chan interface{}, +) { + go func() { + b.lg.Debug("Handling agent stream (send)") + defer b.lg.Debug("Agent stream done (send)") + for { + select { + case req, ok := <-filterOutput: + if !ok { + // Output closed + return + } + err := srv.Send(req.(*types.CompileRequest)) + if err != nil { + if errors.Is(err, io.EOF) { + b.lg.Debug(err) + } else { + b.lg.Error(err) + } + return + } + } + } + }() + go func() { + b.lg.Debug("Handling agent stream (recv)") + defer b.lg.Debug("Agent stream done (recv)") + + for { + resp, err := srv.Recv() + if err != nil { + if errors.Is(err, io.EOF) { + b.lg.Debug(err) + } else { + b.lg.Error(err) + } + return + } + b.responseQueue <- resp + } + }() +} + +func (b *Broker) handleConsumerdStream( + srv types.Scheduler_StreamOutgoingTasksServer, +) { + b.lg.Debug("Handling consumerd stream (recv)") + defer b.lg.Debug("Consumerd stream done (recv)") + + for { + req, err := srv.Recv() + if err != nil { + if errors.Is(err, io.EOF) { + b.lg.Debug(err) + } else { + b.lg.Error(err) + } + return + } + b.pendingRequests.Store(req.RequestID, srv) + if err := b.filter.Send(req); err != nil { + b.pendingRequests.Delete(req.RequestID) + b.responseQueue <- &types.CompileResponse{ + RequestID: req.RequestID, + CompileResult: types.CompileResponse_InternalError, + Data: &types.CompileResponse_Error{ + Error: err.Error(), + }, + } + } + } +} + +func (b *Broker) handleResponseQueue() { + for { + resp, open := <-b.responseQueue + if !open { + b.lg.Debug("Response queue closed") + return + } + if stream, ok := b.pendingRequests.LoadAndDelete(resp.RequestID); ok { + err := stream.(types.Scheduler_StreamOutgoingTasksServer).Send(resp) + if err != nil { + b.lg.With( + zap.Error(err), + ).Error("Error sending response") + } + } else { + b.lg.With( + "id", resp.RequestID, + ).Error("Received response for which there was no pending request") + } + } +} + +func (b *Broker) HandleIncomingTasksStream( + stream types.Scheduler_StreamIncomingTasksServer, +) { + b.agentsMutex.Lock() + defer b.agentsMutex.Unlock() + streamCtx := stream.Context() + id := meta.UUID(streamCtx) + 
tcChan := b.watchToolchains(id) + + b.lg.With(types.ShortID(id)).Info("Agent connected, waiting for toolchains") + tcs := <-tcChan + b.lg.With(types.ShortID(id)).Info("Toolchains received") + + agent := &Agent{ + remoteInfo: remoteInfoFromContext(streamCtx), + RWMutex: &sync.RWMutex{}, + Stream: stream, + Toolchains: tcs, + } + b.agents[agent.UUID] = agent + + b.agents[agent.UUID] = agent + filterOutput := b.filter.AddReceiver(agent) + b.handleAgentStream(stream, filterOutput) + + go func() { + <-streamCtx.Done() + + b.agentsMutex.RLock() + defer b.agentsMutex.RUnlock() + delete(b.agents, agent.UUID) + }() +} + +func (b *Broker) HandleOutgoingTasksStream( + stream types.Scheduler_StreamOutgoingTasksServer, +) { + b.consumerdsMutex.Lock() + defer b.consumerdsMutex.Unlock() + streamCtx := stream.Context() + id := meta.UUID(streamCtx) + tcChan := b.watchToolchains(id) + + b.lg.With(types.ShortID(id)).Info("Consumerd connected, waiting for toolchains") + tcs := <-tcChan + b.lg.With(types.ShortID(id)).Info("Toolchains received") + + cd := &Consumerd{ + remoteInfo: remoteInfoFromContext(streamCtx), + RWMutex: &sync.RWMutex{}, + Stream: stream, + Toolchains: tcs, + } + b.consumerds[cd.UUID] = cd + + b.consumerds[cd.UUID] = cd + b.filter.AddSender(cd) + b.handleConsumerdStream(stream) + + go func() { + select { + case tcs := <-tcChan: + b.filter.UpdateSenderToolchains(cd.UUID, tcs) + case <-streamCtx.Done(): + return + } + }() + + go func() { + <-streamCtx.Done() + + b.agentsMutex.RLock() + defer b.agentsMutex.RUnlock() + delete(b.agents, cd.UUID) + }() +} diff --git a/pkg/apps/scheduler/filter.go b/pkg/apps/scheduler/filter.go new file mode 100644 index 0000000..86f4b89 --- /dev/null +++ b/pkg/apps/scheduler/filter.go @@ -0,0 +1,251 @@ +package scheduler + +import ( + "context" + "errors" + "sync" + + "github.com/cobalt77/kubecc/pkg/metrics" + "github.com/cobalt77/kubecc/pkg/types" + md5simd "github.com/minio/md5-simd" + "go.uber.org/atomic" +) + +var ( + ErrNoAgents = errors.New("No available agents can run this task") + ErrStreamClosed = errors.New("Task stream closed") +) + +type sender struct { + cd *Consumerd + unfilteredInput <-chan interface{} +} + +type receiver struct { + agent *Agent + filteredOutput chan<- interface{} +} + +type taskChannel struct { + hash string + C chan interface{} + rxRefCount *atomic.Int32 + txRefCount *atomic.Int32 + channelCtx context.Context + cancel context.CancelFunc +} + +func (c *taskChannel) CanSend() bool { + return c.rxRefCount.Load() > 0 +} + +func (c *taskChannel) incRxRefCount() { + c.rxRefCount.Inc() +} + +func (c *taskChannel) decRxRefCount() { + if c.rxRefCount.Dec() <= 0 { + if c.txRefCount.Load() <= 0 { + c.cancel() + } + } +} + +func (c *taskChannel) incTxRefCount() { + c.txRefCount.Inc() +} + +func (c *taskChannel) decTxRefCount() { + if c.txRefCount.Dec() <= 0 { + if c.rxRefCount.Load() <= 0 { + c.cancel() + } + } +} + +func (c *taskChannel) AttachSender(s *sender) { + c.incTxRefCount() + defer c.decTxRefCount() + + for { + select { + case i := <-s.unfilteredInput: + select { + case c.C <- i: + default: + // Channel closed + return + } + case <-s.cd.Context.Done(): + return + } + } +} + +func (c *taskChannel) AttachReceiver(r *receiver) { + c.incRxRefCount() + defer c.decRxRefCount() + + for { + select { + case i, open := <-c.C: + if !open { + // Channel closed + return + } + r.filteredOutput <- i + case <-r.agent.Context.Done(): + return + } + } +} + +type ToolchainFilter struct { + ctx context.Context + senders map[string]*sender // key = 
uuid + receivers map[string]*receiver // key = uuid + channels map[string]*taskChannel // key = toolchain hash + channelsMutex *sync.RWMutex + sendersMutex *sync.RWMutex + receiversMutex *sync.RWMutex +} + +func NewToolchainFilter(ctx context.Context) *ToolchainFilter { + return &ToolchainFilter{ + ctx: ctx, + senders: make(map[string]*sender), + receivers: make(map[string]*receiver), + channels: make(map[string]*taskChannel), + channelsMutex: &sync.RWMutex{}, + sendersMutex: &sync.RWMutex{}, + receiversMutex: &sync.RWMutex{}, + } +} + +func tcHash(tc *types.Toolchain) string { + hasher := md5simd.StdlibHasher() + defer hasher.Close() + tc.Hash(hasher) + sum := hasher.Sum(nil) + return string(sum) +} + +func (f *ToolchainFilter) newTaskChannel(hash string) *taskChannel { + ctx, cancel := context.WithCancel(f.ctx) + taskCh := &taskChannel{ + hash: hash, + C: make(chan interface{}), + rxRefCount: atomic.NewInt32(0), + txRefCount: atomic.NewInt32(0), + cancel: cancel, + } + go func() { + <-ctx.Done() + // Ref count hit 0, clean up the channel to avoid a resource leak + f.channelsMutex.Lock() + defer f.channelsMutex.Unlock() + close(taskCh.C) + delete(f.channels, hash) + }() + return taskCh +} + +func (f *ToolchainFilter) taskChannelForToolchain(tc *types.Toolchain) *taskChannel { + f.channelsMutex.Lock() + defer f.channelsMutex.Unlock() + hash := tcHash(tc) + var taskCh *taskChannel + if c, ok := f.channels[hash]; ok { + taskCh = c + } else { + f.channels[hash] = f.newTaskChannel(hash) + } + return taskCh +} + +func (f *ToolchainFilter) AddSender(cd *Consumerd) { + f.sendersMutex.Lock() + defer f.sendersMutex.Unlock() + input := make(chan interface{}) + sender := &sender{ + cd: cd, + unfilteredInput: input, + } + f.senders[cd.UUID] = sender + for _, tc := range cd.Toolchains.GetItems() { + taskCh := f.taskChannelForToolchain(tc) + go taskCh.AttachSender(sender) + } +} + +func (f *ToolchainFilter) AddReceiver(agent *Agent) <-chan interface{} { + f.receiversMutex.Lock() + defer f.receiversMutex.Unlock() + output := make(chan interface{}) + receiver := &receiver{ + agent: agent, + filteredOutput: output, + } + f.receivers[agent.UUID] = receiver + for _, tc := range agent.Toolchains.GetItems() { + taskCh := f.taskChannelForToolchain(tc) + go taskCh.AttachReceiver(receiver) + } + return output +} + +func (f *ToolchainFilter) UpdateSenderToolchains( + uuid string, + tcs *metrics.Toolchains, +) { + f.sendersMutex.Lock() + defer f.sendersMutex.Unlock() + sender, ok := f.senders[uuid] + if !ok { + return + } + oldToolchains := sender.cd.Toolchains + newToolchains := tcs + + for _, oldTc := range oldToolchains.GetItems() { + stillExists := false + for _, newTc := range newToolchains.GetItems() { + if oldTc.EquivalentTo(newTc) { + stillExists = true + break + } + } + if !stillExists { + f.taskChannelForToolchain(oldTc).cancel() + } + } + for _, newTc := range newToolchains.GetItems() { + isNew := true + for _, oldTc := range oldToolchains.GetItems() { + if newTc.EquivalentTo(oldTc) { + isNew = false + break + } + } + if isNew { + defer func() { + f.taskChannelForToolchain(newTc).AttachSender(sender) + }() + } + } + + sender.cd.Toolchains = newToolchains +} + +func (f *ToolchainFilter) Send(req *types.CompileRequest) error { + taskCh := f.taskChannelForToolchain(req.GetToolchain()) + if taskCh.rxRefCount.Load() == 0 { + return ErrNoAgents + } + select { + case taskCh.C <- req: + return nil + default: + return ErrStreamClosed + } +} diff --git a/pkg/apps/scheduler/metrics/metrics.go 
b/pkg/apps/scheduler/metrics/metrics.go deleted file mode 100644 index 5065108..0000000 --- a/pkg/apps/scheduler/metrics/metrics.go +++ /dev/null @@ -1,74 +0,0 @@ -package metrics - -//go:generate msgp - -type TasksCompletedTotal struct { - Total int64 `msg:"total"` -} - -func (TasksCompletedTotal) Key() string { - return "TasksCompletedTotal" -} - -type TasksFailedTotal struct { - Total int64 `msg:"total"` -} - -func (TasksFailedTotal) Key() string { - return "TasksFailedTotal" -} - -type SchedulingRequestsTotal struct { - Total int64 `msg:"total"` -} - -func (SchedulingRequestsTotal) Key() string { - return "SchedulingRequestsTotal" -} - -type AgentCount struct { - Count int32 `msg:"count"` -} - -func (AgentCount) Key() string { - return "AgentCount" -} - -type CdCount struct { - Count int32 `msg:"count"` -} - -func (CdCount) Key() string { - return "CdCount" -} - -type Identifier struct { - UUID string `msg:"uuid"` -} - -type AgentWeight struct { - Identifier - Value float64 `msg:"agentWeight"` -} - -func (m AgentWeight) Key() string { - return m.Identifier.UUID + ".AgentWeight" -} - -type AgentTasksTotal struct { - Identifier - Total int64 `msg:"total"` -} - -func (m AgentTasksTotal) Key() string { - return m.Identifier.UUID + ".AgentTasksTotal" -} - -type CdTasksTotal struct { - Identifier - Total int64 `msg:"total"` -} - -func (m CdTasksTotal) Key() string { - return m.Identifier.UUID + ".CdTasksTotal" -} diff --git a/pkg/apps/scheduler/metrics/metrics_gen.go b/pkg/apps/scheduler/metrics/metrics_gen.go deleted file mode 100644 index 84d4bb9..0000000 --- a/pkg/apps/scheduler/metrics/metrics_gen.go +++ /dev/null @@ -1,1174 +0,0 @@ -package metrics - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. - -import ( - "github.com/tinylib/msgp/msgp" -) - -// DecodeMsg implements msgp.Decodable -func (z *AgentCount) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "count": - z.Count, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z AgentCount) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "count" - err = en.Append(0x81, 0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74) - if err != nil { - return - } - err = en.WriteInt32(z.Count) - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z AgentCount) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "count" - o = append(o, 0x81, 0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74) - o = msgp.AppendInt32(o, z.Count) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *AgentCount) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "count": - z.Count, bts, err = 
msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z AgentCount) Msgsize() (s int) { - s = 1 + 6 + msgp.Int32Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *AgentTasksTotal) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Identifier": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - for zb0002 > 0 { - zb0002-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - switch msgp.UnsafeString(field) { - case "uuid": - z.Identifier.UUID, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Identifier", "UUID") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - } - } - case "total": - z.Total, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *AgentTasksTotal) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "Identifier" - err = en.Append(0x82, 0xaa, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72) - if err != nil { - return - } - // map header, size 1 - // write "uuid" - err = en.Append(0x81, 0xa4, 0x75, 0x75, 0x69, 0x64) - if err != nil { - return - } - err = en.WriteString(z.Identifier.UUID) - if err != nil { - err = msgp.WrapError(err, "Identifier", "UUID") - return - } - // write "total" - err = en.Append(0xa5, 0x74, 0x6f, 0x74, 0x61, 0x6c) - if err != nil { - return - } - err = en.WriteInt64(z.Total) - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *AgentTasksTotal) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "Identifier" - o = append(o, 0x82, 0xaa, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72) - // map header, size 1 - // string "uuid" - o = append(o, 0x81, 0xa4, 0x75, 0x75, 0x69, 0x64) - o = msgp.AppendString(o, z.Identifier.UUID) - // string "total" - o = append(o, 0xa5, 0x74, 0x6f, 0x74, 0x61, 0x6c) - o = msgp.AppendInt64(o, z.Total) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *AgentTasksTotal) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Identifier": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - for zb0002 > 0 { - zb0002-- - 
field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - switch msgp.UnsafeString(field) { - case "uuid": - z.Identifier.UUID, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Identifier", "UUID") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - } - } - case "total": - z.Total, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *AgentTasksTotal) Msgsize() (s int) { - s = 1 + 11 + 1 + 5 + msgp.StringPrefixSize + len(z.Identifier.UUID) + 6 + msgp.Int64Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *AgentWeight) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Identifier": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - for zb0002 > 0 { - zb0002-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - switch msgp.UnsafeString(field) { - case "uuid": - z.Identifier.UUID, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Identifier", "UUID") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - } - } - case "agentWeight": - z.Value, err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "Value") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *AgentWeight) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "Identifier" - err = en.Append(0x82, 0xaa, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72) - if err != nil { - return - } - // map header, size 1 - // write "uuid" - err = en.Append(0x81, 0xa4, 0x75, 0x75, 0x69, 0x64) - if err != nil { - return - } - err = en.WriteString(z.Identifier.UUID) - if err != nil { - err = msgp.WrapError(err, "Identifier", "UUID") - return - } - // write "agentWeight" - err = en.Append(0xab, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74) - if err != nil { - return - } - err = en.WriteFloat64(z.Value) - if err != nil { - err = msgp.WrapError(err, "Value") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *AgentWeight) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "Identifier" - o = append(o, 0x82, 0xaa, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72) - // map header, size 1 - // string "uuid" - o = append(o, 0x81, 0xa4, 0x75, 0x75, 0x69, 0x64) - o = msgp.AppendString(o, z.Identifier.UUID) - // string "agentWeight" - o = append(o, 0xab, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74) - o = msgp.AppendFloat64(o, z.Value) - return -} - -// UnmarshalMsg implements 
msgp.Unmarshaler -func (z *AgentWeight) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Identifier": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - for zb0002 > 0 { - zb0002-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - switch msgp.UnsafeString(field) { - case "uuid": - z.Identifier.UUID, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Identifier", "UUID") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - } - } - case "agentWeight": - z.Value, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Value") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *AgentWeight) Msgsize() (s int) { - s = 1 + 11 + 1 + 5 + msgp.StringPrefixSize + len(z.Identifier.UUID) + 12 + msgp.Float64Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *CdCount) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "count": - z.Count, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z CdCount) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "count" - err = en.Append(0x81, 0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74) - if err != nil { - return - } - err = en.WriteInt32(z.Count) - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z CdCount) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "count" - o = append(o, 0x81, 0xa5, 0x63, 0x6f, 0x75, 0x6e, 0x74) - o = msgp.AppendInt32(o, z.Count) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *CdCount) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "count": - z.Count, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Count") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an 
upper bound estimate of the number of bytes occupied by the serialized message -func (z CdCount) Msgsize() (s int) { - s = 1 + 6 + msgp.Int32Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *CdTasksTotal) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Identifier": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - for zb0002 > 0 { - zb0002-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - switch msgp.UnsafeString(field) { - case "uuid": - z.Identifier.UUID, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Identifier", "UUID") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - } - } - case "total": - z.Total, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *CdTasksTotal) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "Identifier" - err = en.Append(0x82, 0xaa, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72) - if err != nil { - return - } - // map header, size 1 - // write "uuid" - err = en.Append(0x81, 0xa4, 0x75, 0x75, 0x69, 0x64) - if err != nil { - return - } - err = en.WriteString(z.Identifier.UUID) - if err != nil { - err = msgp.WrapError(err, "Identifier", "UUID") - return - } - // write "total" - err = en.Append(0xa5, 0x74, 0x6f, 0x74, 0x61, 0x6c) - if err != nil { - return - } - err = en.WriteInt64(z.Total) - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *CdTasksTotal) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "Identifier" - o = append(o, 0x82, 0xaa, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72) - // map header, size 1 - // string "uuid" - o = append(o, 0x81, 0xa4, 0x75, 0x75, 0x69, 0x64) - o = msgp.AppendString(o, z.Identifier.UUID) - // string "total" - o = append(o, 0xa5, 0x74, 0x6f, 0x74, 0x61, 0x6c) - o = msgp.AppendInt64(o, z.Total) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *CdTasksTotal) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Identifier": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - for zb0002 > 0 { - zb0002-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - switch msgp.UnsafeString(field) { - case "uuid": - z.Identifier.UUID, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = 
msgp.WrapError(err, "Identifier", "UUID") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "Identifier") - return - } - } - } - case "total": - z.Total, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *CdTasksTotal) Msgsize() (s int) { - s = 1 + 11 + 1 + 5 + msgp.StringPrefixSize + len(z.Identifier.UUID) + 6 + msgp.Int64Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *Identifier) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "uuid": - z.UUID, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "UUID") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z Identifier) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "uuid" - err = en.Append(0x81, 0xa4, 0x75, 0x75, 0x69, 0x64) - if err != nil { - return - } - err = en.WriteString(z.UUID) - if err != nil { - err = msgp.WrapError(err, "UUID") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z Identifier) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "uuid" - o = append(o, 0x81, 0xa4, 0x75, 0x75, 0x69, 0x64) - o = msgp.AppendString(o, z.UUID) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *Identifier) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "uuid": - z.UUID, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "UUID") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z Identifier) Msgsize() (s int) { - s = 1 + 5 + msgp.StringPrefixSize + len(z.UUID) - return -} - -// DecodeMsg implements msgp.Decodable -func (z *SchedulingRequestsTotal) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "total": - z.Total, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z 
SchedulingRequestsTotal) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "total" - err = en.Append(0x81, 0xa5, 0x74, 0x6f, 0x74, 0x61, 0x6c) - if err != nil { - return - } - err = en.WriteInt64(z.Total) - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z SchedulingRequestsTotal) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "total" - o = append(o, 0x81, 0xa5, 0x74, 0x6f, 0x74, 0x61, 0x6c) - o = msgp.AppendInt64(o, z.Total) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *SchedulingRequestsTotal) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "total": - z.Total, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z SchedulingRequestsTotal) Msgsize() (s int) { - s = 1 + 6 + msgp.Int64Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *TasksCompletedTotal) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "total": - z.Total, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z TasksCompletedTotal) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "total" - err = en.Append(0x81, 0xa5, 0x74, 0x6f, 0x74, 0x61, 0x6c) - if err != nil { - return - } - err = en.WriteInt64(z.Total) - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z TasksCompletedTotal) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "total" - o = append(o, 0x81, 0xa5, 0x74, 0x6f, 0x74, 0x61, 0x6c) - o = msgp.AppendInt64(o, z.Total) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *TasksCompletedTotal) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "total": - z.Total, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - 
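Alongside the removal of these msgp scheduler metrics, connection bookkeeping now lives in the Broker introduced above. Note that HandleIncomingTasksStream and HandleOutgoingTasksStream each insert the new entry into their map twice, and the consumerd cleanup goroutine takes agentsMutex with RLock and deletes from b.agents rather than b.consumerds. A minimal sketch of a symmetric register/unregister pair, assuming the Broker fields from broker.go (addConsumerd and removeConsumerd are illustrative names, not part of the patch):

// addConsumerd registers a connected consumerd exactly once.
func (b *Broker) addConsumerd(cd *Consumerd) {
	b.consumerdsMutex.Lock()
	defer b.consumerdsMutex.Unlock()
	b.consumerds[cd.UUID] = cd
}

// removeConsumerd drops a consumerd once its stream context ends. Deleting
// from the map is a write, so this takes the full lock rather than RLock,
// and it targets b.consumerds, not b.agents.
func (b *Broker) removeConsumerd(uuid string) {
	b.consumerdsMutex.Lock()
	defer b.consumerdsMutex.Unlock()
	delete(b.consumerds, uuid)
}

The agent path would mirror this pair using agents and agentsMutex.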
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z TasksCompletedTotal) Msgsize() (s int) { - s = 1 + 6 + msgp.Int64Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *TasksFailedTotal) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "total": - z.Total, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z TasksFailedTotal) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "total" - err = en.Append(0x81, 0xa5, 0x74, 0x6f, 0x74, 0x61, 0x6c) - if err != nil { - return - } - err = en.WriteInt64(z.Total) - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z TasksFailedTotal) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "total" - o = append(o, 0x81, 0xa5, 0x74, 0x6f, 0x74, 0x61, 0x6c) - o = msgp.AppendInt64(o, z.Total) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *TasksFailedTotal) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "total": - z.Total, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z TasksFailedTotal) Msgsize() (s int) { - s = 1 + 6 + msgp.Int64Size - return -} diff --git a/pkg/apps/scheduler/metrics/metrics_gen_test.go b/pkg/apps/scheduler/metrics/metrics_gen_test.go deleted file mode 100644 index 61b2a30..0000000 --- a/pkg/apps/scheduler/metrics/metrics_gen_test.go +++ /dev/null @@ -1,1027 +0,0 @@ -package metrics - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
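The broker also replaces the per-UUID msgp counters with direct request/response correlation: handleConsumerdStream stores the originating stream in pendingRequests keyed by RequestID, and handleResponseQueue looks it up with LoadAndDelete to route the response back. A self-contained sketch of that pattern, with the stream type reduced to a small interface (respSender stands in for types.Scheduler_StreamOutgoingTasksServer, whose Send takes *types.CompileResponse):

package scheduler

import "sync"

// respSender abstracts the consumerd-facing stream used to return results.
type respSender interface {
	Send(resp interface{}) error
}

// pending correlates request IDs with the stream waiting on the response,
// mirroring Broker.pendingRequests.
type pending struct{ m sync.Map }

// Track remembers which stream issued requestID.
func (p *pending) Track(requestID string, s respSender) {
	p.m.Store(requestID, s)
}

// Resolve sends resp back on whichever stream issued requestID and reports
// false if nothing was waiting, e.g. the consumerd already disconnected.
func (p *pending) Resolve(requestID string, resp interface{}) bool {
	v, ok := p.m.LoadAndDelete(requestID)
	if !ok {
		return false
	}
	return v.(respSender).Send(resp) == nil
}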
- -import ( - "bytes" - "testing" - - "github.com/tinylib/msgp/msgp" -) - -func TestMarshalUnmarshalAgentCount(t *testing.T) { - v := AgentCount{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgAgentCount(b *testing.B) { - v := AgentCount{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgAgentCount(b *testing.B) { - v := AgentCount{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalAgentCount(b *testing.B) { - v := AgentCount{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeAgentCount(t *testing.T) { - v := AgentCount{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeAgentCount Msgsize() is inaccurate") - } - - vn := AgentCount{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeAgentCount(b *testing.B) { - v := AgentCount{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeAgentCount(b *testing.B) { - v := AgentCount{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalAgentTasksTotal(t *testing.T) { - v := AgentTasksTotal{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgAgentTasksTotal(b *testing.B) { - v := AgentTasksTotal{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgAgentTasksTotal(b *testing.B) { - v := AgentTasksTotal{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalAgentTasksTotal(b *testing.B) { - v := AgentTasksTotal{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) 
- if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeAgentTasksTotal(t *testing.T) { - v := AgentTasksTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeAgentTasksTotal Msgsize() is inaccurate") - } - - vn := AgentTasksTotal{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeAgentTasksTotal(b *testing.B) { - v := AgentTasksTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeAgentTasksTotal(b *testing.B) { - v := AgentTasksTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalAgentWeight(t *testing.T) { - v := AgentWeight{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgAgentWeight(b *testing.B) { - v := AgentWeight{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgAgentWeight(b *testing.B) { - v := AgentWeight{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalAgentWeight(b *testing.B) { - v := AgentWeight{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeAgentWeight(t *testing.T) { - v := AgentWeight{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeAgentWeight Msgsize() is inaccurate") - } - - vn := AgentWeight{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeAgentWeight(b *testing.B) { - v := AgentWeight{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeAgentWeight(b *testing.B) { - v := AgentWeight{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalCdCount(t *testing.T) { - v := CdCount{} - bts, err := 
v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgCdCount(b *testing.B) { - v := CdCount{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgCdCount(b *testing.B) { - v := CdCount{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalCdCount(b *testing.B) { - v := CdCount{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeCdCount(t *testing.T) { - v := CdCount{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeCdCount Msgsize() is inaccurate") - } - - vn := CdCount{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeCdCount(b *testing.B) { - v := CdCount{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeCdCount(b *testing.B) { - v := CdCount{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalCdTasksTotal(t *testing.T) { - v := CdTasksTotal{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgCdTasksTotal(b *testing.B) { - v := CdTasksTotal{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgCdTasksTotal(b *testing.B) { - v := CdTasksTotal{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalCdTasksTotal(b *testing.B) { - v := CdTasksTotal{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeCdTasksTotal(t *testing.T) { - v := CdTasksTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: 
TestEncodeDecodeCdTasksTotal Msgsize() is inaccurate") - } - - vn := CdTasksTotal{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeCdTasksTotal(b *testing.B) { - v := CdTasksTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeCdTasksTotal(b *testing.B) { - v := CdTasksTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalIdentifier(t *testing.T) { - v := Identifier{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgIdentifier(b *testing.B) { - v := Identifier{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgIdentifier(b *testing.B) { - v := Identifier{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalIdentifier(b *testing.B) { - v := Identifier{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeIdentifier(t *testing.T) { - v := Identifier{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeIdentifier Msgsize() is inaccurate") - } - - vn := Identifier{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeIdentifier(b *testing.B) { - v := Identifier{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeIdentifier(b *testing.B) { - v := Identifier{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalSchedulingRequestsTotal(t *testing.T) { - v := SchedulingRequestsTotal{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - 
left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgSchedulingRequestsTotal(b *testing.B) { - v := SchedulingRequestsTotal{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgSchedulingRequestsTotal(b *testing.B) { - v := SchedulingRequestsTotal{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalSchedulingRequestsTotal(b *testing.B) { - v := SchedulingRequestsTotal{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeSchedulingRequestsTotal(t *testing.T) { - v := SchedulingRequestsTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeSchedulingRequestsTotal Msgsize() is inaccurate") - } - - vn := SchedulingRequestsTotal{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeSchedulingRequestsTotal(b *testing.B) { - v := SchedulingRequestsTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeSchedulingRequestsTotal(b *testing.B) { - v := SchedulingRequestsTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalTasksCompletedTotal(t *testing.T) { - v := TasksCompletedTotal{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgTasksCompletedTotal(b *testing.B) { - v := TasksCompletedTotal{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgTasksCompletedTotal(b *testing.B) { - v := TasksCompletedTotal{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalTasksCompletedTotal(b *testing.B) { - v := TasksCompletedTotal{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeTasksCompletedTotal(t *testing.T) { - v := TasksCompletedTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, 
&v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeTasksCompletedTotal Msgsize() is inaccurate") - } - - vn := TasksCompletedTotal{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeTasksCompletedTotal(b *testing.B) { - v := TasksCompletedTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeTasksCompletedTotal(b *testing.B) { - v := TasksCompletedTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalTasksFailedTotal(t *testing.T) { - v := TasksFailedTotal{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgTasksFailedTotal(b *testing.B) { - v := TasksFailedTotal{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgTasksFailedTotal(b *testing.B) { - v := TasksFailedTotal{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalTasksFailedTotal(b *testing.B) { - v := TasksFailedTotal{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeTasksFailedTotal(t *testing.T) { - v := TasksFailedTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeTasksFailedTotal Msgsize() is inaccurate") - } - - vn := TasksFailedTotal{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeTasksFailedTotal(b *testing.B) { - v := TasksFailedTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeTasksFailedTotal(b *testing.B) { - v := TasksFailedTotal{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} diff --git a/pkg/apps/scheduler/scheduler.go b/pkg/apps/scheduler/scheduler.go index 9e10331..fe4fb15 100644 --- a/pkg/apps/scheduler/scheduler.go 
+++ b/pkg/apps/scheduler/scheduler.go @@ -12,9 +12,7 @@ import ( "github.com/smallnest/weighted" "go.uber.org/atomic" "go.uber.org/zap" - "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/status" ) @@ -106,9 +104,9 @@ func (s *Scheduler) Schedule( } } agent := next.(*Agent) - agentClient := agent.Client + agentStream := agent.Stream s.wLock.Unlock() - response, err := agentClient.Compile(ctx, req, grpc.UseCompressor(gzip.Name)) + err := agentStream.Send(req) if status.Code(err) == codes.Unavailable { s.lg.With( zap.Error(err), @@ -149,7 +147,7 @@ func (s *Scheduler) AgentConnected(ctx context.Context) error { return status.Error(codes.AlreadyExists, "Agent already connected") } var err error - agent.Client, err = s.agentDialer.Dial(ctx) + agent.Stream, err = s.agentDialer.Dial(ctx) if err != nil { return status.Error(codes.Internal, fmt.Sprintf("Error dialing agent: %s", err.Error())) diff --git a/pkg/apps/scheduler/server.go b/pkg/apps/scheduler/server.go index c1323dd..8e7d5ea 100644 --- a/pkg/apps/scheduler/server.go +++ b/pkg/apps/scheduler/server.go @@ -2,11 +2,10 @@ package scheduler import ( "context" - "errors" - "io" "time" scmetrics "github.com/cobalt77/kubecc/pkg/apps/scheduler/metrics" + "github.com/cobalt77/kubecc/pkg/clients" "github.com/cobalt77/kubecc/pkg/meta" "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/servers" @@ -14,7 +13,6 @@ import ( "github.com/cobalt77/kubecc/pkg/util" "go.uber.org/atomic" "go.uber.org/zap" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" @@ -31,6 +29,7 @@ type schedulerServer struct { scheduler *Scheduler metricsProvider metrics.Provider hashSrv *util.HashServer + broker *Broker agentCount *atomic.Int32 consumerdCount *atomic.Int32 @@ -83,12 +82,13 @@ func NewSchedulerServer( scheduler: NewScheduler(ctx, options.schedulerOptions...), agentCount: atomic.NewInt32(0), consumerdCount: atomic.NewInt32(0), + broker: NewBroker(ctx, options.monClient), hashSrv: util.NewHashServer(), } if options.monClient != nil { - srv.metricsProvider = metrics.NewMonitorProvider( - ctx, options.monClient, metrics.Discard) + srv.metricsProvider = clients.NewMonitorProvider( + ctx, options.monClient, clients.Discard) } else { srv.metricsProvider = metrics.NewNoopProvider() } @@ -161,6 +161,7 @@ func (s *schedulerServer) Compile( ).Error("Error querying cache server") } } + resp, err := s.scheduler.Schedule(ctx, req) if err == nil && resp.CompileResult == types.CompileResponse_Success && @@ -170,81 +171,120 @@ func (s *schedulerServer) Compile( return resp, err } -func (s *schedulerServer) handleClientConnection(srv grpc.ServerStream) error { - done := make(chan error) - ctx := srv.Context() - go func() { - defer close(done) - for { - metadata := &types.Metadata{} - err := srv.RecvMsg(metadata) - if err != nil { - if errors.Is(err, io.EOF) { - s.lg.Debug(err) - done <- nil - } else { - s.lg.Error(err) - done <- err - } - return - } - if err := s.scheduler.SetToolchains( - ctx, metadata.Toolchains.GetItems()); err != nil { - s.lg.Error(err) - } - } - }() - return <-done -} +// func (s *schedulerServer) handleClientConnection(srv grpc.ServerStream) error { +// done := make(chan error) +// ctx := srv.Context() +// go func() { +// defer close(done) +// for { +// metadata := &types.Metadata{} +// err := srv.RecvMsg(metadata) +// if err != nil { +// if errors.Is(err, io.EOF) { +// s.lg.Debug(err) +// done <- nil 
+// } else { +// s.lg.Error(err) +// done <- err +// } +// return +// } +// if err := s.scheduler.SetToolchains( +// ctx, metadata.Toolchains.GetItems()); err != nil { +// s.lg.Error(err) +// } +// } +// }() +// return <-done +// } + +// func (s *schedulerServer) ConnectAgent( +// srv types.Scheduler_ConnectAgentServer, +// ) error { +// ctx := srv.Context() +// if err := meta.CheckContext(ctx); err != nil { +// s.lg.Error(err) +// return err +// } +// if err := s.scheduler.AgentConnected(ctx); err != nil { +// s.lg.Error(err) +// return err +// } -func (s *schedulerServer) ConnectAgent( - srv types.Scheduler_ConnectAgentServer, +// s.metricsProvider.Post(&scmetrics.AgentCount{ +// Count: s.agentCount.Inc(), +// }) +// defer func() { +// s.metricsProvider.Post(&scmetrics.AgentCount{ +// Count: s.agentCount.Dec(), +// }) +// }() + +// return s.handleClientConnection(srv) +// } + +// func (s *schedulerServer) StreamMetadata( +// srv types.Scheduler_StreamMetadataServer, +// ) error { +// ctx := srv.Context() +// if err := meta.CheckContext(ctx); err != nil { +// s.lg.Error(err) +// return err +// } + +// if err := s.scheduler.ConsumerdConnected(ctx); err != nil { +// s.lg.Error(err) +// return err +// } + +// s.metricsProvider.Post(&scmetrics.CdCount{ +// Count: s.consumerdCount.Inc(), +// }) +// defer func() { +// s.metricsProvider.Post(&scmetrics.CdCount{ +// Count: s.consumerdCount.Dec(), +// }) +// }() + +// return s.handleClientConnection(srv) +// } + +func (s *schedulerServer) StreamIncomingTasks( + srv types.Scheduler_StreamIncomingTasksServer, ) error { ctx := srv.Context() if err := meta.CheckContext(ctx); err != nil { s.lg.Error(err) return err } - if err := s.scheduler.AgentConnected(ctx); err != nil { - s.lg.Error(err) - return err - } - s.metricsProvider.Post(&scmetrics.AgentCount{ - Count: s.agentCount.Inc(), - }) - defer func() { - s.metricsProvider.Post(&scmetrics.AgentCount{ - Count: s.agentCount.Dec(), - }) - }() + s.broker.HandleIncomingTasksStream(srv) + + select { + case <-srv.Context().Done(): + case <-s.srvContext.Done(): + } - return s.handleClientConnection(srv) + return nil } -func (s *schedulerServer) ConnectConsumerd( - srv types.Scheduler_ConnectConsumerdServer, +func (s *schedulerServer) StreamOutgoingTasks( + srv types.Scheduler_StreamOutgoingTasksServer, ) error { ctx := srv.Context() if err := meta.CheckContext(ctx); err != nil { s.lg.Error(err) return err } - if err := s.scheduler.ConsumerdConnected(ctx); err != nil { - s.lg.Error(err) - return err - } - s.metricsProvider.Post(&scmetrics.CdCount{ - Count: s.consumerdCount.Inc(), - }) - defer func() { - s.metricsProvider.Post(&scmetrics.CdCount{ - Count: s.consumerdCount.Dec(), - }) - }() + s.broker.HandleOutgoingTasksStream(srv) + + select { + case <-srv.Context().Done(): + case <-s.srvContext.Done(): + } - return s.handleClientConnection(srv) + return nil } func (s *schedulerServer) postCounts() { @@ -265,14 +305,14 @@ func (s *schedulerServer) postTotals() { func (s *schedulerServer) postAgentStats() { for _, stat := range <-s.scheduler.CalcAgentStats() { - s.metricsProvider.Post(metrics.WithContext(stat.agentTasksTotal, stat.agentCtx)) - s.metricsProvider.Post(metrics.WithContext(stat.agentWeight, stat.agentCtx)) + s.metricsProvider.PostContext(stat.agentTasksTotal, stat.agentCtx) + s.metricsProvider.PostContext(stat.agentWeight, stat.agentCtx) } } func (s *schedulerServer) postConsumerdStats() { for _, stat := range <-s.scheduler.CalcConsumerdStats() { - 
s.metricsProvider.Post(metrics.WithContext(stat.cdRemoteTasksTotal, stat.consumerdCtx)) + s.metricsProvider.PostContext(stat.cdRemoteTasksTotal, stat.consumerdCtx) } } diff --git a/pkg/apps/scheduler/types.go b/pkg/apps/scheduler/types.go index 3c69e2b..9e8214d 100644 --- a/pkg/apps/scheduler/types.go +++ b/pkg/apps/scheduler/types.go @@ -6,6 +6,7 @@ import ( scmetrics "github.com/cobalt77/kubecc/pkg/apps/scheduler/metrics" "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/types" "go.uber.org/atomic" ) @@ -13,23 +14,25 @@ import ( type remoteInfo struct { UUID string Context context.Context - UsageLimits *types.UsageLimits + UsageLimits *metrics.UsageLimits SystemInfo *types.SystemInfo - Toolchains []*types.Toolchain CompletedTasks *atomic.Int64 } type Consumerd struct { remoteInfo *sync.RWMutex + + Toolchains *metrics.Toolchains + Stream types.Scheduler_StreamOutgoingTasksServer } type Agent struct { remoteInfo *sync.RWMutex - Client types.Scheduler_StreamTasksServer - QueueStatus types.QueueStatus + Toolchains *metrics.Toolchains + Stream types.Scheduler_StreamIncomingTasksServer } func remoteInfoFromContext(ctx context.Context) remoteInfo { @@ -41,23 +44,6 @@ func remoteInfoFromContext(ctx context.Context) remoteInfo { } } -func (a Agent) Weight() int32 { - if a.UsageLimits == nil { - // Use a default value of the number of cpu threads - // until the agent posts its own usage limits - return a.SystemInfo.CpuThreads - } - switch a.QueueStatus { - case types.Available, types.Queueing: - return a.UsageLimits.GetConcurrentProcessLimit() - case types.QueuePressure: - return a.UsageLimits.GetConcurrentProcessLimit() / 2 - case types.QueueFull: - return 0 - } - return 0 -} - type agentStats struct { agentCtx context.Context agentTasksTotal *scmetrics.AgentTasksTotal diff --git a/pkg/metrics/keyedbuffer.go b/pkg/clients/keyedbuffer.go similarity index 75% rename from pkg/metrics/keyedbuffer.go rename to pkg/clients/keyedbuffer.go index 8ac8845..478d547 100644 --- a/pkg/metrics/keyedbuffer.go +++ b/pkg/clients/keyedbuffer.go @@ -1,11 +1,14 @@ -package metrics +package clients import ( "context" "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/servers" "github.com/cobalt77/kubecc/pkg/types" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" ) type keyedBufferMonitorProvider struct { @@ -13,8 +16,8 @@ type keyedBufferMonitorProvider struct { enableWaitRx chan bool } -func runWaitReceiver(postQueue chan KeyedMetric, enableQueue <-chan bool) { - latestMessages := map[string]KeyedMetric{} +func runWaitReceiver(postQueue chan proto.Message, enableQueue <-chan bool) { + latestMessages := map[string]proto.Message{} for { if e := <-enableQueue; !e { // Already disabled @@ -27,7 +30,11 @@ func runWaitReceiver(postQueue chan KeyedMetric, enableQueue <-chan bool) { // Post queue closed return } - latestMessages[m.Key()] = m + any, err := anypb.New(m) + if err != nil { + panic(err) + } + latestMessages[any.GetTypeUrl()] = m case e := <-enableQueue: if e { // Already enabled @@ -48,14 +55,14 @@ func runWaitReceiver(postQueue chan KeyedMetric, enableQueue <-chan bool) { func NewKeyedBufferMonitorProvider( ctx context.Context, client types.MonitorClient, -) Provider { +) metrics.Provider { provider := &keyedBufferMonitorProvider{ monitorProvider: monitorProvider{ ctx: ctx, lg: meta.Log(ctx), monClient: client, // Buffer enough for the WaitReceiver to 
keep up - postQueue: make(chan KeyedMetric, 10), + postQueue: make(chan proto.Message, 10), queueStrategy: Block, }, enableWaitRx: make(chan bool), diff --git a/pkg/metrics/listener.go b/pkg/clients/listener.go similarity index 79% rename from pkg/metrics/listener.go rename to pkg/clients/listener.go index 3abb73a..ea0865a 100644 --- a/pkg/metrics/listener.go +++ b/pkg/clients/listener.go @@ -1,4 +1,4 @@ -package metrics +package clients import ( "context" @@ -7,16 +7,17 @@ import ( "reflect" "sync" - mmetrics "github.com/cobalt77/kubecc/pkg/apps/monitor/metrics" "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/servers" "github.com/cobalt77/kubecc/pkg/types" - "github.com/cobalt77/kubecc/pkg/util" "github.com/tinylib/msgp/msgp" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" ) type monitorListener struct { @@ -32,7 +33,7 @@ func NewListener( ctx context.Context, client types.MonitorClient, streamOpts ...servers.StreamManagerOption, -) Listener { +) metrics.Listener { listener := &monitorListener{ ctx: ctx, lg: meta.Log(ctx), @@ -45,7 +46,7 @@ func NewListener( } func (l *monitorListener) OnProviderAdded(handler func(context.Context, string)) { - doUpdate := func(providers *mmetrics.Providers) { + doUpdate := func(providers *metrics.Providers) { for uuid := range providers.Items { if _, ok := l.knownProviders[uuid]; !ok { pctx, cancel := context.WithCancel(context.Background()) @@ -62,30 +63,21 @@ func (l *monitorListener) OnProviderAdded(handler func(context.Context, string)) } } - l.OnValueChanged(mmetrics.MetaBucket, func(providers *mmetrics.Providers) { + l.OnValueChanged(metrics.MetaBucket, func(providers *metrics.Providers) { l.providersMutex.Lock() defer l.providersMutex.Unlock() doUpdate(providers) - }).OrExpired(func() RetryOptions { + }).OrExpired(func() metrics.RetryOptions { l.providersMutex.Lock() defer l.providersMutex.Unlock() - doUpdate(&mmetrics.Providers{ - Items: make(map[string]mmetrics.ProviderInfo), - }) - return Retry + doUpdate(&metrics.Providers{}) + return metrics.Retry }) } -type RetryOptions uint32 - -const ( - NoRetry RetryOptions = iota - Retry -) - type changeListener struct { ctx context.Context - expiredHandler func() RetryOptions + expiredHandler func() metrics.RetryOptions handler reflect.Value ehMutex *sync.Mutex monClient types.MonitorClient @@ -93,25 +85,24 @@ type changeListener struct { argType reflect.Type } -func (cl *changeListener) HandleStream(stream grpc.ClientStream) error { +func (cl *changeListener) HandleStream(clientStream grpc.ClientStream) error { + stream := clientStream.(types.Monitor_ListenClient) lg := meta.Log(cl.ctx) argValue := reflect.New(cl.argType) - if _, ok := argValue.Interface().(msgp.Decodable); !ok { - panic("Handler argument is not msgp.Decodable") + var msgReflect protoreflect.ProtoMessage + if msg, ok := argValue.Interface().(proto.Message); !ok { + msgReflect = msg + panic("Handler argument does not implement proto.Message") } - decodable := argValue.Interface().(msgp.Decodable) - for { - rawData := &types.Value{} - err := stream.RecvMsg(rawData) + any, err := stream.Recv() if errors.Is(err, io.EOF) { lg.Debug(err) return nil } switch status.Code(err) { case codes.OK: - err = util.DecodeMsgp(rawData.Data, decodable) - if err != nil { + if err := any.UnmarshalTo(msgReflect); err != nil { 
lg.With(zap.Error(err)).Error("Error decoding value") return err } @@ -120,7 +111,7 @@ func (cl *changeListener) HandleStream(stream grpc.ClientStream) error { cl.ehMutex.Lock() if cl.expiredHandler != nil { retryOp := cl.expiredHandler() - if retryOp == Retry { + if retryOp == metrics.Retry { cl.ehMutex.Unlock() return err } @@ -146,7 +137,7 @@ func (s *changeListener) Target() string { return "monitor" } -func (c *changeListener) OrExpired(handler func() RetryOptions) { +func (c *changeListener) OrExpired(handler func() metrics.RetryOptions) { c.ehMutex.Lock() defer c.ehMutex.Unlock() c.expiredHandler = handler @@ -169,7 +160,7 @@ func handlerArgType(handler interface{}) (reflect.Type, reflect.Value) { func (l *monitorListener) OnValueChanged( bucket string, handler interface{}, // func(type) -) ChangeListener { +) metrics.ChangeListener { argType, funcValue := handlerArgType(handler) cl := &changeListener{ ctx: l.ctx, diff --git a/pkg/metrics/provider.go b/pkg/clients/provider.go similarity index 54% rename from pkg/metrics/provider.go rename to pkg/clients/provider.go index b325ca7..0ea7443 100644 --- a/pkg/metrics/provider.go +++ b/pkg/clients/provider.go @@ -1,24 +1,29 @@ -package metrics +package clients import ( "context" "errors" "io" + "sync" "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/servers" "github.com/cobalt77/kubecc/pkg/types" - "github.com/cobalt77/kubecc/pkg/util" "go.uber.org/zap" "google.golang.org/grpc" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" ) type monitorProvider struct { - ctx context.Context - lg *zap.SugaredLogger - monClient types.MonitorClient - postQueue chan KeyedMetric - queueStrategy QueueStrategy + ctx context.Context + lg *zap.SugaredLogger + monClient types.MonitorClient + postQueue chan proto.Message + queueStrategy QueueStrategy + metricCtxMap map[string]context.CancelFunc + metricCtxMapMutex *sync.Mutex } type QueueStrategy int @@ -33,19 +38,21 @@ func NewMonitorProvider( ctx context.Context, client types.MonitorClient, qs QueueStrategy, -) Provider { - var postQueue chan KeyedMetric +) metrics.Provider { + var postQueue chan proto.Message if (qs & Buffered) != 0 { - postQueue = make(chan KeyedMetric, 1e6) + postQueue = make(chan proto.Message, 1e6) } else { - postQueue = make(chan KeyedMetric) + postQueue = make(chan proto.Message) } provider := &monitorProvider{ - ctx: ctx, - monClient: client, - postQueue: postQueue, - lg: meta.Log(ctx), + ctx: ctx, + monClient: client, + postQueue: postQueue, + lg: meta.Log(ctx), + metricCtxMap: make(map[string]context.CancelFunc), + metricCtxMapMutex: &sync.Mutex{}, } mgr := servers.NewStreamManager(ctx, provider) @@ -57,40 +64,17 @@ func (p *monitorProvider) HandleStream(stream grpc.ClientStream) error { for { select { case metric := <-p.postQueue: + any, err := anypb.New(metric) key := &types.Key{ Bucket: meta.UUID(p.ctx), - Name: metric.Key(), + Name: any.GetTypeUrl(), } p.lg.With( types.ShortID(key.ShortID()), ).Debug("Posting metric") - if mctx, ok := metric.(ContextMetric); ok { - // The metric has a (presumably cancelable) context - // If it is canceled, send a deleter to the server - go func() { - if mctx.Context() != nil { - select { - case <-mctx.Context().Done(): - p.Post(deleter{key: key.Name}) - case <-p.ctx.Done(): - } - } else { - <-p.ctx.Done() - } - }() - } - // Set the value to nil, which deletes the key, if metric is a deleter - var value *types.Value = nil - switch metric.(type) { 
- case deleter: - default: - value = &types.Value{ - Data: util.EncodeMsgp(metric), - } - } - err := stream.SendMsg(&types.Metric{ + err = stream.SendMsg(&types.Metric{ Key: key, - Value: value, + Value: any, }) if err != nil { if errors.Is(err, io.EOF) { @@ -118,7 +102,40 @@ func (p *monitorProvider) Target() string { return "monitor" } -func (p *monitorProvider) Post(metric KeyedMetric) { +func (p *monitorProvider) PostContext(metric proto.Message, ctx context.Context) { + p.Post(metric) + p.metricCtxMapMutex.Lock() + defer p.metricCtxMapMutex.Unlock() + any, err := anypb.New(metric) + if err != nil { + panic(err) + } + key := any.GetTypeUrl() + if cancel, ok := p.metricCtxMap[key]; ok { + cancel() + } + localCtx, cancel := context.WithCancel(ctx) + p.metricCtxMap[key] = cancel + // When the context is done, send a deleter to the server + go func() { + defer func() { + p.metricCtxMapMutex.Lock() + defer p.metricCtxMapMutex.Unlock() + delete(p.metricCtxMap, key) + }() + select { + case <-ctx.Done(): + p.Post(&metrics.Deleter{ + Key: key, + }) + case <-localCtx.Done(): // the map context + return + case <-p.ctx.Done(): + } + }() +} + +func (p *monitorProvider) Post(metric proto.Message) { if (p.queueStrategy & Discard) == 0 { // Block is the default p.postQueue <- metric diff --git a/pkg/metrics/common/common.go b/pkg/metrics/common/common.go deleted file mode 100644 index d5df4fc..0000000 --- a/pkg/metrics/common/common.go +++ /dev/null @@ -1,50 +0,0 @@ -package common - -//go:generate msgp - -type QueueParamsCompleter interface { - CompleteQueueParams(*QueueParams) -} - -type TaskStatusCompleter interface { - CompleteTaskStatus(*TaskStatus) -} - -type QueueStatusCompleter interface { - CompleteQueueStatus(*QueueStatus) -} - -type QueueParams struct { - ConcurrentProcessLimit int32 `msg:"concurrentProcessLimit"` - QueuePressureMultiplier float64 `msg:"queuePressureMultiplier"` - QueueRejectMultiplier float64 `msg:"queueRejectMultiplier"` -} - -func (QueueParams) Key() string { - return "QueueParams" -} - -type TaskStatus struct { - NumRunning int32 `msg:"numRunning"` - NumQueued int32 `msg:"numQueued"` - NumDelegated int32 `msg:"numDelegated"` -} - -func (TaskStatus) Key() string { - return "TaskStatus" -} - -type QueueStatus struct { - QueueStatus int32 `msg:"queueStatus"` -} - -func (QueueStatus) Key() string { - return "QueueStatus" -} - -type Alive struct { -} - -func (Alive) Key() string { - return "Alive" -} diff --git a/pkg/metrics/common/common_gen.go b/pkg/metrics/common/common_gen.go deleted file mode 100644 index 78fdad5..0000000 --- a/pkg/metrics/common/common_gen.go +++ /dev/null @@ -1,499 +0,0 @@ -package common - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
- -import ( - "github.com/tinylib/msgp/msgp" -) - -// DecodeMsg implements msgp.Decodable -func (z *Alive) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z Alive) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 0 - err = en.Append(0x80) - if err != nil { - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z Alive) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 0 - o = append(o, 0x80) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *Alive) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z Alive) Msgsize() (s int) { - s = 1 - return -} - -// DecodeMsg implements msgp.Decodable -func (z *QueueParams) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "concurrentProcessLimit": - z.ConcurrentProcessLimit, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "ConcurrentProcessLimit") - return - } - case "queuePressureMultiplier": - z.QueuePressureMultiplier, err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "QueuePressureMultiplier") - return - } - case "queueRejectMultiplier": - z.QueueRejectMultiplier, err = dc.ReadFloat64() - if err != nil { - err = msgp.WrapError(err, "QueueRejectMultiplier") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z QueueParams) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 3 - // write "concurrentProcessLimit" - err = en.Append(0x83, 0xb6, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74) - if err != nil { - return - } - err = en.WriteInt32(z.ConcurrentProcessLimit) - if err != nil { - err = msgp.WrapError(err, "ConcurrentProcessLimit") - return - } - // write "queuePressureMultiplier" - err = en.Append(0xb7, 0x71, 0x75, 0x65, 0x75, 0x65, 0x50, 0x72, 0x65, 0x73, 0x73, 0x75, 0x72, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72) - if err != nil { - return - } - err = en.WriteFloat64(z.QueuePressureMultiplier) - if err != nil { - err = msgp.WrapError(err, "QueuePressureMultiplier") - return - 
} - // write "queueRejectMultiplier" - err = en.Append(0xb5, 0x71, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72) - if err != nil { - return - } - err = en.WriteFloat64(z.QueueRejectMultiplier) - if err != nil { - err = msgp.WrapError(err, "QueueRejectMultiplier") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z QueueParams) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 3 - // string "concurrentProcessLimit" - o = append(o, 0x83, 0xb6, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74) - o = msgp.AppendInt32(o, z.ConcurrentProcessLimit) - // string "queuePressureMultiplier" - o = append(o, 0xb7, 0x71, 0x75, 0x65, 0x75, 0x65, 0x50, 0x72, 0x65, 0x73, 0x73, 0x75, 0x72, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72) - o = msgp.AppendFloat64(o, z.QueuePressureMultiplier) - // string "queueRejectMultiplier" - o = append(o, 0xb5, 0x71, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72) - o = msgp.AppendFloat64(o, z.QueueRejectMultiplier) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *QueueParams) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "concurrentProcessLimit": - z.ConcurrentProcessLimit, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ConcurrentProcessLimit") - return - } - case "queuePressureMultiplier": - z.QueuePressureMultiplier, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "QueuePressureMultiplier") - return - } - case "queueRejectMultiplier": - z.QueueRejectMultiplier, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "QueueRejectMultiplier") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z QueueParams) Msgsize() (s int) { - s = 1 + 23 + msgp.Int32Size + 24 + msgp.Float64Size + 22 + msgp.Float64Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *QueueStatus) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "queueStatus": - z.QueueStatus, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "QueueStatus") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z QueueStatus) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "queueStatus" - err = en.Append(0x81, 0xab, 0x71, 0x75, 0x65, 0x75, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73) - if err 
!= nil { - return - } - err = en.WriteInt32(z.QueueStatus) - if err != nil { - err = msgp.WrapError(err, "QueueStatus") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z QueueStatus) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "queueStatus" - o = append(o, 0x81, 0xab, 0x71, 0x75, 0x65, 0x75, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73) - o = msgp.AppendInt32(o, z.QueueStatus) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *QueueStatus) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "queueStatus": - z.QueueStatus, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "QueueStatus") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z QueueStatus) Msgsize() (s int) { - s = 1 + 12 + msgp.Int32Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *TaskStatus) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "numRunning": - z.NumRunning, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "NumRunning") - return - } - case "numQueued": - z.NumQueued, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "NumQueued") - return - } - case "numDelegated": - z.NumDelegated, err = dc.ReadInt32() - if err != nil { - err = msgp.WrapError(err, "NumDelegated") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z TaskStatus) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 3 - // write "numRunning" - err = en.Append(0x83, 0xaa, 0x6e, 0x75, 0x6d, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67) - if err != nil { - return - } - err = en.WriteInt32(z.NumRunning) - if err != nil { - err = msgp.WrapError(err, "NumRunning") - return - } - // write "numQueued" - err = en.Append(0xa9, 0x6e, 0x75, 0x6d, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64) - if err != nil { - return - } - err = en.WriteInt32(z.NumQueued) - if err != nil { - err = msgp.WrapError(err, "NumQueued") - return - } - // write "numDelegated" - err = en.Append(0xac, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64) - if err != nil { - return - } - err = en.WriteInt32(z.NumDelegated) - if err != nil { - err = msgp.WrapError(err, "NumDelegated") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z TaskStatus) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 3 - // string "numRunning" - o = append(o, 0x83, 0xaa, 0x6e, 0x75, 0x6d, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67) - o = msgp.AppendInt32(o, z.NumRunning) - // string 
"numQueued" - o = append(o, 0xa9, 0x6e, 0x75, 0x6d, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64) - o = msgp.AppendInt32(o, z.NumQueued) - // string "numDelegated" - o = append(o, 0xac, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64) - o = msgp.AppendInt32(o, z.NumDelegated) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *TaskStatus) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "numRunning": - z.NumRunning, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "NumRunning") - return - } - case "numQueued": - z.NumQueued, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "NumQueued") - return - } - case "numDelegated": - z.NumDelegated, bts, err = msgp.ReadInt32Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "NumDelegated") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z TaskStatus) Msgsize() (s int) { - s = 1 + 11 + msgp.Int32Size + 10 + msgp.Int32Size + 13 + msgp.Int32Size - return -} diff --git a/pkg/metrics/common/common_gen_test.go b/pkg/metrics/common/common_gen_test.go deleted file mode 100644 index d8a5959..0000000 --- a/pkg/metrics/common/common_gen_test.go +++ /dev/null @@ -1,462 +0,0 @@ -package common - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
- -import ( - "bytes" - "testing" - - "github.com/tinylib/msgp/msgp" -) - -func TestMarshalUnmarshalAlive(t *testing.T) { - v := Alive{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgAlive(b *testing.B) { - v := Alive{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgAlive(b *testing.B) { - v := Alive{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalAlive(b *testing.B) { - v := Alive{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeAlive(t *testing.T) { - v := Alive{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeAlive Msgsize() is inaccurate") - } - - vn := Alive{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeAlive(b *testing.B) { - v := Alive{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeAlive(b *testing.B) { - v := Alive{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalQueueParams(t *testing.T) { - v := QueueParams{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgQueueParams(b *testing.B) { - v := QueueParams{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgQueueParams(b *testing.B) { - v := QueueParams{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalQueueParams(b *testing.B) { - v := QueueParams{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeQueueParams(t *testing.T) { - v := 
QueueParams{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeQueueParams Msgsize() is inaccurate") - } - - vn := QueueParams{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeQueueParams(b *testing.B) { - v := QueueParams{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeQueueParams(b *testing.B) { - v := QueueParams{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalQueueStatus(t *testing.T) { - v := QueueStatus{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgQueueStatus(b *testing.B) { - v := QueueStatus{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgQueueStatus(b *testing.B) { - v := QueueStatus{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalQueueStatus(b *testing.B) { - v := QueueStatus{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeQueueStatus(t *testing.T) { - v := QueueStatus{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeQueueStatus Msgsize() is inaccurate") - } - - vn := QueueStatus{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeQueueStatus(b *testing.B) { - v := QueueStatus{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeQueueStatus(b *testing.B) { - v := QueueStatus{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalTaskStatus(t *testing.T) { - v := TaskStatus{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - 
if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgTaskStatus(b *testing.B) { - v := TaskStatus{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgTaskStatus(b *testing.B) { - v := TaskStatus{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalTaskStatus(b *testing.B) { - v := TaskStatus{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeTaskStatus(t *testing.T) { - v := TaskStatus{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeTaskStatus Msgsize() is inaccurate") - } - - vn := TaskStatus{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeTaskStatus(b *testing.B) { - v := TaskStatus{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeTaskStatus(b *testing.B) { - v := TaskStatus{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} diff --git a/pkg/metrics/completers.go b/pkg/metrics/completers.go new file mode 100644 index 0000000..85cce49 --- /dev/null +++ b/pkg/metrics/completers.go @@ -0,0 +1,9 @@ +package metrics + +type UsageLimitsCompleter interface { + CompleteUsageLimits(*UsageLimits) +} + +type TaskStatusCompleter interface { + CompleteTaskStatus(*TaskStatus) +} diff --git a/pkg/metrics/metrics.pb.go b/pkg/metrics/metrics.pb.go new file mode 100644 index 0000000..adef862 --- /dev/null +++ b/pkg/metrics/metrics.pb.go @@ -0,0 +1,1650 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.12.3 +// source: pkg/metrics/metrics.proto + +package metrics + +import ( + types "github.com/cobalt77/kubecc/pkg/types" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
+const _ = proto.ProtoPackageIsVersion4 + +type TaskStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NumRunning int32 `protobuf:"varint,1,opt,name=NumRunning,proto3" json:"NumRunning,omitempty"` + NumQueued int32 `protobuf:"varint,2,opt,name=NumQueued,proto3" json:"NumQueued,omitempty"` + NumDelegated int32 `protobuf:"varint,3,opt,name=NumDelegated,proto3" json:"NumDelegated,omitempty"` +} + +func (x *TaskStatus) Reset() { + *x = TaskStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TaskStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskStatus) ProtoMessage() {} + +func (x *TaskStatus) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskStatus.ProtoReflect.Descriptor instead. +func (*TaskStatus) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{0} +} + +func (x *TaskStatus) GetNumRunning() int32 { + if x != nil { + return x.NumRunning + } + return 0 +} + +func (x *TaskStatus) GetNumQueued() int32 { + if x != nil { + return x.NumQueued + } + return 0 +} + +func (x *TaskStatus) GetNumDelegated() int32 { + if x != nil { + return x.NumDelegated + } + return 0 +} + +type Toolchains struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Items []*types.Toolchain `protobuf:"bytes,1,rep,name=Items,proto3" json:"Items,omitempty"` +} + +func (x *Toolchains) Reset() { + *x = Toolchains{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Toolchains) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Toolchains) ProtoMessage() {} + +func (x *Toolchains) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Toolchains.ProtoReflect.Descriptor instead. 
+func (*Toolchains) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{1} +} + +func (x *Toolchains) GetItems() []*types.Toolchain { + if x != nil { + return x.Items + } + return nil +} + +type UsageLimits struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConcurrentProcessLimit int32 `protobuf:"varint,1,opt,name=ConcurrentProcessLimit,proto3" json:"ConcurrentProcessLimit,omitempty"` + QueuePressureMultiplier float64 `protobuf:"fixed64,2,opt,name=QueuePressureMultiplier,proto3" json:"QueuePressureMultiplier,omitempty"` + QueueRejectMultiplier float64 `protobuf:"fixed64,3,opt,name=QueueRejectMultiplier,proto3" json:"QueueRejectMultiplier,omitempty"` +} + +func (x *UsageLimits) Reset() { + *x = UsageLimits{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UsageLimits) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UsageLimits) ProtoMessage() {} + +func (x *UsageLimits) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UsageLimits.ProtoReflect.Descriptor instead. +func (*UsageLimits) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{2} +} + +func (x *UsageLimits) GetConcurrentProcessLimit() int32 { + if x != nil { + return x.ConcurrentProcessLimit + } + return 0 +} + +func (x *UsageLimits) GetQueuePressureMultiplier() float64 { + if x != nil { + return x.QueuePressureMultiplier + } + return 0 +} + +func (x *UsageLimits) GetQueueRejectMultiplier() float64 { + if x != nil { + return x.QueueRejectMultiplier + } + return 0 +} + +type AgentInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Node string `protobuf:"bytes,4,opt,name=Node,proto3" json:"Node,omitempty"` + Pod string `protobuf:"bytes,5,opt,name=Pod,proto3" json:"Pod,omitempty"` + Namespace string `protobuf:"bytes,6,opt,name=Namespace,proto3" json:"Namespace,omitempty"` +} + +func (x *AgentInfo) Reset() { + *x = AgentInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AgentInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AgentInfo) ProtoMessage() {} + +func (x *AgentInfo) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AgentInfo.ProtoReflect.Descriptor instead. 
+func (*AgentInfo) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{3} +} + +func (x *AgentInfo) GetNode() string { + if x != nil { + return x.Node + } + return "" +} + +func (x *AgentInfo) GetPod() string { + if x != nil { + return x.Pod + } + return "" +} + +func (x *AgentInfo) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +type Deleter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=Key,proto3" json:"Key,omitempty"` +} + +func (x *Deleter) Reset() { + *x = Deleter{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Deleter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Deleter) ProtoMessage() {} + +func (x *Deleter) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Deleter.ProtoReflect.Descriptor instead. +func (*Deleter) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{4} +} + +func (x *Deleter) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +type TasksCompletedTotal struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Total int64 `protobuf:"varint,1,opt,name=Total,proto3" json:"Total,omitempty"` +} + +func (x *TasksCompletedTotal) Reset() { + *x = TasksCompletedTotal{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TasksCompletedTotal) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TasksCompletedTotal) ProtoMessage() {} + +func (x *TasksCompletedTotal) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TasksCompletedTotal.ProtoReflect.Descriptor instead. 
+func (*TasksCompletedTotal) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{5} +} + +func (x *TasksCompletedTotal) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 +} + +type TasksFailedTotal struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Total int64 `protobuf:"varint,1,opt,name=Total,proto3" json:"Total,omitempty"` +} + +func (x *TasksFailedTotal) Reset() { + *x = TasksFailedTotal{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TasksFailedTotal) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TasksFailedTotal) ProtoMessage() {} + +func (x *TasksFailedTotal) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TasksFailedTotal.ProtoReflect.Descriptor instead. +func (*TasksFailedTotal) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{6} +} + +func (x *TasksFailedTotal) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 +} + +type SchedulingRequestsTotal struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Total int64 `protobuf:"varint,1,opt,name=Total,proto3" json:"Total,omitempty"` +} + +func (x *SchedulingRequestsTotal) Reset() { + *x = SchedulingRequestsTotal{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SchedulingRequestsTotal) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchedulingRequestsTotal) ProtoMessage() {} + +func (x *SchedulingRequestsTotal) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchedulingRequestsTotal.ProtoReflect.Descriptor instead. 
+func (*SchedulingRequestsTotal) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{7} +} + +func (x *SchedulingRequestsTotal) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 +} + +type AgentCount struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count int64 `protobuf:"varint,1,opt,name=Count,proto3" json:"Count,omitempty"` +} + +func (x *AgentCount) Reset() { + *x = AgentCount{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AgentCount) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AgentCount) ProtoMessage() {} + +func (x *AgentCount) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AgentCount.ProtoReflect.Descriptor instead. +func (*AgentCount) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{8} +} + +func (x *AgentCount) GetCount() int64 { + if x != nil { + return x.Count + } + return 0 +} + +type ConsumerdCount struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count int64 `protobuf:"varint,1,opt,name=Count,proto3" json:"Count,omitempty"` +} + +func (x *ConsumerdCount) Reset() { + *x = ConsumerdCount{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConsumerdCount) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConsumerdCount) ProtoMessage() {} + +func (x *ConsumerdCount) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConsumerdCount.ProtoReflect.Descriptor instead. 
+func (*ConsumerdCount) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{9} +} + +func (x *ConsumerdCount) GetCount() int64 { + if x != nil { + return x.Count + } + return 0 +} + +type Identifier struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UUID string `protobuf:"bytes,1,opt,name=UUID,proto3" json:"UUID,omitempty"` +} + +func (x *Identifier) Reset() { + *x = Identifier{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identifier) ProtoMessage() {} + +func (x *Identifier) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identifier.ProtoReflect.Descriptor instead. +func (*Identifier) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{10} +} + +func (x *Identifier) GetUUID() string { + if x != nil { + return x.UUID + } + return "" +} + +type AgentTasksTotal struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UUID string `protobuf:"bytes,1,opt,name=UUID,proto3" json:"UUID,omitempty"` + Total int64 `protobuf:"varint,2,opt,name=Total,proto3" json:"Total,omitempty"` +} + +func (x *AgentTasksTotal) Reset() { + *x = AgentTasksTotal{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AgentTasksTotal) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AgentTasksTotal) ProtoMessage() {} + +func (x *AgentTasksTotal) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AgentTasksTotal.ProtoReflect.Descriptor instead. 
+func (*AgentTasksTotal) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{11} +} + +func (x *AgentTasksTotal) GetUUID() string { + if x != nil { + return x.UUID + } + return "" +} + +func (x *AgentTasksTotal) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 +} + +type ConsumerdTasksTotal struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UUID string `protobuf:"bytes,1,opt,name=UUID,proto3" json:"UUID,omitempty"` + Total int64 `protobuf:"varint,2,opt,name=Total,proto3" json:"Total,omitempty"` +} + +func (x *ConsumerdTasksTotal) Reset() { + *x = ConsumerdTasksTotal{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConsumerdTasksTotal) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConsumerdTasksTotal) ProtoMessage() {} + +func (x *ConsumerdTasksTotal) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConsumerdTasksTotal.ProtoReflect.Descriptor instead. +func (*ConsumerdTasksTotal) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{12} +} + +func (x *ConsumerdTasksTotal) GetUUID() string { + if x != nil { + return x.UUID + } + return "" +} + +func (x *ConsumerdTasksTotal) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 +} + +type MetricsPostedTotal struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Total int64 `protobuf:"varint,1,opt,name=Total,proto3" json:"Total,omitempty"` +} + +func (x *MetricsPostedTotal) Reset() { + *x = MetricsPostedTotal{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetricsPostedTotal) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricsPostedTotal) ProtoMessage() {} + +func (x *MetricsPostedTotal) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricsPostedTotal.ProtoReflect.Descriptor instead. 
+func (*MetricsPostedTotal) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{13} +} + +func (x *MetricsPostedTotal) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 +} + +type ListenerCount struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count int32 `protobuf:"varint,1,opt,name=Count,proto3" json:"Count,omitempty"` +} + +func (x *ListenerCount) Reset() { + *x = ListenerCount{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenerCount) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenerCount) ProtoMessage() {} + +func (x *ListenerCount) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenerCount.ProtoReflect.Descriptor instead. +func (*ListenerCount) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{14} +} + +func (x *ListenerCount) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +type ProviderInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UUID string `protobuf:"bytes,1,opt,name=UUID,proto3" json:"UUID,omitempty"` + Component types.Component `protobuf:"varint,2,opt,name=Component,proto3,enum=types.Component" json:"Component,omitempty"` + Address string `protobuf:"bytes,3,opt,name=Address,proto3" json:"Address,omitempty"` +} + +func (x *ProviderInfo) Reset() { + *x = ProviderInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProviderInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProviderInfo) ProtoMessage() {} + +func (x *ProviderInfo) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProviderInfo.ProtoReflect.Descriptor instead. 
+func (*ProviderInfo) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{15} +} + +func (x *ProviderInfo) GetUUID() string { + if x != nil { + return x.UUID + } + return "" +} + +func (x *ProviderInfo) GetComponent() types.Component { + if x != nil { + return x.Component + } + return types.Component_Component_Unknown +} + +func (x *ProviderInfo) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +type Providers struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Items map[string]*ProviderInfo `protobuf:"bytes,1,rep,name=Items,proto3" json:"Items,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Providers) Reset() { + *x = Providers{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Providers) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Providers) ProtoMessage() {} + +func (x *Providers) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Providers.ProtoReflect.Descriptor instead. +func (*Providers) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{16} +} + +func (x *Providers) GetItems() map[string]*ProviderInfo { + if x != nil { + return x.Items + } + return nil +} + +type StoreContents struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Buckets []*BucketSpec `protobuf:"bytes,1,rep,name=Buckets,proto3" json:"Buckets,omitempty"` +} + +func (x *StoreContents) Reset() { + *x = StoreContents{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreContents) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreContents) ProtoMessage() {} + +func (x *StoreContents) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreContents.ProtoReflect.Descriptor instead. 
+func (*StoreContents) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{17} +} + +func (x *StoreContents) GetBuckets() []*BucketSpec { + if x != nil { + return x.Buckets + } + return nil +} + +type BucketSpec struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Data map[string]*any.Any `protobuf:"bytes,2,rep,name=Data,proto3" json:"Data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *BucketSpec) Reset() { + *x = BucketSpec{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BucketSpec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BucketSpec) ProtoMessage() {} + +func (x *BucketSpec) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BucketSpec.ProtoReflect.Descriptor instead. +func (*BucketSpec) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{18} +} + +func (x *BucketSpec) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *BucketSpec) GetData() map[string]*any.Any { + if x != nil { + return x.Data + } + return nil +} + +type LocalTasksCompleted struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Total int64 `protobuf:"varint,1,opt,name=Total,proto3" json:"Total,omitempty"` +} + +func (x *LocalTasksCompleted) Reset() { + *x = LocalTasksCompleted{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalTasksCompleted) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalTasksCompleted) ProtoMessage() {} + +func (x *LocalTasksCompleted) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalTasksCompleted.ProtoReflect.Descriptor instead. 
+func (*LocalTasksCompleted) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{19} +} + +func (x *LocalTasksCompleted) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 +} + +type CacheUsage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ObjectCount int64 `protobuf:"varint,1,opt,name=ObjectCount,proto3" json:"ObjectCount,omitempty"` + TotalSize int64 `protobuf:"varint,2,opt,name=TotalSize,proto3" json:"TotalSize,omitempty"` + UsagePercent float64 `protobuf:"fixed64,3,opt,name=UsagePercent,proto3" json:"UsagePercent,omitempty"` +} + +func (x *CacheUsage) Reset() { + *x = CacheUsage{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CacheUsage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CacheUsage) ProtoMessage() {} + +func (x *CacheUsage) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CacheUsage.ProtoReflect.Descriptor instead. +func (*CacheUsage) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{20} +} + +func (x *CacheUsage) GetObjectCount() int64 { + if x != nil { + return x.ObjectCount + } + return 0 +} + +func (x *CacheUsage) GetTotalSize() int64 { + if x != nil { + return x.TotalSize + } + return 0 +} + +func (x *CacheUsage) GetUsagePercent() float64 { + if x != nil { + return x.UsagePercent + } + return 0 +} + +type CacheHits struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CacheHitsTotal int64 `protobuf:"varint,1,opt,name=CacheHitsTotal,proto3" json:"CacheHitsTotal,omitempty"` + CacheMissesTotal int64 `protobuf:"varint,2,opt,name=CacheMissesTotal,proto3" json:"CacheMissesTotal,omitempty"` + CacheHitPercent float64 `protobuf:"fixed64,3,opt,name=CacheHitPercent,proto3" json:"CacheHitPercent,omitempty"` +} + +func (x *CacheHits) Reset() { + *x = CacheHits{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_metrics_metrics_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CacheHits) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CacheHits) ProtoMessage() {} + +func (x *CacheHits) ProtoReflect() protoreflect.Message { + mi := &file_pkg_metrics_metrics_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CacheHits.ProtoReflect.Descriptor instead. 
+func (*CacheHits) Descriptor() ([]byte, []int) { + return file_pkg_metrics_metrics_proto_rawDescGZIP(), []int{21} +} + +func (x *CacheHits) GetCacheHitsTotal() int64 { + if x != nil { + return x.CacheHitsTotal + } + return 0 +} + +func (x *CacheHits) GetCacheMissesTotal() int64 { + if x != nil { + return x.CacheMissesTotal + } + return 0 +} + +func (x *CacheHits) GetCacheHitPercent() float64 { + if x != nil { + return x.CacheHitPercent + } + return 0 +} + +var File_pkg_metrics_metrics_proto protoreflect.FileDescriptor + +var file_pkg_metrics_metrics_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x70, 0x6b, 0x67, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x1a, 0x15, 0x70, 0x6b, 0x67, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6e, 0x0a, 0x0a, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x4e, 0x75, 0x6d, 0x52, 0x75, 0x6e, 0x6e, 0x69, + 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x4e, 0x75, 0x6d, 0x52, 0x75, 0x6e, + 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x75, 0x6d, 0x51, 0x75, 0x65, 0x75, 0x65, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x4e, 0x75, 0x6d, 0x51, 0x75, 0x65, 0x75, + 0x65, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x4e, 0x75, 0x6d, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x4e, 0x75, 0x6d, 0x44, 0x65, 0x6c, + 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34, 0x0a, 0x0a, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x05, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x54, 0x6f, 0x6f, 0x6c, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x05, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0xb5, 0x01, 0x0a, + 0x0b, 0x55, 0x73, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x16, + 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x43, 0x6f, + 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x12, 0x38, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, 0x72, 0x65, + 0x73, 0x73, 0x75, 0x72, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x17, 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, 0x72, 0x65, 0x73, + 0x73, 0x75, 0x72, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x34, + 0x0a, 0x15, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x51, + 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, + 0x6c, 0x69, 0x65, 0x72, 0x22, 0x4f, 0x0a, 0x09, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x50, 0x6f, 0x64, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x50, 
0x6f, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x1b, 0x0a, 0x07, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x72, + 0x12, 0x10, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x4b, + 0x65, 0x79, 0x22, 0x2b, 0x0a, 0x13, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x74, + 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x22, + 0x28, 0x0a, 0x10, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x54, 0x6f, + 0x74, 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x05, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x22, 0x2f, 0x0a, 0x17, 0x53, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x54, + 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x05, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x22, 0x22, 0x0a, 0x0a, 0x41, 0x67, + 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x26, + 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x14, 0x0a, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x20, 0x0a, 0x0a, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x55, 0x55, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x55, 0x55, 0x49, 0x44, 0x22, 0x3b, 0x0a, 0x0f, 0x41, 0x67, 0x65, 0x6e, + 0x74, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x55, + 0x55, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x55, 0x55, 0x49, 0x44, 0x12, + 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x22, 0x3f, 0x0a, 0x13, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, + 0x72, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x12, 0x0a, 0x04, + 0x55, 0x55, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x55, 0x55, 0x49, 0x44, + 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x05, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x22, 0x2a, 0x0a, 0x12, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x50, 0x6f, 0x73, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05, + 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x54, 0x6f, 0x74, + 0x61, 0x6c, 0x22, 0x25, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x6c, 0x0a, 0x0c, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x55, 0x55, 0x49, + 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x55, 0x55, 0x49, 0x44, 0x12, 0x2e, 0x0a, + 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x10, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 
0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, + 0x6e, 0x74, 0x52, 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, + 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x73, 0x12, 0x33, 0x0a, 0x05, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x05, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x1a, 0x4f, 0x0a, 0x0a, 0x49, 0x74, + 0x65, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3e, 0x0a, 0x0d, 0x53, + 0x74, 0x6f, 0x72, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2d, 0x0a, 0x07, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, + 0x65, 0x63, 0x52, 0x07, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0a, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x31, + 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x65, + 0x63, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x44, 0x61, 0x74, + 0x61, 0x1a, 0x4d, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x2b, 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x43, 0x6f, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x74, 0x61, 0x6c, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x22, 0x70, 0x0a, + 0x0a, 0x43, 0x61, 0x63, 0x68, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, + 0x09, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x09, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x55, + 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x0c, 0x55, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 
0x6e, 0x74, 0x22, + 0x89, 0x01, 0x0a, 0x09, 0x43, 0x61, 0x63, 0x68, 0x65, 0x48, 0x69, 0x74, 0x73, 0x12, 0x26, 0x0a, + 0x0e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x48, 0x69, 0x74, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x48, 0x69, 0x74, 0x73, + 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x2a, 0x0a, 0x10, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4d, 0x69, + 0x73, 0x73, 0x65, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x10, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4d, 0x69, 0x73, 0x73, 0x65, 0x73, 0x54, 0x6f, 0x74, 0x61, + 0x6c, 0x12, 0x28, 0x0a, 0x0f, 0x43, 0x61, 0x63, 0x68, 0x65, 0x48, 0x69, 0x74, 0x50, 0x65, 0x72, + 0x63, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0f, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x48, 0x69, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x28, 0x5a, 0x26, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x62, 0x61, 0x6c, 0x74, + 0x37, 0x37, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x63, 0x63, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_metrics_metrics_proto_rawDescOnce sync.Once + file_pkg_metrics_metrics_proto_rawDescData = file_pkg_metrics_metrics_proto_rawDesc +) + +func file_pkg_metrics_metrics_proto_rawDescGZIP() []byte { + file_pkg_metrics_metrics_proto_rawDescOnce.Do(func() { + file_pkg_metrics_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_metrics_metrics_proto_rawDescData) + }) + return file_pkg_metrics_metrics_proto_rawDescData +} + +var file_pkg_metrics_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 24) +var file_pkg_metrics_metrics_proto_goTypes = []interface{}{ + (*TaskStatus)(nil), // 0: metrics.TaskStatus + (*Toolchains)(nil), // 1: metrics.Toolchains + (*UsageLimits)(nil), // 2: metrics.UsageLimits + (*AgentInfo)(nil), // 3: metrics.AgentInfo + (*Deleter)(nil), // 4: metrics.Deleter + (*TasksCompletedTotal)(nil), // 5: metrics.TasksCompletedTotal + (*TasksFailedTotal)(nil), // 6: metrics.TasksFailedTotal + (*SchedulingRequestsTotal)(nil), // 7: metrics.SchedulingRequestsTotal + (*AgentCount)(nil), // 8: metrics.AgentCount + (*ConsumerdCount)(nil), // 9: metrics.ConsumerdCount + (*Identifier)(nil), // 10: metrics.Identifier + (*AgentTasksTotal)(nil), // 11: metrics.AgentTasksTotal + (*ConsumerdTasksTotal)(nil), // 12: metrics.ConsumerdTasksTotal + (*MetricsPostedTotal)(nil), // 13: metrics.MetricsPostedTotal + (*ListenerCount)(nil), // 14: metrics.ListenerCount + (*ProviderInfo)(nil), // 15: metrics.ProviderInfo + (*Providers)(nil), // 16: metrics.Providers + (*StoreContents)(nil), // 17: metrics.StoreContents + (*BucketSpec)(nil), // 18: metrics.BucketSpec + (*LocalTasksCompleted)(nil), // 19: metrics.LocalTasksCompleted + (*CacheUsage)(nil), // 20: metrics.CacheUsage + (*CacheHits)(nil), // 21: metrics.CacheHits + nil, // 22: metrics.Providers.ItemsEntry + nil, // 23: metrics.BucketSpec.DataEntry + (*types.Toolchain)(nil), // 24: types.Toolchain + (types.Component)(0), // 25: types.Component + (*any.Any)(nil), // 26: google.protobuf.Any +} +var file_pkg_metrics_metrics_proto_depIdxs = []int32{ + 24, // 0: metrics.Toolchains.Items:type_name -> types.Toolchain + 25, // 1: metrics.ProviderInfo.Component:type_name -> types.Component + 22, // 2: metrics.Providers.Items:type_name -> metrics.Providers.ItemsEntry + 18, // 3: metrics.StoreContents.Buckets:type_name -> metrics.BucketSpec + 23, // 4: 
metrics.BucketSpec.Data:type_name -> metrics.BucketSpec.DataEntry + 15, // 5: metrics.Providers.ItemsEntry.value:type_name -> metrics.ProviderInfo + 26, // 6: metrics.BucketSpec.DataEntry.value:type_name -> google.protobuf.Any + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_pkg_metrics_metrics_proto_init() } +func file_pkg_metrics_metrics_proto_init() { + if File_pkg_metrics_metrics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_metrics_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Toolchains); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UsageLimits); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AgentInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Deleter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TasksCompletedTotal); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TasksFailedTotal); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SchedulingRequestsTotal); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AgentCount); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConsumerdCount); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Identifier); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: 
+ return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AgentTasksTotal); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConsumerdTasksTotal); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricsPostedTotal); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenerCount); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProviderInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Providers); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreContents); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BucketSpec); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalTasksCompleted); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CacheUsage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_metrics_metrics_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CacheHits); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_metrics_metrics_proto_rawDesc, + NumEnums: 0, + NumMessages: 24, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pkg_metrics_metrics_proto_goTypes, + DependencyIndexes: file_pkg_metrics_metrics_proto_depIdxs, + MessageInfos: file_pkg_metrics_metrics_proto_msgTypes, + }.Build() + File_pkg_metrics_metrics_proto = out.File + file_pkg_metrics_metrics_proto_rawDesc = nil + file_pkg_metrics_metrics_proto_goTypes = nil + 
file_pkg_metrics_metrics_proto_depIdxs = nil +} diff --git a/pkg/metrics/metrics.proto b/pkg/metrics/metrics.proto new file mode 100644 index 0000000..244ae3b --- /dev/null +++ b/pkg/metrics/metrics.proto @@ -0,0 +1,116 @@ +syntax = "proto3"; +option go_package = "github.com/cobalt77/kubecc/pkg/metrics"; +import "pkg/types/types.proto"; +import "google/protobuf/any.proto"; + +package metrics; + +message TaskStatus { + int32 NumRunning = 1; + int32 NumQueued = 2; + int32 NumDelegated = 3; +} + +message Toolchains { + repeated types.Toolchain Items = 1; +} + +message UsageLimits { + int32 ConcurrentProcessLimit = 1; + double QueuePressureMultiplier = 2; + double QueueRejectMultiplier = 3; +} + +message AgentInfo { + string Node = 4; + string Pod = 5; + string Namespace = 6; +} + +message Deleter { + string Key = 1; +} + +// Scheduler + +message TasksCompletedTotal { + int64 Total = 1; +} + +message TasksFailedTotal { + int64 Total = 1; +} + +message SchedulingRequestsTotal { + int64 Total = 1; +} + +message AgentCount { + int64 Count = 1; +} + +message ConsumerdCount { + int64 Count = 1; +} + +message Identifier { + string UUID = 1; +} + +message AgentTasksTotal { + string UUID = 1; + int64 Total = 2; +} +message ConsumerdTasksTotal { + string UUID = 1; + int64 Total = 2; +} + +// Monitor + +message MetricsPostedTotal { + int64 Total = 1; +} + +message ListenerCount { + int32 Count = 1; +} + +message ProviderInfo { + string UUID = 1; + types.Component Component = 2; + string Address = 3; +} + +message Providers { + map<string, ProviderInfo> Items = 1; +} + +message StoreContents { + repeated BucketSpec Buckets = 1; +} + +message BucketSpec { + string Name = 1; + map<string, google.protobuf.Any> Data = 2; +} + +// Consumerd + +message LocalTasksCompleted { + int64 Total = 1; +} + +// Cache Server + +message CacheUsage { + int64 ObjectCount = 1; + int64 TotalSize = 2; + double UsagePercent = 3; +} + +message CacheHits { + int64 CacheHitsTotal = 1; + int64 CacheMissesTotal = 2; + double CacheHitPercent = 3; +} diff --git a/pkg/metrics/noop.go b/pkg/metrics/noop.go index cd0eab1..c8df2da 100644 --- a/pkg/metrics/noop.go +++ b/pkg/metrics/noop.go @@ -4,6 +4,7 @@ import ( "context" "google.golang.org/grpc" + "google.golang.org/protobuf/proto" ) type noopProvider struct{} @@ -12,7 +13,9 @@ func NewNoopProvider() Provider { return &noopProvider{} } -func (noopProvider) Post(KeyedMetric) {} +func (noopProvider) Post(proto.Message) {} + +func (noopProvider) PostContext(proto.Message, context.Context) {} type noopListener struct{} diff --git a/pkg/metrics/types.go b/pkg/metrics/types.go index cc247fd..158c440 100644 --- a/pkg/metrics/types.go +++ b/pkg/metrics/types.go @@ -4,21 +4,25 @@ import ( "context" "github.com/cobalt77/kubecc/pkg/servers" - "github.com/tinylib/msgp/msgp" + "google.golang.org/protobuf/proto" ) -type KeyedMetric interface { - msgp.Decodable - msgp.Encodable - Key() string -} +const MetaBucket = "meta" + +type RetryOptions uint32 + +const ( + NoRetry RetryOptions = iota + Retry +) type ContextMetric interface { Context() context.Context } type Provider interface { - Post(metric KeyedMetric) + Post(metric proto.Message) + PostContext(metric proto.Message, ctx context.Context) } type Listener interface { @@ -30,33 +34,3 @@ type ChangeListener interface { servers.StreamHandler OrExpired(handler func() RetryOptions) } - -type contextMetric struct { - KeyedMetric - ctx context.Context -} - -func (cm *contextMetric) Context() context.Context { - return cm.ctx -} - -func WithContext(m KeyedMetric, ctx context.Context) KeyedMetric { -
return &contextMetric{ - KeyedMetric: m, - ctx: ctx, - } -} - -type deleter struct { - msgp.Decodable - msgp.Encodable - key string -} - -func (d deleter) Key() string { - return d.key -} - -func (deleter) Context() context.Context { - return nil -} diff --git a/pkg/run/executor.go b/pkg/run/executor.go index 280d3ec..f44b068 100644 --- a/pkg/run/executor.go +++ b/pkg/run/executor.go @@ -2,19 +2,16 @@ package run import ( "github.com/cobalt77/kubecc/pkg/host" - "github.com/cobalt77/kubecc/pkg/metrics/common" - "github.com/cobalt77/kubecc/pkg/types" + "github.com/cobalt77/kubecc/pkg/metrics" "go.uber.org/atomic" ) type ExecutorStatus int type Executor interface { - common.QueueParamsCompleter - common.TaskStatusCompleter - common.QueueStatusCompleter + metrics.UsageLimitsCompleter + metrics.TaskStatusCompleter Exec(task *Task) error - Status() types.QueueStatus } type QueuedExecutor struct { @@ -26,7 +23,7 @@ type QueuedExecutor struct { } type ExecutorOptions struct { - usageLimits *types.UsageLimits + usageLimits *metrics.UsageLimits } type ExecutorOption func(*ExecutorOptions) @@ -36,7 +33,7 @@ func (o *ExecutorOptions) Apply(opts ...ExecutorOption) { } } -func WithUsageLimits(cfg *types.UsageLimits) ExecutorOption { +func WithUsageLimits(cfg *metrics.UsageLimits) ExecutorOption { return func(o *ExecutorOptions) { o.usageLimits = cfg } @@ -47,7 +44,7 @@ func NewQueuedExecutor(opts ...ExecutorOption) *QueuedExecutor { options.Apply(opts...) if options.usageLimits == nil { - options.usageLimits = &types.UsageLimits{ + options.usageLimits = &metrics.UsageLimits{ ConcurrentProcessLimit: host.AutoConcurrentProcessLimit(), QueuePressureMultiplier: 1, QueueRejectMultiplier: 1, @@ -75,7 +72,7 @@ func NewDelegatingExecutor() *DelegatingExecutor { } } -func (x *QueuedExecutor) SetUsageLimits(cfg *types.UsageLimits) { +func (x *QueuedExecutor) SetUsageLimits(cfg *metrics.UsageLimits) { x.usageLimits = cfg go x.workerPool.SetWorkerCount(int(cfg.GetConcurrentProcessLimit())) } @@ -97,38 +94,17 @@ func (x *QueuedExecutor) Exec( return task.Error() } -func (x *QueuedExecutor) Status() types.QueueStatus { - queued := x.numQueued.Load() - running := x.numRunning.Load() - - switch { - case running < x.usageLimits.ConcurrentProcessLimit: - return types.Available - case queued < int32(float64(x.usageLimits.ConcurrentProcessLimit)* - x.usageLimits.QueuePressureMultiplier): - return types.Queueing - case queued < int32(float64(x.usageLimits.ConcurrentProcessLimit)* - x.usageLimits.QueueRejectMultiplier): - return types.QueuePressure - } - return types.QueueFull -} - -func (x *QueuedExecutor) CompleteQueueParams(stat *common.QueueParams) { +func (x *QueuedExecutor) CompleteUsageLimits(stat *metrics.UsageLimits) { stat.ConcurrentProcessLimit = x.usageLimits.ConcurrentProcessLimit stat.QueuePressureMultiplier = x.usageLimits.QueuePressureMultiplier stat.QueueRejectMultiplier = x.usageLimits.QueueRejectMultiplier } -func (x *QueuedExecutor) CompleteTaskStatus(stat *common.TaskStatus) { +func (x *QueuedExecutor) CompleteTaskStatus(stat *metrics.TaskStatus) { stat.NumQueued = x.numQueued.Load() stat.NumRunning = x.numRunning.Load() } -func (x *QueuedExecutor) CompleteQueueStatus(stat *common.QueueStatus) { - stat.QueueStatus = int32(x.Status()) -} - // DelegatingExecutor is an executor that does not run a worker pool, // runs all tasks as soon as possible, and is always available. 
// It will report that all of its tasks are Delegated, and will not report @@ -149,14 +125,8 @@ func (x *DelegatingExecutor) Exec(task *Task) error { return task.Error() } -func (x *DelegatingExecutor) Status() types.QueueStatus { - return types.Available -} - -func (x *DelegatingExecutor) CompleteQueueParams(stat *common.QueueParams) {} +func (x *DelegatingExecutor) CompleteUsageLimits(stat *metrics.UsageLimits) {} -func (x *DelegatingExecutor) CompleteTaskStatus(stat *common.TaskStatus) { +func (x *DelegatingExecutor) CompleteTaskStatus(stat *metrics.TaskStatus) { stat.NumDelegated = x.numTasks.Load() } - -func (x *DelegatingExecutor) CompleteQueueStatus(stat *common.QueueStatus) {} diff --git a/pkg/run/run_test.go b/pkg/run/run_test.go index a761138..7567b4d 100644 --- a/pkg/run/run_test.go +++ b/pkg/run/run_test.go @@ -5,6 +5,7 @@ import ( . "github.com/cobalt77/kubecc/internal/testutil" "github.com/cobalt77/kubecc/pkg/identity" "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/run" "github.com/cobalt77/kubecc/pkg/tracing" "github.com/cobalt77/kubecc/pkg/types" @@ -23,7 +24,7 @@ var _ = Describe("Run", func() { When("something", func() { It("should do something", func() { - exec := run.NewQueuedExecutor(run.WithUsageLimits(&types.UsageLimits{ + exec := run.NewQueuedExecutor(run.WithUsageLimits(&metrics.UsageLimits{ ConcurrentProcessLimit: 1, QueuePressureMultiplier: 1, QueueRejectMultiplier: 1, diff --git a/pkg/run/workerpool.go b/pkg/run/workerpool.go index 7c66742..7fbc045 100644 --- a/pkg/run/workerpool.go +++ b/pkg/run/workerpool.go @@ -51,6 +51,8 @@ type worker struct { func (w *worker) Run() { for { + // Checking stopQueue up front allows it to terminate immediately, + // since if both channels can be read from, go will pick one at random. 
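+ // Without the initial check, a single select over both the stop and
+ // task channels, e.g. (channel/handler names here are illustrative):
+ //
+ //   select {
+ //   case <-w.stopQueue:
+ //       return
+ //   case t := <-tasks:
+ //       run(t)
+ //   }
+ //
+ // could just as likely run another ready task instead of returning,
+ // so a stop would not take effect immediately.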
select { case <-w.stopQueue: return diff --git a/pkg/storage/chain.go b/pkg/storage/chain.go index 7d2836f..e6a03c0 100644 --- a/pkg/storage/chain.go +++ b/pkg/storage/chain.go @@ -5,8 +5,8 @@ import ( "strings" "sync" - "github.com/cobalt77/kubecc/pkg/apps/cachesrv/metrics" "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/types" "go.uber.org/zap" "google.golang.org/protobuf/proto" @@ -116,7 +116,7 @@ func (sp *ChainStorageProvider) Query( return results, nil } -func (sp *ChainStorageProvider) UsageInfo() *metrics.UsageInfo { +func (sp *ChainStorageProvider) UsageInfo() *metrics.CacheUsage { return sp.providers[0].UsageInfo() // todo } diff --git a/pkg/storage/provider.go b/pkg/storage/provider.go index 453d9b5..3797a69 100644 --- a/pkg/storage/provider.go +++ b/pkg/storage/provider.go @@ -3,7 +3,7 @@ package storage import ( "context" - "github.com/cobalt77/kubecc/pkg/apps/cachesrv/metrics" + "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/types" ) @@ -17,6 +17,6 @@ type StorageProvider interface { Get(context.Context, *types.CacheKey) (*types.CacheObject, error) Query(context.Context, []*types.CacheKey) ([]*types.CacheObjectMeta, error) - UsageInfo() *metrics.UsageInfo + UsageInfo() *metrics.CacheUsage CacheHits() *metrics.CacheHits } diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go index 241d35f..a0197bd 100644 --- a/pkg/storage/s3.go +++ b/pkg/storage/s3.go @@ -9,9 +9,9 @@ import ( "strconv" "time" - "github.com/cobalt77/kubecc/pkg/apps/cachesrv/metrics" "github.com/cobalt77/kubecc/pkg/config" "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/types" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" @@ -298,8 +298,8 @@ func (sp *s3StorageProvider) Query( return results, nil } -func (sp *s3StorageProvider) UsageInfo() *metrics.UsageInfo { - info := &metrics.UsageInfo{ +func (sp *s3StorageProvider) UsageInfo() *metrics.CacheUsage { + info := &metrics.CacheUsage{ ObjectCount: 0, TotalSize: 0, } diff --git a/pkg/storage/volatile.go b/pkg/storage/volatile.go index eecc213..8911532 100644 --- a/pkg/storage/volatile.go +++ b/pkg/storage/volatile.go @@ -5,9 +5,9 @@ import ( "fmt" "time" - "github.com/cobalt77/kubecc/pkg/apps/cachesrv/metrics" "github.com/cobalt77/kubecc/pkg/config" "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/types" "github.com/karlseguin/ccache/v2" "go.uber.org/atomic" @@ -129,7 +129,7 @@ func (sp *volatileStorageProvider) Query( return results, nil } -func (sp *volatileStorageProvider) UsageInfo() *metrics.UsageInfo { +func (sp *volatileStorageProvider) UsageInfo() *metrics.CacheUsage { totalSize := sp.totalSize.Load() var usagePercent float64 if sp.storageLimit == 0 { @@ -137,7 +137,7 @@ func (sp *volatileStorageProvider) UsageInfo() *metrics.UsageInfo { } else { usagePercent = float64(totalSize) / float64(sp.storageLimit) } - return &metrics.UsageInfo{ + return &metrics.CacheUsage{ ObjectCount: int64(sp.cache.ItemCount()), TotalSize: totalSize, UsagePercent: usagePercent, diff --git a/pkg/toolchains/metadata.go b/pkg/toolchains/metadata.go new file mode 100644 index 0000000..f6b7a6c --- /dev/null +++ b/pkg/toolchains/metadata.go @@ -0,0 +1,45 @@ +package toolchains + +import ( + "context" + "errors" + + "github.com/cobalt77/kubecc/pkg/metrics" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" +) + +var 
toolchainsKey = "kubecc-toolchains-metadata-key" + +var ( + ErrNoMetadata = errors.New("No metadata in incoming context") + ErrNoToolchains = errors.New("No toolchains in context") + ErrInvalidData = errors.New("Could not unmarshal proto data") +) + +func CreateMetadata(tcs *metrics.Toolchains) metadata.MD { + data, err := proto.Marshal(tcs) + if err != nil { + panic("Could not marshal proto data") + } + return metadata.New(map[string]string{ + toolchainsKey: string(data), + }) +} + +func FromIncomingContext(ctx context.Context) (*metrics.Toolchains, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, ErrNoMetadata + } + data := md.Get(toolchainsKey) + if len(data) == 0 { + return nil, ErrNoToolchains + } + toolchains := &metrics.Toolchains{} + err := proto.Unmarshal([]byte(data[1]), toolchains) + if err != nil { + return nil, ErrInvalidData + } + return toolchains, nil +} diff --git a/pkg/types/enum.go b/pkg/types/enum.go index c3419e8..094a102 100644 --- a/pkg/types/enum.go +++ b/pkg/types/enum.go @@ -22,11 +22,6 @@ const ( Cache = Component_Component_Cache TestComponent = Component_Component_Test - Available = QueueStatus_Available - Queueing = QueueStatus_Queueing - QueuePressure = QueueStatus_QueuePressure - QueueFull = QueueStatus_QueueFull - Unknown = StorageLocation_StorageLocation_Unknown Memory = StorageLocation_StorageLocation_Memory Disk = StorageLocation_StorageLocation_Disk diff --git a/pkg/types/marshal.go b/pkg/types/marshal.go index d6ece86..e090bb5 100644 --- a/pkg/types/marshal.go +++ b/pkg/types/marshal.go @@ -22,13 +22,6 @@ func NewStringSliceEncoder(slice []string) stringSliceEncoder { } } -func (a *AgentInfo) MarshalLogObject(enc zapcore.ObjectEncoder) error { - enc.AddString("node", a.GetNode()) - enc.AddString("pod", a.GetPod()) - enc.AddString("ns", a.GetNamespace()) - return nil -} - func (r *RunRequest) MarshalLogObject(enc zapcore.ObjectEncoder) error { enc.AddString("dir", r.GetWorkDir()) enc.AddUint32("uid", r.GetUID()) diff --git a/pkg/types/types.pb.go b/pkg/types/types.pb.go index fa7c390..44de676 100644 --- a/pkg/types/types.pb.go +++ b/pkg/types/types.pb.go @@ -2,12 +2,13 @@ // versions: // protoc-gen-go v1.25.0 // protoc v3.12.3 -// source: proto/types.proto +// source: pkg/types/types.proto package types import ( proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -61,11 +62,11 @@ func (x StorageLocation) String() string { } func (StorageLocation) Descriptor() protoreflect.EnumDescriptor { - return file_proto_types_proto_enumTypes[0].Descriptor() + return file_pkg_types_types_proto_enumTypes[0].Descriptor() } func (StorageLocation) Type() protoreflect.EnumType { - return &file_proto_types_proto_enumTypes[0] + return &file_pkg_types_types_proto_enumTypes[0] } func (x StorageLocation) Number() protoreflect.EnumNumber { @@ -74,7 +75,7 @@ func (x StorageLocation) Number() protoreflect.EnumNumber { // Deprecated: Use StorageLocation.Descriptor instead. 
func (StorageLocation) EnumDescriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{0} + return file_pkg_types_types_proto_rawDescGZIP(), []int{0} } type Component int32 @@ -137,11 +138,11 @@ func (x Component) String() string { } func (Component) Descriptor() protoreflect.EnumDescriptor { - return file_proto_types_proto_enumTypes[1].Descriptor() + return file_pkg_types_types_proto_enumTypes[1].Descriptor() } func (Component) Type() protoreflect.EnumType { - return &file_proto_types_proto_enumTypes[1] + return &file_pkg_types_types_proto_enumTypes[1] } func (x Component) Number() protoreflect.EnumNumber { @@ -150,59 +151,7 @@ func (x Component) Number() protoreflect.EnumNumber { // Deprecated: Use Component.Descriptor instead. func (Component) EnumDescriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{1} -} - -type QueueStatus int32 - -const ( - QueueStatus_Available QueueStatus = 0 - QueueStatus_Queueing QueueStatus = 1 - QueueStatus_QueuePressure QueueStatus = 2 - QueueStatus_QueueFull QueueStatus = 3 -) - -// Enum value maps for QueueStatus. -var ( - QueueStatus_name = map[int32]string{ - 0: "Available", - 1: "Queueing", - 2: "QueuePressure", - 3: "QueueFull", - } - QueueStatus_value = map[string]int32{ - "Available": 0, - "Queueing": 1, - "QueuePressure": 2, - "QueueFull": 3, - } -) - -func (x QueueStatus) Enum() *QueueStatus { - p := new(QueueStatus) - *p = x - return p -} - -func (x QueueStatus) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (QueueStatus) Descriptor() protoreflect.EnumDescriptor { - return file_proto_types_proto_enumTypes[2].Descriptor() -} - -func (QueueStatus) Type() protoreflect.EnumType { - return &file_proto_types_proto_enumTypes[2] -} - -func (x QueueStatus) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use QueueStatus.Descriptor instead. -func (QueueStatus) EnumDescriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{2} + return file_pkg_types_types_proto_rawDescGZIP(), []int{1} } type ToolchainKind int32 @@ -244,11 +193,11 @@ func (x ToolchainKind) String() string { } func (ToolchainKind) Descriptor() protoreflect.EnumDescriptor { - return file_proto_types_proto_enumTypes[3].Descriptor() + return file_pkg_types_types_proto_enumTypes[2].Descriptor() } func (ToolchainKind) Type() protoreflect.EnumType { - return &file_proto_types_proto_enumTypes[3] + return &file_pkg_types_types_proto_enumTypes[2] } func (x ToolchainKind) Number() protoreflect.EnumNumber { @@ -257,7 +206,7 @@ func (x ToolchainKind) Number() protoreflect.EnumNumber { // Deprecated: Use ToolchainKind.Descriptor instead. func (ToolchainKind) EnumDescriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{3} + return file_pkg_types_types_proto_rawDescGZIP(), []int{2} } type ToolchainLang int32 @@ -296,11 +245,11 @@ func (x ToolchainLang) String() string { } func (ToolchainLang) Descriptor() protoreflect.EnumDescriptor { - return file_proto_types_proto_enumTypes[4].Descriptor() + return file_pkg_types_types_proto_enumTypes[3].Descriptor() } func (ToolchainLang) Type() protoreflect.EnumType { - return &file_proto_types_proto_enumTypes[4] + return &file_pkg_types_types_proto_enumTypes[3] } func (x ToolchainLang) Number() protoreflect.EnumNumber { @@ -309,14 +258,15 @@ func (x ToolchainLang) Number() protoreflect.EnumNumber { // Deprecated: Use ToolchainLang.Descriptor instead. 
func (ToolchainLang) EnumDescriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{4} + return file_pkg_types_types_proto_rawDescGZIP(), []int{3} } type CompileResponse_Result int32 const ( - CompileResponse_Success CompileResponse_Result = 0 - CompileResponse_Fail CompileResponse_Result = 1 + CompileResponse_Success CompileResponse_Result = 0 + CompileResponse_Fail CompileResponse_Result = 1 + CompileResponse_InternalError CompileResponse_Result = 2 ) // Enum value maps for CompileResponse_Result. @@ -324,10 +274,12 @@ var ( CompileResponse_Result_name = map[int32]string{ 0: "Success", 1: "Fail", + 2: "InternalError", } CompileResponse_Result_value = map[string]int32{ - "Success": 0, - "Fail": 1, + "Success": 0, + "Fail": 1, + "InternalError": 2, } ) @@ -342,11 +294,11 @@ func (x CompileResponse_Result) String() string { } func (CompileResponse_Result) Descriptor() protoreflect.EnumDescriptor { - return file_proto_types_proto_enumTypes[5].Descriptor() + return file_pkg_types_types_proto_enumTypes[4].Descriptor() } func (CompileResponse_Result) Type() protoreflect.EnumType { - return &file_proto_types_proto_enumTypes[5] + return &file_pkg_types_types_proto_enumTypes[4] } func (x CompileResponse_Result) Number() protoreflect.EnumNumber { @@ -355,7 +307,7 @@ func (x CompileResponse_Result) Number() protoreflect.EnumNumber { // Deprecated: Use CompileResponse_Result.Descriptor instead. func (CompileResponse_Result) EnumDescriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{28, 0} + return file_pkg_types_types_proto_rawDescGZIP(), []int{22, 0} } type Empty struct { @@ -367,7 +319,7 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[0] + mi := &file_pkg_types_types_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -380,7 +332,7 @@ func (x *Empty) String() string { func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[0] + mi := &file_pkg_types_types_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -393,7 +345,7 @@ func (x *Empty) ProtoReflect() protoreflect.Message { // Deprecated: Use Empty.ProtoReflect.Descriptor instead. func (*Empty) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{0} + return file_pkg_types_types_proto_rawDescGZIP(), []int{0} } type PushRequest struct { @@ -408,7 +360,7 @@ type PushRequest struct { func (x *PushRequest) Reset() { *x = PushRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[1] + mi := &file_pkg_types_types_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -421,7 +373,7 @@ func (x *PushRequest) String() string { func (*PushRequest) ProtoMessage() {} func (x *PushRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[1] + mi := &file_pkg_types_types_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -434,7 +386,7 @@ func (x *PushRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PushRequest.ProtoReflect.Descriptor instead. 
func (*PushRequest) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{1} + return file_pkg_types_types_proto_rawDescGZIP(), []int{1} } func (x *PushRequest) GetKey() *CacheKey { @@ -462,7 +414,7 @@ type PullRequest struct { func (x *PullRequest) Reset() { *x = PullRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[2] + mi := &file_pkg_types_types_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -475,7 +427,7 @@ func (x *PullRequest) String() string { func (*PullRequest) ProtoMessage() {} func (x *PullRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[2] + mi := &file_pkg_types_types_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -488,7 +440,7 @@ func (x *PullRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PullRequest.ProtoReflect.Descriptor instead. func (*PullRequest) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{2} + return file_pkg_types_types_proto_rawDescGZIP(), []int{2} } func (x *PullRequest) GetKey() *CacheKey { @@ -509,7 +461,7 @@ type QueryRequest struct { func (x *QueryRequest) Reset() { *x = QueryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[3] + mi := &file_pkg_types_types_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -522,7 +474,7 @@ func (x *QueryRequest) String() string { func (*QueryRequest) ProtoMessage() {} func (x *QueryRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[3] + mi := &file_pkg_types_types_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -535,7 +487,7 @@ func (x *QueryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest.ProtoReflect.Descriptor instead. func (*QueryRequest) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{3} + return file_pkg_types_types_proto_rawDescGZIP(), []int{3} } func (x *QueryRequest) GetKeys() []*CacheKey { @@ -556,7 +508,7 @@ type QueryResponse struct { func (x *QueryResponse) Reset() { *x = QueryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[4] + mi := &file_pkg_types_types_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -569,7 +521,7 @@ func (x *QueryResponse) String() string { func (*QueryResponse) ProtoMessage() {} func (x *QueryResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[4] + mi := &file_pkg_types_types_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -582,7 +534,7 @@ func (x *QueryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryResponse.ProtoReflect.Descriptor instead. 
func (*QueryResponse) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{4} + return file_pkg_types_types_proto_rawDescGZIP(), []int{4} } func (x *QueryResponse) GetResults() []*CacheObjectMeta { @@ -604,7 +556,7 @@ type SyncRequest struct { func (x *SyncRequest) Reset() { *x = SyncRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[5] + mi := &file_pkg_types_types_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -617,7 +569,7 @@ func (x *SyncRequest) String() string { func (*SyncRequest) ProtoMessage() {} func (x *SyncRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[5] + mi := &file_pkg_types_types_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -630,7 +582,7 @@ func (x *SyncRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SyncRequest.ProtoReflect.Descriptor instead. func (*SyncRequest) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{5} + return file_pkg_types_types_proto_rawDescGZIP(), []int{5} } func (x *SyncRequest) GetLocalCache() []*CacheKey { @@ -658,7 +610,7 @@ type CacheKey struct { func (x *CacheKey) Reset() { *x = CacheKey{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[6] + mi := &file_pkg_types_types_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -671,7 +623,7 @@ func (x *CacheKey) String() string { func (*CacheKey) ProtoMessage() {} func (x *CacheKey) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[6] + mi := &file_pkg_types_types_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -684,7 +636,7 @@ func (x *CacheKey) ProtoReflect() protoreflect.Message { // Deprecated: Use CacheKey.ProtoReflect.Descriptor instead. func (*CacheKey) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{6} + return file_pkg_types_types_proto_rawDescGZIP(), []int{6} } func (x *CacheKey) GetHash() string { @@ -706,7 +658,7 @@ type CacheObject struct { func (x *CacheObject) Reset() { *x = CacheObject{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[7] + mi := &file_pkg_types_types_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -719,7 +671,7 @@ func (x *CacheObject) String() string { func (*CacheObject) ProtoMessage() {} func (x *CacheObject) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[7] + mi := &file_pkg_types_types_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -732,7 +684,7 @@ func (x *CacheObject) ProtoReflect() protoreflect.Message { // Deprecated: Use CacheObject.ProtoReflect.Descriptor instead. 
func (*CacheObject) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{7} + return file_pkg_types_types_proto_rawDescGZIP(), []int{7} } func (x *CacheObject) GetData() []byte { @@ -762,7 +714,7 @@ type CacheObjectMeta struct { func (x *CacheObjectMeta) Reset() { *x = CacheObjectMeta{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[8] + mi := &file_pkg_types_types_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -775,7 +727,7 @@ func (x *CacheObjectMeta) String() string { func (*CacheObjectMeta) ProtoMessage() {} func (x *CacheObjectMeta) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[8] + mi := &file_pkg_types_types_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -788,7 +740,7 @@ func (x *CacheObjectMeta) ProtoReflect() protoreflect.Message { // Deprecated: Use CacheObjectMeta.ProtoReflect.Descriptor instead. func (*CacheObjectMeta) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{8} + return file_pkg_types_types_proto_rawDescGZIP(), []int{8} } func (x *CacheObjectMeta) GetTags() map[string]string { @@ -820,13 +772,13 @@ type CacheObjectManaged struct { Size int64 `protobuf:"varint,1,opt,name=Size,proto3" json:"Size,omitempty"` Timestamp int64 `protobuf:"varint,2,opt,name=Timestamp,proto3" json:"Timestamp,omitempty"` Score int64 `protobuf:"varint,3,opt,name=Score,proto3" json:"Score,omitempty"` - Location StorageLocation `protobuf:"varint,5,opt,name=Location,proto3,enum=StorageLocation" json:"Location,omitempty"` + Location StorageLocation `protobuf:"varint,5,opt,name=Location,proto3,enum=types.StorageLocation" json:"Location,omitempty"` } func (x *CacheObjectManaged) Reset() { *x = CacheObjectManaged{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[9] + mi := &file_pkg_types_types_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -839,7 +791,7 @@ func (x *CacheObjectManaged) String() string { func (*CacheObjectManaged) ProtoMessage() {} func (x *CacheObjectManaged) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[9] + mi := &file_pkg_types_types_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -852,7 +804,7 @@ func (x *CacheObjectManaged) ProtoReflect() protoreflect.Message { // Deprecated: Use CacheObjectManaged.ProtoReflect.Descriptor instead. 
func (*CacheObjectManaged) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{9} + return file_pkg_types_types_proto_rawDescGZIP(), []int{9} } func (x *CacheObjectManaged) GetSize() int64 { @@ -894,7 +846,7 @@ type WhoisRequest struct { func (x *WhoisRequest) Reset() { *x = WhoisRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[10] + mi := &file_pkg_types_types_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -907,7 +859,7 @@ func (x *WhoisRequest) String() string { func (*WhoisRequest) ProtoMessage() {} func (x *WhoisRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[10] + mi := &file_pkg_types_types_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -920,7 +872,7 @@ func (x *WhoisRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WhoisRequest.ProtoReflect.Descriptor instead. func (*WhoisRequest) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{10} + return file_pkg_types_types_proto_rawDescGZIP(), []int{10} } func (x *WhoisRequest) GetUUID() string { @@ -937,13 +889,13 @@ type WhoisResponse struct { UUID string `protobuf:"bytes,1,opt,name=UUID,proto3" json:"UUID,omitempty"` Address string `protobuf:"bytes,2,opt,name=Address,proto3" json:"Address,omitempty"` - Component Component `protobuf:"varint,3,opt,name=Component,proto3,enum=Component" json:"Component,omitempty"` + Component Component `protobuf:"varint,3,opt,name=Component,proto3,enum=types.Component" json:"Component,omitempty"` } func (x *WhoisResponse) Reset() { *x = WhoisResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[11] + mi := &file_pkg_types_types_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -956,7 +908,7 @@ func (x *WhoisResponse) String() string { func (*WhoisResponse) ProtoMessage() {} func (x *WhoisResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[11] + mi := &file_pkg_types_types_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -969,7 +921,7 @@ func (x *WhoisResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WhoisResponse.ProtoReflect.Descriptor instead. 
func (*WhoisResponse) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{11} + return file_pkg_types_types_proto_rawDescGZIP(), []int{11} } func (x *WhoisResponse) GetUUID() string { @@ -998,14 +950,14 @@ type Metric struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Key *Key `protobuf:"bytes,1,opt,name=Key,proto3" json:"Key,omitempty"` - Value *Value `protobuf:"bytes,2,opt,name=Value,proto3" json:"Value,omitempty"` + Key *Key `protobuf:"bytes,1,opt,name=Key,proto3" json:"Key,omitempty"` + Value *any.Any `protobuf:"bytes,2,opt,name=Value,proto3" json:"Value,omitempty"` } func (x *Metric) Reset() { *x = Metric{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[12] + mi := &file_pkg_types_types_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1018,7 +970,7 @@ func (x *Metric) String() string { func (*Metric) ProtoMessage() {} func (x *Metric) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[12] + mi := &file_pkg_types_types_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1031,7 +983,7 @@ func (x *Metric) ProtoReflect() protoreflect.Message { // Deprecated: Use Metric.ProtoReflect.Descriptor instead. func (*Metric) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{12} + return file_pkg_types_types_proto_rawDescGZIP(), []int{12} } func (x *Metric) GetKey() *Key { @@ -1041,7 +993,7 @@ func (x *Metric) GetKey() *Key { return nil } -func (x *Metric) GetValue() *Value { +func (x *Metric) GetValue() *any.Any { if x != nil { return x.Value } @@ -1060,7 +1012,7 @@ type Key struct { func (x *Key) Reset() { *x = Key{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[13] + mi := &file_pkg_types_types_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1073,7 +1025,7 @@ func (x *Key) String() string { func (*Key) ProtoMessage() {} func (x *Key) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[13] + mi := &file_pkg_types_types_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1086,7 +1038,7 @@ func (x *Key) ProtoReflect() protoreflect.Message { // Deprecated: Use Key.ProtoReflect.Descriptor instead. 
func (*Key) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{13} + return file_pkg_types_types_proto_rawDescGZIP(), []int{13} } func (x *Key) GetBucket() string { @@ -1103,217 +1055,13 @@ func (x *Key) GetName() string { return "" } -type Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Data []byte `protobuf:"bytes,1,opt,name=Data,proto3" json:"Data,omitempty"` -} - -func (x *Value) Reset() { - *x = Value{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Value) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Value) ProtoMessage() {} - -func (x *Value) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Value.ProtoReflect.Descriptor instead. -func (*Value) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{14} -} - -func (x *Value) GetData() []byte { - if x != nil { - return x.Data - } - return nil -} - -type UsageLimits struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ConcurrentProcessLimit int32 `protobuf:"varint,1,opt,name=ConcurrentProcessLimit,proto3" json:"ConcurrentProcessLimit,omitempty"` - QueuePressureMultiplier float64 `protobuf:"fixed64,2,opt,name=QueuePressureMultiplier,proto3" json:"QueuePressureMultiplier,omitempty"` - QueueRejectMultiplier float64 `protobuf:"fixed64,3,opt,name=QueueRejectMultiplier,proto3" json:"QueueRejectMultiplier,omitempty"` -} - -func (x *UsageLimits) Reset() { - *x = UsageLimits{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UsageLimits) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UsageLimits) ProtoMessage() {} - -func (x *UsageLimits) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UsageLimits.ProtoReflect.Descriptor instead. 
-func (*UsageLimits) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{15} -} - -func (x *UsageLimits) GetConcurrentProcessLimit() int32 { - if x != nil { - return x.ConcurrentProcessLimit - } - return 0 -} - -func (x *UsageLimits) GetQueuePressureMultiplier() float64 { - if x != nil { - return x.QueuePressureMultiplier - } - return 0 -} - -func (x *UsageLimits) GetQueueRejectMultiplier() float64 { - if x != nil { - return x.QueueRejectMultiplier - } - return 0 -} - -type Toolchains struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Items []*Toolchain `protobuf:"bytes,1,rep,name=Items,proto3" json:"Items,omitempty"` -} - -func (x *Toolchains) Reset() { - *x = Toolchains{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Toolchains) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Toolchains) ProtoMessage() {} - -func (x *Toolchains) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Toolchains.ProtoReflect.Descriptor instead. -func (*Toolchains) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{16} -} - -func (x *Toolchains) GetItems() []*Toolchain { - if x != nil { - return x.Items - } - return nil -} - -type Metadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Toolchains *Toolchains `protobuf:"bytes,2,opt,name=Toolchains,proto3" json:"Toolchains,omitempty"` -} - -func (x *Metadata) Reset() { - *x = Metadata{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Metadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Metadata) ProtoMessage() {} - -func (x *Metadata) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. 
-func (*Metadata) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{17} -} - -func (x *Metadata) GetToolchains() *Toolchains { - if x != nil { - return x.Toolchains - } - return nil -} - type Toolchain struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Kind ToolchainKind `protobuf:"varint,1,opt,name=Kind,proto3,enum=ToolchainKind" json:"Kind,omitempty"` - Lang ToolchainLang `protobuf:"varint,2,opt,name=Lang,proto3,enum=ToolchainLang" json:"Lang,omitempty"` + Kind ToolchainKind `protobuf:"varint,1,opt,name=Kind,proto3,enum=types.ToolchainKind" json:"Kind,omitempty"` + Lang ToolchainLang `protobuf:"varint,2,opt,name=Lang,proto3,enum=types.ToolchainLang" json:"Lang,omitempty"` Executable string `protobuf:"bytes,3,opt,name=Executable,proto3" json:"Executable,omitempty"` TargetArch string `protobuf:"bytes,4,opt,name=TargetArch,proto3" json:"TargetArch,omitempty"` Version string `protobuf:"bytes,5,opt,name=Version,proto3" json:"Version,omitempty"` @@ -1323,7 +1071,7 @@ type Toolchain struct { func (x *Toolchain) Reset() { *x = Toolchain{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[18] + mi := &file_pkg_types_types_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1336,7 +1084,7 @@ func (x *Toolchain) String() string { func (*Toolchain) ProtoMessage() {} func (x *Toolchain) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[18] + mi := &file_pkg_types_types_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1349,7 +1097,7 @@ func (x *Toolchain) ProtoReflect() protoreflect.Message { // Deprecated: Use Toolchain.ProtoReflect.Descriptor instead. func (*Toolchain) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{18} + return file_pkg_types_types_proto_rawDescGZIP(), []int{14} } func (x *Toolchain) GetKind() ToolchainKind { @@ -1407,7 +1155,7 @@ type AgentToolchainInfo struct { func (x *AgentToolchainInfo) Reset() { *x = AgentToolchainInfo{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[19] + mi := &file_pkg_types_types_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1420,7 +1168,7 @@ func (x *AgentToolchainInfo) String() string { func (*AgentToolchainInfo) ProtoMessage() {} func (x *AgentToolchainInfo) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[19] + mi := &file_pkg_types_types_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1433,7 +1181,7 @@ func (x *AgentToolchainInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use AgentToolchainInfo.ProtoReflect.Descriptor instead. 
func (*AgentToolchainInfo) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{19} + return file_pkg_types_types_proto_rawDescGZIP(), []int{15} } func (x *AgentToolchainInfo) GetKind() string { @@ -1468,7 +1216,7 @@ type AgentToolchainInfoList struct { func (x *AgentToolchainInfoList) Reset() { *x = AgentToolchainInfoList{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[20] + mi := &file_pkg_types_types_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1481,7 +1229,7 @@ func (x *AgentToolchainInfoList) String() string { func (*AgentToolchainInfoList) ProtoMessage() {} func (x *AgentToolchainInfoList) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[20] + mi := &file_pkg_types_types_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1494,7 +1242,7 @@ func (x *AgentToolchainInfoList) ProtoReflect() protoreflect.Message { // Deprecated: Use AgentToolchainInfoList.ProtoReflect.Descriptor instead. func (*AgentToolchainInfoList) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{20} + return file_pkg_types_types_proto_rawDescGZIP(), []int{16} } func (x *AgentToolchainInfoList) GetInfo() []*AgentToolchainInfo { @@ -1525,7 +1273,7 @@ type RunRequest struct { func (x *RunRequest) Reset() { *x = RunRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[21] + mi := &file_pkg_types_types_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1538,7 +1286,7 @@ func (x *RunRequest) String() string { func (*RunRequest) ProtoMessage() {} func (x *RunRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[21] + mi := &file_pkg_types_types_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1551,7 +1299,7 @@ func (x *RunRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RunRequest.ProtoReflect.Descriptor instead. func (*RunRequest) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{21} + return file_pkg_types_types_proto_rawDescGZIP(), []int{17} } func (m *RunRequest) GetCompiler() isRunRequest_Compiler { @@ -1647,7 +1395,7 @@ type RunResponse struct { func (x *RunResponse) Reset() { *x = RunResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[22] + mi := &file_pkg_types_types_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1660,7 +1408,7 @@ func (x *RunResponse) String() string { func (*RunResponse) ProtoMessage() {} func (x *RunResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[22] + mi := &file_pkg_types_types_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1673,7 +1421,7 @@ func (x *RunResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RunResponse.ProtoReflect.Descriptor instead. 
func (*RunResponse) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{22} + return file_pkg_types_types_proto_rawDescGZIP(), []int{18} } func (x *RunResponse) GetReturnCode() int32 { @@ -1707,7 +1455,7 @@ type ScheduleRequest struct { func (x *ScheduleRequest) Reset() { *x = ScheduleRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[23] + mi := &file_pkg_types_types_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1720,7 +1468,7 @@ func (x *ScheduleRequest) String() string { func (*ScheduleRequest) ProtoMessage() {} func (x *ScheduleRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[23] + mi := &file_pkg_types_types_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1733,7 +1481,7 @@ func (x *ScheduleRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ScheduleRequest.ProtoReflect.Descriptor instead. func (*ScheduleRequest) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{23} + return file_pkg_types_types_proto_rawDescGZIP(), []int{19} } // scheduler -> consumerd @@ -1746,7 +1494,7 @@ type ScheduleResponse struct { func (x *ScheduleResponse) Reset() { *x = ScheduleResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[24] + mi := &file_pkg_types_types_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1759,7 +1507,7 @@ func (x *ScheduleResponse) String() string { func (*ScheduleResponse) ProtoMessage() {} func (x *ScheduleResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[24] + mi := &file_pkg_types_types_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1772,141 +1520,7 @@ func (x *ScheduleResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ScheduleResponse.ProtoReflect.Descriptor instead. func (*ScheduleResponse) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{24} -} - -type AgentInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Node string `protobuf:"bytes,4,opt,name=Node,proto3" json:"Node,omitempty"` - Pod string `protobuf:"bytes,5,opt,name=Pod,proto3" json:"Pod,omitempty"` - Namespace string `protobuf:"bytes,6,opt,name=Namespace,proto3" json:"Namespace,omitempty"` -} - -func (x *AgentInfo) Reset() { - *x = AgentInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AgentInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AgentInfo) ProtoMessage() {} - -func (x *AgentInfo) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AgentInfo.ProtoReflect.Descriptor instead. 
-func (*AgentInfo) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{25} -} - -func (x *AgentInfo) GetNode() string { - if x != nil { - return x.Node - } - return "" -} - -func (x *AgentInfo) GetPod() string { - if x != nil { - return x.Pod - } - return "" -} - -func (x *AgentInfo) GetNamespace() string { - if x != nil { - return x.Namespace - } - return "" -} - -type SystemInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Arch string `protobuf:"bytes,1,opt,name=Arch,proto3" json:"Arch,omitempty"` - CpuThreads int32 `protobuf:"varint,2,opt,name=CpuThreads,proto3" json:"CpuThreads,omitempty"` - SystemMemory uint64 `protobuf:"varint,3,opt,name=SystemMemory,proto3" json:"SystemMemory,omitempty"` - Hostname string `protobuf:"bytes,4,opt,name=Hostname,proto3" json:"Hostname,omitempty"` -} - -func (x *SystemInfo) Reset() { - *x = SystemInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SystemInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SystemInfo) ProtoMessage() {} - -func (x *SystemInfo) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SystemInfo.ProtoReflect.Descriptor instead. -func (*SystemInfo) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{26} -} - -func (x *SystemInfo) GetArch() string { - if x != nil { - return x.Arch - } - return "" -} - -func (x *SystemInfo) GetCpuThreads() int32 { - if x != nil { - return x.CpuThreads - } - return 0 -} - -func (x *SystemInfo) GetSystemMemory() uint64 { - if x != nil { - return x.SystemMemory - } - return 0 -} - -func (x *SystemInfo) GetHostname() string { - if x != nil { - return x.Hostname - } - return "" + return file_pkg_types_types_proto_rawDescGZIP(), []int{20} } // consumerd -> scheduler -> agent @@ -1915,15 +1529,16 @@ type CompileRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Toolchain *Toolchain `protobuf:"bytes,1,opt,name=Toolchain,proto3" json:"Toolchain,omitempty"` - Args []string `protobuf:"bytes,2,rep,name=Args,proto3" json:"Args,omitempty"` - PreprocessedSource []byte `protobuf:"bytes,3,opt,name=PreprocessedSource,proto3" json:"PreprocessedSource,omitempty"` + RequestID string `protobuf:"bytes,1,opt,name=RequestID,proto3" json:"RequestID,omitempty"` + Toolchain *Toolchain `protobuf:"bytes,2,opt,name=Toolchain,proto3" json:"Toolchain,omitempty"` + Args []string `protobuf:"bytes,3,rep,name=Args,proto3" json:"Args,omitempty"` + PreprocessedSource []byte `protobuf:"bytes,4,opt,name=PreprocessedSource,proto3" json:"PreprocessedSource,omitempty"` } func (x *CompileRequest) Reset() { *x = CompileRequest{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[27] + mi := &file_pkg_types_types_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1936,7 +1551,7 @@ func (x *CompileRequest) String() string { func (*CompileRequest) ProtoMessage() {} func (x *CompileRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[27] + mi := 
&file_pkg_types_types_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1949,7 +1564,14 @@ func (x *CompileRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CompileRequest.ProtoReflect.Descriptor instead. func (*CompileRequest) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{27} + return file_pkg_types_types_proto_rawDescGZIP(), []int{21} +} + +func (x *CompileRequest) GetRequestID() string { + if x != nil { + return x.RequestID + } + return "" } func (x *CompileRequest) GetToolchain() *Toolchain { @@ -1979,8 +1601,9 @@ type CompileResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - CompileResult CompileResponse_Result `protobuf:"varint,1,opt,name=CompileResult,proto3,enum=CompileResponse_Result" json:"CompileResult,omitempty"` - CpuSecondsUsed int64 `protobuf:"varint,2,opt,name=CpuSecondsUsed,proto3" json:"CpuSecondsUsed,omitempty"` + RequestID string `protobuf:"bytes,1,opt,name=RequestID,proto3" json:"RequestID,omitempty"` + CompileResult CompileResponse_Result `protobuf:"varint,2,opt,name=CompileResult,proto3,enum=types.CompileResponse_Result" json:"CompileResult,omitempty"` + CpuSecondsUsed int64 `protobuf:"varint,3,opt,name=CpuSecondsUsed,proto3" json:"CpuSecondsUsed,omitempty"` // Types that are assignable to Data: // *CompileResponse_Error // *CompileResponse_CompiledSource @@ -1990,7 +1613,7 @@ type CompileResponse struct { func (x *CompileResponse) Reset() { *x = CompileResponse{} if protoimpl.UnsafeEnabled { - mi := &file_proto_types_proto_msgTypes[28] + mi := &file_pkg_types_types_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2003,7 +1626,7 @@ func (x *CompileResponse) String() string { func (*CompileResponse) ProtoMessage() {} func (x *CompileResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_types_proto_msgTypes[28] + mi := &file_pkg_types_types_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2016,7 +1639,14 @@ func (x *CompileResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CompileResponse.ProtoReflect.Descriptor instead. 
func (*CompileResponse) Descriptor() ([]byte, []int) { - return file_proto_types_proto_rawDescGZIP(), []int{28} + return file_pkg_types_types_proto_rawDescGZIP(), []int{22} +} + +func (x *CompileResponse) GetRequestID() string { + if x != nil { + return x.RequestID + } + return "" } func (x *CompileResponse) GetCompileResult() CompileResponse_Result { @@ -2059,383 +1689,440 @@ type isCompileResponse_Data interface { } type CompileResponse_Error struct { - Error string `protobuf:"bytes,3,opt,name=Error,proto3,oneof"` + Error string `protobuf:"bytes,4,opt,name=Error,proto3,oneof"` } type CompileResponse_CompiledSource struct { - CompiledSource []byte `protobuf:"bytes,4,opt,name=CompiledSource,proto3,oneof"` + CompiledSource []byte `protobuf:"bytes,5,opt,name=CompiledSource,proto3,oneof"` } func (*CompileResponse_Error) isCompileResponse_Data() {} func (*CompileResponse_CompiledSource) isCompileResponse_Data() {} -var File_proto_types_proto protoreflect.FileDescriptor - -var file_proto_types_proto_rawDesc = []byte{ - 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x50, 0x0a, 0x0b, - 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x03, 0x4b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, - 0x4b, 0x65, 0x79, 0x52, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x2a, - 0x0a, 0x0b, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, - 0x03, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x43, 0x61, 0x63, - 0x68, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x4b, 0x65, 0x79, 0x22, 0x2d, 0x0a, 0x0c, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x04, 0x4b, 0x65, - 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, - 0x4b, 0x65, 0x79, 0x52, 0x04, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x3b, 0x0a, 0x0d, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x43, 0x61, - 0x63, 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x07, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x68, 0x0a, 0x0b, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x0a, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x61, - 0x63, 0x68, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x43, 0x61, 0x63, 0x68, - 0x65, 0x4b, 0x65, 0x79, 0x52, 0x0a, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x61, 0x63, 0x68, 0x65, - 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x43, 0x61, 0x63, 0x68, 0x65, - 0x53, 0x69, 0x7a, 0x65, 0x4b, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x44, 0x65, +type SystemInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Arch string `protobuf:"bytes,1,opt,name=Arch,proto3" json:"Arch,omitempty"` + CpuThreads int32 `protobuf:"varint,2,opt,name=CpuThreads,proto3" json:"CpuThreads,omitempty"` + SystemMemory uint64 
`protobuf:"varint,3,opt,name=SystemMemory,proto3" json:"SystemMemory,omitempty"` + Hostname string `protobuf:"bytes,4,opt,name=Hostname,proto3" json:"Hostname,omitempty"` +} + +func (x *SystemInfo) Reset() { + *x = SystemInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_types_types_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SystemInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SystemInfo) ProtoMessage() {} + +func (x *SystemInfo) ProtoReflect() protoreflect.Message { + mi := &file_pkg_types_types_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SystemInfo.ProtoReflect.Descriptor instead. +func (*SystemInfo) Descriptor() ([]byte, []int) { + return file_pkg_types_types_proto_rawDescGZIP(), []int{23} +} + +func (x *SystemInfo) GetArch() string { + if x != nil { + return x.Arch + } + return "" +} + +func (x *SystemInfo) GetCpuThreads() int32 { + if x != nil { + return x.CpuThreads + } + return 0 +} + +func (x *SystemInfo) GetSystemMemory() uint64 { + if x != nil { + return x.SystemMemory + } + return 0 +} + +func (x *SystemInfo) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +var File_pkg_types_types_proto protoreflect.FileDescriptor + +var file_pkg_types_types_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x70, 0x6b, 0x67, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x19, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x5c, 0x0a, 0x0b, 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x21, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4b, 0x65, 0x79, 0x52, + 0x03, 0x4b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x61, 0x63, + 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x22, 0x30, 0x0a, 0x0b, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x21, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x4b, + 0x65, 0x79, 0x22, 0x33, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x23, 0x0a, 0x04, 0x4b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4b, 0x65, + 0x79, 0x52, 0x04, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x41, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x74, + 0x61, 0x52, 0x07, 0x52, 0x65, 0x73, 0x75, 
0x6c, 0x74, 0x73, 0x22, 0x6e, 0x0a, 0x0b, 0x53, 0x79, + 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x0a, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x0a, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4b, 0x62, - 0x22, 0x1e, 0x0a, 0x08, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, - 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x61, 0x73, 0x68, - 0x22, 0x4f, 0x0a, 0x0b, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, - 0x61, 0x74, 0x61, 0x12, 0x2c, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x62, 0x6a, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x43, + 0x61, 0x63, 0x68, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4b, 0x62, 0x22, 0x1e, 0x0a, 0x08, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x61, 0x73, 0x68, 0x22, 0x55, 0x0a, 0x0b, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x12, 0x32, 0x0a, + 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x22, 0xdd, 0x01, 0x0a, 0x0f, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x4d, 0x65, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x04, 0x54, 0x61, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x45, - 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x65, 0x12, 0x39, 0x0a, - 0x0d, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x52, 0x0d, 0x4d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0x8a, 0x01, 0x0a, 0x12, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 
0x53, 0x69, 0x7a, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1c, 0x0a, 0x09, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x63, - 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x53, 0x63, 0x6f, 0x72, 0x65, - 0x12, 0x2c, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x22, - 0x0a, 0x0c, 0x57, 0x68, 0x6f, 0x69, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, - 0x0a, 0x04, 0x55, 0x55, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x55, 0x55, - 0x49, 0x44, 0x22, 0x67, 0x0a, 0x0d, 0x57, 0x68, 0x6f, 0x69, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x55, 0x55, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x55, 0x55, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x12, 0x28, 0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, - 0x52, 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x22, 0x3e, 0x0a, 0x06, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x16, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x04, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x1c, 0x0a, - 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x06, 0x2e, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x31, 0x0a, 0x03, 0x4b, - 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, - 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0xb5, 0x01, 0x0a, 0x0b, - 0x55, 0x73, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x16, 0x43, - 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, - 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x43, 0x6f, 0x6e, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x12, 0x38, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, 0x72, 0x65, 0x73, - 0x73, 0x75, 0x72, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x17, 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, 0x72, 0x65, 0x73, 0x73, - 0x75, 0x72, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x34, 0x0a, - 0x15, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, - 
0x69, 0x65, 0x72, 0x22, 0x2e, 0x0a, 0x0a, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x73, 0x12, 0x20, 0x0a, 0x05, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0a, 0x2e, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x05, 0x49, 0x74, - 0x65, 0x6d, 0x73, 0x22, 0x37, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, - 0x2b, 0x0a, 0x0a, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, - 0x52, 0x0a, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x22, 0xcd, 0x01, 0x0a, - 0x09, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x4b, 0x69, - 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x54, 0x6f, 0x6f, 0x6c, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x22, - 0x0a, 0x04, 0x4c, 0x61, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x54, - 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x52, 0x04, 0x4c, 0x61, - 0x6e, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x41, 0x72, 0x63, 0x68, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x41, 0x72, - 0x63, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, - 0x50, 0x69, 0x63, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0a, 0x50, 0x69, 0x63, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x22, 0x5c, 0x0a, 0x12, - 0x41, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, - 0x0a, 0x08, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x08, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x41, 0x0a, 0x16, 0x41, 0x67, - 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, - 0x4c, 0x69, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0xd4, 0x01, + 0x61, 0x22, 0xe9, 0x01, 0x0a, 0x0f, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x67, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x45, + 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x52, 
0x0e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, + 0x61, 0x74, 0x65, 0x12, 0x3f, 0x0a, 0x0d, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x64, 0x52, 0x0d, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x90, 0x01, + 0x0a, 0x12, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x32, 0x0a, 0x08, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x22, 0x0a, 0x0c, 0x57, 0x68, 0x6f, 0x69, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x55, 0x55, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x55, 0x55, 0x49, 0x44, 0x22, 0x6d, 0x0a, 0x0d, 0x57, 0x68, 0x6f, 0x69, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x55, 0x55, 0x49, 0x44, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x55, 0x55, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x2e, 0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x22, 0x52, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x1c, 0x0a, + 0x03, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x31, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x16, + 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xd9, 0x01, 0x0a, 0x09, 0x54, + 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 
0x12, 0x28, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x54, + 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x4b, 0x69, + 0x6e, 0x64, 0x12, 0x28, 0x0a, 0x04, 0x4c, 0x61, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x14, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x52, 0x04, 0x4c, 0x61, 0x6e, 0x67, 0x12, 0x1e, 0x0a, 0x0a, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1e, 0x0a, 0x0a, + 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x41, 0x72, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x41, 0x72, 0x63, 0x68, 0x12, 0x18, 0x0a, 0x07, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x50, 0x69, 0x63, 0x44, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x50, 0x69, 0x63, 0x44, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x22, 0x5c, 0x0a, 0x12, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x54, + 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x4b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4b, 0x69, 0x6e, 0x64, + 0x12, 0x16, 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x47, 0x0a, 0x16, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6f, + 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2d, + 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x0a, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x04, 0x50, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x50, 0x61, - 0x74, 0x68, 0x12, 0x2a, 0x0a, 0x09, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x48, 0x00, 0x52, 0x09, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x12, - 0x0a, 0x04, 0x41, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x41, 0x72, - 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x55, 0x49, 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x03, 0x55, 0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x47, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x03, 0x47, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x57, 0x6f, 0x72, 0x6b, 0x44, 0x69, - 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x57, 0x6f, 0x72, 0x6b, 0x44, 0x69, 0x72, - 0x12, 0x10, 0x0a, 0x03, 0x45, 0x6e, 0x76, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x45, - 0x6e, 0x76, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x74, 0x64, 0x69, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x05, 0x53, 0x74, 0x64, 0x69, 0x6e, 0x42, 0x0a, 0x0a, 0x08, 0x43, 
0x6f, 0x6d, 0x70, - 0x69, 0x6c, 0x65, 0x72, 0x22, 0x5d, 0x0a, 0x0b, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x43, 0x6f, 0x64, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x43, - 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x06, 0x53, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x53, - 0x74, 0x64, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x53, 0x74, 0x64, - 0x65, 0x72, 0x72, 0x22, 0x11, 0x0a, 0x0f, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x12, 0x0a, 0x10, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4f, 0x0a, 0x09, 0x41, 0x67, - 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x50, - 0x6f, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x50, 0x6f, 0x64, 0x12, 0x1c, 0x0a, - 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x80, 0x01, 0x0a, 0x0a, - 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x41, 0x72, - 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x41, 0x72, 0x63, 0x68, 0x12, 0x1e, - 0x0a, 0x0a, 0x43, 0x70, 0x75, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x0a, 0x43, 0x70, 0x75, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x12, 0x22, - 0x0a, 0x0c, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x6d, 0x6f, - 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x7e, - 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x28, 0x0a, 0x09, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x52, - 0x09, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x41, 0x72, - 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x41, 0x72, 0x67, 0x73, 0x12, 0x2e, - 0x0a, 0x12, 0x50, 0x72, 0x65, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x50, 0x72, 0x65, 0x70, - 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xe3, - 0x01, 0x0a, 0x0f, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x43, 0x6f, 0x6d, 0x70, - 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x52, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x43, 0x70, 0x75, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x55, - 0x73, 
0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x43, 0x70, 0x75, 0x53, 0x65, - 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x05, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x12, 0x28, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0e, 0x43, 0x6f, 0x6d, - 0x70, 0x69, 0x6c, 0x65, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x1f, 0x0a, 0x06, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x61, 0x69, 0x6c, 0x10, 0x01, 0x42, 0x06, 0x0a, 0x04, - 0x44, 0x61, 0x74, 0x61, 0x2a, 0x7c, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, - 0x77, 0x6e, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x10, 0x01, - 0x12, 0x18, 0x0a, 0x14, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x44, 0x69, 0x73, 0x6b, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x53, 0x33, - 0x10, 0x03, 0x2a, 0x9b, 0x02, 0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, - 0x12, 0x15, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x55, 0x6e, - 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x6f, 0x6d, 0x70, 0x6f, - 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, - 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 0x72, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, - 0x6e, 0x74, 0x5f, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x10, 0x03, 0x12, - 0x16, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x43, 0x6f, 0x6e, - 0x73, 0x75, 0x6d, 0x65, 0x72, 0x10, 0x04, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x6f, 0x6d, 0x70, 0x6f, - 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x64, 0x10, 0x05, - 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x4d, 0x61, - 0x6b, 0x65, 0x10, 0x06, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, - 0x74, 0x5f, 0x54, 0x65, 0x73, 0x74, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x6f, 0x6d, 0x70, - 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x44, 0x61, 0x73, 0x68, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x10, - 0x08, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x43, - 0x4c, 0x49, 0x10, 0x09, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, - 0x74, 0x5f, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x43, - 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x43, 0x61, 0x63, 0x68, 0x65, 0x10, 0x0b, - 0x2a, 0x4c, 0x0a, 0x0b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x0d, 0x0a, 0x09, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x10, 0x00, 0x12, 0x0c, - 0x0a, 0x08, 0x51, 0x75, 0x65, 
0x75, 0x65, 0x69, 0x6e, 0x67, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, - 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, 0x72, 0x65, 0x73, 0x73, 0x75, 0x72, 0x65, 0x10, 0x02, 0x12, - 0x0d, 0x0a, 0x09, 0x51, 0x75, 0x65, 0x75, 0x65, 0x46, 0x75, 0x6c, 0x6c, 0x10, 0x03, 0x2a, 0x8b, - 0x01, 0x0a, 0x0d, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4b, 0x69, 0x6e, 0x64, - 0x12, 0x19, 0x0a, 0x15, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4b, 0x69, 0x6e, - 0x64, 0x5f, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x54, - 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x5f, 0x47, 0x6e, 0x75, - 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4b, - 0x69, 0x6e, 0x64, 0x5f, 0x43, 0x6c, 0x61, 0x6e, 0x67, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x54, - 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x5f, 0x54, 0x65, 0x73, - 0x74, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x4b, 0x69, 0x6e, 0x64, 0x5f, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x10, 0x04, 0x2a, 0x6f, 0x0a, 0x0d, - 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x12, 0x19, 0x0a, - 0x15, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x5f, 0x55, - 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x6f, 0x6f, 0x6c, - 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x5f, 0x43, 0x10, 0x01, 0x12, 0x15, 0x0a, - 0x11, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x5f, 0x43, - 0x58, 0x58, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x5f, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x10, 0x03, 0x32, 0x2d, 0x0a, - 0x09, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x64, 0x12, 0x20, 0x0a, 0x03, 0x52, 0x75, - 0x6e, 0x12, 0x0b, 0x2e, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0c, - 0x2e, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xc1, 0x01, 0x0a, - 0x09, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x07, 0x43, 0x6f, - 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x12, 0x0f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x09, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x1a, 0x06, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x28, 0x01, 0x30, 0x01, 0x12, - 0x29, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, - 0x65, 0x72, 0x64, 0x12, 0x09, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x06, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x28, 0x01, 0x30, 0x01, 0x12, 0x34, 0x0a, 0x0b, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x10, 0x2e, 0x43, 0x6f, 0x6d, 0x70, - 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x1a, 0x0f, 0x2e, 0x43, 0x6f, - 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x28, 0x01, 0x30, 0x01, - 0x32, 0x6a, 0x0a, 0x07, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x06, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x07, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x1a, 0x06, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x28, 0x01, 0x30, 
0x01, 0x12, 0x18, 0x0a, 0x06, 0x4c, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x12, 0x04, 0x2e, 0x4b, 0x65, 0x79, 0x1a, 0x06, 0x2e, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x30, 0x01, 0x12, 0x26, 0x0a, 0x05, 0x57, 0x68, 0x6f, 0x69, 0x73, 0x12, 0x0d, 0x2e, - 0x57, 0x68, 0x6f, 0x69, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x57, - 0x68, 0x6f, 0x69, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x97, 0x01, 0x0a, - 0x05, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1c, 0x0a, 0x04, 0x50, 0x75, 0x73, 0x68, 0x12, 0x0c, - 0x2e, 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x06, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x04, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x0c, 0x2e, 0x50, - 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0c, 0x2e, 0x43, 0x61, 0x63, - 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x12, 0x0d, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x0e, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x24, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x0c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0c, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x30, 0x01, 0x42, 0x0b, 0x5a, 0x09, 0x70, 0x6b, 0x67, 0x2f, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x68, 0x12, 0x30, 0x0a, 0x09, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x54, 0x6f, + 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x48, 0x00, 0x52, 0x09, 0x54, 0x6f, 0x6f, 0x6c, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x41, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x04, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x55, 0x49, 0x44, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x55, 0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x47, 0x49, + 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x47, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, + 0x57, 0x6f, 0x72, 0x6b, 0x44, 0x69, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x57, + 0x6f, 0x72, 0x6b, 0x44, 0x69, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x45, 0x6e, 0x76, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x03, 0x45, 0x6e, 0x76, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x74, 0x64, 0x69, + 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x53, 0x74, 0x64, 0x69, 0x6e, 0x42, 0x0a, + 0x0a, 0x08, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x22, 0x5d, 0x0a, 0x0b, 0x52, 0x75, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x52, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x52, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x74, 0x64, + 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x53, 0x74, 0x64, 0x6f, 0x75, + 0x74, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x74, 0x64, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x06, 0x53, 0x74, 0x64, 0x65, 0x72, 0x72, 0x22, 0x11, 0x0a, 0x0f, 0x53, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x12, 0x0a, 0x10, + 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0xa2, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 
0x74, 0x12, 0x1c, 0x0a, 0x09, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x44, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x44, 0x12, 0x2e, 0x0a, 0x09, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x54, 0x6f, 0x6f, + 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x09, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x41, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x04, 0x41, 0x72, 0x67, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x50, 0x72, 0x65, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x12, 0x50, 0x72, 0x65, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x9a, 0x02, 0x0a, 0x0f, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x44, 0x12, 0x43, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0d, 0x43, + 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x26, 0x0a, 0x0e, + 0x43, 0x70, 0x75, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x55, 0x73, 0x65, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x43, 0x70, 0x75, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x55, 0x73, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x28, 0x0a, 0x0e, + 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x10, 0x00, 0x12, 0x08, 0x0a, + 0x04, 0x46, 0x61, 0x69, 0x6c, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0x02, 0x42, 0x06, 0x0a, 0x04, 0x44, 0x61, + 0x74, 0x61, 0x22, 0x80, 0x01, 0x0a, 0x0a, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x41, 0x72, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x41, 0x72, 0x63, 0x68, 0x12, 0x1e, 0x0a, 0x0a, 0x43, 0x70, 0x75, 0x54, 0x68, 0x72, 0x65, + 0x61, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x43, 0x70, 0x75, 0x54, 0x68, + 0x72, 0x65, 0x61, 0x64, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, + 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x53, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x48, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x48, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x2a, 0x7c, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x55, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x10, + 0x01, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x44, 0x69, 0x73, 0x6b, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x53, + 0x33, 0x10, 0x03, 0x2a, 0x9b, 0x02, 0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x55, + 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x6f, 0x6d, 0x70, + 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x10, 0x01, 0x12, 0x17, 0x0a, + 0x13, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x53, 0x63, 0x68, 0x65, 0x64, + 0x75, 0x6c, 0x65, 0x72, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x5f, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x10, 0x03, + 0x12, 0x16, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x43, 0x6f, + 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x10, 0x04, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x6f, 0x6d, 0x70, + 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x64, 0x10, + 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x4d, + 0x61, 0x6b, 0x65, 0x10, 0x06, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, + 0x6e, 0x74, 0x5f, 0x54, 0x65, 0x73, 0x74, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x6f, 0x6d, + 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x44, 0x61, 0x73, 0x68, 0x62, 0x6f, 0x61, 0x72, 0x64, + 0x10, 0x08, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, + 0x43, 0x4c, 0x49, 0x10, 0x09, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, + 0x6e, 0x74, 0x5f, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, + 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x43, 0x61, 0x63, 0x68, 0x65, 0x10, + 0x0b, 0x2a, 0x8b, 0x01, 0x0a, 0x0d, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4b, + 0x69, 0x6e, 0x64, 0x12, 0x19, 0x0a, 0x15, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x4b, 0x69, 0x6e, 0x64, 0x5f, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x15, + 0x0a, 0x11, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x5f, + 0x47, 0x6e, 0x75, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x5f, 0x43, 0x6c, 0x61, 0x6e, 0x67, 0x10, 0x02, 0x12, 0x16, + 0x0a, 0x12, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x5f, + 0x54, 0x65, 0x73, 0x74, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x5f, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x10, 0x04, 0x2a, + 0x6f, 0x0a, 0x0d, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x6e, 0x67, + 0x12, 0x19, 0x0a, 0x15, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x5f, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 
0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, + 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x5f, 0x43, 0x10, 0x01, + 0x12, 0x15, 0x0a, 0x11, 0x54, 0x6f, 0x6f, 0x6c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x5f, 0x43, 0x58, 0x58, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x6f, 0x6f, 0x6c, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x5f, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x10, 0x03, + 0x32, 0x39, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x64, 0x12, 0x2c, 0x0a, + 0x03, 0x52, 0x75, 0x6e, 0x12, 0x11, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x52, 0x75, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xd9, 0x01, 0x0a, 0x09, + 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x38, 0x0a, 0x07, 0x43, 0x6f, 0x6d, + 0x70, 0x69, 0x6c, 0x65, 0x12, 0x15, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x6f, 0x6d, + 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x13, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x63, + 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x1a, 0x15, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x28, 0x01, 0x30, 0x01, 0x12, 0x48, 0x0a, + 0x13, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x54, + 0x61, 0x73, 0x6b, 0x73, 0x12, 0x15, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x6f, 0x6d, + 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x32, 0x96, 0x01, 0x0a, 0x07, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x0d, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x1a, 0x0c, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x28, 0x01, 0x30, 0x01, 0x12, 0x2c, + 0x0a, 0x06, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x12, 0x0a, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x4b, 0x65, 0x79, 0x1a, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x30, 0x01, 0x12, 0x32, 0x0a, 0x05, + 0x57, 0x68, 0x6f, 0x69, 0x73, 0x12, 0x13, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x57, 0x68, + 0x6f, 0x69, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x57, 0x68, 0x6f, 0x69, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x32, 0xc7, 0x01, 0x0a, 0x05, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x28, 0x0a, 0x04, 0x50, 0x75, + 0x73, 0x68, 0x12, 0x12, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0c, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x2e, 0x0a, 0x04, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x12, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, + 0x1a, 0x12, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x32, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x13, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, + 0x12, 0x12, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x61, 0x63, + 0x68, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x30, 0x01, 0x42, 0x26, 0x5a, 0x24, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x62, 0x61, 0x6c, 0x74, 0x37, + 0x37, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x63, 0x63, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - file_proto_types_proto_rawDescOnce sync.Once - file_proto_types_proto_rawDescData = file_proto_types_proto_rawDesc + file_pkg_types_types_proto_rawDescOnce sync.Once + file_pkg_types_types_proto_rawDescData = file_pkg_types_types_proto_rawDesc ) -func file_proto_types_proto_rawDescGZIP() []byte { - file_proto_types_proto_rawDescOnce.Do(func() { - file_proto_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_types_proto_rawDescData) +func file_pkg_types_types_proto_rawDescGZIP() []byte { + file_pkg_types_types_proto_rawDescOnce.Do(func() { + file_pkg_types_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_types_types_proto_rawDescData) }) - return file_proto_types_proto_rawDescData -} - -var file_proto_types_proto_enumTypes = make([]protoimpl.EnumInfo, 6) -var file_proto_types_proto_msgTypes = make([]protoimpl.MessageInfo, 30) -var file_proto_types_proto_goTypes = []interface{}{ - (StorageLocation)(0), // 0: StorageLocation - (Component)(0), // 1: Component - (QueueStatus)(0), // 2: QueueStatus - (ToolchainKind)(0), // 3: ToolchainKind - (ToolchainLang)(0), // 4: ToolchainLang - (CompileResponse_Result)(0), // 5: CompileResponse.Result - (*Empty)(nil), // 6: Empty - (*PushRequest)(nil), // 7: PushRequest - (*PullRequest)(nil), // 8: PullRequest - (*QueryRequest)(nil), // 9: QueryRequest - (*QueryResponse)(nil), // 10: QueryResponse - (*SyncRequest)(nil), // 11: SyncRequest - (*CacheKey)(nil), // 12: CacheKey - (*CacheObject)(nil), // 13: CacheObject - (*CacheObjectMeta)(nil), // 14: CacheObjectMeta - (*CacheObjectManaged)(nil), // 15: CacheObjectManaged - (*WhoisRequest)(nil), // 16: WhoisRequest - (*WhoisResponse)(nil), // 17: WhoisResponse - (*Metric)(nil), // 18: Metric - (*Key)(nil), // 19: Key - (*Value)(nil), // 20: Value - (*UsageLimits)(nil), // 21: UsageLimits - (*Toolchains)(nil), // 22: Toolchains - (*Metadata)(nil), // 23: Metadata - (*Toolchain)(nil), // 24: Toolchain - (*AgentToolchainInfo)(nil), // 25: AgentToolchainInfo - (*AgentToolchainInfoList)(nil), // 26: AgentToolchainInfoList - (*RunRequest)(nil), // 27: RunRequest - (*RunResponse)(nil), // 28: RunResponse - (*ScheduleRequest)(nil), // 29: ScheduleRequest - (*ScheduleResponse)(nil), // 30: ScheduleResponse - (*AgentInfo)(nil), // 31: AgentInfo - (*SystemInfo)(nil), // 32: SystemInfo - (*CompileRequest)(nil), // 33: CompileRequest - (*CompileResponse)(nil), // 34: CompileResponse - nil, // 35: CacheObjectMeta.TagsEntry -} -var 
file_proto_types_proto_depIdxs = []int32{ - 12, // 0: PushRequest.Key:type_name -> CacheKey - 13, // 1: PushRequest.Object:type_name -> CacheObject - 12, // 2: PullRequest.Key:type_name -> CacheKey - 12, // 3: QueryRequest.Keys:type_name -> CacheKey - 14, // 4: QueryResponse.Results:type_name -> CacheObjectMeta - 12, // 5: SyncRequest.LocalCache:type_name -> CacheKey - 14, // 6: CacheObject.Metadata:type_name -> CacheObjectMeta - 35, // 7: CacheObjectMeta.Tags:type_name -> CacheObjectMeta.TagsEntry - 15, // 8: CacheObjectMeta.ManagedFields:type_name -> CacheObjectManaged - 0, // 9: CacheObjectManaged.Location:type_name -> StorageLocation - 1, // 10: WhoisResponse.Component:type_name -> Component - 19, // 11: Metric.Key:type_name -> Key - 20, // 12: Metric.Value:type_name -> Value - 24, // 13: Toolchains.Items:type_name -> Toolchain - 22, // 14: Metadata.Toolchains:type_name -> Toolchains - 3, // 15: Toolchain.Kind:type_name -> ToolchainKind - 4, // 16: Toolchain.Lang:type_name -> ToolchainLang - 25, // 17: AgentToolchainInfoList.info:type_name -> AgentToolchainInfo - 24, // 18: RunRequest.Toolchain:type_name -> Toolchain - 24, // 19: CompileRequest.Toolchain:type_name -> Toolchain - 5, // 20: CompileResponse.CompileResult:type_name -> CompileResponse.Result - 27, // 21: Consumerd.Run:input_type -> RunRequest - 33, // 22: Scheduler.Compile:input_type -> CompileRequest - 23, // 23: Scheduler.ConnectAgent:input_type -> Metadata - 23, // 24: Scheduler.ConnectConsumerd:input_type -> Metadata - 34, // 25: Scheduler.StreamTasks:input_type -> CompileResponse - 18, // 26: Monitor.Stream:input_type -> Metric - 19, // 27: Monitor.Listen:input_type -> Key - 16, // 28: Monitor.Whois:input_type -> WhoisRequest - 7, // 29: Cache.Push:input_type -> PushRequest - 8, // 30: Cache.Pull:input_type -> PullRequest - 9, // 31: Cache.Query:input_type -> QueryRequest - 11, // 32: Cache.Sync:input_type -> SyncRequest - 28, // 33: Consumerd.Run:output_type -> RunResponse - 34, // 34: Scheduler.Compile:output_type -> CompileResponse - 6, // 35: Scheduler.ConnectAgent:output_type -> Empty - 6, // 36: Scheduler.ConnectConsumerd:output_type -> Empty - 33, // 37: Scheduler.StreamTasks:output_type -> CompileRequest - 6, // 38: Monitor.Stream:output_type -> Empty - 20, // 39: Monitor.Listen:output_type -> Value - 17, // 40: Monitor.Whois:output_type -> WhoisResponse - 6, // 41: Cache.Push:output_type -> Empty - 13, // 42: Cache.Pull:output_type -> CacheObject - 10, // 43: Cache.Query:output_type -> QueryResponse - 13, // 44: Cache.Sync:output_type -> CacheObject - 33, // [33:45] is the sub-list for method output_type - 21, // [21:33] is the sub-list for method input_type - 21, // [21:21] is the sub-list for extension type_name - 21, // [21:21] is the sub-list for extension extendee - 0, // [0:21] is the sub-list for field type_name -} - -func init() { file_proto_types_proto_init() } -func file_proto_types_proto_init() { - if File_proto_types_proto != nil { + return file_pkg_types_types_proto_rawDescData +} + +var file_pkg_types_types_proto_enumTypes = make([]protoimpl.EnumInfo, 5) +var file_pkg_types_types_proto_msgTypes = make([]protoimpl.MessageInfo, 25) +var file_pkg_types_types_proto_goTypes = []interface{}{ + (StorageLocation)(0), // 0: types.StorageLocation + (Component)(0), // 1: types.Component + (ToolchainKind)(0), // 2: types.ToolchainKind + (ToolchainLang)(0), // 3: types.ToolchainLang + (CompileResponse_Result)(0), // 4: types.CompileResponse.Result + (*Empty)(nil), // 5: types.Empty + (*PushRequest)(nil), // 
6: types.PushRequest + (*PullRequest)(nil), // 7: types.PullRequest + (*QueryRequest)(nil), // 8: types.QueryRequest + (*QueryResponse)(nil), // 9: types.QueryResponse + (*SyncRequest)(nil), // 10: types.SyncRequest + (*CacheKey)(nil), // 11: types.CacheKey + (*CacheObject)(nil), // 12: types.CacheObject + (*CacheObjectMeta)(nil), // 13: types.CacheObjectMeta + (*CacheObjectManaged)(nil), // 14: types.CacheObjectManaged + (*WhoisRequest)(nil), // 15: types.WhoisRequest + (*WhoisResponse)(nil), // 16: types.WhoisResponse + (*Metric)(nil), // 17: types.Metric + (*Key)(nil), // 18: types.Key + (*Toolchain)(nil), // 19: types.Toolchain + (*AgentToolchainInfo)(nil), // 20: types.AgentToolchainInfo + (*AgentToolchainInfoList)(nil), // 21: types.AgentToolchainInfoList + (*RunRequest)(nil), // 22: types.RunRequest + (*RunResponse)(nil), // 23: types.RunResponse + (*ScheduleRequest)(nil), // 24: types.ScheduleRequest + (*ScheduleResponse)(nil), // 25: types.ScheduleResponse + (*CompileRequest)(nil), // 26: types.CompileRequest + (*CompileResponse)(nil), // 27: types.CompileResponse + (*SystemInfo)(nil), // 28: types.SystemInfo + nil, // 29: types.CacheObjectMeta.TagsEntry + (*any.Any)(nil), // 30: google.protobuf.Any +} +var file_pkg_types_types_proto_depIdxs = []int32{ + 11, // 0: types.PushRequest.Key:type_name -> types.CacheKey + 12, // 1: types.PushRequest.Object:type_name -> types.CacheObject + 11, // 2: types.PullRequest.Key:type_name -> types.CacheKey + 11, // 3: types.QueryRequest.Keys:type_name -> types.CacheKey + 13, // 4: types.QueryResponse.Results:type_name -> types.CacheObjectMeta + 11, // 5: types.SyncRequest.LocalCache:type_name -> types.CacheKey + 13, // 6: types.CacheObject.Metadata:type_name -> types.CacheObjectMeta + 29, // 7: types.CacheObjectMeta.Tags:type_name -> types.CacheObjectMeta.TagsEntry + 14, // 8: types.CacheObjectMeta.ManagedFields:type_name -> types.CacheObjectManaged + 0, // 9: types.CacheObjectManaged.Location:type_name -> types.StorageLocation + 1, // 10: types.WhoisResponse.Component:type_name -> types.Component + 18, // 11: types.Metric.Key:type_name -> types.Key + 30, // 12: types.Metric.Value:type_name -> google.protobuf.Any + 2, // 13: types.Toolchain.Kind:type_name -> types.ToolchainKind + 3, // 14: types.Toolchain.Lang:type_name -> types.ToolchainLang + 20, // 15: types.AgentToolchainInfoList.info:type_name -> types.AgentToolchainInfo + 19, // 16: types.RunRequest.Toolchain:type_name -> types.Toolchain + 19, // 17: types.CompileRequest.Toolchain:type_name -> types.Toolchain + 4, // 18: types.CompileResponse.CompileResult:type_name -> types.CompileResponse.Result + 22, // 19: types.Consumerd.Run:input_type -> types.RunRequest + 26, // 20: types.Scheduler.Compile:input_type -> types.CompileRequest + 27, // 21: types.Scheduler.StreamIncomingTasks:input_type -> types.CompileResponse + 26, // 22: types.Scheduler.StreamOutgoingTasks:input_type -> types.CompileRequest + 17, // 23: types.Monitor.Stream:input_type -> types.Metric + 18, // 24: types.Monitor.Listen:input_type -> types.Key + 15, // 25: types.Monitor.Whois:input_type -> types.WhoisRequest + 6, // 26: types.Cache.Push:input_type -> types.PushRequest + 7, // 27: types.Cache.Pull:input_type -> types.PullRequest + 8, // 28: types.Cache.Query:input_type -> types.QueryRequest + 10, // 29: types.Cache.Sync:input_type -> types.SyncRequest + 23, // 30: types.Consumerd.Run:output_type -> types.RunResponse + 27, // 31: types.Scheduler.Compile:output_type -> types.CompileResponse + 26, // 32: 
types.Scheduler.StreamIncomingTasks:output_type -> types.CompileRequest + 27, // 33: types.Scheduler.StreamOutgoingTasks:output_type -> types.CompileResponse + 5, // 34: types.Monitor.Stream:output_type -> types.Empty + 30, // 35: types.Monitor.Listen:output_type -> google.protobuf.Any + 16, // 36: types.Monitor.Whois:output_type -> types.WhoisResponse + 5, // 37: types.Cache.Push:output_type -> types.Empty + 12, // 38: types.Cache.Pull:output_type -> types.CacheObject + 9, // 39: types.Cache.Query:output_type -> types.QueryResponse + 12, // 40: types.Cache.Sync:output_type -> types.CacheObject + 30, // [30:41] is the sub-list for method output_type + 19, // [19:30] is the sub-list for method input_type + 19, // [19:19] is the sub-list for extension type_name + 19, // [19:19] is the sub-list for extension extendee + 0, // [0:19] is the sub-list for field type_name +} + +func init() { file_pkg_types_types_proto_init() } +func file_pkg_types_types_proto_init() { + if File_pkg_types_types_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_proto_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Empty); i { case 0: return &v.state @@ -2447,7 +2134,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PushRequest); i { case 0: return &v.state @@ -2459,7 +2146,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PullRequest); i { case 0: return &v.state @@ -2471,7 +2158,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QueryRequest); i { case 0: return &v.state @@ -2483,7 +2170,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QueryResponse); i { case 0: return &v.state @@ -2495,7 +2182,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SyncRequest); i { case 0: return &v.state @@ -2507,7 +2194,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CacheKey); i { case 0: return &v.state @@ -2519,7 +2206,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CacheObject); i { case 0: return &v.state @@ -2531,7 +2218,7 @@ func file_proto_types_proto_init() { return 
nil } } - file_proto_types_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CacheObjectMeta); i { case 0: return &v.state @@ -2543,7 +2230,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CacheObjectManaged); i { case 0: return &v.state @@ -2555,7 +2242,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WhoisRequest); i { case 0: return &v.state @@ -2567,7 +2254,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WhoisResponse); i { case 0: return &v.state @@ -2579,7 +2266,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Metric); i { case 0: return &v.state @@ -2591,7 +2278,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Key); i { case 0: return &v.state @@ -2603,55 +2290,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_types_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UsageLimits); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_types_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Toolchains); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_types_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Metadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_types_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Toolchain); i { case 0: return &v.state @@ -2663,7 +2302,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AgentToolchainInfo); i { case 0: return &v.state @@ -2675,7 +2314,7 @@ func file_proto_types_proto_init() { return nil } } - 
file_proto_types_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AgentToolchainInfoList); i { case 0: return &v.state @@ -2687,7 +2326,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RunRequest); i { case 0: return &v.state @@ -2699,7 +2338,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RunResponse); i { case 0: return &v.state @@ -2711,7 +2350,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ScheduleRequest); i { case 0: return &v.state @@ -2723,7 +2362,7 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_pkg_types_types_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ScheduleResponse); i { case 0: return &v.state @@ -2735,20 +2374,8 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AgentInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_types_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SystemInfo); i { + file_pkg_types_types_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CompileRequest); i { case 0: return &v.state case 1: @@ -2759,8 +2386,8 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CompileRequest); i { + file_pkg_types_types_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CompileResponse); i { case 0: return &v.state case 1: @@ -2771,8 +2398,8 @@ func file_proto_types_proto_init() { return nil } } - file_proto_types_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CompileResponse); i { + file_pkg_types_types_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SystemInfo); i { case 0: return &v.state case 1: @@ -2784,11 +2411,11 @@ func file_proto_types_proto_init() { } } } - file_proto_types_proto_msgTypes[21].OneofWrappers = []interface{}{ + file_pkg_types_types_proto_msgTypes[17].OneofWrappers = []interface{}{ (*RunRequest_Path)(nil), (*RunRequest_Toolchain)(nil), } - file_proto_types_proto_msgTypes[28].OneofWrappers = []interface{}{ + file_pkg_types_types_proto_msgTypes[22].OneofWrappers = []interface{}{ (*CompileResponse_Error)(nil), (*CompileResponse_CompiledSource)(nil), } @@ -2796,19 +2423,19 @@ func file_proto_types_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: 
file_proto_types_proto_rawDesc, - NumEnums: 6, - NumMessages: 30, + RawDescriptor: file_pkg_types_types_proto_rawDesc, + NumEnums: 5, + NumMessages: 25, NumExtensions: 0, NumServices: 4, }, - GoTypes: file_proto_types_proto_goTypes, - DependencyIndexes: file_proto_types_proto_depIdxs, - EnumInfos: file_proto_types_proto_enumTypes, - MessageInfos: file_proto_types_proto_msgTypes, + GoTypes: file_pkg_types_types_proto_goTypes, + DependencyIndexes: file_pkg_types_types_proto_depIdxs, + EnumInfos: file_pkg_types_types_proto_enumTypes, + MessageInfos: file_pkg_types_types_proto_msgTypes, }.Build() - File_proto_types_proto = out.File - file_proto_types_proto_rawDesc = nil - file_proto_types_proto_goTypes = nil - file_proto_types_proto_depIdxs = nil + File_pkg_types_types_proto = out.File + file_pkg_types_types_proto_rawDesc = nil + file_pkg_types_types_proto_goTypes = nil + file_pkg_types_types_proto_depIdxs = nil } diff --git a/proto/types.proto b/pkg/types/types.proto similarity index 77% rename from proto/types.proto rename to pkg/types/types.proto index b1a39b2..106f0b6 100644 --- a/proto/types.proto +++ b/pkg/types/types.proto @@ -1,5 +1,8 @@ syntax = "proto3"; -option go_package = "pkg/types"; +option go_package = "github.com/cobalt77/kubecc/pkg/types"; +import "google/protobuf/any.proto"; + +package types; message Empty {} @@ -9,14 +12,13 @@ service Consumerd { service Scheduler { rpc Compile(CompileRequest) returns (CompileResponse); - rpc ConnectAgent(stream Metadata) returns (stream Empty); - rpc ConnectConsumerd(stream Metadata) returns (stream Empty); - rpc StreamTasks(stream CompileResponse) returns (stream CompileRequest); + rpc StreamIncomingTasks(stream CompileResponse) returns (stream CompileRequest); + rpc StreamOutgoingTasks(stream CompileRequest) returns (stream CompileResponse); } service Monitor { rpc Stream(stream Metric) returns (stream Empty); - rpc Listen(Key) returns (stream Value); + rpc Listen(Key) returns (stream google.protobuf.Any); rpc Whois(WhoisRequest) returns (WhoisResponse); } @@ -90,7 +92,7 @@ message WhoisResponse { message Metric { Key Key = 1; - Value Value = 2; + google.protobuf.Any Value = 2; } message Key { @@ -98,24 +100,6 @@ message Key { string Name = 2; } -message Value { - bytes Data = 1; -} - -message UsageLimits { - int32 ConcurrentProcessLimit = 1; - double QueuePressureMultiplier = 2; - double QueueRejectMultiplier = 3; -} - -message Toolchains { - repeated Toolchain Items = 1; -} - -message Metadata { - Toolchains Toolchains = 2; -} - enum Component { Component_Unknown = 0; Component_Agent = 1; @@ -131,13 +115,6 @@ enum Component { Component_Cache = 11; } -enum QueueStatus { - Available = 0; - Queueing = 1; - QueuePressure = 2; - QueueFull = 3; -} - enum ToolchainKind { ToolchainKind_Unknown = 0; ToolchainKind_Gnu = 1; @@ -198,25 +175,12 @@ message ScheduleRequest {} // scheduler -> consumerd message ScheduleResponse {} - -message AgentInfo { - string Node = 4; - string Pod = 5; - string Namespace = 6; -} - -message SystemInfo { - string Arch = 1; - int32 CpuThreads = 2; - uint64 SystemMemory = 3; - string Hostname = 4; -} - // consumerd -> scheduler -> agent message CompileRequest { - Toolchain Toolchain = 1; - repeated string Args = 2; - bytes PreprocessedSource = 3; + string RequestID = 1; + Toolchain Toolchain = 2; + repeated string Args = 3; + bytes PreprocessedSource = 4; } // agent -> scheduler -> consumerd @@ -224,11 +188,20 @@ message CompileResponse { enum Result { Success = 0; Fail = 1; + InternalError = 2; } - Result 
CompileResult = 1; - int64 CpuSecondsUsed = 2; + string RequestID = 1; + Result CompileResult = 2; + int64 CpuSecondsUsed = 3; oneof Data { - string Error = 3; - bytes CompiledSource = 4; + string Error = 4; + bytes CompiledSource = 5; } } + +message SystemInfo { + string Arch = 1; + int32 CpuThreads = 2; + uint64 SystemMemory = 3; + string Hostname = 4; +} \ No newline at end of file diff --git a/pkg/types/types_grpc.pb.go b/pkg/types/types_grpc.pb.go index 9d5710b..25fd700 100644 --- a/pkg/types/types_grpc.pb.go +++ b/pkg/types/types_grpc.pb.go @@ -4,6 +4,7 @@ package types import ( context "context" + any "github.com/golang/protobuf/ptypes/any" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" @@ -31,7 +32,7 @@ func NewConsumerdClient(cc grpc.ClientConnInterface) ConsumerdClient { func (c *consumerdClient) Run(ctx context.Context, in *RunRequest, opts ...grpc.CallOption) (*RunResponse, error) { out := new(RunResponse) - err := c.cc.Invoke(ctx, "/Consumerd/Run", in, out, opts...) + err := c.cc.Invoke(ctx, "/types.Consumerd/Run", in, out, opts...) if err != nil { return nil, err } @@ -76,7 +77,7 @@ func _Consumerd_Run_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/Consumerd/Run", + FullMethod: "/types.Consumerd/Run", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ConsumerdServer).Run(ctx, req.(*RunRequest)) @@ -88,7 +89,7 @@ func _Consumerd_Run_Handler(srv interface{}, ctx context.Context, dec func(inter // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) var Consumerd_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "Consumerd", + ServiceName: "types.Consumerd", HandlerType: (*ConsumerdServer)(nil), Methods: []grpc.MethodDesc{ { @@ -97,7 +98,7 @@ var Consumerd_ServiceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "proto/types.proto", + Metadata: "pkg/types/types.proto", } // SchedulerClient is the client API for Scheduler service. @@ -105,9 +106,8 @@ var Consumerd_ServiceDesc = grpc.ServiceDesc{ // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type SchedulerClient interface { Compile(ctx context.Context, in *CompileRequest, opts ...grpc.CallOption) (*CompileResponse, error) - ConnectAgent(ctx context.Context, opts ...grpc.CallOption) (Scheduler_ConnectAgentClient, error) - ConnectConsumerd(ctx context.Context, opts ...grpc.CallOption) (Scheduler_ConnectConsumerdClient, error) - StreamTasks(ctx context.Context, opts ...grpc.CallOption) (Scheduler_StreamTasksClient, error) + StreamIncomingTasks(ctx context.Context, opts ...grpc.CallOption) (Scheduler_StreamIncomingTasksClient, error) + StreamOutgoingTasks(ctx context.Context, opts ...grpc.CallOption) (Scheduler_StreamOutgoingTasksClient, error) } type schedulerClient struct { @@ -120,100 +120,69 @@ func NewSchedulerClient(cc grpc.ClientConnInterface) SchedulerClient { func (c *schedulerClient) Compile(ctx context.Context, in *CompileRequest, opts ...grpc.CallOption) (*CompileResponse, error) { out := new(CompileResponse) - err := c.cc.Invoke(ctx, "/Scheduler/Compile", in, out, opts...) + err := c.cc.Invoke(ctx, "/types.Scheduler/Compile", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -func (c *schedulerClient) ConnectAgent(ctx context.Context, opts ...grpc.CallOption) (Scheduler_ConnectAgentClient, error) { - stream, err := c.cc.NewStream(ctx, &Scheduler_ServiceDesc.Streams[0], "/Scheduler/ConnectAgent", opts...) +func (c *schedulerClient) StreamIncomingTasks(ctx context.Context, opts ...grpc.CallOption) (Scheduler_StreamIncomingTasksClient, error) { + stream, err := c.cc.NewStream(ctx, &Scheduler_ServiceDesc.Streams[0], "/types.Scheduler/StreamIncomingTasks", opts...) if err != nil { return nil, err } - x := &schedulerConnectAgentClient{stream} + x := &schedulerStreamIncomingTasksClient{stream} return x, nil } -type Scheduler_ConnectAgentClient interface { - Send(*Metadata) error - Recv() (*Empty, error) - grpc.ClientStream -} - -type schedulerConnectAgentClient struct { - grpc.ClientStream -} - -func (x *schedulerConnectAgentClient) Send(m *Metadata) error { - return x.ClientStream.SendMsg(m) -} - -func (x *schedulerConnectAgentClient) Recv() (*Empty, error) { - m := new(Empty) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *schedulerClient) ConnectConsumerd(ctx context.Context, opts ...grpc.CallOption) (Scheduler_ConnectConsumerdClient, error) { - stream, err := c.cc.NewStream(ctx, &Scheduler_ServiceDesc.Streams[1], "/Scheduler/ConnectConsumerd", opts...) - if err != nil { - return nil, err - } - x := &schedulerConnectConsumerdClient{stream} - return x, nil -} - -type Scheduler_ConnectConsumerdClient interface { - Send(*Metadata) error - Recv() (*Empty, error) +type Scheduler_StreamIncomingTasksClient interface { + Send(*CompileResponse) error + Recv() (*CompileRequest, error) grpc.ClientStream } -type schedulerConnectConsumerdClient struct { +type schedulerStreamIncomingTasksClient struct { grpc.ClientStream } -func (x *schedulerConnectConsumerdClient) Send(m *Metadata) error { +func (x *schedulerStreamIncomingTasksClient) Send(m *CompileResponse) error { return x.ClientStream.SendMsg(m) } -func (x *schedulerConnectConsumerdClient) Recv() (*Empty, error) { - m := new(Empty) +func (x *schedulerStreamIncomingTasksClient) Recv() (*CompileRequest, error) { + m := new(CompileRequest) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } -func (c *schedulerClient) StreamTasks(ctx context.Context, opts ...grpc.CallOption) (Scheduler_StreamTasksClient, error) { - stream, err := c.cc.NewStream(ctx, &Scheduler_ServiceDesc.Streams[2], "/Scheduler/StreamTasks", opts...) +func (c *schedulerClient) StreamOutgoingTasks(ctx context.Context, opts ...grpc.CallOption) (Scheduler_StreamOutgoingTasksClient, error) { + stream, err := c.cc.NewStream(ctx, &Scheduler_ServiceDesc.Streams[1], "/types.Scheduler/StreamOutgoingTasks", opts...) 
if err != nil { return nil, err } - x := &schedulerStreamTasksClient{stream} + x := &schedulerStreamOutgoingTasksClient{stream} return x, nil } -type Scheduler_StreamTasksClient interface { - Send(*CompileResponse) error - Recv() (*CompileRequest, error) +type Scheduler_StreamOutgoingTasksClient interface { + Send(*CompileRequest) error + Recv() (*CompileResponse, error) grpc.ClientStream } -type schedulerStreamTasksClient struct { +type schedulerStreamOutgoingTasksClient struct { grpc.ClientStream } -func (x *schedulerStreamTasksClient) Send(m *CompileResponse) error { +func (x *schedulerStreamOutgoingTasksClient) Send(m *CompileRequest) error { return x.ClientStream.SendMsg(m) } -func (x *schedulerStreamTasksClient) Recv() (*CompileRequest, error) { - m := new(CompileRequest) +func (x *schedulerStreamOutgoingTasksClient) Recv() (*CompileResponse, error) { + m := new(CompileResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } @@ -225,9 +194,8 @@ func (x *schedulerStreamTasksClient) Recv() (*CompileRequest, error) { // for forward compatibility type SchedulerServer interface { Compile(context.Context, *CompileRequest) (*CompileResponse, error) - ConnectAgent(Scheduler_ConnectAgentServer) error - ConnectConsumerd(Scheduler_ConnectConsumerdServer) error - StreamTasks(Scheduler_StreamTasksServer) error + StreamIncomingTasks(Scheduler_StreamIncomingTasksServer) error + StreamOutgoingTasks(Scheduler_StreamOutgoingTasksServer) error mustEmbedUnimplementedSchedulerServer() } @@ -238,14 +206,11 @@ type UnimplementedSchedulerServer struct { func (UnimplementedSchedulerServer) Compile(context.Context, *CompileRequest) (*CompileResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Compile not implemented") } -func (UnimplementedSchedulerServer) ConnectAgent(Scheduler_ConnectAgentServer) error { - return status.Errorf(codes.Unimplemented, "method ConnectAgent not implemented") +func (UnimplementedSchedulerServer) StreamIncomingTasks(Scheduler_StreamIncomingTasksServer) error { + return status.Errorf(codes.Unimplemented, "method StreamIncomingTasks not implemented") } -func (UnimplementedSchedulerServer) ConnectConsumerd(Scheduler_ConnectConsumerdServer) error { - return status.Errorf(codes.Unimplemented, "method ConnectConsumerd not implemented") -} -func (UnimplementedSchedulerServer) StreamTasks(Scheduler_StreamTasksServer) error { - return status.Errorf(codes.Unimplemented, "method StreamTasks not implemented") +func (UnimplementedSchedulerServer) StreamOutgoingTasks(Scheduler_StreamOutgoingTasksServer) error { + return status.Errorf(codes.Unimplemented, "method StreamOutgoingTasks not implemented") } func (UnimplementedSchedulerServer) mustEmbedUnimplementedSchedulerServer() {} @@ -270,7 +235,7 @@ func _Scheduler_Compile_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/Scheduler/Compile", + FullMethod: "/types.Scheduler/Compile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SchedulerServer).Compile(ctx, req.(*CompileRequest)) @@ -278,78 +243,52 @@ func _Scheduler_Compile_Handler(srv interface{}, ctx context.Context, dec func(i return interceptor(ctx, in, info, handler) } -func _Scheduler_ConnectAgent_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SchedulerServer).ConnectAgent(&schedulerConnectAgentServer{stream}) -} - -type Scheduler_ConnectAgentServer interface { - Send(*Empty) error - Recv() (*Metadata, error) - 
grpc.ServerStream -} - -type schedulerConnectAgentServer struct { - grpc.ServerStream -} - -func (x *schedulerConnectAgentServer) Send(m *Empty) error { - return x.ServerStream.SendMsg(m) -} - -func (x *schedulerConnectAgentServer) Recv() (*Metadata, error) { - m := new(Metadata) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil +func _Scheduler_StreamIncomingTasks_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SchedulerServer).StreamIncomingTasks(&schedulerStreamIncomingTasksServer{stream}) } -func _Scheduler_ConnectConsumerd_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SchedulerServer).ConnectConsumerd(&schedulerConnectConsumerdServer{stream}) -} - -type Scheduler_ConnectConsumerdServer interface { - Send(*Empty) error - Recv() (*Metadata, error) +type Scheduler_StreamIncomingTasksServer interface { + Send(*CompileRequest) error + Recv() (*CompileResponse, error) grpc.ServerStream } -type schedulerConnectConsumerdServer struct { +type schedulerStreamIncomingTasksServer struct { grpc.ServerStream } -func (x *schedulerConnectConsumerdServer) Send(m *Empty) error { +func (x *schedulerStreamIncomingTasksServer) Send(m *CompileRequest) error { return x.ServerStream.SendMsg(m) } -func (x *schedulerConnectConsumerdServer) Recv() (*Metadata, error) { - m := new(Metadata) +func (x *schedulerStreamIncomingTasksServer) Recv() (*CompileResponse, error) { + m := new(CompileResponse) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } -func _Scheduler_StreamTasks_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SchedulerServer).StreamTasks(&schedulerStreamTasksServer{stream}) +func _Scheduler_StreamOutgoingTasks_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SchedulerServer).StreamOutgoingTasks(&schedulerStreamOutgoingTasksServer{stream}) } -type Scheduler_StreamTasksServer interface { - Send(*CompileRequest) error - Recv() (*CompileResponse, error) +type Scheduler_StreamOutgoingTasksServer interface { + Send(*CompileResponse) error + Recv() (*CompileRequest, error) grpc.ServerStream } -type schedulerStreamTasksServer struct { +type schedulerStreamOutgoingTasksServer struct { grpc.ServerStream } -func (x *schedulerStreamTasksServer) Send(m *CompileRequest) error { +func (x *schedulerStreamOutgoingTasksServer) Send(m *CompileResponse) error { return x.ServerStream.SendMsg(m) } -func (x *schedulerStreamTasksServer) Recv() (*CompileResponse, error) { - m := new(CompileResponse) +func (x *schedulerStreamOutgoingTasksServer) Recv() (*CompileRequest, error) { + m := new(CompileRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } @@ -360,7 +299,7 @@ func (x *schedulerStreamTasksServer) Recv() (*CompileResponse, error) { // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) var Scheduler_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "Scheduler", + ServiceName: "types.Scheduler", HandlerType: (*SchedulerServer)(nil), Methods: []grpc.MethodDesc{ { @@ -370,25 +309,19 @@ var Scheduler_ServiceDesc = grpc.ServiceDesc{ }, Streams: []grpc.StreamDesc{ { - StreamName: "ConnectAgent", - Handler: _Scheduler_ConnectAgent_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "ConnectConsumerd", - Handler: _Scheduler_ConnectConsumerd_Handler, + StreamName: "StreamIncomingTasks", + Handler: _Scheduler_StreamIncomingTasks_Handler, 
ServerStreams: true, ClientStreams: true, }, { - StreamName: "StreamTasks", - Handler: _Scheduler_StreamTasks_Handler, + StreamName: "StreamOutgoingTasks", + Handler: _Scheduler_StreamOutgoingTasks_Handler, ServerStreams: true, ClientStreams: true, }, }, - Metadata: "proto/types.proto", + Metadata: "pkg/types/types.proto", } // MonitorClient is the client API for Monitor service. @@ -409,7 +342,7 @@ func NewMonitorClient(cc grpc.ClientConnInterface) MonitorClient { } func (c *monitorClient) Stream(ctx context.Context, opts ...grpc.CallOption) (Monitor_StreamClient, error) { - stream, err := c.cc.NewStream(ctx, &Monitor_ServiceDesc.Streams[0], "/Monitor/Stream", opts...) + stream, err := c.cc.NewStream(ctx, &Monitor_ServiceDesc.Streams[0], "/types.Monitor/Stream", opts...) if err != nil { return nil, err } @@ -440,7 +373,7 @@ func (x *monitorStreamClient) Recv() (*Empty, error) { } func (c *monitorClient) Listen(ctx context.Context, in *Key, opts ...grpc.CallOption) (Monitor_ListenClient, error) { - stream, err := c.cc.NewStream(ctx, &Monitor_ServiceDesc.Streams[1], "/Monitor/Listen", opts...) + stream, err := c.cc.NewStream(ctx, &Monitor_ServiceDesc.Streams[1], "/types.Monitor/Listen", opts...) if err != nil { return nil, err } @@ -455,7 +388,7 @@ func (c *monitorClient) Listen(ctx context.Context, in *Key, opts ...grpc.CallOp } type Monitor_ListenClient interface { - Recv() (*Value, error) + Recv() (*any.Any, error) grpc.ClientStream } @@ -463,8 +396,8 @@ type monitorListenClient struct { grpc.ClientStream } -func (x *monitorListenClient) Recv() (*Value, error) { - m := new(Value) +func (x *monitorListenClient) Recv() (*any.Any, error) { + m := new(any.Any) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } @@ -473,7 +406,7 @@ func (x *monitorListenClient) Recv() (*Value, error) { func (c *monitorClient) Whois(ctx context.Context, in *WhoisRequest, opts ...grpc.CallOption) (*WhoisResponse, error) { out := new(WhoisResponse) - err := c.cc.Invoke(ctx, "/Monitor/Whois", in, out, opts...) + err := c.cc.Invoke(ctx, "/types.Monitor/Whois", in, out, opts...) if err != nil { return nil, err } @@ -551,7 +484,7 @@ func _Monitor_Listen_Handler(srv interface{}, stream grpc.ServerStream) error { } type Monitor_ListenServer interface { - Send(*Value) error + Send(*any.Any) error grpc.ServerStream } @@ -559,7 +492,7 @@ type monitorListenServer struct { grpc.ServerStream } -func (x *monitorListenServer) Send(m *Value) error { +func (x *monitorListenServer) Send(m *any.Any) error { return x.ServerStream.SendMsg(m) } @@ -573,7 +506,7 @@ func _Monitor_Whois_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/Monitor/Whois", + FullMethod: "/types.Monitor/Whois", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MonitorServer).Whois(ctx, req.(*WhoisRequest)) @@ -585,7 +518,7 @@ func _Monitor_Whois_Handler(srv interface{}, ctx context.Context, dec func(inter // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) var Monitor_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "Monitor", + ServiceName: "types.Monitor", HandlerType: (*MonitorServer)(nil), Methods: []grpc.MethodDesc{ { @@ -606,7 +539,7 @@ var Monitor_ServiceDesc = grpc.ServiceDesc{ ServerStreams: true, }, }, - Metadata: "proto/types.proto", + Metadata: "pkg/types/types.proto", } // CacheClient is the client API for Cache service. 
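For reference, the reworked types.Scheduler service replaces ConnectAgent/ConnectConsumerd/StreamTasks with two long-lived bidirectional streams: agents call StreamIncomingTasks (receiving CompileRequests and sending CompileResponses), while consumerds call StreamOutgoingTasks in the opposite direction, and responses are matched to requests by RequestID. A minimal agent-side loop might look like the sketch below; it is illustrative only, handles tasks sequentially for brevity, and the compileTask helper is hypothetical rather than part of this patch.

// Illustrative sketch only; not part of this patch.
package example

import (
	"context"

	"github.com/cobalt77/kubecc/pkg/types"
)

// compileTask is a hypothetical stand-in for the agent's real task runner.
func compileTask(req *types.CompileRequest) *types.CompileResponse {
	return &types.CompileResponse{CompileResult: types.CompileResponse_Success}
}

func runAgentTaskLoop(ctx context.Context, client types.SchedulerClient) error {
	stream, err := client.StreamIncomingTasks(ctx)
	if err != nil {
		return err
	}
	for {
		// The scheduler's broker pushes CompileRequests down this stream as tasks are routed here.
		req, err := stream.Recv()
		if err != nil {
			return err // stream closed or scheduler unavailable
		}
		resp := compileTask(req)
		resp.RequestID = req.RequestID // responses are matched to their requests by RequestID
		if err := stream.Send(resp); err != nil {
			return err
		}
	}
}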
@@ -629,7 +562,7 @@ func NewCacheClient(cc grpc.ClientConnInterface) CacheClient { func (c *cacheClient) Push(ctx context.Context, in *PushRequest, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/Cache/Push", in, out, opts...) + err := c.cc.Invoke(ctx, "/types.Cache/Push", in, out, opts...) if err != nil { return nil, err } @@ -638,7 +571,7 @@ func (c *cacheClient) Push(ctx context.Context, in *PushRequest, opts ...grpc.Ca func (c *cacheClient) Pull(ctx context.Context, in *PullRequest, opts ...grpc.CallOption) (*CacheObject, error) { out := new(CacheObject) - err := c.cc.Invoke(ctx, "/Cache/Pull", in, out, opts...) + err := c.cc.Invoke(ctx, "/types.Cache/Pull", in, out, opts...) if err != nil { return nil, err } @@ -647,7 +580,7 @@ func (c *cacheClient) Pull(ctx context.Context, in *PullRequest, opts ...grpc.Ca func (c *cacheClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) { out := new(QueryResponse) - err := c.cc.Invoke(ctx, "/Cache/Query", in, out, opts...) + err := c.cc.Invoke(ctx, "/types.Cache/Query", in, out, opts...) if err != nil { return nil, err } @@ -655,7 +588,7 @@ func (c *cacheClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc. } func (c *cacheClient) Sync(ctx context.Context, in *SyncRequest, opts ...grpc.CallOption) (Cache_SyncClient, error) { - stream, err := c.cc.NewStream(ctx, &Cache_ServiceDesc.Streams[0], "/Cache/Sync", opts...) + stream, err := c.cc.NewStream(ctx, &Cache_ServiceDesc.Streams[0], "/types.Cache/Sync", opts...) if err != nil { return nil, err } @@ -736,7 +669,7 @@ func _Cache_Push_Handler(srv interface{}, ctx context.Context, dec func(interfac } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/Cache/Push", + FullMethod: "/types.Cache/Push", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CacheServer).Push(ctx, req.(*PushRequest)) @@ -754,7 +687,7 @@ func _Cache_Pull_Handler(srv interface{}, ctx context.Context, dec func(interfac } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/Cache/Pull", + FullMethod: "/types.Cache/Pull", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CacheServer).Pull(ctx, req.(*PullRequest)) @@ -772,7 +705,7 @@ func _Cache_Query_Handler(srv interface{}, ctx context.Context, dec func(interfa } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/Cache/Query", + FullMethod: "/types.Cache/Query", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CacheServer).Query(ctx, req.(*QueryRequest)) @@ -805,7 +738,7 @@ func (x *cacheSyncServer) Send(m *CacheObject) error { // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) var Cache_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "Cache", + ServiceName: "types.Cache", HandlerType: (*CacheServer)(nil), Methods: []grpc.MethodDesc{ { @@ -828,5 +761,5 @@ var Cache_ServiceDesc = grpc.ServiceDesc{ ServerStreams: true, }, }, - Metadata: "proto/types.proto", + Metadata: "pkg/types/types.proto", } diff --git a/pkg/ui/statusdisplay.go b/pkg/ui/statusdisplay.go index 354e6ec..ddbc64f 100644 --- a/pkg/ui/statusdisplay.go +++ b/pkg/ui/statusdisplay.go @@ -6,7 +6,7 @@ import ( "log" "sync" - "github.com/cobalt77/kubecc/pkg/metrics/common" + "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/types" ui "github.com/gizak/termui/v3" 
"github.com/gizak/termui/v3/widgets" @@ -15,9 +15,8 @@ import ( type agent struct { ctx context.Context info *types.WhoisResponse - queueParams *common.QueueParams - taskStatus *common.TaskStatus - queueStatus *common.QueueStatus + usageLimits *metrics.UsageLimits + taskStatus *metrics.TaskStatus } type StatusDisplay struct { @@ -38,14 +37,13 @@ func (s *StatusDisplay) makeRows() [][]string { defer s.mutex.RUnlock() rows := make([][]string, 0) - header := []string{"ID", "Running Tasks", "Queued Tasks", "Queue Status"} + header := []string{"ID", "Running Tasks", "Queued Tasks"} rows = append(rows, header) for _, agent := range s.agents { row := []string{ fmt.Sprintf("[%s] %s", agent.info.Component.Name(), agent.info.Address), - fmt.Sprintf("%d/%d", agent.taskStatus.NumRunning, agent.queueParams.ConcurrentProcessLimit), + fmt.Sprintf("%d/%d", agent.taskStatus.NumRunning, agent.usageLimits.ConcurrentProcessLimit), fmt.Sprint(agent.taskStatus.NumQueued), - types.QueueStatus(agent.queueStatus.QueueStatus).String(), } rows = append(rows, row) } @@ -57,9 +55,8 @@ func (s *StatusDisplay) AddAgent(ctx context.Context, info *types.WhoisResponse) s.agents = append(s.agents, &agent{ ctx: ctx, info: info, - queueParams: &common.QueueParams{}, - taskStatus: &common.TaskStatus{}, - queueStatus: &common.QueueStatus{}, + usageLimits: &metrics.UsageLimits{}, + taskStatus: &metrics.TaskStatus{}, }) s.mutex.Unlock() s.redraw() @@ -86,12 +83,10 @@ func (s *StatusDisplay) Update(uuid string, params interface{}) { } } switch p := params.(type) { - case *common.QueueParams: - s.agents[index].queueParams = p - case *common.TaskStatus: + case *metrics.UsageLimits: + s.agents[index].usageLimits = p + case *metrics.TaskStatus: s.agents[index].taskStatus = p - case *common.QueueStatus: - s.agents[index].queueStatus = p } s.mutex.Unlock() s.redraw() diff --git a/proto/testpb.proto b/proto/testpb.proto deleted file mode 100644 index ebd8a10..0000000 --- a/proto/testpb.proto +++ /dev/null @@ -1,12 +0,0 @@ -syntax = "proto3"; -option go_package = "internal/testutil"; - -message Baz {} - -service Foo { - rpc Foo(Baz) returns (Baz); -} - -service Bar { - rpc Bar(stream Baz) returns (stream Baz); -} diff --git a/test/integration/integration.go b/test/integration/integration.go index 6461333..2241139 100644 --- a/test/integration/integration.go +++ b/test/integration/integration.go @@ -55,15 +55,6 @@ func NewTestController(ctx context.Context) *TestController { } } -func (tc *TestController) Dial(ctx context.Context) (types.AgentClient, error) { - tc.agentListenersLock.Lock() - defer tc.agentListenersLock.Unlock() - - listener := tc.agentListeners[meta.UUID(ctx)] - cc := dial(ctx, listener) - return types.NewAgentClient(cc), nil -} - func dial( ctx context.Context, dialer *bufconn.Listener, @@ -80,7 +71,7 @@ func dial( return cc } -func (tc *TestController) startAgent(cfg *types.UsageLimits) { +func (tc *TestController) startAgent(cfg *metrics.UsageLimits) { ctx := meta.NewContext( meta.WithProvider(identity.Component, meta.WithValue(types.Agent)), meta.WithProvider(identity.UUID), @@ -110,7 +101,6 @@ func (tc *TestController) startAgent(cfg *types.UsageLimits) { }), agent.WithToolchainRunners(testtoolchain.AddToStore), ) - types.RegisterAgentServer(srv, agentSrv) mgr := servers.NewStreamManager(ctx, agentSrv) go mgr.Run() go agentSrv.StartMetricsProvider() @@ -144,9 +134,6 @@ func (tc *TestController) startScheduler() { cacheClient := types.NewCacheClient(cc) sc := scheduler.NewSchedulerServer(ctx, - 
scheduler.WithSchedulerOptions( - scheduler.WithAgentDialer(tc), - ), scheduler.WithMonitorClient(monClient), scheduler.WithCacheClient(cacheClient), ) @@ -237,7 +224,7 @@ func (tc *TestController) startCache() { }() } -func (tc *TestController) startConsumerd(cfg *types.UsageLimits) { +func (tc *TestController) startConsumerd(cfg *metrics.UsageLimits) { ctx := meta.NewContext( meta.WithProvider(identity.Component, meta.WithValue(types.Consumerd)), meta.WithProvider(identity.UUID), @@ -283,8 +270,8 @@ func (tc *TestController) startConsumerd(cfg *types.UsageLimits) { } type TestOptions struct { - Clients []*types.UsageLimits - Agents []*types.UsageLimits + Clients []*metrics.UsageLimits + Agents []*metrics.UsageLimits } func (tc *TestController) Start(ops TestOptions) { diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index addff2f..3534511 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -9,6 +9,7 @@ import ( "github.com/cobalt77/kubecc/internal/testutil" "github.com/cobalt77/kubecc/pkg/identity" "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/tracing" "github.com/cobalt77/kubecc/pkg/types" "github.com/cobalt77/kubecc/test/integration" @@ -45,14 +46,14 @@ var _ = Describe("Integration test", func() { } testOptions := integration.TestOptions{ - Clients: []*types.UsageLimits{ + Clients: []*metrics.UsageLimits{ { ConcurrentProcessLimit: 18, QueuePressureMultiplier: 1.5, QueueRejectMultiplier: 2.0, }, }, - Agents: []*types.UsageLimits{ + Agents: []*metrics.UsageLimits{ { ConcurrentProcessLimit: 24, QueuePressureMultiplier: 1.5, From e994a7f82635cbd80ca2c3817af38edd202ca5ab Mon Sep 17 00:00:00 2001 From: cobalt77 <8194899+cobalt77@users.noreply.github.com> Date: Mon, 15 Mar 2021 20:40:00 -0400 Subject: [PATCH 03/12] work in progress --- pkg/apps/consumerd/queue.go | 159 ++++++++ pkg/apps/consumerd/server.go | 48 +-- pkg/apps/scheduler/broker.go | 158 ++++++- pkg/apps/scheduler/filter.go | 31 +- pkg/apps/scheduler/scheduler.go | 700 ++++++++++++++++---------------- pkg/apps/scheduler/server.go | 154 ++++--- pkg/apps/scheduler/types.go | 12 +- pkg/run/executor.go | 44 +- pkg/run/run.go | 25 ++ pkg/run/task.go | 14 + test/integration/integration.go | 7 +- 11 files changed, 872 insertions(+), 480 deletions(-) create mode 100644 pkg/apps/consumerd/queue.go diff --git a/pkg/apps/consumerd/queue.go b/pkg/apps/consumerd/queue.go new file mode 100644 index 0000000..afa9144 --- /dev/null +++ b/pkg/apps/consumerd/queue.go @@ -0,0 +1,159 @@ +package consumerd + +import ( + "context" + "sync" + + "github.com/cobalt77/kubecc/pkg/run" +) + +type remoteStatus int + +const ( + unavailable remoteStatus = iota + available + full +) + +type remoteStatusManager struct { + status remoteStatus + cond *sync.Cond +} + +func newRemoteStatusManager() *remoteStatusManager { + rsm := &remoteStatusManager{ + status: unavailable, + cond: sync.NewCond(&sync.Mutex{}), + } + + // todo: watch monitor for scheduler status + return rsm +} + +func (rsm *remoteStatusManager) EnsureStatus(stat remoteStatus) <-chan struct{} { + ch := make(chan struct{}) + defer func() { + go func() { + rsm.cond.L.Lock() + defer rsm.cond.L.Unlock() + + for { + if rsm.status != stat { + close(ch) + return + } + rsm.cond.Wait() + } + }() + }() + + rsm.cond.L.Lock() + defer rsm.cond.L.Unlock() + + for { + if rsm.status == stat { + return ch + } + rsm.cond.Wait() + } +} + +func (rsm *remoteStatusManager) 
SetStatus(stat remoteStatus) { + rsm.cond.L.Lock() + defer rsm.cond.L.Unlock() + rsm.status = stat + rsm.cond.Broadcast() +} + +type splitTask struct { + local, remote run.PackagedTask +} + +func (s *splitTask) Wait() (interface{}, error) { + select { + case results := <-s.local.C: + return results.Response, results.Err + case results := <-s.remote.C: + return results.Response, results.Err + } +} + +type splitQueue struct { + ctx context.Context + rsm *remoteStatusManager + taskQueue chan *splitTask +} + +type queueAction int + +const ( + requeue queueAction = iota + doNotRequeue +) + +func NewSplitQueue( + ctx context.Context, +) *splitQueue { + sq := &splitQueue{ + ctx: ctx, + taskQueue: make(chan *splitTask), + rsm: newRemoteStatusManager(), + } + + go sq.runLocalQueue() + go sq.runRemoteQueue() + return sq +} + +func (s *splitQueue) In() chan<- *splitTask { + return s.taskQueue +} + +func (s *splitQueue) processTask(pt run.PackagedTask) queueAction { + response, err := pt.F() + if err != nil { + return requeue + } + pt.C <- struct { + Response interface{} + Err error + }{ + Response: response, + Err: err, + } + return doNotRequeue +} + +func (s *splitQueue) runLocalQueue() { + for { + select { + case <-s.ctx.Done(): + return + case task := <-s.taskQueue: + switch s.processTask(task.local) { + case requeue: + s.In() <- task + } + } + } +} + +func (s *splitQueue) runRemoteQueue() { + for { + statusChanged := s.rsm.EnsureStatus(available) + for { + select { + case <-s.ctx.Done(): + return + case <-statusChanged: + goto restart + case task := <-s.taskQueue: + switch s.processTask(task.remote) { + case requeue: + s.In() <- task + } + } + } + restart: + } +} diff --git a/pkg/apps/consumerd/server.go b/pkg/apps/consumerd/server.go index b3b4ee6..ae9a047 100644 --- a/pkg/apps/consumerd/server.go +++ b/pkg/apps/consumerd/server.go @@ -37,7 +37,7 @@ type consumerdServer struct { connection *grpc.ClientConn localExecutor run.Executor remoteExecutor run.Executor - remoteOnly bool + queue *splitQueue numConsumers *atomic.Int32 localTasksCompleted *atomic.Int64 } @@ -118,7 +118,9 @@ func NewConsumerdServer( storeUpdateCh: make(chan struct{}, 1), numConsumers: atomic.NewInt32(0), localTasksCompleted: atomic.NewInt64(0), + queue: NewSplitQueue(ctx), } + if options.schedulerClient != nil { srv.schedulerClient = options.schedulerClient srv.connection = options.schedulerConnection @@ -294,37 +296,25 @@ func (c *consumerdServer) Run( ap := runner.NewArgParser(c.srvContext, req.Args) ap.Parse() - canRunRemote := ap.CanRunRemote() - if !c.schedulerConnected() { - c.lg.Info("Running local, scheduler disconnected") - canRunRemote = false + ctxs := run.Contexts{ + ServerContext: c.srvContext, + ClientContext: ctx, } - // todo - // if !c.remoteOnly && c.localExecutor.Status() == types.Available { - // c.lg.Info("Running local, not at capacity yet") - // canRunRemote = false - // } - if !canRunRemote { - defer c.localTasksCompleted.Inc() - resp, err := runner.RunLocal(ap).Run(run.Contexts{ - ServerContext: c.srvContext, - ClientContext: ctx, - }, c.localExecutor, req) - if err != nil { - return nil, err - } - return resp.(*types.RunResponse), nil - } else { - resp, err := runner.SendRemote(ap, c.schedulerClient).Run(run.Contexts{ - ServerContext: c.srvContext, - ClientContext: ctx, - }, c.remoteExecutor, req) - if err != nil { - return nil, err - } - return resp.(*types.RunResponse), nil + st := &splitTask{ + local: run.Package( + runner.RunLocal(ap), ctxs, c.localExecutor, req), + remote: run.Package( + 
runner.SendRemote(ap, c.schedulerClient), ctxs, c.remoteExecutor, req), + } + c.queue.In() <- st + + resp, err := st.Wait() + + if err != nil { + return nil, err } + return resp.(*types.RunResponse), nil } // func (c *consumerdServer) HandleStream(stream grpc.ClientStream) error { diff --git a/pkg/apps/scheduler/broker.go b/pkg/apps/scheduler/broker.go index c1bc3af..575faaa 100644 --- a/pkg/apps/scheduler/broker.go +++ b/pkg/apps/scheduler/broker.go @@ -10,8 +10,11 @@ import ( "github.com/cobalt77/kubecc/pkg/meta" "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/types" + "github.com/cobalt77/kubecc/pkg/util" "go.uber.org/atomic" "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) type worker struct { @@ -36,6 +39,8 @@ type Broker struct { completedTasks *atomic.Int64 failedTasks *atomic.Int64 requestCount *atomic.Int64 + cacheHitCount *atomic.Int64 + cacheMissCount *atomic.Int64 requestQueue chan *types.CompileRequest responseQueue chan *types.CompileResponse agents map[string]*Agent @@ -44,7 +49,9 @@ type Broker struct { consumerdsMutex *sync.RWMutex filter *ToolchainFilter monClient types.MonitorClient - pendingRequests sync.Map // map[uuid string]Scheduler_StreamOutgoingTasksServer + cacheClient types.CacheClient + hashSrv *util.HashServer + pendingRequests sync.Map // map[uuid string]*Consumerd } func NewBroker(ctx context.Context, monClient types.MonitorClient) *Broker { @@ -61,6 +68,7 @@ func NewBroker(ctx context.Context, monClient types.MonitorClient) *Broker { agentsMutex: &sync.RWMutex{}, consumerdsMutex: &sync.RWMutex{}, filter: NewToolchainFilter(ctx), + hashSrv: util.NewHashServer(), monClient: monClient, } } @@ -84,6 +92,11 @@ func (b *Broker) handleAgentStream( srv types.Scheduler_StreamIncomingTasksServer, filterOutput <-chan interface{}, ) { + b.agentsMutex.RLock() + uuid := meta.UUID(srv.Context()) + agent := b.agents[uuid] + b.agentsMutex.RUnlock() + go func() { b.lg.Debug("Handling agent stream (send)") defer b.lg.Debug("Agent stream done (send)") @@ -120,6 +133,8 @@ func (b *Broker) handleAgentStream( } return } + + agent.CompletedTasks.Inc() b.responseQueue <- resp } }() @@ -128,6 +143,11 @@ func (b *Broker) handleAgentStream( func (b *Broker) handleConsumerdStream( srv types.Scheduler_StreamOutgoingTasksServer, ) { + b.consumerdsMutex.RLock() + uuid := meta.UUID(srv.Context()) + cd := b.consumerds[uuid] + b.consumerdsMutex.RUnlock() + b.lg.Debug("Handling consumerd stream (recv)") defer b.lg.Debug("Consumerd stream done (recv)") @@ -141,8 +161,9 @@ func (b *Broker) handleConsumerdStream( } return } - b.pendingRequests.Store(req.RequestID, srv) - if err := b.filter.Send(req); err != nil { + b.requestCount.Inc() + b.pendingRequests.Store(req.RequestID, cd) + if err := b.filter.Send(srv.Context(), req); err != nil { b.pendingRequests.Delete(req.RequestID) b.responseQueue <- &types.CompileResponse{ RequestID: req.RequestID, @@ -162,8 +183,16 @@ func (b *Broker) handleResponseQueue() { b.lg.Debug("Response queue closed") return } - if stream, ok := b.pendingRequests.LoadAndDelete(resp.RequestID); ok { - err := stream.(types.Scheduler_StreamOutgoingTasksServer).Send(resp) + if value, ok := b.pendingRequests.LoadAndDelete(resp.RequestID); ok { + consumerd := value.(*Consumerd) + consumerd.CompletedTasks.Inc() + switch resp.CompileResult { + case types.CompileResponse_Fail, types.CompileResponse_InternalError: + b.failedTasks.Inc() + case types.CompileResponse_Success: + b.completedTasks.Inc() + } + err := 
consumerd.Stream.Send(resp) if err != nil { b.lg.With( zap.Error(err), @@ -177,7 +206,7 @@ func (b *Broker) handleResponseQueue() { } } -func (b *Broker) HandleIncomingTasksStream( +func (b *Broker) HandleAgentTaskStream( stream types.Scheduler_StreamIncomingTasksServer, ) { b.agentsMutex.Lock() @@ -211,7 +240,7 @@ func (b *Broker) HandleIncomingTasksStream( }() } -func (b *Broker) HandleOutgoingTasksStream( +func (b *Broker) HandleConsumerdTaskStream( stream types.Scheduler_StreamOutgoingTasksServer, ) { b.consumerdsMutex.Lock() @@ -253,3 +282,118 @@ func (b *Broker) HandleOutgoingTasksStream( delete(b.agents, cd.UUID) }() } + +var float64Epsilon = 1e-6 + +func (b *Broker) CalcAgentStats() <-chan []agentStats { + stats := make(chan []agentStats) + go func() { + statsList := []agentStats{} + b.agentsMutex.RLock() + defer b.agentsMutex.RUnlock() + + for uuid, agent := range b.agents { + agent.RLock() + defer agent.RUnlock() + + stats := agentStats{ + agentCtx: agent.Context, + agentTasksTotal: &metrics.AgentTasksTotal{}, + } + + stats.agentTasksTotal.Total = agent.CompletedTasks.Load() + stats.agentTasksTotal.UUID = uuid + + statsList = append(statsList, stats) + } + + stats <- statsList + }() + return stats +} + +func (b *Broker) CalcConsumerdStats() <-chan []consumerdStats { + stats := make(chan []consumerdStats) + go func() { + statsList := []consumerdStats{} + b.consumerdsMutex.RLock() + defer b.consumerdsMutex.RUnlock() + + for uuid, cd := range b.consumerds { + cd.RLock() + defer cd.RUnlock() + + total := &metrics.ConsumerdTasksTotal{ + Total: cd.CompletedTasks.Load(), + } + total.UUID = uuid + statsList = append(statsList, consumerdStats{ + consumerdCtx: cd.Context, + cdRemoteTasksTotal: total, + }) + } + + stats <- statsList + }() + return stats +} + +func (b *Broker) TaskStats() taskStats { + return taskStats{ + completedTotal: &metrics.TasksCompletedTotal{ + Total: b.completedTasks.Load(), + }, + failedTotal: &metrics.TasksFailedTotal{ + Total: b.failedTasks.Load(), + }, + requestsTotal: &metrics.SchedulingRequestsTotal{ + Total: b.requestCount.Load(), + }, + } +} + +func (b *Broker) PreReceive( + ctx context.Context, + taskCh *taskChannel, + req *types.CompileRequest, +) (action HookAction) { + defer func() { + switch action { + case ProcessRequestNormally: + b.cacheMissCount.Inc() + case RequestIntercepted: + b.cacheHitCount.Inc() + } + }() + + if b.cacheClient == nil { + action = ProcessRequestNormally + return + } + var reqHash string + reqHash = b.hashSrv.Hash(req) + obj, err := b.cacheClient.Pull(ctx, &types.PullRequest{ + Key: &types.CacheKey{ + Hash: reqHash, + }, + }) + switch status.Code(err) { + case codes.OK: + b.responseQueue <- &types.CompileResponse{ + CompileResult: types.CompileResponse_Success, + Data: &types.CompileResponse_CompiledSource{ + CompiledSource: obj.GetData(), + }, + } + + action = RequestIntercepted + return + case codes.NotFound: + default: + b.lg.With( + zap.Error(err), + ).Error("Error querying cache server") + } + action = ProcessRequestNormally + return +} diff --git a/pkg/apps/scheduler/filter.go b/pkg/apps/scheduler/filter.go index 86f4b89..bfbb83e 100644 --- a/pkg/apps/scheduler/filter.go +++ b/pkg/apps/scheduler/filter.go @@ -12,8 +12,9 @@ import ( ) var ( - ErrNoAgents = errors.New("No available agents can run this task") - ErrStreamClosed = errors.New("Task stream closed") + ErrNoAgents = errors.New("No available agents can run this task") + ErrStreamClosed = errors.New("Task stream closed") + ErrRequestRejected = errors.New("The task has 
been rejected by the server") ) type sender struct { @@ -100,6 +101,18 @@ func (c *taskChannel) AttachReceiver(r *receiver) { } } +type HookAction int + +const ( + ProcessRequestNormally HookAction = iota + RejectRequest + RequestIntercepted +) + +type FilterHook interface { + PreReceive(*taskChannel, *types.CompileRequest) HookAction +} + type ToolchainFilter struct { ctx context.Context senders map[string]*sender // key = uuid @@ -108,6 +121,7 @@ type ToolchainFilter struct { channelsMutex *sync.RWMutex sendersMutex *sync.RWMutex receiversMutex *sync.RWMutex + hooks []FilterHook } func NewToolchainFilter(ctx context.Context) *ToolchainFilter { @@ -237,14 +251,25 @@ func (f *ToolchainFilter) UpdateSenderToolchains( sender.cd.Toolchains = newToolchains } -func (f *ToolchainFilter) Send(req *types.CompileRequest) error { +func (f *ToolchainFilter) Send(ctx context.Context, req *types.CompileRequest) error { taskCh := f.taskChannelForToolchain(req.GetToolchain()) + for _, hook := range f.hooks { + switch hook.PreReceive(taskCh, req) { + case ProcessRequestNormally: + case RejectRequest: + return ErrRequestRejected + case RequestIntercepted: + return nil + } + } if taskCh.rxRefCount.Load() == 0 { return ErrNoAgents } select { case taskCh.C <- req: return nil + case <-ctx.Done(): + return context.Canceled default: return ErrStreamClosed } diff --git a/pkg/apps/scheduler/scheduler.go b/pkg/apps/scheduler/scheduler.go index fe4fb15..ec6993f 100644 --- a/pkg/apps/scheduler/scheduler.go +++ b/pkg/apps/scheduler/scheduler.go @@ -1,352 +1,352 @@ package scheduler -import ( - "context" - "fmt" - "math" - "sync" - - scmetrics "github.com/cobalt77/kubecc/pkg/apps/scheduler/metrics" - "github.com/cobalt77/kubecc/pkg/meta" - "github.com/cobalt77/kubecc/pkg/types" - "github.com/smallnest/weighted" - "go.uber.org/atomic" - "go.uber.org/zap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -/* -Assumptions: -- Each invocation of the compiler takes a single input and produces a single output -- Each process will consume up to 100% of a single cpu thread -- Agents run in containers belonging to their own kernel cgroup with a limited - CFS quota. -- Compiling locally is always faster than preprocessing locally + compiling remotely. - -Notes: -Agents and consumers are persistently connected to the scheduler. The scheduler -knows which jobs are running at all times and on which agents, and it knows -which jobs are being run locally. - -While building, the ultimate goal is to reach 100% cpu usage on the consumer, -and 100% usage on agents (relative to their cgroup). - -The maximum number of concurrent processes is determined by: -(cfs_quota)/(cfs_period)*(multiple) where multiple is a configurable constant. -*/ - -type Scheduler struct { - SchedulerOptions - ctx context.Context - lg *zap.SugaredLogger - agents map[string]*Agent - consumerds map[string]*Consumerd - agentsMutex *sync.RWMutex - cdsMutex *sync.RWMutex - w weighted.W - wLock *sync.Mutex - completedTasks *atomic.Int64 - failedTasks *atomic.Int64 - requestCount *atomic.Int64 -} - -type SchedulerOptions struct { -} - -type schedulerOption func(*SchedulerOptions) - -func (o *SchedulerOptions) Apply(opts ...schedulerOption) { - for _, op := range opts { - op(o) - } -} - -func NewScheduler(ctx context.Context, opts ...schedulerOption) *Scheduler { - options := SchedulerOptions{} - options.Apply(opts...) 
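As an aside on the FilterHook mechanism introduced in filter.go above: ToolchainFilter.Send now consults each registered hook's PreReceive before queueing a request, and a hook can let the request through, reject it (Send then returns ErrRequestRejected), or report it as intercepted, as the broker's cache lookup does after pushing a cached CompileResponse onto the response queue itself. A minimal hook satisfying the interface as declared might look like the sketch below; it would live in the scheduler package, and the reject-unknown-toolchains policy is invented purely for illustration.

// Illustrative sketch only; not part of this patch. Assumes it sits in pkg/apps/scheduler.
// rejectUnknownToolchains refuses requests whose toolchain kind was never resolved
// and lets everything else proceed normally.
type rejectUnknownToolchains struct{}

func (rejectUnknownToolchains) PreReceive(
	taskCh *taskChannel,
	req *types.CompileRequest,
) HookAction {
	if req.GetToolchain().GetKind() == types.ToolchainKind_Unknown {
		return RejectRequest // ToolchainFilter.Send will return ErrRequestRejected
	}
	return ProcessRequestNormally
}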
- - return &Scheduler{ - SchedulerOptions: options, - w: &weighted.RRW{}, - wLock: &sync.Mutex{}, - ctx: ctx, - lg: meta.Log(ctx), - completedTasks: atomic.NewInt64(0), - failedTasks: atomic.NewInt64(0), - requestCount: atomic.NewInt64(0), - agents: make(map[string]*Agent), - consumerds: make(map[string]*Consumerd), - agentsMutex: &sync.RWMutex{}, - cdsMutex: &sync.RWMutex{}, - } -} - -func (s *Scheduler) Schedule( - ctx context.Context, - req *types.CompileRequest, -) (*types.CompileResponse, error) { - s.lg.Info("Scheduling") - s.requestCount.Inc() - for { - s.wLock.Lock() - next := s.w.Next() - if next == nil { - s.agentsMutex.RLock() - numAgents := len(s.agents) - s.agentsMutex.RUnlock() - - if numAgents > 0 { - // All weights 0 - return nil, status.Error(codes.ResourceExhausted, "All agents busy") - } else { - return nil, status.Error(codes.Unavailable, "No agents available") - } - } - agent := next.(*Agent) - agentStream := agent.Stream - s.wLock.Unlock() - err := agentStream.Send(req) - if status.Code(err) == codes.Unavailable { - s.lg.With( - zap.Error(err), - ).Info("Agent rejected task, re-scheduling...") - continue - } - if err != nil { - s.lg.With(zap.Error(err)).Error("Error from agent") - s.failedTasks.Inc() - return nil, err - } - agent.CompletedTasks.Inc() - s.completedTasks.Inc() - return response, nil - } -} - -func (s *Scheduler) AgentIsConnected(a *Agent) bool { - s.agentsMutex.RLock() - defer s.agentsMutex.RUnlock() - _, ok := s.agents[a.UUID] - return ok -} - -func (s *Scheduler) ConsumerdIsConnected(c *Consumerd) bool { - s.cdsMutex.RLock() - defer s.cdsMutex.RUnlock() - _, ok := s.consumerds[c.UUID] - return ok -} - -func (s *Scheduler) AgentConnected(ctx context.Context) error { - agent := &Agent{ - remoteInfo: remoteInfoFromContext(ctx), - RWMutex: &sync.RWMutex{}, - } - if s.AgentIsConnected(agent) { - return status.Error(codes.AlreadyExists, "Agent already connected") - } - var err error - agent.Stream, err = s.agentDialer.Dial(ctx) - if err != nil { - return status.Error(codes.Internal, - fmt.Sprintf("Error dialing agent: %s", err.Error())) - } - - s.agentsMutex.Lock() - defer s.agentsMutex.Unlock() - - s.lg.With( - zap.String("uuid", agent.UUID), - ).Info(types.Scheduler.Color().Add("Agent connected")) - s.agents[agent.UUID] = agent - - s.wLock.Lock() - s.w.Add(agent, int(agent.Weight())) - s.wLock.Unlock() - - go func() { - <-agent.Context.Done() - s.agentsMutex.Lock() - defer s.agentsMutex.Unlock() - delete(s.agents, agent.UUID) - s.lg.With( - zap.String("uuid", agent.UUID), - ).Info(types.Scheduler.Color().Add("Agent disconnected")) - }() - return nil -} - -func (s *Scheduler) ConsumerdConnected(ctx context.Context) error { - cd := &Consumerd{ - remoteInfo: remoteInfoFromContext(ctx), - RWMutex: &sync.RWMutex{}, - } - if s.ConsumerdIsConnected(cd) { - return status.Error(codes.AlreadyExists, "Consumerd already connected") - } - - s.cdsMutex.Lock() - defer s.cdsMutex.Unlock() - - s.lg.With( - zap.String("uuid", cd.UUID), - ).Info(types.Scheduler.Color().Add("Consumerd connected")) - s.consumerds[cd.UUID] = cd - - go func() { - <-cd.Context.Done() - s.cdsMutex.Lock() - defer s.cdsMutex.Unlock() - - delete(s.consumerds, cd.UUID) - s.lg.With( - zap.String("uuid", cd.UUID), - ).Info(types.Scheduler.Color().Add("Consumerd disconnected")) - }() - return nil -} - -func (s *Scheduler) reweightAll() { - s.wLock.Lock() - defer s.wLock.Unlock() - s.agentsMutex.RLock() - defer s.agentsMutex.RUnlock() - - s.w.RemoveAll() - for _, agent := range s.agents { - agent.RLock() 
- s.w.Add(agent, int(agent.Weight())) - agent.RUnlock() - } -} - -func (s *Scheduler) SetQueueStatus(ctx context.Context, stat types.QueueStatus) error { - s.agentsMutex.RLock() - defer s.agentsMutex.RUnlock() - - uuid := meta.UUID(ctx) - if agent, ok := s.agents[uuid]; ok { - agent.Lock() - agent.QueueStatus = stat - agent.Unlock() - - s.reweightAll() - } - return nil -} - -func (s *Scheduler) SetToolchains(ctx context.Context, tcs []*types.Toolchain) error { - s.agentsMutex.RLock() - defer s.agentsMutex.RUnlock() - s.cdsMutex.RLock() - defer s.cdsMutex.RUnlock() - - uuid := meta.UUID(ctx) - if agent, ok := s.agents[uuid]; ok { - agent.Lock() - agent.Toolchains = tcs - agent.Unlock() - } else if cd, ok := s.consumerds[uuid]; ok { - cd.Lock() - cd.Toolchains = tcs - cd.Unlock() - } - return nil -} - -var float64Epsilon = 1e-6 - -func (s *Scheduler) CalcAgentStats() <-chan []agentStats { - stats := make(chan []agentStats) - go func() { - var min, max float64 - statsList := []agentStats{} - s.agentsMutex.RLock() - defer s.agentsMutex.RUnlock() - - for uuid, agent := range s.agents { - agent.RLock() - defer agent.RUnlock() - - stats := agentStats{ - agentCtx: agent.Context, - agentTasksTotal: &scmetrics.AgentTasksTotal{}, - agentWeight: &scmetrics.AgentWeight{}, - } - - stats.agentWeight.UUID = uuid - stats.agentTasksTotal.Total = agent.CompletedTasks.Load() - stats.agentTasksTotal.Identifier = stats.agentWeight.Identifier - - w := float64(agent.Weight()) - switch { - case len(statsList) == 0: - min = w - max = w - case w > max: - w = max - case w < min: - w = min - } - - // Set the non-normalized weight here, adjust below - stats.agentWeight.Value = w - statsList = append(statsList, stats) - } - - // Normalize weights - for i, stat := range statsList { - if math.Abs(max-min) <= float64Epsilon { - // If max == min, set each weight to 1, they are all equal - statsList[i].agentWeight.Value = 1.0 - } else { - statsList[i].agentWeight.Value = - (stat.agentWeight.Value - min) / (max - min) - } - } - - stats <- statsList - }() - return stats -} - -func (s *Scheduler) CalcConsumerdStats() <-chan []consumerdStats { - stats := make(chan []consumerdStats) - go func() { - statsList := []consumerdStats{} - s.cdsMutex.RLock() - defer s.cdsMutex.RUnlock() - - for uuid, cd := range s.consumerds { - cd.RLock() - defer cd.RUnlock() - - total := &scmetrics.CdTasksTotal{ - Total: cd.CompletedTasks.Load(), - } - total.Identifier.UUID = uuid - statsList = append(statsList, consumerdStats{ - consumerdCtx: cd.Context, - cdRemoteTasksTotal: total, - }) - } - - stats <- statsList - }() - return stats -} - -func (s *Scheduler) TaskStats() taskStats { - return taskStats{ - completedTotal: &scmetrics.TasksCompletedTotal{ - Total: s.completedTasks.Load(), - }, - failedTotal: &scmetrics.TasksFailedTotal{ - Total: s.failedTasks.Load(), - }, - requestsTotal: &scmetrics.SchedulingRequestsTotal{ - Total: s.requestCount.Load(), - }, - } -} +// import ( +// "context" +// "fmt" +// "math" +// "sync" + +// scmetrics "github.com/cobalt77/kubecc/pkg/apps/scheduler/metrics" +// "github.com/cobalt77/kubecc/pkg/meta" +// "github.com/cobalt77/kubecc/pkg/types" +// "github.com/smallnest/weighted" +// "go.uber.org/atomic" +// "go.uber.org/zap" +// "google.golang.org/grpc/codes" +// "google.golang.org/grpc/status" +// ) + +// /* +// Assumptions: +// - Each invocation of the compiler takes a single input and produces a single output +// - Each process will consume up to 100% of a single cpu thread +// - Agents run in containers belonging 
to their own kernel cgroup with a limited +// CFS quota. +// - Compiling locally is always faster than preprocessing locally + compiling remotely. + +// Notes: +// Agents and consumers are persistently connected to the scheduler. The scheduler +// knows which jobs are running at all times and on which agents, and it knows +// which jobs are being run locally. + +// While building, the ultimate goal is to reach 100% cpu usage on the consumer, +// and 100% usage on agents (relative to their cgroup). + +// The maximum number of concurrent processes is determined by: +// (cfs_quota)/(cfs_period)*(multiple) where multiple is a configurable constant. +// */ + +// type Scheduler struct { +// SchedulerOptions +// ctx context.Context +// lg *zap.SugaredLogger +// agents map[string]*Agent +// consumerds map[string]*Consumerd +// agentsMutex *sync.RWMutex +// cdsMutex *sync.RWMutex +// w weighted.W +// wLock *sync.Mutex +// completedTasks *atomic.Int64 +// failedTasks *atomic.Int64 +// requestCount *atomic.Int64 +// } + +// type SchedulerOptions struct { +// } + +// type schedulerOption func(*SchedulerOptions) + +// func (o *SchedulerOptions) Apply(opts ...schedulerOption) { +// for _, op := range opts { +// op(o) +// } +// } + +// func NewScheduler(ctx context.Context, opts ...schedulerOption) *Scheduler { +// options := SchedulerOptions{} +// options.Apply(opts...) + +// return &Scheduler{ +// SchedulerOptions: options, +// w: &weighted.RRW{}, +// wLock: &sync.Mutex{}, +// ctx: ctx, +// lg: meta.Log(ctx), +// completedTasks: atomic.NewInt64(0), +// failedTasks: atomic.NewInt64(0), +// requestCount: atomic.NewInt64(0), +// agents: make(map[string]*Agent), +// consumerds: make(map[string]*Consumerd), +// agentsMutex: &sync.RWMutex{}, +// cdsMutex: &sync.RWMutex{}, +// } +// } + +// func (s *Scheduler) Schedule( +// ctx context.Context, +// req *types.CompileRequest, +// ) (*types.CompileResponse, error) { +// s.lg.Info("Scheduling") +// s.requestCount.Inc() +// for { +// s.wLock.Lock() +// next := s.w.Next() +// if next == nil { +// s.agentsMutex.RLock() +// numAgents := len(s.agents) +// s.agentsMutex.RUnlock() + +// if numAgents > 0 { +// // All weights 0 +// return nil, status.Error(codes.ResourceExhausted, "All agents busy") +// } else { +// return nil, status.Error(codes.Unavailable, "No agents available") +// } +// } +// agent := next.(*Agent) +// agentStream := agent.Stream +// s.wLock.Unlock() +// err := agentStream.Send(req) +// if status.Code(err) == codes.Unavailable { +// s.lg.With( +// zap.Error(err), +// ).Info("Agent rejected task, re-scheduling...") +// continue +// } +// if err != nil { +// s.lg.With(zap.Error(err)).Error("Error from agent") +// s.failedTasks.Inc() +// return nil, err +// } +// agent.CompletedTasks.Inc() +// s.completedTasks.Inc() +// return response, nil +// } +// } + +// func (s *Scheduler) AgentIsConnected(a *Agent) bool { +// s.agentsMutex.RLock() +// defer s.agentsMutex.RUnlock() +// _, ok := s.agents[a.UUID] +// return ok +// } + +// func (s *Scheduler) ConsumerdIsConnected(c *Consumerd) bool { +// s.cdsMutex.RLock() +// defer s.cdsMutex.RUnlock() +// _, ok := s.consumerds[c.UUID] +// return ok +// } + +// func (s *Scheduler) AgentConnected(ctx context.Context) error { +// agent := &Agent{ +// remoteInfo: remoteInfoFromContext(ctx), +// RWMutex: &sync.RWMutex{}, +// } +// if s.AgentIsConnected(agent) { +// return status.Error(codes.AlreadyExists, "Agent already connected") +// } +// var err error +// agent.Stream, err = s.agentDialer.Dial(ctx) +// if err != nil 
{ +// return status.Error(codes.Internal, +// fmt.Sprintf("Error dialing agent: %s", err.Error())) +// } + +// s.agentsMutex.Lock() +// defer s.agentsMutex.Unlock() + +// s.lg.With( +// zap.String("uuid", agent.UUID), +// ).Info(types.Scheduler.Color().Add("Agent connected")) +// s.agents[agent.UUID] = agent + +// s.wLock.Lock() +// s.w.Add(agent, int(agent.Weight())) +// s.wLock.Unlock() + +// go func() { +// <-agent.Context.Done() +// s.agentsMutex.Lock() +// defer s.agentsMutex.Unlock() +// delete(s.agents, agent.UUID) +// s.lg.With( +// zap.String("uuid", agent.UUID), +// ).Info(types.Scheduler.Color().Add("Agent disconnected")) +// }() +// return nil +// } + +// func (s *Scheduler) ConsumerdConnected(ctx context.Context) error { +// cd := &Consumerd{ +// remoteInfo: remoteInfoFromContext(ctx), +// RWMutex: &sync.RWMutex{}, +// } +// if s.ConsumerdIsConnected(cd) { +// return status.Error(codes.AlreadyExists, "Consumerd already connected") +// } + +// s.cdsMutex.Lock() +// defer s.cdsMutex.Unlock() + +// s.lg.With( +// zap.String("uuid", cd.UUID), +// ).Info(types.Scheduler.Color().Add("Consumerd connected")) +// s.consumerds[cd.UUID] = cd + +// go func() { +// <-cd.Context.Done() +// s.cdsMutex.Lock() +// defer s.cdsMutex.Unlock() + +// delete(s.consumerds, cd.UUID) +// s.lg.With( +// zap.String("uuid", cd.UUID), +// ).Info(types.Scheduler.Color().Add("Consumerd disconnected")) +// }() +// return nil +// } + +// func (s *Scheduler) reweightAll() { +// s.wLock.Lock() +// defer s.wLock.Unlock() +// s.agentsMutex.RLock() +// defer s.agentsMutex.RUnlock() + +// s.w.RemoveAll() +// for _, agent := range s.agents { +// agent.RLock() +// s.w.Add(agent, int(agent.Weight())) +// agent.RUnlock() +// } +// } + +// func (s *Scheduler) SetQueueStatus(ctx context.Context, stat types.QueueStatus) error { +// s.agentsMutex.RLock() +// defer s.agentsMutex.RUnlock() + +// uuid := meta.UUID(ctx) +// if agent, ok := s.agents[uuid]; ok { +// agent.Lock() +// agent.QueueStatus = stat +// agent.Unlock() + +// s.reweightAll() +// } +// return nil +// } + +// func (s *Scheduler) SetToolchains(ctx context.Context, tcs []*types.Toolchain) error { +// s.agentsMutex.RLock() +// defer s.agentsMutex.RUnlock() +// s.cdsMutex.RLock() +// defer s.cdsMutex.RUnlock() + +// uuid := meta.UUID(ctx) +// if agent, ok := s.agents[uuid]; ok { +// agent.Lock() +// agent.Toolchains = tcs +// agent.Unlock() +// } else if cd, ok := s.consumerds[uuid]; ok { +// cd.Lock() +// cd.Toolchains = tcs +// cd.Unlock() +// } +// return nil +// } + +// var float64Epsilon = 1e-6 + +// func (s *Scheduler) CalcAgentStats() <-chan []agentStats { +// stats := make(chan []agentStats) +// go func() { +// var min, max float64 +// statsList := []agentStats{} +// s.agentsMutex.RLock() +// defer s.agentsMutex.RUnlock() + +// for uuid, agent := range s.agents { +// agent.RLock() +// defer agent.RUnlock() + +// stats := agentStats{ +// agentCtx: agent.Context, +// agentTasksTotal: &scmetrics.AgentTasksTotal{}, +// agentWeight: &scmetrics.AgentWeight{}, +// } + +// stats.agentWeight.UUID = uuid +// stats.agentTasksTotal.Total = agent.CompletedTasks.Load() +// stats.agentTasksTotal.Identifier = stats.agentWeight.Identifier + +// w := float64(agent.Weight()) +// switch { +// case len(statsList) == 0: +// min = w +// max = w +// case w > max: +// w = max +// case w < min: +// w = min +// } + +// // Set the non-normalized weight here, adjust below +// stats.agentWeight.Value = w +// statsList = append(statsList, stats) +// } + +// // Normalize weights +// for 
i, stat := range statsList { +// if math.Abs(max-min) <= float64Epsilon { +// // If max == min, set each weight to 1, they are all equal +// statsList[i].agentWeight.Value = 1.0 +// } else { +// statsList[i].agentWeight.Value = +// (stat.agentWeight.Value - min) / (max - min) +// } +// } + +// stats <- statsList +// }() +// return stats +// } + +// func (s *Scheduler) CalcConsumerdStats() <-chan []consumerdStats { +// stats := make(chan []consumerdStats) +// go func() { +// statsList := []consumerdStats{} +// s.cdsMutex.RLock() +// defer s.cdsMutex.RUnlock() + +// for uuid, cd := range s.consumerds { +// cd.RLock() +// defer cd.RUnlock() + +// total := &scmetrics.CdTasksTotal{ +// Total: cd.CompletedTasks.Load(), +// } +// total.Identifier.UUID = uuid +// statsList = append(statsList, consumerdStats{ +// consumerdCtx: cd.Context, +// cdRemoteTasksTotal: total, +// }) +// } + +// stats <- statsList +// }() +// return stats +// } + +// func (s *Scheduler) TaskStats() taskStats { +// return taskStats{ +// completedTotal: &scmetrics.TasksCompletedTotal{ +// Total: s.completedTasks.Load(), +// }, +// failedTotal: &scmetrics.TasksFailedTotal{ +// Total: s.failedTasks.Load(), +// }, +// requestsTotal: &scmetrics.SchedulingRequestsTotal{ +// Total: s.requestCount.Load(), +// }, +// } +// } diff --git a/pkg/apps/scheduler/server.go b/pkg/apps/scheduler/server.go index 8e7d5ea..9070512 100644 --- a/pkg/apps/scheduler/server.go +++ b/pkg/apps/scheduler/server.go @@ -4,17 +4,14 @@ import ( "context" "time" - scmetrics "github.com/cobalt77/kubecc/pkg/apps/scheduler/metrics" "github.com/cobalt77/kubecc/pkg/clients" "github.com/cobalt77/kubecc/pkg/meta" "github.com/cobalt77/kubecc/pkg/metrics" - "github.com/cobalt77/kubecc/pkg/servers" "github.com/cobalt77/kubecc/pkg/types" "github.com/cobalt77/kubecc/pkg/util" "go.uber.org/atomic" "go.uber.org/zap" "google.golang.org/grpc/codes" - "google.golang.org/grpc/peer" "google.golang.org/grpc/status" ) @@ -26,19 +23,17 @@ type schedulerServer struct { srvContext context.Context lg *zap.SugaredLogger - scheduler *Scheduler metricsProvider metrics.Provider hashSrv *util.HashServer broker *Broker - agentCount *atomic.Int32 - consumerdCount *atomic.Int32 + agentCount *atomic.Int64 + consumerdCount *atomic.Int64 } type SchedulerServerOptions struct { - schedulerOptions []schedulerOption - monClient types.MonitorClient - cacheClient types.CacheClient + monClient types.MonitorClient + cacheClient types.CacheClient } type schedulerServerOption func(*SchedulerServerOptions) @@ -49,12 +44,6 @@ func (o *SchedulerServerOptions) Apply(opts ...schedulerServerOption) { } } -func WithSchedulerOptions(opts ...schedulerOption) schedulerServerOption { - return func(o *SchedulerServerOptions) { - o.schedulerOptions = opts - } -} - func WithMonitorClient(monClient types.MonitorClient) schedulerServerOption { return func(o *SchedulerServerOptions) { o.monClient = monClient @@ -79,9 +68,8 @@ func NewSchedulerServer( lg: meta.Log(ctx), monClient: options.monClient, cacheClient: options.cacheClient, - scheduler: NewScheduler(ctx, options.schedulerOptions...), - agentCount: atomic.NewInt32(0), - consumerdCount: atomic.NewInt32(0), + agentCount: atomic.NewInt64(0), + consumerdCount: atomic.NewInt64(0), broker: NewBroker(ctx, options.monClient), hashSrv: util.NewHashServer(), } @@ -117,59 +105,59 @@ func (s *schedulerServer) cacheTransaction( } } -func (s *schedulerServer) Compile( - ctx context.Context, - req *types.CompileRequest, -) (*types.CompileResponse, error) { - if err := 
meta.CheckContext(ctx); err != nil { - return nil, err - } - span, sctx, err := servers.StartSpanFromServer(ctx, "schedule-compile") - if err != nil { - s.lg.Error(err) - } else { - ctx = sctx - defer span.Finish() - } - peer, ok := peer.FromContext(ctx) - if ok { - s.lg.With("peer", peer.Addr.String()).Info("Schedule requested") - } - cacheMiss := false - var reqHash string - if s.cacheClient != nil { - reqHash = s.hashSrv.Hash(req) - obj, err := s.cacheClient.Pull(ctx, &types.PullRequest{ - Key: &types.CacheKey{ - Hash: reqHash, - }, - }) - switch status.Code(err) { - case codes.OK: - s.lg.Info("Cache Hit") - return &types.CompileResponse{ - CompileResult: types.CompileResponse_Success, - Data: &types.CompileResponse_CompiledSource{ - CompiledSource: obj.GetData(), - }, - }, nil - case codes.NotFound: - cacheMiss = true - default: - s.lg.With( - zap.Error(err), - ).Error("Error querying cache server") - } - } +// func (s *schedulerServer) Compile( +// ctx context.Context, +// req *types.CompileRequest, +// ) (*types.CompileResponse, error) { +// if err := meta.CheckContext(ctx); err != nil { +// return nil, err +// } +// span, sctx, err := servers.StartSpanFromServer(ctx, "schedule-compile") +// if err != nil { +// s.lg.Error(err) +// } else { +// ctx = sctx +// defer span.Finish() +// } +// peer, ok := peer.FromContext(ctx) +// if ok { +// s.lg.With("peer", peer.Addr.String()).Info("Schedule requested") +// } +// cacheMiss := false +// var reqHash string +// if s.cacheClient != nil { +// reqHash = s.hashSrv.Hash(req) +// obj, err := s.cacheClient.Pull(ctx, &types.PullRequest{ +// Key: &types.CacheKey{ +// Hash: reqHash, +// }, +// }) +// switch status.Code(err) { +// case codes.OK: +// s.lg.Info("Cache Hit") +// return &types.CompileResponse{ +// CompileResult: types.CompileResponse_Success, +// Data: &types.CompileResponse_CompiledSource{ +// CompiledSource: obj.GetData(), +// }, +// }, nil +// case codes.NotFound: +// cacheMiss = true +// default: +// s.lg.With( +// zap.Error(err), +// ).Error("Error querying cache server") +// } +// } - resp, err := s.scheduler.Schedule(ctx, req) - if err == nil && - resp.CompileResult == types.CompileResponse_Success && - cacheMiss { - go s.cacheTransaction(reqHash, resp) - } - return resp, err -} +// resp, err := s.scheduler.Schedule(ctx, req) +// if err == nil && +// resp.CompileResult == types.CompileResponse_Success && +// cacheMiss { +// go s.cacheTransaction(reqHash, resp) +// } +// return resp, err +// } // func (s *schedulerServer) handleClientConnection(srv grpc.ServerStream) error { // done := make(chan error) @@ -258,7 +246,9 @@ func (s *schedulerServer) StreamIncomingTasks( return err } - s.broker.HandleIncomingTasksStream(srv) + s.broker.HandleAgentTaskStream(srv) + s.agentCount.Inc() + defer s.agentCount.Dec() select { case <-srv.Context().Done(): @@ -277,7 +267,16 @@ func (s *schedulerServer) StreamOutgoingTasks( return err } - s.broker.HandleOutgoingTasksStream(srv) + s.broker.HandleConsumerdTaskStream(srv) + + s.metricsProvider.Post(&metrics.ConsumerdCount{ + Count: s.consumerdCount.Inc(), + }) + defer func() { + s.metricsProvider.Post(&metrics.ConsumerdCount{ + Count: s.consumerdCount.Dec(), + }) + }() select { case <-srv.Context().Done(): @@ -288,30 +287,29 @@ func (s *schedulerServer) StreamOutgoingTasks( } func (s *schedulerServer) postCounts() { - s.metricsProvider.Post(&scmetrics.AgentCount{ + s.metricsProvider.Post(&metrics.AgentCount{ Count: s.agentCount.Load(), }) - s.metricsProvider.Post(&scmetrics.CdCount{ + 
s.metricsProvider.Post(&metrics.ConsumerdCount{ Count: s.consumerdCount.Load(), }) } func (s *schedulerServer) postTotals() { - stats := s.scheduler.TaskStats() + stats := s.broker.TaskStats() s.metricsProvider.Post(stats.completedTotal) s.metricsProvider.Post(stats.failedTotal) s.metricsProvider.Post(stats.requestsTotal) } func (s *schedulerServer) postAgentStats() { - for _, stat := range <-s.scheduler.CalcAgentStats() { + for _, stat := range <-s.broker.CalcAgentStats() { s.metricsProvider.PostContext(stat.agentTasksTotal, stat.agentCtx) - s.metricsProvider.PostContext(stat.agentWeight, stat.agentCtx) } } func (s *schedulerServer) postConsumerdStats() { - for _, stat := range <-s.scheduler.CalcConsumerdStats() { + for _, stat := range <-s.broker.CalcConsumerdStats() { s.metricsProvider.PostContext(stat.cdRemoteTasksTotal, stat.consumerdCtx) } } diff --git a/pkg/apps/scheduler/types.go b/pkg/apps/scheduler/types.go index 9e8214d..4694260 100644 --- a/pkg/apps/scheduler/types.go +++ b/pkg/apps/scheduler/types.go @@ -4,7 +4,6 @@ import ( "context" "sync" - scmetrics "github.com/cobalt77/kubecc/pkg/apps/scheduler/metrics" "github.com/cobalt77/kubecc/pkg/meta" "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/types" @@ -46,17 +45,16 @@ func remoteInfoFromContext(ctx context.Context) remoteInfo { type agentStats struct { agentCtx context.Context - agentTasksTotal *scmetrics.AgentTasksTotal - agentWeight *scmetrics.AgentWeight + agentTasksTotal *metrics.AgentTasksTotal } type consumerdStats struct { consumerdCtx context.Context - cdRemoteTasksTotal *scmetrics.CdTasksTotal + cdRemoteTasksTotal *metrics.ConsumerdTasksTotal } type taskStats struct { - completedTotal *scmetrics.TasksCompletedTotal - failedTotal *scmetrics.TasksFailedTotal - requestsTotal *scmetrics.SchedulingRequestsTotal + completedTotal *metrics.TasksCompletedTotal + failedTotal *metrics.TasksFailedTotal + requestsTotal *metrics.SchedulingRequestsTotal } diff --git a/pkg/run/executor.go b/pkg/run/executor.go index f44b068..6e76430 100644 --- a/pkg/run/executor.go +++ b/pkg/run/executor.go @@ -12,6 +12,7 @@ type Executor interface { metrics.UsageLimitsCompleter metrics.TaskStatusCompleter Exec(task *Task) error + ExecAsync(task *Task) <-chan error } type QueuedExecutor struct { @@ -77,9 +78,7 @@ func (x *QueuedExecutor) SetUsageLimits(cfg *metrics.UsageLimits) { go x.workerPool.SetWorkerCount(int(cfg.GetConcurrentProcessLimit())) } -func (x *QueuedExecutor) Exec( - task *Task, -) error { +func (x *QueuedExecutor) Exec(task *Task) error { x.numQueued.Inc() x.taskQueue <- task x.numQueued.Dec() @@ -94,6 +93,27 @@ func (x *QueuedExecutor) Exec( return task.Error() } +func (x *QueuedExecutor) ExecAsync(task *Task) <-chan error { + ch := make(chan error) + x.numQueued.Inc() + x.taskQueue <- task + x.numQueued.Dec() + + go func() { + x.numRunning.Inc() + select { + case <-task.Done(): + case <-task.ctx.Done(): + } + x.numRunning.Dec() + + ch <- task.Error() + close(ch) + }() + + return ch +} + func (x *QueuedExecutor) CompleteUsageLimits(stat *metrics.UsageLimits) { stat.ConcurrentProcessLimit = x.usageLimits.ConcurrentProcessLimit stat.QueuePressureMultiplier = x.usageLimits.QueuePressureMultiplier @@ -125,6 +145,24 @@ func (x *DelegatingExecutor) Exec(task *Task) error { return task.Error() } +func (x *DelegatingExecutor) ExecAsync(task *Task) <-chan error { + ch := make(chan error) + x.numTasks.Inc() + defer x.numTasks.Dec() + + go func() { + task.Run() + select { + case <-task.Done(): + case 
<-task.ctx.Done(): + } + ch <- task.Error() + close(ch) + }() + + return ch +} + func (x *DelegatingExecutor) CompleteUsageLimits(stat *metrics.UsageLimits) {} func (x *DelegatingExecutor) CompleteTaskStatus(stat *metrics.TaskStatus) { diff --git a/pkg/run/run.go b/pkg/run/run.go index 87b2471..f679f1d 100644 --- a/pkg/run/run.go +++ b/pkg/run/run.go @@ -21,6 +21,31 @@ type RunnerManager interface { Run(ctx Contexts, x Executor, request interface{}) (response interface{}, err error) } +type PackagedTask struct { + F func() (response interface{}, err error) + C chan struct { + Response interface{} + Err error + } +} + +func Package( + rm RunnerManager, + ctx Contexts, + x Executor, + request interface{}, +) PackagedTask { + return PackagedTask{ + F: func() (response interface{}, err error) { + return rm.Run(ctx, x, request) + }, + C: make(chan struct { + Response interface{} + Err error + }), + } +} + type ArgParser interface { Parse() CanRunRemote() bool diff --git a/pkg/run/task.go b/pkg/run/task.go index 0098e9f..4c7f61c 100644 --- a/pkg/run/task.go +++ b/pkg/run/task.go @@ -53,3 +53,17 @@ func Begin(sctx context.Context, r Runner, tc *types.Toolchain) *Task { span: span, } } + +func (t *Task) Restart() *Task { + span := t.tracer.StartSpan("task-restart", + opentracing.FollowsFrom(t.span.Context())) + sctx := opentracing.ContextWithSpan(t.ctx, span) + return &Task{ + doneCh: make(chan struct{}), + tracer: t.tracer, + ctx: sctx, + tc: t.tc, + runner: t.runner, + span: span, + } +} diff --git a/test/integration/integration.go b/test/integration/integration.go index 2241139..7b5f473 100644 --- a/test/integration/integration.go +++ b/test/integration/integration.go @@ -13,6 +13,7 @@ import ( consumerd "github.com/cobalt77/kubecc/pkg/apps/consumerd" "github.com/cobalt77/kubecc/pkg/apps/monitor" scheduler "github.com/cobalt77/kubecc/pkg/apps/scheduler" + "github.com/cobalt77/kubecc/pkg/clients" "github.com/cobalt77/kubecc/pkg/config" "github.com/cobalt77/kubecc/pkg/host" "github.com/cobalt77/kubecc/pkg/identity" @@ -256,8 +257,8 @@ func (tc *TestController) startConsumerd(cfg *metrics.UsageLimits) { ) types.RegisterConsumerdServer(srv, d) - mgr := servers.NewStreamManager(ctx, d) - go mgr.Run() + // mgr := servers.NewStreamManager(ctx, d) + // go mgr.Run() go d.StartMetricsProvider() cdListener := dial(ctx, listener) cdClient := types.NewConsumerdClient(cdListener) @@ -304,7 +305,7 @@ func (tc *TestController) Start(ops TestOptions) { 1 /*scheduler*/ + 1 /*cache*/) extClient := types.NewMonitorClient(cc) - listener := metrics.NewListener(tc.ctx, extClient) + listener := clients.NewListener(tc.ctx, extClient) listener.OnProviderAdded(func(pctx context.Context, uuid string) { resp, _ := extClient.Whois(tc.ctx, &types.WhoisRequest{ UUID: uuid, From 2dcf3da1b9d65924ce0bb21b1601a894a7d30cdb Mon Sep 17 00:00:00 2001 From: cobalt77 <8194899+cobalt77@users.noreply.github.com> Date: Tue, 16 Mar 2021 23:58:59 -0400 Subject: [PATCH 04/12] Refactoring in progress; moving integration test code --- cmd/kubecc/components/consumerd/consumerd.go | 2 +- internal/testutil/{skip.go => ginkgo.go} | 10 + internal/testutil/toolchain/local.go | 3 +- internal/testutil/toolchain/recv.go | 3 +- pkg/apps/agent/server.go | 16 +- pkg/apps/cachesrv/server.go | 10 +- pkg/apps/consumerd/consumerd_suite_test.go | 25 ++ pkg/apps/consumerd/queue.go | 119 ++---- pkg/apps/consumerd/queue_test.go | 142 +++++++ pkg/apps/consumerd/server.go | 40 +- pkg/apps/monitor/monitor_test.go | 8 +- pkg/apps/monitor/server.go | 6 +- 
pkg/apps/monitor/store_test.go | 10 + pkg/apps/scheduler/server.go | 10 +- pkg/clients/availability.go | 117 ++++++ pkg/clients/availability_test.go | 121 ++++++ pkg/clients/clients_suite_test.go | 15 + pkg/clients/listener.go | 25 +- pkg/config/spec.go | 1 - pkg/meta/context.go | 7 + pkg/run/task.go | 4 + pkg/servers/grpc.go | 4 +- pkg/test/environment.go | 398 +++++++++++++++++++ pkg/util/msgpack.go | 24 -- test/integration/integration.go | 4 +- test/integration/integration_test.go | 2 +- 26 files changed, 950 insertions(+), 176 deletions(-) rename internal/testutil/{skip.go => ginkgo.go} (72%) create mode 100644 pkg/apps/consumerd/consumerd_suite_test.go create mode 100644 pkg/apps/consumerd/queue_test.go create mode 100644 pkg/clients/availability.go create mode 100644 pkg/clients/availability_test.go create mode 100644 pkg/clients/clients_suite_test.go create mode 100644 pkg/test/environment.go delete mode 100644 pkg/util/msgpack.go diff --git a/cmd/kubecc/components/consumerd/consumerd.go b/cmd/kubecc/components/consumerd/consumerd.go index 52ae58e..69607e0 100644 --- a/cmd/kubecc/components/consumerd/consumerd.go +++ b/cmd/kubecc/components/consumerd/consumerd.go @@ -72,7 +72,7 @@ func run(cmd *cobra.Command, args []string) { }, ), consumerd.WithToolchainRunners(cctoolchain.AddToStore, sleeptoolchain.AddToStore), - consumerd.WithSchedulerClient(schedulerClient, schedulerCC), + consumerd.WithSchedulerClient(schedulerClient), consumerd.WithMonitorClient(monitorClient), ) diff --git a/internal/testutil/skip.go b/internal/testutil/ginkgo.go similarity index 72% rename from internal/testutil/skip.go rename to internal/testutil/ginkgo.go index ca071e3..45d024d 100644 --- a/internal/testutil/skip.go +++ b/internal/testutil/ginkgo.go @@ -2,8 +2,11 @@ package testutil import ( "os" + "path/filepath" + "time" "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) // InGithubWorkflow returns true if the test is running inside github @@ -22,3 +25,10 @@ func SkipInGithubWorkflow() { return } } + +func ExtendTimeoutsIfDebugging() { + self, _ := os.Executable() + if filepath.Base(self) == "debug.test" { + gomega.SetDefaultEventuallyTimeout(1 * time.Hour) + } +} diff --git a/internal/testutil/toolchain/local.go b/internal/testutil/toolchain/local.go index 829a2ab..621c065 100644 --- a/internal/testutil/toolchain/local.go +++ b/internal/testutil/toolchain/local.go @@ -2,7 +2,6 @@ package toolchain import ( "github.com/cobalt77/kubecc/internal/testutil" - testtoolchain "github.com/cobalt77/kubecc/internal/testutil" "github.com/cobalt77/kubecc/pkg/meta" "github.com/cobalt77/kubecc/pkg/run" "github.com/cobalt77/kubecc/pkg/types" @@ -29,7 +28,7 @@ func (m localRunnerManager) Run( } ap.Parse() task := run.Begin(sctx, - &testtoolchain.SleepRunner{ + &testutil.SleepRunner{ Duration: ap.Duration, }, req.GetToolchain()) err = x.Exec(task) diff --git a/internal/testutil/toolchain/recv.go b/internal/testutil/toolchain/recv.go index a06014d..9fff44a 100644 --- a/internal/testutil/toolchain/recv.go +++ b/internal/testutil/toolchain/recv.go @@ -2,7 +2,6 @@ package toolchain import ( "github.com/cobalt77/kubecc/internal/testutil" - testtoolchain "github.com/cobalt77/kubecc/internal/testutil" "github.com/cobalt77/kubecc/pkg/meta" "github.com/cobalt77/kubecc/pkg/run" "github.com/cobalt77/kubecc/pkg/types" @@ -30,7 +29,7 @@ func (m recvRemoteRunnerManager) Run( } ap.Parse() task := run.Begin(sctx, - &testtoolchain.SleepRunner{ + &testutil.SleepRunner{ Duration: ap.Duration, }, req.GetToolchain()) err = x.Exec(task) 
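The server.go hunks below export the previously unexported functional option types (agentServerOption becomes AgentServerOption, and likewise for the cache, consumerd, and scheduler servers) so that external packages such as the new pkg/test environment can assemble option slices before calling the constructors. The sketch that follows shows the general pattern with purely illustrative names; it is not the actual kubecc API.

package main

import "fmt"

// ServerOptions holds configuration assembled from functional options.
// These names are illustrative, not the actual kubecc option types.
type ServerOptions struct {
	address     string
	concurrency int
}

// ServerOption is exported so that other packages can declare
// []ServerOption slices and pass them through to the constructor.
type ServerOption func(*ServerOptions)

func WithAddress(addr string) ServerOption {
	return func(o *ServerOptions) { o.address = addr }
}

func WithConcurrency(n int) ServerOption {
	return func(o *ServerOptions) { o.concurrency = n }
}

func NewServer(opts ...ServerOption) *ServerOptions {
	options := &ServerOptions{address: "localhost:9090", concurrency: 1}
	for _, op := range opts {
		op(options)
	}
	return options
}

func main() {
	// An external package can build the option slice conditionally,
	// which is only possible when the option type is exported.
	opts := []ServerOption{WithConcurrency(4)}
	if addr := "127.0.0.1:9091"; addr != "" {
		opts = append(opts, WithAddress(addr))
	}
	fmt.Printf("%+v\n", NewServer(opts...))
}

The pkg/test environment relies on this when it conditionally appends monitor and scheduler client options based on the configured listen addresses.
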
diff --git a/pkg/apps/agent/server.go b/pkg/apps/agent/server.go index a7cd678..993afa6 100644 --- a/pkg/apps/agent/server.go +++ b/pkg/apps/agent/server.go @@ -36,39 +36,39 @@ type AgentServerOptions struct { usageLimits *metrics.UsageLimits } -type agentServerOption func(*AgentServerOptions) +type AgentServerOption func(*AgentServerOptions) -func (o *AgentServerOptions) Apply(opts ...agentServerOption) { +func (o *AgentServerOptions) Apply(opts ...AgentServerOption) { for _, op := range opts { op(o) } } -func WithToolchainFinders(args ...toolchains.FinderWithOptions) agentServerOption { +func WithToolchainFinders(args ...toolchains.FinderWithOptions) AgentServerOption { return func(o *AgentServerOptions) { o.toolchainFinders = args } } -func WithToolchainRunners(args ...run.StoreAddFunc) agentServerOption { +func WithToolchainRunners(args ...run.StoreAddFunc) AgentServerOption { return func(o *AgentServerOptions) { o.toolchainRunners = args } } -func WithSchedulerClient(client types.SchedulerClient) agentServerOption { +func WithSchedulerClient(client types.SchedulerClient) AgentServerOption { return func(o *AgentServerOptions) { o.schedulerClient = client } } -func WithMonitorClient(client types.MonitorClient) agentServerOption { +func WithMonitorClient(client types.MonitorClient) AgentServerOption { return func(o *AgentServerOptions) { o.monitorClient = client } } -func WithUsageLimits(usageLimits *metrics.UsageLimits) agentServerOption { +func WithUsageLimits(usageLimits *metrics.UsageLimits) AgentServerOption { return func(o *AgentServerOptions) { o.usageLimits = usageLimits } @@ -76,7 +76,7 @@ func WithUsageLimits(usageLimits *metrics.UsageLimits) agentServerOption { func NewAgentServer( ctx context.Context, - opts ...agentServerOption, + opts ...AgentServerOption, ) *AgentServer { options := AgentServerOptions{} options.Apply(opts...) diff --git a/pkg/apps/cachesrv/server.go b/pkg/apps/cachesrv/server.go index 6f2e585..faa76de 100644 --- a/pkg/apps/cachesrv/server.go +++ b/pkg/apps/cachesrv/server.go @@ -30,15 +30,15 @@ type CacheServerOptions struct { monitorClient types.MonitorClient } -type cacheServerOption func(*CacheServerOptions) +type CacheServerOption func(*CacheServerOptions) -func (o *CacheServerOptions) Apply(opts ...cacheServerOption) { +func (o *CacheServerOptions) Apply(opts ...CacheServerOption) { for _, op := range opts { op(o) } } -func WithStorageProvider(sp storage.StorageProvider) cacheServerOption { +func WithStorageProvider(sp storage.StorageProvider) CacheServerOption { return func(o *CacheServerOptions) { o.storageProvider = sp } @@ -46,7 +46,7 @@ func WithStorageProvider(sp storage.StorageProvider) cacheServerOption { func WithMonitorClient( client types.MonitorClient, -) cacheServerOption { +) CacheServerOption { return func(o *CacheServerOptions) { o.monitorClient = client } @@ -54,7 +54,7 @@ func WithMonitorClient( func NewCacheServer( ctx context.Context, cfg config.CacheSpec, - opts ...cacheServerOption, + opts ...CacheServerOption, ) *CacheServer { options := CacheServerOptions{} options.Apply(opts...) diff --git a/pkg/apps/consumerd/consumerd_suite_test.go b/pkg/apps/consumerd/consumerd_suite_test.go new file mode 100644 index 0000000..2964a04 --- /dev/null +++ b/pkg/apps/consumerd/consumerd_suite_test.go @@ -0,0 +1,25 @@ +package consumerd_test + +import ( + "testing" + + "github.com/cobalt77/kubecc/pkg/test" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var testEnv *test.Environment + +func TestConsumerd(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Consumerd Suite") +} + +var _ = BeforeSuite(func() { + testEnv = test.NewDefaultEnvironment() + testEnv.Start() +}) + +var _ = AfterSuite(func() { + testEnv.Shutdown() +}) diff --git a/pkg/apps/consumerd/queue.go b/pkg/apps/consumerd/queue.go index afa9144..c26b8e3 100644 --- a/pkg/apps/consumerd/queue.go +++ b/pkg/apps/consumerd/queue.go @@ -2,117 +2,64 @@ package consumerd import ( "context" - "sync" + "github.com/cobalt77/kubecc/pkg/clients" "github.com/cobalt77/kubecc/pkg/run" + "github.com/cobalt77/kubecc/pkg/types" ) -type remoteStatus int - -const ( - unavailable remoteStatus = iota - available - full -) - -type remoteStatusManager struct { - status remoteStatus - cond *sync.Cond -} - -func newRemoteStatusManager() *remoteStatusManager { - rsm := &remoteStatusManager{ - status: unavailable, - cond: sync.NewCond(&sync.Mutex{}), - } - - // todo: watch monitor for scheduler status - return rsm -} - -func (rsm *remoteStatusManager) EnsureStatus(stat remoteStatus) <-chan struct{} { - ch := make(chan struct{}) - defer func() { - go func() { - rsm.cond.L.Lock() - defer rsm.cond.L.Unlock() - - for { - if rsm.status != stat { - close(ch) - return - } - rsm.cond.Wait() - } - }() - }() - - rsm.cond.L.Lock() - defer rsm.cond.L.Unlock() - - for { - if rsm.status == stat { - return ch - } - rsm.cond.Wait() - } -} - -func (rsm *remoteStatusManager) SetStatus(stat remoteStatus) { - rsm.cond.L.Lock() - defer rsm.cond.L.Unlock() - rsm.status = stat - rsm.cond.Broadcast() -} - -type splitTask struct { - local, remote run.PackagedTask +type SplitTask struct { + Local, Remote run.PackagedTask } -func (s *splitTask) Wait() (interface{}, error) { +func (s *SplitTask) Wait() (interface{}, error) { select { - case results := <-s.local.C: + case results := <-s.Local.C: return results.Response, results.Err - case results := <-s.remote.C: + case results := <-s.Remote.C: return results.Response, results.Err } } -type splitQueue struct { +type SplitQueue struct { ctx context.Context - rsm *remoteStatusManager - taskQueue chan *splitTask + avc *clients.AvailabilityChecker + taskQueue chan *SplitTask } -type queueAction int +type QueueAction int const ( - requeue queueAction = iota - doNotRequeue + Requeue QueueAction = iota + DoNotRequeue ) func NewSplitQueue( ctx context.Context, -) *splitQueue { - sq := &splitQueue{ + monClient types.MonitorClient, +) *SplitQueue { + sq := &SplitQueue{ ctx: ctx, - taskQueue: make(chan *splitTask), - rsm: newRemoteStatusManager(), + taskQueue: make(chan *SplitTask), + avc: clients.NewAvailabilityChecker( + clients.ComponentFilter(types.Scheduler), + ), } + clients.WatchAvailability(ctx, types.Scheduler, monClient, sq.avc) go sq.runLocalQueue() go sq.runRemoteQueue() return sq } -func (s *splitQueue) In() chan<- *splitTask { +func (s *SplitQueue) In() chan<- *SplitTask { return s.taskQueue } -func (s *splitQueue) processTask(pt run.PackagedTask) queueAction { +func (s *SplitQueue) processTask(pt run.PackagedTask) QueueAction { response, err := pt.F() if err != nil { - return requeue + return Requeue } pt.C <- struct { Response interface{} @@ -121,35 +68,35 @@ func (s *splitQueue) processTask(pt run.PackagedTask) queueAction { Response: response, Err: err, } - return doNotRequeue + return DoNotRequeue } -func (s *splitQueue) runLocalQueue() { +func (s *SplitQueue) runLocalQueue() { for { select { case <-s.ctx.Done(): return case task := 
<-s.taskQueue: - switch s.processTask(task.local) { - case requeue: + switch s.processTask(task.Local) { + case Requeue: s.In() <- task } } } } -func (s *splitQueue) runRemoteQueue() { +func (s *SplitQueue) runRemoteQueue() { for { - statusChanged := s.rsm.EnsureStatus(available) + available := s.avc.EnsureAvailable() for { select { case <-s.ctx.Done(): return - case <-statusChanged: + case <-available.Done(): goto restart case task := <-s.taskQueue: - switch s.processTask(task.remote) { - case requeue: + switch s.processTask(task.Remote) { + case Requeue: s.In() <- task } } diff --git a/pkg/apps/consumerd/queue_test.go b/pkg/apps/consumerd/queue_test.go new file mode 100644 index 0000000..340e340 --- /dev/null +++ b/pkg/apps/consumerd/queue_test.go @@ -0,0 +1,142 @@ +package consumerd_test + +import ( + "context" + + "github.com/cobalt77/kubecc/internal/logkc" + "github.com/cobalt77/kubecc/internal/testutil" + testtoolchain "github.com/cobalt77/kubecc/internal/testutil/toolchain" + "github.com/cobalt77/kubecc/pkg/apps/consumerd" + "github.com/cobalt77/kubecc/pkg/identity" + "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/metrics" + "github.com/cobalt77/kubecc/pkg/run" + "github.com/cobalt77/kubecc/pkg/tracing" + "github.com/cobalt77/kubecc/pkg/types" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "go.uber.org/atomic" +) + +type testExecutor struct { + numTasks *atomic.Int32 + completed *atomic.Int32 +} + +func (x *testExecutor) Exec(task *run.Task) error { + x.numTasks.Inc() + defer x.numTasks.Dec() + + go func() { + defer GinkgoRecover() + task.Run() + }() + select { + case <-task.Done(): + case <-task.Context().Done(): + } + x.completed.Inc() + return task.Error() +} + +func newTestExecutor() *testExecutor { + return &testExecutor{ + numTasks: atomic.NewInt32(0), + completed: atomic.NewInt32(0), + } +} + +func (x *testExecutor) CompleteUsageLimits(*metrics.UsageLimits) { + +} + +func (x *testExecutor) CompleteTaskStatus(s *metrics.TaskStatus) { + s.NumDelegated = x.numTasks.Load() +} + +func (x *testExecutor) ExecAsync(task *run.Task) <-chan error { + panic("not implemented") +} + +var _ = Describe("Split Queue", func() { + testCtx := meta.NewContext( + meta.WithProvider(identity.Component, meta.WithValue(types.TestComponent)), + meta.WithProvider(identity.UUID), + meta.WithProvider(logkc.Logger), + meta.WithProvider(tracing.Tracer), + ) + + numTasks := 100 + taskPool := make(chan *consumerd.SplitTask, numTasks) + cleanup := make(chan context.CancelFunc, 100) + localExec := newTestExecutor() + remoteExec := newTestExecutor() + tc := &types.Toolchain{ + Kind: types.Gnu, + Lang: types.CXX, + Executable: testutil.TestToolchainExecutable, + TargetArch: "testarch", + Version: "0", + PicDefault: true, + } + taskArgs := []string{"-duration", "0"} + rm := &testtoolchain.TestToolchainRunner{} + request := &types.RunRequest{ + Compiler: &types.RunRequest_Toolchain{ + Toolchain: tc, + }, + Args: taskArgs, + UID: 1000, + GID: 1000, + } + + BeforeEach(func() { + schedulerClient := testEnv.NewSchedulerClient() + + Expect(len(taskPool)).To(Equal(0)) + Expect(cap(taskPool)).To(Equal(numTasks)) + + for i := 0; i < numTasks; i++ { + contexts := run.Contexts{ + ServerContext: testCtx, + ClientContext: testCtx, + } + taskPool <- &consumerd.SplitTask{ + Local: run.Package( + rm.RunLocal(rm.NewArgParser(testCtx, taskArgs)), + contexts, + localExec, + request, + ), + Remote: run.Package( + rm.SendRemote( + rm.NewArgParser(testCtx, taskArgs), + schedulerClient, + ), + 
contexts, + remoteExec, + request, + ), + } + } + + _, cf := testEnv.SpawnMonitor() + cleanup <- cf + }) + + AfterEach(func() { + for c := range cleanup { + c() + } + }) + + PSpecify("when no scheduler is available, the queue should run all tasks locally", func() { + sq := consumerd.NewSplitQueue(testCtx, testEnv.NewMonitorClient()) + for task := range taskPool { + sq.In() <- task + } + Eventually(func() int32 { + return localExec.numTasks.Load() + }).Should(Equal(int32(numTasks))) + }) +}) diff --git a/pkg/apps/consumerd/server.go b/pkg/apps/consumerd/server.go index ae9a047..0f9c79c 100644 --- a/pkg/apps/consumerd/server.go +++ b/pkg/apps/consumerd/server.go @@ -37,35 +37,34 @@ type consumerdServer struct { connection *grpc.ClientConn localExecutor run.Executor remoteExecutor run.Executor - queue *splitQueue + queue *SplitQueue numConsumers *atomic.Int32 localTasksCompleted *atomic.Int64 } type ConsumerdServerOptions struct { - toolchainFinders []toolchains.FinderWithOptions - toolchainRunners []run.StoreAddFunc - schedulerClient types.SchedulerClient - monitorClient types.MonitorClient - schedulerConnection *grpc.ClientConn - usageLimits *metrics.UsageLimits + toolchainFinders []toolchains.FinderWithOptions + toolchainRunners []run.StoreAddFunc + schedulerClient types.SchedulerClient + monitorClient types.MonitorClient + usageLimits *metrics.UsageLimits } -type consumerdServerOption func(*ConsumerdServerOptions) +type ConsumerdServerOption func(*ConsumerdServerOptions) -func (o *ConsumerdServerOptions) Apply(opts ...consumerdServerOption) { +func (o *ConsumerdServerOptions) Apply(opts ...ConsumerdServerOption) { for _, op := range opts { op(o) } } -func WithToolchainFinders(args ...toolchains.FinderWithOptions) consumerdServerOption { +func WithToolchainFinders(args ...toolchains.FinderWithOptions) ConsumerdServerOption { return func(o *ConsumerdServerOptions) { o.toolchainFinders = args } } -func WithToolchainRunners(args ...run.StoreAddFunc) consumerdServerOption { +func WithToolchainRunners(args ...run.StoreAddFunc) ConsumerdServerOption { return func(o *ConsumerdServerOptions) { o.toolchainRunners = args } @@ -73,11 +72,9 @@ func WithToolchainRunners(args ...run.StoreAddFunc) consumerdServerOption { func WithSchedulerClient( client types.SchedulerClient, - cc *grpc.ClientConn, -) consumerdServerOption { +) ConsumerdServerOption { return func(o *ConsumerdServerOptions) { o.schedulerClient = client - o.schedulerConnection = cc } } @@ -85,13 +82,13 @@ func WithSchedulerClient( // outside the cluster. func WithMonitorClient( client types.MonitorClient, -) consumerdServerOption { +) ConsumerdServerOption { return func(o *ConsumerdServerOptions) { o.monitorClient = client } } -func WithUsageLimits(cpuConfig *metrics.UsageLimits) consumerdServerOption { +func WithUsageLimits(cpuConfig *metrics.UsageLimits) ConsumerdServerOption { return func(o *ConsumerdServerOptions) { o.usageLimits = cpuConfig } @@ -99,7 +96,7 @@ func WithUsageLimits(cpuConfig *metrics.UsageLimits) consumerdServerOption { func NewConsumerdServer( ctx context.Context, - opts ...consumerdServerOption, + opts ...ConsumerdServerOption, ) *consumerdServer { options := ConsumerdServerOptions{} options.Apply(opts...) 
@@ -118,12 +115,11 @@ func NewConsumerdServer( storeUpdateCh: make(chan struct{}, 1), numConsumers: atomic.NewInt32(0), localTasksCompleted: atomic.NewInt64(0), - queue: NewSplitQueue(ctx), + queue: NewSplitQueue(ctx, options.monitorClient), } if options.schedulerClient != nil { srv.schedulerClient = options.schedulerClient - srv.connection = options.schedulerConnection } if options.monitorClient != nil { srv.metricsProvider = clients.NewMonitorProvider(ctx, options.monitorClient, @@ -301,10 +297,10 @@ func (c *consumerdServer) Run( ClientContext: ctx, } - st := &splitTask{ - local: run.Package( + st := &SplitTask{ + Local: run.Package( runner.RunLocal(ap), ctxs, c.localExecutor, req), - remote: run.Package( + Remote: run.Package( runner.SendRemote(ap, c.schedulerClient), ctxs, c.remoteExecutor, req), } c.queue.In() <- st diff --git a/pkg/apps/monitor/monitor_test.go b/pkg/apps/monitor/monitor_test.go index 4f69a22..d952be8 100644 --- a/pkg/apps/monitor/monitor_test.go +++ b/pkg/apps/monitor/monitor_test.go @@ -174,7 +174,7 @@ var _ = Describe("Monitor", func() { )) Expect(err).NotTo(HaveOccurred()) mc := types.NewMonitorClient(cc) - provider = clients.NewMonitorProvider(cctx, mc, clients.Buffered|clients.Block) + provider = clients.NewMonitorProvider(cctx, mc, clients.Buffered) Expect(provider).NotTo(BeNil()) }) It("should create a store", func() { @@ -197,7 +197,7 @@ var _ = Describe("Monitor", func() { }) }) It("should notify the listener", func() { - Eventually(listenerEvents["testKey1Changed"]).Should(Receive(Equal(1))) + Eventually(listenerEvents["testKey1Changed"]).Should(Receive(Equal(int32(1)))) Expect(listenerEvents["testKey2Changed"]).ShouldNot(Receive()) Consistently(listenerEvents["testKey1Changed"]).ShouldNot(Receive()) }) @@ -232,7 +232,7 @@ var _ = Describe("Monitor", func() { lateJoinListenerEvents["providerRemoved"] <- struct{}{} }) Eventually(lateJoinListenerEvents["providerAdded"]).Should(Receive(Equal(providerUuid))) - Eventually(lateJoinListenerEvents["testKey1Changed"]).Should(Receive(Equal(1))) + Eventually(lateJoinListenerEvents["testKey1Changed"]).Should(Receive(Equal(int32(1)))) }) }) When("The provider updates a different key", func() { @@ -285,7 +285,7 @@ var _ = Describe("Monitor", func() { numProviders := 2 numListenersPerKey := 10 numUpdatesPerKey := 1000 - callbackTimeout := 10 * time.Second + callbackTimeout := 60 * time.Second stressTestLoops := 5 if testutil.IsRaceDetectorEnabled() { numListenersPerKey = 10 diff --git a/pkg/apps/monitor/server.go b/pkg/apps/monitor/server.go index 0d66864..d1c70ee 100644 --- a/pkg/apps/monitor/server.go +++ b/pkg/apps/monitor/server.go @@ -272,7 +272,11 @@ func (m *MonitorServer) post(metric *types.Metric) error { store.Delete(metric.Key.Name) return nil } - if store.CAS(metric.Key.Name, metric.Value) { + contents, err := metric.Value.UnmarshalNew() + if err != nil { + return err + } + if store.CAS(metric.Key.Name, contents) { m.lg.With( zap.String("key", metric.Key.ShortID()), ).Debug("Metric updated") diff --git a/pkg/apps/monitor/store_test.go b/pkg/apps/monitor/store_test.go index 0d4e8ee..da9ec4a 100644 --- a/pkg/apps/monitor/store_test.go +++ b/pkg/apps/monitor/store_test.go @@ -3,6 +3,7 @@ package monitor_test import ( "context" "strings" + "time" mapset "github.com/deckarep/golang-set" . 
"github.com/onsi/ginkgo" @@ -130,4 +131,13 @@ var _ = Describe("Store", func() { store.Delete("key1") Expect(store.Len()).To(Equal(0)) }, 100) + Measure("Throughput", func(b Benchmarker) { + start := time.Now() + for i := 0; i < 1000; i++ { + store.CAS("throughput", &testutil.Test1{Counter: int32(i)}) + } + elapsed := time.Since(start) + b.RecordValueWithPrecision("Updates per second", + float64(1e12/elapsed.Nanoseconds())/1e6, "M", 3) + }, 100) }) diff --git a/pkg/apps/scheduler/server.go b/pkg/apps/scheduler/server.go index 9070512..25db38f 100644 --- a/pkg/apps/scheduler/server.go +++ b/pkg/apps/scheduler/server.go @@ -36,21 +36,21 @@ type SchedulerServerOptions struct { cacheClient types.CacheClient } -type schedulerServerOption func(*SchedulerServerOptions) +type SchedulerServerOption func(*SchedulerServerOptions) -func (o *SchedulerServerOptions) Apply(opts ...schedulerServerOption) { +func (o *SchedulerServerOptions) Apply(opts ...SchedulerServerOption) { for _, op := range opts { op(o) } } -func WithMonitorClient(monClient types.MonitorClient) schedulerServerOption { +func WithMonitorClient(monClient types.MonitorClient) SchedulerServerOption { return func(o *SchedulerServerOptions) { o.monClient = monClient } } -func WithCacheClient(cacheClient types.CacheClient) schedulerServerOption { +func WithCacheClient(cacheClient types.CacheClient) SchedulerServerOption { return func(o *SchedulerServerOptions) { o.cacheClient = cacheClient } @@ -58,7 +58,7 @@ func WithCacheClient(cacheClient types.CacheClient) schedulerServerOption { func NewSchedulerServer( ctx context.Context, - opts ...schedulerServerOption, + opts ...SchedulerServerOption, ) *schedulerServer { options := SchedulerServerOptions{} options.Apply(opts...) diff --git a/pkg/clients/availability.go b/pkg/clients/availability.go new file mode 100644 index 0000000..3f9dcb1 --- /dev/null +++ b/pkg/clients/availability.go @@ -0,0 +1,117 @@ +package clients + +import ( + "context" + "sync" + + "github.com/cobalt77/kubecc/pkg/servers" + "github.com/cobalt77/kubecc/pkg/types" + mapset "github.com/deckarep/golang-set" +) + +type AvailabilityListener interface { + OnComponentAvailable(context.Context, *types.WhoisResponse) +} + +type RemoteStatus int + +const ( + Unavailable RemoteStatus = iota + Available +) + +type AvailabilityChecker struct { + status RemoteStatus + filter AvailabilityFilter + cond *sync.Cond +} + +type AvailabilityFilter = func(*types.WhoisResponse) bool + +func ComponentFilter(c ...types.Component) AvailabilityFilter { + set := mapset.NewSet() + for _, item := range c { + set.Add(item) + } + return func(info *types.WhoisResponse) bool { + return set.Contains(info.Component) + } +} + +func NewAvailabilityChecker(filter AvailabilityFilter) *AvailabilityChecker { + rsm := &AvailabilityChecker{ + status: Unavailable, + filter: filter, + cond: sync.NewCond(&sync.Mutex{}), + } + return rsm +} + +func (rsm *AvailabilityChecker) EnsureAvailable() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + go func() { + rsm.cond.L.Lock() + defer rsm.cond.L.Unlock() + + for { + if rsm.status != Available { + cancel() + return + } + rsm.cond.Wait() + } + }() + }() + + rsm.cond.L.Lock() + defer rsm.cond.L.Unlock() + + for { + if rsm.status == Available { + return ctx + } + rsm.cond.Wait() + } +} + +func (rsm *AvailabilityChecker) OnComponentAvailable( + ctx context.Context, + info *types.WhoisResponse, +) { + if !rsm.filter(info) { + return + } + + rsm.cond.L.Lock() + rsm.status = Available 
+ rsm.cond.Broadcast() + rsm.cond.L.Unlock() + + <-ctx.Done() + + rsm.cond.L.Lock() + rsm.status = Unavailable + rsm.cond.Broadcast() + rsm.cond.L.Unlock() +} + +func WatchAvailability( + ctx context.Context, + component types.Component, + monClient types.MonitorClient, + csl AvailabilityListener, +) { + listener := NewListener(ctx, monClient, servers.WithLogEvents(0)) + listener.OnProviderAdded(func(ctx context.Context, uuid string) { + info, err := monClient.Whois(ctx, &types.WhoisRequest{ + UUID: uuid, + }) + if err != nil { + return + } + if info.Component == component { + csl.OnComponentAvailable(ctx, info) + } + }) +} diff --git a/pkg/clients/availability_test.go b/pkg/clients/availability_test.go new file mode 100644 index 0000000..a5182d0 --- /dev/null +++ b/pkg/clients/availability_test.go @@ -0,0 +1,121 @@ +package clients_test + +import ( + "context" + "time" + + "github.com/google/uuid" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "go.uber.org/atomic" + + "github.com/cobalt77/kubecc/pkg/clients" + "github.com/cobalt77/kubecc/pkg/types" +) + +var _ = Describe("Status Manager", func() { + var avListener *clients.AvailabilityChecker + When("creating an availability listener", func() { + It("should succeed", func() { + avListener = clients.NewAvailabilityChecker( + clients.ComponentFilter(types.TestComponent)) + }) + }) + + numListeners := 1 + Measure("should ensure component availability", func(b Benchmarker) { + available := make([]chan struct{}, numListeners) + unavailable := make([]chan struct{}, numListeners) + for i := 0; i < numListeners; i++ { + available[i] = make(chan struct{}) + unavailable[i] = make(chan struct{}) + go func(i int) { + defer GinkgoRecover() + for { + ctx := avListener.EnsureAvailable() + available[i] <- struct{}{} + <-ctx.Done() + unavailable[i] <- struct{}{} + } + }(i) + } + + By("checking if EnsureAvailable is blocked") + completed := atomic.NewInt32(int32(numListeners)) + for _, ch := range available { + go func(ch chan struct{}) { + defer GinkgoRecover() + defer completed.Dec() + Consistently( + ch, + 100*time.Millisecond, + 10*time.Millisecond, + ).ShouldNot(Receive()) + }(ch) + } + Eventually(completed.Load).Should(Equal(int32(0))) + + By("connecting the component") + var cancel context.CancelFunc + go func() { + defer GinkgoRecover() + ctx, ctxCancel := context.WithCancel(context.Background()) + cancel = ctxCancel + avListener.OnComponentAvailable(ctx, &types.WhoisResponse{ + UUID: uuid.NewString(), + Address: "0.0.0.0", + Component: types.TestComponent, + }) + }() + + By("checking if EnsureAvailable unblocked") + completed.Store(int32(numListeners)) + for _, ch := range available { + go func(ch chan struct{}) { + defer GinkgoRecover() + defer completed.Dec() + Eventually(ch).Should(Receive()) + }(ch) + } + b.Time("checking if EnsureAvailable unblocked", func() { + Eventually(completed.Load).Should(Equal(int32(0))) + }) + for _, ch := range available { + Expect(ch).NotTo(Receive()) + } + + By("disconnecting the component") + completed.Store(int32(numListeners)) + cancel() + for _, ch := range unavailable { + go func(ch chan struct{}) { + defer GinkgoRecover() + defer completed.Dec() + Eventually(ch).Should(Receive()) + }(ch) + } + b.Time("disconnecting the component", func() { + Eventually(completed.Load).Should(Equal(int32(0))) + }) + for _, ch := range unavailable { + Expect(ch).NotTo(Receive()) + } + + By("checking if EnsureAvailable is blocked again") + completed.Store(int32(numListeners)) + for _, ch := range available { + go 
func(ch chan struct{}) { + defer GinkgoRecover() + defer completed.Dec() + Consistently( + ch, + 100*time.Millisecond, + 10*time.Millisecond, + ).ShouldNot(Receive()) + }(ch) + } + Eventually(completed.Load).Should(Equal(int32(0))) + + numListeners++ + }, 10) +}) diff --git a/pkg/clients/clients_suite_test.go b/pkg/clients/clients_suite_test.go new file mode 100644 index 0000000..f86c35a --- /dev/null +++ b/pkg/clients/clients_suite_test.go @@ -0,0 +1,15 @@ +package clients_test + +import ( + "testing" + + "github.com/cobalt77/kubecc/internal/testutil" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestClients(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Clients Suite") + testutil.ExtendTimeoutsIfDebugging() +} diff --git a/pkg/clients/listener.go b/pkg/clients/listener.go index ea0865a..bae562e 100644 --- a/pkg/clients/listener.go +++ b/pkg/clients/listener.go @@ -11,13 +11,13 @@ import ( "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/servers" "github.com/cobalt77/kubecc/pkg/types" - "github.com/tinylib/msgp/msgp" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/known/anypb" ) type monitorListener struct { @@ -90,8 +90,9 @@ func (cl *changeListener) HandleStream(clientStream grpc.ClientStream) error { lg := meta.Log(cl.ctx) argValue := reflect.New(cl.argType) var msgReflect protoreflect.ProtoMessage - if msg, ok := argValue.Interface().(proto.Message); !ok { + if msg, ok := argValue.Interface().(proto.Message); ok { msgReflect = msg + } else { panic("Handler argument does not implement proto.Message") } for { @@ -143,25 +144,29 @@ func (c *changeListener) OrExpired(handler func() metrics.RetryOptions) { c.expiredHandler = handler } -func handlerArgType(handler interface{}) (reflect.Type, reflect.Value) { +func handlerArgType(handler interface{}) (reflect.Type, reflect.Value, string) { funcType := reflect.TypeOf(handler) if funcType.NumIn() != 1 { panic("handler must be a function with one argument") } - valuePtrType := funcType.In(0) - valueType := valuePtrType.Elem() - if !valuePtrType.Implements(reflect.TypeOf((*msgp.Decodable)(nil)).Elem()) { - panic("argument must implement msgp.Decodable") + valueType := funcType.In(0).Elem() + proto, ok := reflect.New(valueType).Interface().(proto.Message) + if !ok { + panic("argument must implement proto.Message") + } + any, err := anypb.New(proto) + if err != nil { + panic(err) } funcValue := reflect.ValueOf(handler) - return valueType, funcValue + return valueType, funcValue, any.GetTypeUrl() } func (l *monitorListener) OnValueChanged( bucket string, handler interface{}, // func(type) ) metrics.ChangeListener { - argType, funcValue := handlerArgType(handler) + argType, funcValue, typeUrl := handlerArgType(handler) cl := &changeListener{ ctx: l.ctx, handler: funcValue, @@ -170,7 +175,7 @@ func (l *monitorListener) OnValueChanged( monClient: l.monClient, key: &types.Key{ Bucket: bucket, - Name: argType.Name(), + Name: typeUrl, }, } mgr := servers.NewStreamManager(l.ctx, cl, l.streamOpts...) 
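
With msgp removed, the listener derives each metric key's Name from the protobuf Any type URL of the handler's argument type rather than from the Go type name. A minimal sketch of that mapping, using a well-known wrapper type in place of a kubecc metric message:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// wrapperspb.Int32Value stands in for a kubecc metric message here.
	// Wrapping a proto.Message in an Any yields a type URL that uniquely
	// identifies the message type and can serve as a bucket key name.
	msg := wrapperspb.Int32(42)
	packed, err := anypb.New(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(packed.GetTypeUrl()) // type.googleapis.com/google.protobuf.Int32Value

	// The receiving side can unpack the Any back into a concrete message.
	out := &wrapperspb.Int32Value{}
	if err := packed.UnmarshalTo(out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetValue()) // 42
}

Keying by type URL keeps the listener consistent with the monitor server, which now unmarshals metric values from their Any wrappers via UnmarshalNew.
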
diff --git a/pkg/config/spec.go b/pkg/config/spec.go index 8cd0841..a6020ec 100644 --- a/pkg/config/spec.go +++ b/pkg/config/spec.go @@ -50,7 +50,6 @@ type AgentSpec struct { UsageLimits UsageLimitsSpec `json:"usageLimits"` SchedulerAddress string `json:"schedulerAddress"` MonitorAddress string `json:"monitorAddress"` - ListenAddress string `json:"listenAddress"` } type ConsumerSpec struct { diff --git a/pkg/meta/context.go b/pkg/meta/context.go index b446ee6..feb81f1 100644 --- a/pkg/meta/context.go +++ b/pkg/meta/context.go @@ -182,6 +182,13 @@ func WithProvider(kp Provider, opts ...withProviderOption) providerInitInfo { } func NewContext(providers ...providerInitInfo) context.Context { + return NewContextWithParent(context.Background(), providers...) +} + +func NewContextWithParent( + parentCtx context.Context, + providers ...providerInitInfo, +) context.Context { providerMap := map[interface{}]Provider{} for _, mp := range providers { providerMap[mp.KeyProvider.Key()] = mp.KeyProvider diff --git a/pkg/run/task.go b/pkg/run/task.go index 4c7f61c..bd28343 100644 --- a/pkg/run/task.go +++ b/pkg/run/task.go @@ -19,6 +19,10 @@ type Task struct { span opentracing.Span } +func (t *Task) Context() context.Context { + return t.ctx +} + func (t *Task) Run() { if t == nil { return diff --git a/pkg/servers/grpc.go b/pkg/servers/grpc.go index 5fef667..aebb8a6 100644 --- a/pkg/servers/grpc.go +++ b/pkg/servers/grpc.go @@ -18,7 +18,6 @@ import ( "github.com/opentracing/opentracing-go/ext" "go.uber.org/zap" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc" ) @@ -125,7 +124,8 @@ func Dial( ), grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(1e8), - grpc.UseCompressor(gzip.Name), + // note: this maybe causes massive slowdowns when used with anypb + // grpc.UseCompressor(gzip.Name), ), ) if options.tls { diff --git a/pkg/test/environment.go b/pkg/test/environment.go new file mode 100644 index 0000000..d93f7ef --- /dev/null +++ b/pkg/test/environment.go @@ -0,0 +1,398 @@ +package test + +import ( + "context" + "net" + "sync" + + "github.com/cobalt77/kubecc/internal/logkc" + "github.com/cobalt77/kubecc/internal/testutil" + testtoolchain "github.com/cobalt77/kubecc/internal/testutil/toolchain" + "github.com/cobalt77/kubecc/pkg/apps/agent" + "github.com/cobalt77/kubecc/pkg/apps/cachesrv" + "github.com/cobalt77/kubecc/pkg/apps/consumerd" + "github.com/cobalt77/kubecc/pkg/apps/monitor" + "github.com/cobalt77/kubecc/pkg/apps/scheduler" + "github.com/cobalt77/kubecc/pkg/clients" + "github.com/cobalt77/kubecc/pkg/config" + "github.com/cobalt77/kubecc/pkg/host" + "github.com/cobalt77/kubecc/pkg/identity" + "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/metrics" + "github.com/cobalt77/kubecc/pkg/servers" + "github.com/cobalt77/kubecc/pkg/storage" + "github.com/cobalt77/kubecc/pkg/toolchains" + "github.com/cobalt77/kubecc/pkg/tracing" + "github.com/cobalt77/kubecc/pkg/types" + "github.com/google/uuid" + "github.com/opentracing/opentracing-go" + "go.uber.org/atomic" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" +) + +type Environment struct { + Config *config.KubeccSpec + envContext context.Context + envCancel context.CancelFunc + portMapping map[string]*bufconn.Listener + agentCount *atomic.Int32 + consumerdCount *atomic.Int32 +} + +var ( + bufferSize = 1024 * 1024 +) + +func dial( + ctx context.Context, + dialer *bufconn.Listener, +) *grpc.ClientConn { + // the uuid here is not relevant, just 
needs to be unique (pretty sure) + cc, err := servers.Dial(ctx, uuid.NewString(), servers.WithDialOpts( + grpc.WithContextDialer( + func(context.Context, string) (net.Conn, error) { + return dialer.Dial() + }), + )) + if err != nil { + panic(err) + } + return cc +} + +func (e *Environment) SpawnAgent() (context.Context, context.CancelFunc) { + ctx := meta.NewContextWithParent(e.envContext, + meta.WithProvider(identity.Component, meta.WithValue(types.Agent)), + meta.WithProvider(identity.UUID), + meta.WithProvider(logkc.Logger, meta.WithValue( + logkc.New(types.Agent, + logkc.WithName(string(rune('a'+e.agentCount.Load()))), + ), + )), + meta.WithProvider(tracing.Tracer), + meta.WithProvider(host.SystemInfo), + ) + e.agentCount.Inc() + ctx, cancel := context.WithCancel(ctx) + go func() { + <-ctx.Done() + e.agentCount.Dec() + }() + + options := []agent.AgentServerOption{ + agent.WithUsageLimits(&metrics.UsageLimits{ + ConcurrentProcessLimit: int32(e.Config.Agent.UsageLimits.ConcurrentProcessLimit), + QueuePressureMultiplier: e.Config.Agent.UsageLimits.QueuePressureMultiplier, + QueueRejectMultiplier: e.Config.Agent.UsageLimits.QueueRejectMultiplier, + }), + agent.WithToolchainFinders(toolchains.FinderWithOptions{ + Finder: testutil.TestToolchainFinder{}, + }), + agent.WithToolchainRunners(testtoolchain.AddToStore), + } + if addr := e.Config.Monitor.ListenAddress; addr != "" { + cc := dial(ctx, e.portMapping[addr]) + options = append(options, + agent.WithMonitorClient(types.NewMonitorClient(cc))) + } + + if addr := e.Config.Scheduler.ListenAddress; addr != "" { + cc := dial(ctx, e.portMapping[addr]) + options = append(options, + agent.WithSchedulerClient(types.NewSchedulerClient(cc))) + } + + agentSrv := agent.NewAgentServer(ctx, options...) + mgr := servers.NewStreamManager(ctx, agentSrv) + go mgr.Run() + go agentSrv.StartMetricsProvider() + return ctx, cancel +} + +func (e *Environment) SpawnScheduler() (context.Context, context.CancelFunc) { + ctx := meta.NewContextWithParent(e.envContext, + meta.WithProvider(identity.Component, meta.WithValue(types.Scheduler)), + meta.WithProvider(identity.UUID), + meta.WithProvider(logkc.Logger, meta.WithValue( + logkc.New(types.Scheduler, + logkc.WithName("a"), + ), + )), + meta.WithProvider(tracing.Tracer), + ) + ctx, cancel := context.WithCancel(ctx) + lg := meta.Log(ctx) + + srv := servers.NewServer(ctx) + + options := []scheduler.SchedulerServerOption{} + if addr := e.Config.Monitor.ListenAddress; addr != "" { + cc := dial(ctx, e.portMapping[addr]) + options = append(options, + scheduler.WithMonitorClient(types.NewMonitorClient(cc))) + } + + if addr := e.Config.Cache.ListenAddress; addr != "" { + cc := dial(ctx, e.portMapping[addr]) + options = append(options, + scheduler.WithCacheClient(types.NewCacheClient(cc))) + } + + sc := scheduler.NewSchedulerServer(ctx, options...) 
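+	// register the scheduler with its gRPC server, start the metrics provider, and serve on the bufconn listener mapped to the scheduler's listen address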
+ types.RegisterSchedulerServer(srv, sc) + go sc.StartMetricsProvider() + go func() { + if err := srv.Serve(e.portMapping[e.Config.Scheduler.ListenAddress]); err != nil { + lg.Info(err) + } + }() + return ctx, cancel +} + +func (e *Environment) SpawnConsumerd(addr string) (context.Context, context.CancelFunc) { + ctx := meta.NewContextWithParent(e.envContext, + meta.WithProvider(identity.Component, meta.WithValue(types.Consumerd)), + meta.WithProvider(identity.UUID), + meta.WithProvider(logkc.Logger, meta.WithValue( + logkc.New(types.Consumerd, + logkc.WithName(string(rune('a'+e.consumerdCount.Load()))), + ), + )), + meta.WithProvider(tracing.Tracer), + meta.WithProvider(host.SystemInfo), + ) + e.agentCount.Inc() + ctx, cancel := context.WithCancel(ctx) + go func() { + <-ctx.Done() + e.agentCount.Dec() + }() + + lg := meta.Log(ctx) + + listener := bufconn.Listen(bufferSize) + e.portMapping[addr] = listener + + options := []consumerd.ConsumerdServerOption{ + consumerd.WithUsageLimits(&metrics.UsageLimits{ + ConcurrentProcessLimit: int32(e.Config.Consumerd.UsageLimits.ConcurrentProcessLimit), + QueuePressureMultiplier: e.Config.Consumerd.UsageLimits.QueuePressureMultiplier, + QueueRejectMultiplier: e.Config.Consumerd.UsageLimits.QueueRejectMultiplier, + }), + consumerd.WithToolchainFinders(toolchains.FinderWithOptions{ + Finder: testutil.TestToolchainFinder{}, + }), + consumerd.WithToolchainRunners(testtoolchain.AddToStore), + } + if addr := e.Config.Monitor.ListenAddress; addr != "" { + cc := dial(ctx, e.portMapping[addr]) + options = append(options, + consumerd.WithMonitorClient(types.NewMonitorClient(cc))) + } + + if addr := e.Config.Scheduler.ListenAddress; addr != "" { + cc := dial(ctx, e.portMapping[addr]) + options = append(options, + consumerd.WithSchedulerClient(types.NewSchedulerClient(cc))) + } + srv := servers.NewServer(ctx) + cd := consumerd.NewConsumerdServer(ctx, options...) 
+ types.RegisterConsumerdServer(srv, cd) + + go cd.StartMetricsProvider() + go func() { + if err := srv.Serve(listener); err != nil { + lg.Info(err) + } + }() + + return ctx, cancel +} + +func (e *Environment) SpawnMonitor() (context.Context, context.CancelFunc) { + ctx := meta.NewContextWithParent(e.envContext, + meta.WithProvider(identity.Component, meta.WithValue(types.Monitor)), + meta.WithProvider(identity.UUID), + meta.WithProvider(logkc.Logger, meta.WithValue( + logkc.New(types.Monitor, + logkc.WithName("a"), + ), + )), + meta.WithProvider(tracing.Tracer), + ) + ctx, cancel := context.WithCancel(ctx) + lg := meta.Log(ctx) + + srv := servers.NewServer(ctx) + + mon := monitor.NewMonitorServer(ctx, monitor.InMemoryStoreCreator) + types.RegisterMonitorServer(srv, mon) + + go func() { + if err := srv.Serve(e.portMapping[e.Config.Monitor.ListenAddress]); err != nil { + lg.Info(err) + } + }() + return ctx, cancel +} + +func (e *Environment) SpawnCache() (context.Context, context.CancelFunc) { + ctx := meta.NewContextWithParent(e.envContext, + meta.WithProvider(identity.Component, meta.WithValue(types.Cache)), + meta.WithProvider(identity.UUID), + meta.WithProvider(logkc.Logger, meta.WithValue( + logkc.New(types.Cache, + logkc.WithName("a"), + ), + )), + meta.WithProvider(tracing.Tracer), + ) + ctx, cancel := context.WithCancel(ctx) + lg := meta.Log(ctx) + + options := []cachesrv.CacheServerOption{} + if addr := e.Config.Monitor.ListenAddress; addr != "" { + cc := dial(ctx, e.portMapping[addr]) + options = append(options, + cachesrv.WithMonitorClient(types.NewMonitorClient(cc))) + } + + providers := []storage.StorageProvider{} + if e.Config.Cache.LocalStorage != nil { + providers = append(providers, + storage.NewVolatileStorageProvider(ctx, *e.Config.Cache.LocalStorage)) + } + if e.Config.Cache.RemoteStorage != nil { + providers = append(providers, + storage.NewS3StorageProvider(ctx, *e.Config.Cache.RemoteStorage)) + } + options = append(options, cachesrv.WithStorageProvider( + storage.NewChainStorageProvider(ctx, providers...), + )) + + cacheSrv := cachesrv.NewCacheServer(ctx, e.Config.Cache, options...) 
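+	// create a gRPC server for the cache, register the service, start its metrics provider, and serve on the listener mapped to the cache's listen address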
+ srv := servers.NewServer(ctx) + + types.RegisterCacheServer(srv, cacheSrv) + go cacheSrv.StartMetricsProvider() + + go func() { + err := srv.Serve(e.portMapping[e.Config.Cache.ListenAddress]) + if err != nil { + lg.With(zap.Error(err)).Error("GRPC error") + } + }() + return ctx, cancel +} + +func NewDefaultEnvironment() *Environment { + schedulerAddr := "9000" + monitorAddr := "9001" + cacheAddr := "9002" + + return &Environment{ + Config: &config.KubeccSpec{ + Global: config.GlobalSpec{ + LogLevel: "debug", + }, + Agent: config.AgentSpec{ + UsageLimits: config.UsageLimitsSpec{ + ConcurrentProcessLimit: 32, + QueuePressureMultiplier: 1.0, + QueueRejectMultiplier: 2.0, + }, + SchedulerAddress: schedulerAddr, + MonitorAddress: monitorAddr, + }, + Scheduler: config.SchedulerSpec{ + MonitorAddress: monitorAddr, + CacheAddress: cacheAddr, + ListenAddress: schedulerAddr, + }, + Monitor: config.MonitorSpec{ + ListenAddress: monitorAddr, + }, + Cache: config.CacheSpec{ + ListenAddress: cacheAddr, + MonitorAddress: monitorAddr, + LocalStorage: &config.LocalStorageSpec{ + Limits: config.StorageLimitsSpec{ + Memory: "1Gi", + }, + }, + }, + Consumerd: config.ConsumerdSpec{ + SchedulerAddress: schedulerAddr, + MonitorAddress: monitorAddr, + DisableTLS: true, + UsageLimits: config.UsageLimitsSpec{ + ConcurrentProcessLimit: 20, + }, + }, + }, + } +} + +func (e *Environment) Start() { + ctx := meta.NewContext( + meta.WithProvider(identity.Component, meta.WithValue(types.TestComponent)), + meta.WithProvider(identity.UUID), + meta.WithProvider(logkc.Logger), + meta.WithProvider(tracing.Tracer), + ) + ctx, cancel := context.WithCancel(ctx) + e.envContext = ctx + e.envCancel = cancel + + e.agentCount = atomic.NewInt32(0) + e.consumerdCount = atomic.NewInt32(0) + e.portMapping = make(map[string]*bufconn.Listener) + + for _, addr := range []string{ + e.Config.Cache.ListenAddress, + e.Config.Monitor.ListenAddress, + e.Config.Scheduler.ListenAddress, + } { + if addr != "" { + e.portMapping[addr] = bufconn.Listen(bufferSize) + } + } + tracer, _ := tracing.Start(e.envContext, types.TestComponent) + opentracing.SetGlobalTracer(tracer) +} + +func (e *Environment) WaitForServers(count int) { + wg := sync.WaitGroup{} + wg.Add(count) + monClient := e.NewMonitorClient() + listener := clients.NewListener(e.envContext, monClient) + listener.OnProviderAdded(func(pctx context.Context, uuid string) { + wg.Done() + }) + wg.Wait() +} + +func (e *Environment) Dial(addr string) *grpc.ClientConn { + return dial(e.envContext, e.portMapping[addr]) +} + +func (e *Environment) NewMonitorClient() types.MonitorClient { + return types.NewMonitorClient( + dial(e.envContext, e.portMapping[e.Config.Monitor.ListenAddress])) +} + +func (e *Environment) NewSchedulerClient() types.SchedulerClient { + return types.NewSchedulerClient( + dial(e.envContext, e.portMapping[e.Config.Scheduler.ListenAddress])) +} + +func (e *Environment) NewCacheClient() types.CacheClient { + return types.NewCacheClient( + dial(e.envContext, e.portMapping[e.Config.Cache.ListenAddress])) +} + +func (e *Environment) Shutdown() { + e.envCancel() +} diff --git a/pkg/util/msgpack.go b/pkg/util/msgpack.go deleted file mode 100644 index 394e300..0000000 --- a/pkg/util/msgpack.go +++ /dev/null @@ -1,24 +0,0 @@ -package util - -import ( - "bytes" - - "github.com/tinylib/msgp/msgp" -) - -func EncodeMsgp(e msgp.Encodable) []byte { - buf := new(bytes.Buffer) - w := msgp.NewWriter(buf) - if err := e.EncodeMsg(w); err != nil { - panic(err) - } - if err := w.Flush(); err != nil { - 
panic(err) - } - return buf.Bytes() -} - -func DecodeMsgp(buf []byte, into msgp.Decodable) error { - reader := msgp.NewReader(bytes.NewReader(buf)) - return into.DecodeMsg(reader) -} diff --git a/test/integration/integration.go b/test/integration/integration.go index 7b5f473..c82bdaa 100644 --- a/test/integration/integration.go +++ b/test/integration/integration.go @@ -252,12 +252,12 @@ func (tc *TestController) startConsumerd(cfg *metrics.UsageLimits) { }), consumerd.WithUsageLimits(cfg), consumerd.WithToolchainRunners(testtoolchain.AddToStore), - consumerd.WithSchedulerClient(schedulerClient, cc), + consumerd.WithSchedulerClient(schedulerClient), consumerd.WithMonitorClient(monitorClient), ) types.RegisterConsumerdServer(srv, d) - // mgr := servers.NewStreamManager(ctx, d) + // mgr := servers.NewStreamManager(ctx, d)s // go mgr.Run() go d.StartMetricsProvider() cdListener := dial(ctx, listener) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 3534511..48f71a1 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -71,7 +71,7 @@ var _ = Describe("Integration test", func() { }, }, } - Measure("Run test", func(b Benchmarker) { + PMeasure("Run test", func(b Benchmarker) { var tc *integration.TestController b.Time("Start components", func() { tc = integration.NewTestController(sctx) From 7406cc6db96a60bfa8bc0b9e9cfb445deac3cc7d Mon Sep 17 00:00:00 2001 From: cobalt77 <8194899+cobalt77@users.noreply.github.com> Date: Wed, 17 Mar 2021 17:56:02 -0400 Subject: [PATCH 05/12] Add code snippets for vscode --- .vscode/kubecc.code-snippets | 136 +++++++++++++++++++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 100644 .vscode/kubecc.code-snippets diff --git a/.vscode/kubecc.code-snippets b/.vscode/kubecc.code-snippets new file mode 100644 index 0000000..b52ea4b --- /dev/null +++ b/.vscode/kubecc.code-snippets @@ -0,0 +1,136 @@ +{ + // Place your kubecc workspace snippets here. Each snippet is defined under a snippet name and has a scope, prefix, body and + // description. Add comma separated ids of the languages where the snippet is applicable in the scope field. If scope + // is left empty or omitted, the snippet gets applied to all languages. The prefix is what is + // used to trigger the snippet and the body will be expanded and inserted. Possible variables are: + // $1, $2 for tab stops, $0 for the final cursor position, and ${1:label}, ${2:another} for placeholders. + // Placeholders with the same ids are connected. 
+ // Example: + // "Print to console": { + // "scope": "javascript,typescript", + // "prefix": "log", + // "body": [ + // "console.log('$1');", + // "$2" + // ], + // "description": "Log output to console" + // } + "Function Options": { + "scope": "go", + "prefix": "options", + "body": [ + "type $1Options struct {", + " $0", + "}", + "", + "type $1Option func(*$1Options)", + "", + "func (o *$1Options) Apply(opts ...$1Option) {", + " for _, op := range opts {", + " op(o)", + " }", + "}", + "", + "func With() $1Option {", + " return func(o *$1Options) {", + " // TODO", + " }", + "}", + ], + "description": "Scaffolding for function options" + }, + "With Option": { + "scope": "go", + "prefix": "with", + "body": [ + "func With() $1Option {", + " return func(o *$1Options) {", + " // TODO", + " }", + "}", + ], + "description": "Additional function option" + }, + "New Meta Context": { + "scope": "go", + "prefix": "newctx", + "body": [ + "ctx := meta.NewContext(", + " meta.WithProvider(identity.Component, meta.WithValue(types.$1)),", + " meta.WithProvider(identity.UUID),", + " meta.WithProvider(logkc.Logger),", + " meta.WithProvider(tracing.Tracer),", + " meta.WithProvider(host.SystemInfo),", + ")", + ] + }, + // Testing + "When": { + "scope": "go", + "prefix": "when", + "body": [ + "When(\"$1\", func() {", + " $0", + "})", + ], + }, + "Measure": { + "scope": "go", + "prefix": "measure", + "body": [ + "Measure(\"$1\", func(b Benchmarker) {", + " $0", + " b.Time(\"TODO\", func() {", + "", + " })", + "})", + ], + }, + "It": { + "scope": "go", + "prefix": "it", + "body": [ + "It(\"$1\", func() {", + " $0", + "})", + ] + }, + "Expect ... To Equal": { + "scope": "go", + "prefix": "ete", + "body": "Expect($1).To(Equal($2))", + }, + "Expect ... Not To Equal": { + "scope": "go", + "prefix": "ente", + "body": "Expect($1).NotTo(Equal($2))", + }, + "Expect ... To Receive": { + "scope": "go", + "prefix": "etrx", + "body": "Expect($1).To(Receive())", + }, + "Consistently Should Not Receive": { + "scope": "go", + "prefix": "csnrx", + "body": "Consistently($1).ShouldNot(Receive())", + }, + "Eventually Bool": { + "scope": "go", + "prefix": "evb", + "body": [ + "Eventually(func() bool {", + " $0", + "}).Should(BeTrue())", + ], + }, + "Eventually Int": { + "scope": "go", + "prefix": "evi", + "body": [ + "Eventually(func() int {", + " $1", + "}).Should(BeEquivalentTo($2))", + ], + }, +} \ No newline at end of file From 8f0eaf46089125c3c9f958cdc3acd0114a869438 Mon Sep 17 00:00:00 2001 From: cobalt77 <8194899+cobalt77@users.noreply.github.com> Date: Wed, 17 Mar 2021 18:52:27 -0400 Subject: [PATCH 06/12] add snippet generator --- .vscode/kubecc.code-snippets | 136 ----------------------------------- hack/make-snippets.go | 70 ++++++++++++++++++ hack/snippets.yaml | 81 +++++++++++++++++++++ 3 files changed, 151 insertions(+), 136 deletions(-) delete mode 100644 .vscode/kubecc.code-snippets create mode 100755 hack/make-snippets.go create mode 100644 hack/snippets.yaml diff --git a/.vscode/kubecc.code-snippets b/.vscode/kubecc.code-snippets deleted file mode 100644 index b52ea4b..0000000 --- a/.vscode/kubecc.code-snippets +++ /dev/null @@ -1,136 +0,0 @@ -{ - // Place your kubecc workspace snippets here. Each snippet is defined under a snippet name and has a scope, prefix, body and - // description. Add comma separated ids of the languages where the snippet is applicable in the scope field. If scope - // is left empty or omitted, the snippet gets applied to all languages. 
The prefix is what is - // used to trigger the snippet and the body will be expanded and inserted. Possible variables are: - // $1, $2 for tab stops, $0 for the final cursor position, and ${1:label}, ${2:another} for placeholders. - // Placeholders with the same ids are connected. - // Example: - // "Print to console": { - // "scope": "javascript,typescript", - // "prefix": "log", - // "body": [ - // "console.log('$1');", - // "$2" - // ], - // "description": "Log output to console" - // } - "Function Options": { - "scope": "go", - "prefix": "options", - "body": [ - "type $1Options struct {", - " $0", - "}", - "", - "type $1Option func(*$1Options)", - "", - "func (o *$1Options) Apply(opts ...$1Option) {", - " for _, op := range opts {", - " op(o)", - " }", - "}", - "", - "func With() $1Option {", - " return func(o *$1Options) {", - " // TODO", - " }", - "}", - ], - "description": "Scaffolding for function options" - }, - "With Option": { - "scope": "go", - "prefix": "with", - "body": [ - "func With() $1Option {", - " return func(o *$1Options) {", - " // TODO", - " }", - "}", - ], - "description": "Additional function option" - }, - "New Meta Context": { - "scope": "go", - "prefix": "newctx", - "body": [ - "ctx := meta.NewContext(", - " meta.WithProvider(identity.Component, meta.WithValue(types.$1)),", - " meta.WithProvider(identity.UUID),", - " meta.WithProvider(logkc.Logger),", - " meta.WithProvider(tracing.Tracer),", - " meta.WithProvider(host.SystemInfo),", - ")", - ] - }, - // Testing - "When": { - "scope": "go", - "prefix": "when", - "body": [ - "When(\"$1\", func() {", - " $0", - "})", - ], - }, - "Measure": { - "scope": "go", - "prefix": "measure", - "body": [ - "Measure(\"$1\", func(b Benchmarker) {", - " $0", - " b.Time(\"TODO\", func() {", - "", - " })", - "})", - ], - }, - "It": { - "scope": "go", - "prefix": "it", - "body": [ - "It(\"$1\", func() {", - " $0", - "})", - ] - }, - "Expect ... To Equal": { - "scope": "go", - "prefix": "ete", - "body": "Expect($1).To(Equal($2))", - }, - "Expect ... Not To Equal": { - "scope": "go", - "prefix": "ente", - "body": "Expect($1).NotTo(Equal($2))", - }, - "Expect ... 
To Receive": { - "scope": "go", - "prefix": "etrx", - "body": "Expect($1).To(Receive())", - }, - "Consistently Should Not Receive": { - "scope": "go", - "prefix": "csnrx", - "body": "Consistently($1).ShouldNot(Receive())", - }, - "Eventually Bool": { - "scope": "go", - "prefix": "evb", - "body": [ - "Eventually(func() bool {", - " $0", - "}).Should(BeTrue())", - ], - }, - "Eventually Int": { - "scope": "go", - "prefix": "evi", - "body": [ - "Eventually(func() int {", - " $1", - "}).Should(BeEquivalentTo($2))", - ], - }, -} \ No newline at end of file diff --git a/hack/make-snippets.go b/hack/make-snippets.go new file mode 100755 index 0000000..1f72861 --- /dev/null +++ b/hack/make-snippets.go @@ -0,0 +1,70 @@ +///usr/bin/true; exec /usr/bin/env go run "$0" "$@" +package main + +import ( + "encoding/json" + "fmt" + "log" + "os" + "path/filepath" + "strings" + + "sigs.k8s.io/yaml" +) + +type snippets struct { + Items map[string]snippet `json:",inline"` +} + +type snippet struct { + Scope string `json:"scope,omitempty"` + Prefix string `json:"prefix"` + Body string `json:"body"` + Description string `json:"description,omitempty"` +} + +func main() { + here, err := os.Getwd() + if err != nil { + log.Fatal(err) + } + for i := 0; i < 10; i++ { + if _, err := os.Stat(filepath.Join(here, "hack/snippets.yaml")); os.IsNotExist(err) { + here = filepath.Dir(here) + } + break + } + data, err := os.ReadFile(filepath.Join(here, "hack/snippets.yaml")) + if err != nil { + log.Fatal(err) + } + items := map[string]snippet{} + err = yaml.Unmarshal(data, &items) + if err != nil { + log.Fatal(err) + } + if len(items) == 0 { + log.Fatal("No snippets found") + } + for name, sn := range items { + if sn.Scope == "" { + sn.Scope = "go" + } + if sn.Description == "" { + sn.Description = name + } + sn.Body = strings.ReplaceAll(sn.Body, "\t", `\t`) + sn.Body = strings.ReplaceAll(sn.Body, "\n", `\n`) + items[name] = sn + } + jsonData, err := json.MarshalIndent(items, "", " ") + if err != nil { + log.Fatal(err) + } + os.MkdirAll("../.vscode", 0o775) + err = os.WriteFile("../.vscode/kubecc.code-snippets", jsonData, 0o644) + if err != nil { + log.Fatal(err) + } + fmt.Printf("Generated %d snippets\n", len(items)) +} diff --git a/hack/snippets.yaml b/hack/snippets.yaml new file mode 100644 index 0000000..9b00909 --- /dev/null +++ b/hack/snippets.yaml @@ -0,0 +1,81 @@ +Function Options: + prefix: options + body: |- + type $1Options struct { + $0 + } + + type $1Option func(*$1Options) + + func (o *$1Options) Apply(opts ...$1Option) { + for _, op := range opts { + op(o) + } + } + + func With() $1Option { + return func(o *$1Options) { + // TODO + } + } +With Option: + prefix: with + body: |- + func With() $1Option { + return func(o *$1Options) { + // TODO + } + } +New Meta Context: + prefix: newctx + body: |- + ctx := meta.NewContext( + meta.WithProvider(identity.Component, meta.WithValue(types.$1)), + meta.WithProvider(identity.UUID), + meta.WithProvider(logkc.Logger), + meta.WithProvider(tracing.Tracer), + meta.WithProvider(host.SystemInfo), + ) +When: + prefix: when + body: |- + When("$1", func() { + $0 + }) +Measure: + prefix: measure + body: |- + Measure("$1", func(b Benchmarker) { + $0 + b.Time("TODO", func() { + "", + }) + }) +It: + prefix: it + body: |- + It("$1", func() { + $0 + }) +Expect ... To Equal: + prefix: ete + body: Expect($1).To(Equal($2)) +Expect ... 
To Receive: + prefix: etr + body: Expect($1).To(Receive()) +Consistently Should Not Receive: + prefix: csnr + body: Consistently($1).ShouldNot(Receive() +Eventually Bool: + prefix: evb + body: |- + Eventually(func() bool { + $0 + }).Should(BeTrue()) +Eventually Int: + prefix: evi + body: |- + Eventually(func() int { + $1 + }).Should(BeEquivalentTo($2)) + \ No newline at end of file From 941a6d4da93c000e9dde01d9cb82d6f32854198a Mon Sep 17 00:00:00 2001 From: cobalt77 <8194899+cobalt77@users.noreply.github.com> Date: Wed, 17 Mar 2021 19:07:04 -0400 Subject: [PATCH 07/12] update make-snippets.go --- hack/make-snippets.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/hack/make-snippets.go b/hack/make-snippets.go index 1f72861..a84ecbe 100755 --- a/hack/make-snippets.go +++ b/hack/make-snippets.go @@ -13,7 +13,7 @@ import ( ) type snippets struct { - Items map[string]snippet `json:",inline"` + Items map[string]*snippet `json:",inline"` } type snippet struct { @@ -23,6 +23,11 @@ type snippet struct { Description string `json:"description,omitempty"` } +var header = `// Code generated by make-snippets.go. DO NOT EDIT. +// Snippets should be edited in hack/snippets.yaml. +// Re-generate this file by running hack/make-snippets.go. +` + func main() { here, err := os.Getwd() if err != nil { @@ -53,16 +58,17 @@ func main() { if sn.Description == "" { sn.Description = name } - sn.Body = strings.ReplaceAll(sn.Body, "\t", `\t`) - sn.Body = strings.ReplaceAll(sn.Body, "\n", `\n`) + sn.Body = strings.ReplaceAll(sn.Body, " ", ` `) + sn.Body = strings.ReplaceAll(sn.Body, `\\n`, `\n`) items[name] = sn } jsonData, err := json.MarshalIndent(items, "", " ") if err != nil { log.Fatal(err) } - os.MkdirAll("../.vscode", 0o775) - err = os.WriteFile("../.vscode/kubecc.code-snippets", jsonData, 0o644) + os.MkdirAll(filepath.Join(here, ".vscode"), 0o775) + err = os.WriteFile(filepath.Join(here, ".vscode/kubecc.code-snippets"), + append([]byte(header), jsonData...), 0o644) if err != nil { log.Fatal(err) } From e489d90f7fabdb6dd7d2e27b3512acba9418ed8b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Mar 2021 07:17:38 +0000 Subject: [PATCH 08/12] Bump github.com/onsi/ginkgo from 1.15.1 to 1.15.2 Bumps [github.com/onsi/ginkgo](https://github.com/onsi/ginkgo) from 1.15.1 to 1.15.2. 
- [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v1.15.1...v1.15.2) Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index cc232ed..98ef5c7 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( github.com/minio/md5-simd v1.1.2 github.com/minio/minio-go/v7 v7.0.10 github.com/mitchellh/copystructure v1.1.1 // indirect - github.com/onsi/ginkgo v1.15.1 + github.com/onsi/ginkgo v1.15.2 github.com/onsi/gomega v1.11.0 github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 @@ -47,7 +47,7 @@ require ( go.uber.org/goleak v1.1.10 // indirect go.uber.org/zap v1.16.0 golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 // indirect - golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5 // indirect + golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 // indirect golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect diff --git a/go.sum b/go.sum index c085c1c..0a4670b 100644 --- a/go.sum +++ b/go.sum @@ -469,6 +469,8 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.15.1 h1:DsXNrKujDlkMS9Rsxmd+Fg7S6Kc5lhE+qX8tY6laOxc= github.com/onsi/ginkgo v1.15.1/go.mod h1:Dd6YFfwBW84ETqqtL0CPyPXillHgY6XhQH3uuCCTr/o= +github.com/onsi/ginkgo v1.15.2 h1:l77YT15o814C2qVL47NOyjV/6RbaP7kKdrvZnxQ3Org= +github.com/onsi/ginkgo v1.15.2/go.mod h1:Dd6YFfwBW84ETqqtL0CPyPXillHgY6XhQH3uuCCTr/o= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= From fab93f5bbd98d1d2007c2b5294578fe7bd365c92 Mon Sep 17 00:00:00 2001 From: cobalt77 <8194899+cobalt77@users.noreply.github.com> Date: Wed, 17 Mar 2021 21:59:02 -0400 Subject: [PATCH 09/12] test environment updates --- go.mod | 2 +- go.sum | 2 + hack/snippets.yaml | 6 + internal/testutil/toolchain/send.go | 2 +- pkg/apps/agent/server.go | 44 +- pkg/apps/consumerd/consumerd_suite_test.go | 1 - pkg/apps/consumerd/queue_test.go | 4 +- pkg/apps/consumerd/server.go | 12 +- pkg/apps/monitor/server.go | 4 +- pkg/clients/listener.go | 11 +- pkg/metrics/noop.go | 2 + pkg/metrics/types.go | 1 + pkg/test/environment.go | 407 ++++++------- test/integration/integration.go | 628 ++++++++++----------- test/integration/integration_suite_test.go | 3 + test/integration/integration_test.go | 105 ++-- 16 files changed, 628 insertions(+), 606 deletions(-) diff --git a/go.mod b/go.mod index 98ef5c7..18d9255 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/google/uuid v1.2.0 github.com/googleapis/gnostic v0.5.4 // indirect github.com/huandu/xstrings v1.3.2 // indirect - github.com/imdario/mergo v0.3.11 // indirect + github.com/imdario/mergo v0.3.12 // indirect github.com/karlseguin/ccache/v2 v2.0.8 github.com/mattn/go-runewidth v0.0.10 // indirect github.com/minio/md5-simd v1.1.2 diff --git a/go.sum b/go.sum index 0a4670b..e069982 100644 --- a/go.sum +++ b/go.sum @@ -349,6 +349,8 @@ github.com/imdario/mergo v0.3.5/go.mod 
h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= diff --git a/hack/snippets.yaml b/hack/snippets.yaml index 9b00909..c99e727 100644 --- a/hack/snippets.yaml +++ b/hack/snippets.yaml @@ -57,6 +57,12 @@ It: It("$1", func() { $0 }) +Specify: + prefix: specify + body: |- + Specify("$1", func() { + $0 + }) Expect ... To Equal: prefix: ete body: Expect($1).To(Equal($2)) diff --git a/internal/testutil/toolchain/send.go b/internal/testutil/toolchain/send.go index 13d37b9..4910da6 100644 --- a/internal/testutil/toolchain/send.go +++ b/internal/testutil/toolchain/send.go @@ -8,7 +8,7 @@ import ( ) type sendRemoteRunnerManager struct { - client types.SchedulerClient + client types.SchedulerClient // todo: this needs to be a stream } func (m sendRemoteRunnerManager) Run( diff --git a/pkg/apps/agent/server.go b/pkg/apps/agent/server.go index 993afa6..3183bb4 100644 --- a/pkg/apps/agent/server.go +++ b/pkg/apps/agent/server.go @@ -18,14 +18,17 @@ import ( ) type AgentServer struct { - AgentServerOptions - - srvContext context.Context - executor run.Executor - lg *zap.SugaredLogger - tcStore *toolchains.Store - tcRunStore *run.ToolchainRunnerStore - metricsProvider metrics.Provider + srvContext context.Context + executor run.Executor + lg *zap.SugaredLogger + tcStore *toolchains.Store + tcRunStore *run.ToolchainRunnerStore + metricsProvider metrics.Provider + toolchainFinders []toolchains.FinderWithOptions + toolchainRunners []run.StoreAddFunc + schedulerClient types.SchedulerClient + monitorClient types.MonitorClient + usageLimits *metrics.UsageLimits } type AgentServerOptions struct { @@ -87,12 +90,23 @@ func NewAgentServer( } srv := &AgentServer{ - AgentServerOptions: options, - srvContext: ctx, - lg: meta.Log(ctx), - tcStore: toolchains.Aggregate(ctx, options.toolchainFinders...), - executor: run.NewQueuedExecutor(run.WithUsageLimits(options.usageLimits)), - tcRunStore: runStore, + srvContext: ctx, + lg: meta.Log(ctx), + tcStore: toolchains.Aggregate(ctx, options.toolchainFinders...), + executor: run.NewQueuedExecutor(run.WithUsageLimits(options.usageLimits)), + tcRunStore: runStore, + toolchainFinders: options.toolchainFinders, + toolchainRunners: options.toolchainRunners, + monitorClient: options.monitorClient, + usageLimits: options.usageLimits, + schedulerClient: options.schedulerClient, + } + + if options.monitorClient != nil { + srv.metricsProvider = clients.NewMonitorProvider(ctx, options.monitorClient, + clients.Buffered|clients.Discard) + } else { + srv.metricsProvider = metrics.NewNoopProvider() } mgr := servers.NewStreamManager(ctx, srv) @@ -115,8 +129,6 @@ func (s *AgentServer) postTaskStatus() { func (s *AgentServer) StartMetricsProvider() { s.lg.Info("Starting metrics provider") - s.metricsProvider = clients.NewMonitorProvider(s.srvContext, s.monitorClient, - 
clients.Buffered|clients.Discard) s.postUsageLimits() fastTimer := util.NewJitteredTimer(time.Second/6, 2.0) diff --git a/pkg/apps/consumerd/consumerd_suite_test.go b/pkg/apps/consumerd/consumerd_suite_test.go index 2964a04..03659fd 100644 --- a/pkg/apps/consumerd/consumerd_suite_test.go +++ b/pkg/apps/consumerd/consumerd_suite_test.go @@ -17,7 +17,6 @@ func TestConsumerd(t *testing.T) { var _ = BeforeSuite(func() { testEnv = test.NewDefaultEnvironment() - testEnv.Start() }) var _ = AfterSuite(func() { diff --git a/pkg/apps/consumerd/queue_test.go b/pkg/apps/consumerd/queue_test.go index 340e340..f712af2 100644 --- a/pkg/apps/consumerd/queue_test.go +++ b/pkg/apps/consumerd/queue_test.go @@ -91,7 +91,7 @@ var _ = Describe("Split Queue", func() { } BeforeEach(func() { - schedulerClient := testEnv.NewSchedulerClient() + schedulerClient := testEnv.NewSchedulerClient(testCtx) Expect(len(taskPool)).To(Equal(0)) Expect(cap(taskPool)).To(Equal(numTasks)) @@ -131,7 +131,7 @@ var _ = Describe("Split Queue", func() { }) PSpecify("when no scheduler is available, the queue should run all tasks locally", func() { - sq := consumerd.NewSplitQueue(testCtx, testEnv.NewMonitorClient()) + sq := consumerd.NewSplitQueue(testCtx, testEnv.NewMonitorClient(testCtx)) for task := range taskPool { sq.In() <- task } diff --git a/pkg/apps/consumerd/server.go b/pkg/apps/consumerd/server.go index 0f9c79c..c3fc2a6 100644 --- a/pkg/apps/consumerd/server.go +++ b/pkg/apps/consumerd/server.go @@ -17,9 +17,7 @@ import ( "go.uber.org/atomic" "go.uber.org/zap" - "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" "google.golang.org/grpc/status" ) @@ -34,7 +32,6 @@ type consumerdServer struct { storeUpdateCh chan struct{} schedulerClient types.SchedulerClient metricsProvider metrics.Provider - connection *grpc.ClientConn localExecutor run.Executor remoteExecutor run.Executor queue *SplitQueue @@ -116,11 +113,9 @@ func NewConsumerdServer( numConsumers: atomic.NewInt32(0), localTasksCompleted: atomic.NewInt64(0), queue: NewSplitQueue(ctx, options.monitorClient), + schedulerClient: options.schedulerClient, } - if options.schedulerClient != nil { - srv.schedulerClient = options.schedulerClient - } if options.monitorClient != nil { srv.metricsProvider = clients.NewMonitorProvider(ctx, options.monitorClient, clients.Buffered|clients.Discard) @@ -130,11 +125,6 @@ func NewConsumerdServer( return srv } -func (c *consumerdServer) schedulerConnected() bool { - return c.schedulerClient != nil && - c.connection.GetState() == connectivity.Ready -} - func (c *consumerdServer) applyToolchainToReq(req *types.RunRequest) error { path := req.GetPath() if path == "" { diff --git a/pkg/apps/monitor/server.go b/pkg/apps/monitor/server.go index d1c70ee..12fb22d 100644 --- a/pkg/apps/monitor/server.go +++ b/pkg/apps/monitor/server.go @@ -74,7 +74,7 @@ func (m *MonitorServer) runPrometheusListener() { } }() - cc, err := servers.Dial(m.srvContext, "bufconn", + cc, err := servers.Dial(m.srvContext, meta.UUID(m.srvContext), servers.WithDialOpts( grpc.WithContextDialer( func(c context.Context, s string) (net.Conn, error) { @@ -372,7 +372,7 @@ func (m *MonitorServer) Whois( return &types.WhoisResponse{ UUID: req.GetUUID(), Address: info.Address, - Component: types.Component(info.Component), + Component: info.Component, }, nil } return nil, status.Error(codes.NotFound, diff --git a/pkg/clients/listener.go b/pkg/clients/listener.go index bae562e..bc39877 100644 --- a/pkg/clients/listener.go +++ 
b/pkg/clients/listener.go @@ -22,6 +22,7 @@ import ( type monitorListener struct { ctx context.Context + cancel context.CancelFunc monClient types.MonitorClient lg *zap.SugaredLogger streamOpts []servers.StreamManagerOption @@ -34,8 +35,10 @@ func NewListener( client types.MonitorClient, streamOpts ...servers.StreamManagerOption, ) metrics.Listener { + ctx, cancel := context.WithCancel(ctx) listener := &monitorListener{ ctx: ctx, + cancel: cancel, lg: meta.Log(ctx), monClient: client, knownProviders: make(map[string]context.CancelFunc), @@ -49,7 +52,7 @@ func (l *monitorListener) OnProviderAdded(handler func(context.Context, string)) doUpdate := func(providers *metrics.Providers) { for uuid := range providers.Items { if _, ok := l.knownProviders[uuid]; !ok { - pctx, cancel := context.WithCancel(context.Background()) + pctx, cancel := context.WithCancel(l.ctx) l.knownProviders[uuid] = cancel go handler(pctx, uuid) } @@ -97,7 +100,7 @@ func (cl *changeListener) HandleStream(clientStream grpc.ClientStream) error { } for { any, err := stream.Recv() - if errors.Is(err, io.EOF) { + if errors.Is(err, io.EOF) || errors.Is(err, context.Canceled) { lg.Debug(err) return nil } @@ -182,3 +185,7 @@ func (l *monitorListener) OnValueChanged( go mgr.Run() return cl } + +func (l *monitorListener) Stop() { + l.cancel() +} diff --git a/pkg/metrics/noop.go b/pkg/metrics/noop.go index c8df2da..fd3f225 100644 --- a/pkg/metrics/noop.go +++ b/pkg/metrics/noop.go @@ -29,6 +29,8 @@ func (noopListener) OnValueChanged(string, interface{}) ChangeListener { func (noopListener) OnProviderAdded(func(context.Context, string)) {} +func (noopListener) Stop() {} + type noopChangeListener struct{} func (noopChangeListener) TryConnect() (grpc.ClientStream, error) { diff --git a/pkg/metrics/types.go b/pkg/metrics/types.go index 158c440..6f47a15 100644 --- a/pkg/metrics/types.go +++ b/pkg/metrics/types.go @@ -28,6 +28,7 @@ type Provider interface { type Listener interface { OnValueChanged(bucket string, handler interface{}) ChangeListener OnProviderAdded(func(context.Context, string)) + Stop() } type ChangeListener interface { diff --git a/pkg/test/environment.go b/pkg/test/environment.go index d93f7ef..d3280f4 100644 --- a/pkg/test/environment.go +++ b/pkg/test/environment.go @@ -3,7 +3,7 @@ package test import ( "context" "net" - "sync" + "time" "github.com/cobalt77/kubecc/internal/logkc" "github.com/cobalt77/kubecc/internal/testutil" @@ -13,7 +13,6 @@ import ( "github.com/cobalt77/kubecc/pkg/apps/consumerd" "github.com/cobalt77/kubecc/pkg/apps/monitor" "github.com/cobalt77/kubecc/pkg/apps/scheduler" - "github.com/cobalt77/kubecc/pkg/clients" "github.com/cobalt77/kubecc/pkg/config" "github.com/cobalt77/kubecc/pkg/host" "github.com/cobalt77/kubecc/pkg/identity" @@ -24,19 +23,22 @@ import ( "github.com/cobalt77/kubecc/pkg/toolchains" "github.com/cobalt77/kubecc/pkg/tracing" "github.com/cobalt77/kubecc/pkg/types" - "github.com/google/uuid" - "github.com/opentracing/opentracing-go" + mapset "github.com/deckarep/golang-set" + "github.com/imdario/mergo" "go.uber.org/atomic" "go.uber.org/zap" "google.golang.org/grpc" + "google.golang.org/grpc/reflection" + reftypes "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" "google.golang.org/grpc/test/bufconn" ) type Environment struct { - Config *config.KubeccSpec + defaultConfig config.KubeccSpec envContext context.Context envCancel context.CancelFunc - portMapping map[string]*bufconn.Listener + listener *bufconn.Listener + server *grpc.Server agentCount *atomic.Int32 
consumerdCount *atomic.Int32 } @@ -45,30 +47,53 @@ var ( bufferSize = 1024 * 1024 ) -func dial( - ctx context.Context, - dialer *bufconn.Listener, -) *grpc.ClientConn { - // the uuid here is not relevant, just needs to be unique (pretty sure) - cc, err := servers.Dial(ctx, uuid.NewString(), servers.WithDialOpts( - grpc.WithContextDialer( - func(context.Context, string) (net.Conn, error) { - return dialer.Dial() - }), - )) - if err != nil { - panic(err) +type SpawnOptions struct { + config config.KubeccSpec +} + +type SpawnOption func(*SpawnOptions) + +func (o *SpawnOptions) Apply(opts ...SpawnOption) { + for _, op := range opts { + op(o) + } +} + +func WithConfig(cfg interface{}) SpawnOption { + return func(o *SpawnOptions) { + switch conf := cfg.(type) { + case config.KubeccSpec: + o.config = conf + case config.AgentSpec: + o.config = config.KubeccSpec{Agent: conf} + case config.ConsumerdSpec: + o.config = config.KubeccSpec{Consumerd: conf} + case config.SchedulerSpec: + o.config = config.KubeccSpec{Scheduler: conf} + case config.CacheSpec: + o.config = config.KubeccSpec{Cache: conf} + case config.MonitorSpec: + o.config = config.KubeccSpec{Monitor: conf} + } } - return cc } -func (e *Environment) SpawnAgent() (context.Context, context.CancelFunc) { +func (e *Environment) SpawnAgent(opts ...SpawnOption) (context.Context, context.CancelFunc) { + so := SpawnOptions{ + config: e.defaultConfig, + } + so.Apply(opts...) + cfg := e.defaultConfig + if err := mergo.Merge(&cfg, so.config); err != nil { + panic(err) + } + ctx := meta.NewContextWithParent(e.envContext, meta.WithProvider(identity.Component, meta.WithValue(types.Agent)), meta.WithProvider(identity.UUID), meta.WithProvider(logkc.Logger, meta.WithValue( logkc.New(types.Agent, - logkc.WithName(string(rune('a'+e.agentCount.Load()))), + logkc.WithName(string('a'+e.agentCount.Load())), ), )), meta.WithProvider(tracing.Tracer), @@ -83,25 +108,16 @@ func (e *Environment) SpawnAgent() (context.Context, context.CancelFunc) { options := []agent.AgentServerOption{ agent.WithUsageLimits(&metrics.UsageLimits{ - ConcurrentProcessLimit: int32(e.Config.Agent.UsageLimits.ConcurrentProcessLimit), - QueuePressureMultiplier: e.Config.Agent.UsageLimits.QueuePressureMultiplier, - QueueRejectMultiplier: e.Config.Agent.UsageLimits.QueueRejectMultiplier, + ConcurrentProcessLimit: int32(cfg.Agent.UsageLimits.ConcurrentProcessLimit), + QueuePressureMultiplier: cfg.Agent.UsageLimits.QueuePressureMultiplier, + QueueRejectMultiplier: cfg.Agent.UsageLimits.QueueRejectMultiplier, }), agent.WithToolchainFinders(toolchains.FinderWithOptions{ Finder: testutil.TestToolchainFinder{}, }), agent.WithToolchainRunners(testtoolchain.AddToStore), - } - if addr := e.Config.Monitor.ListenAddress; addr != "" { - cc := dial(ctx, e.portMapping[addr]) - options = append(options, - agent.WithMonitorClient(types.NewMonitorClient(cc))) - } - - if addr := e.Config.Scheduler.ListenAddress; addr != "" { - cc := dial(ctx, e.portMapping[addr]) - options = append(options, - agent.WithSchedulerClient(types.NewSchedulerClient(cc))) + agent.WithMonitorClient(types.NewMonitorClient(e.Dial(ctx))), + agent.WithSchedulerClient(types.NewSchedulerClient(e.Dial(ctx))), } agentSrv := agent.NewAgentServer(ctx, options...) 
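For reference, the reworked environment above is meant to be driven roughly like this: each Spawn* call starts from the environment's default KubeccSpec and merges an optional WithConfig override on top of it via mergo, and server-side components register on a single in-process gRPC server exposed over one bufconn listener. A rough sketch of that flow (the override values are arbitrary):

package example

import (
	"github.com/cobalt77/kubecc/pkg/config"
	"github.com/cobalt77/kubecc/pkg/test"
)

// spawnAgentWithOverrides starts a monitor, a scheduler, and one agent whose
// usage limits override the defaults; unset fields fall back to DefaultConfig.
func spawnAgentWithOverrides() {
	env := test.NewDefaultEnvironment()
	env.SpawnMonitor()
	env.SpawnScheduler()
	env.Serve()

	_, cancel := env.SpawnAgent(test.WithConfig(config.AgentSpec{
		UsageLimits: config.UsageLimitsSpec{
			ConcurrentProcessLimit: 4,
		},
	}))
	defer cancel()
}
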
@@ -111,7 +127,16 @@ func (e *Environment) SpawnAgent() (context.Context, context.CancelFunc) { return ctx, cancel } -func (e *Environment) SpawnScheduler() (context.Context, context.CancelFunc) { +func (e *Environment) SpawnScheduler(opts ...SpawnOption) (context.Context, context.CancelFunc) { + so := SpawnOptions{ + config: e.defaultConfig, + } + so.Apply(opts...) + cfg := e.defaultConfig + if err := mergo.Merge(&cfg, so.config); err != nil { + panic(err) + } + ctx := meta.NewContextWithParent(e.envContext, meta.WithProvider(identity.Component, meta.WithValue(types.Scheduler)), meta.WithProvider(identity.UUID), @@ -123,95 +148,79 @@ func (e *Environment) SpawnScheduler() (context.Context, context.CancelFunc) { meta.WithProvider(tracing.Tracer), ) ctx, cancel := context.WithCancel(ctx) - lg := meta.Log(ctx) - - srv := servers.NewServer(ctx) - options := []scheduler.SchedulerServerOption{} - if addr := e.Config.Monitor.ListenAddress; addr != "" { - cc := dial(ctx, e.portMapping[addr]) - options = append(options, - scheduler.WithMonitorClient(types.NewMonitorClient(cc))) - } - - if addr := e.Config.Cache.ListenAddress; addr != "" { - cc := dial(ctx, e.portMapping[addr]) - options = append(options, - scheduler.WithCacheClient(types.NewCacheClient(cc))) + options := []scheduler.SchedulerServerOption{ + scheduler.WithMonitorClient(types.NewMonitorClient(e.Dial(ctx))), + scheduler.WithCacheClient(types.NewCacheClient(e.Dial(ctx))), } sc := scheduler.NewSchedulerServer(ctx, options...) - types.RegisterSchedulerServer(srv, sc) + types.RegisterSchedulerServer(e.server, sc) go sc.StartMetricsProvider() - go func() { - if err := srv.Serve(e.portMapping[e.Config.Scheduler.ListenAddress]); err != nil { - lg.Info(err) - } - }() + return ctx, cancel } -func (e *Environment) SpawnConsumerd(addr string) (context.Context, context.CancelFunc) { +func (e *Environment) SpawnConsumerd(opts ...SpawnOption) (context.Context, context.CancelFunc) { + so := SpawnOptions{ + config: e.defaultConfig, + } + so.Apply(opts...) 
+ cfg := e.defaultConfig + if err := mergo.Merge(&cfg, so.config); err != nil { + panic(err) + } + ctx := meta.NewContextWithParent(e.envContext, meta.WithProvider(identity.Component, meta.WithValue(types.Consumerd)), meta.WithProvider(identity.UUID), meta.WithProvider(logkc.Logger, meta.WithValue( logkc.New(types.Consumerd, - logkc.WithName(string(rune('a'+e.consumerdCount.Load()))), + logkc.WithName(string('a'+e.consumerdCount.Load())), ), )), meta.WithProvider(tracing.Tracer), meta.WithProvider(host.SystemInfo), ) - e.agentCount.Inc() + e.consumerdCount.Inc() ctx, cancel := context.WithCancel(ctx) go func() { <-ctx.Done() - e.agentCount.Dec() + e.consumerdCount.Dec() }() - lg := meta.Log(ctx) - - listener := bufconn.Listen(bufferSize) - e.portMapping[addr] = listener - options := []consumerd.ConsumerdServerOption{ consumerd.WithUsageLimits(&metrics.UsageLimits{ - ConcurrentProcessLimit: int32(e.Config.Consumerd.UsageLimits.ConcurrentProcessLimit), - QueuePressureMultiplier: e.Config.Consumerd.UsageLimits.QueuePressureMultiplier, - QueueRejectMultiplier: e.Config.Consumerd.UsageLimits.QueueRejectMultiplier, + ConcurrentProcessLimit: int32(cfg.Consumerd.UsageLimits.ConcurrentProcessLimit), + QueuePressureMultiplier: cfg.Consumerd.UsageLimits.QueuePressureMultiplier, + QueueRejectMultiplier: cfg.Consumerd.UsageLimits.QueueRejectMultiplier, }), consumerd.WithToolchainFinders(toolchains.FinderWithOptions{ Finder: testutil.TestToolchainFinder{}, }), consumerd.WithToolchainRunners(testtoolchain.AddToStore), - } - if addr := e.Config.Monitor.ListenAddress; addr != "" { - cc := dial(ctx, e.portMapping[addr]) - options = append(options, - consumerd.WithMonitorClient(types.NewMonitorClient(cc))) + consumerd.WithMonitorClient(types.NewMonitorClient(e.Dial(ctx))), + consumerd.WithSchedulerClient(types.NewSchedulerClient(e.Dial(ctx))), } - if addr := e.Config.Scheduler.ListenAddress; addr != "" { - cc := dial(ctx, e.portMapping[addr]) - options = append(options, - consumerd.WithSchedulerClient(types.NewSchedulerClient(cc))) - } - srv := servers.NewServer(ctx) cd := consumerd.NewConsumerdServer(ctx, options...) - types.RegisterConsumerdServer(srv, cd) + types.RegisterConsumerdServer(e.server, cd) go cd.StartMetricsProvider() - go func() { - if err := srv.Serve(listener); err != nil { - lg.Info(err) - } - }() return ctx, cancel } -func (e *Environment) SpawnMonitor() (context.Context, context.CancelFunc) { +func (e *Environment) SpawnMonitor(opts ...SpawnOption) (context.Context, context.CancelFunc) { + so := SpawnOptions{ + config: e.defaultConfig, + } + so.Apply(opts...) 
+ cfg := e.defaultConfig + if err := mergo.Merge(&cfg, so.config); err != nil { + panic(err) + } + ctx := meta.NewContextWithParent(e.envContext, meta.WithProvider(identity.Component, meta.WithValue(types.Monitor)), meta.WithProvider(identity.UUID), @@ -223,22 +232,23 @@ func (e *Environment) SpawnMonitor() (context.Context, context.CancelFunc) { meta.WithProvider(tracing.Tracer), ) ctx, cancel := context.WithCancel(ctx) - lg := meta.Log(ctx) - - srv := servers.NewServer(ctx) mon := monitor.NewMonitorServer(ctx, monitor.InMemoryStoreCreator) - types.RegisterMonitorServer(srv, mon) + types.RegisterMonitorServer(e.server, mon) - go func() { - if err := srv.Serve(e.portMapping[e.Config.Monitor.ListenAddress]); err != nil { - lg.Info(err) - } - }() return ctx, cancel } -func (e *Environment) SpawnCache() (context.Context, context.CancelFunc) { +func (e *Environment) SpawnCache(opts ...SpawnOption) (context.Context, context.CancelFunc) { + so := SpawnOptions{ + config: e.defaultConfig, + } + so.Apply(opts...) + cfg := e.defaultConfig + if err := mergo.Merge(&cfg, so.config); err != nil { + panic(err) + } + ctx := meta.NewContextWithParent(e.envContext, meta.WithProvider(identity.Component, meta.WithValue(types.Cache)), meta.WithProvider(identity.UUID), @@ -250,92 +260,61 @@ func (e *Environment) SpawnCache() (context.Context, context.CancelFunc) { meta.WithProvider(tracing.Tracer), ) ctx, cancel := context.WithCancel(ctx) - lg := meta.Log(ctx) - options := []cachesrv.CacheServerOption{} - if addr := e.Config.Monitor.ListenAddress; addr != "" { - cc := dial(ctx, e.portMapping[addr]) - options = append(options, - cachesrv.WithMonitorClient(types.NewMonitorClient(cc))) + options := []cachesrv.CacheServerOption{ + cachesrv.WithMonitorClient(types.NewMonitorClient(e.Dial(ctx))), } providers := []storage.StorageProvider{} - if e.Config.Cache.LocalStorage != nil { + if cfg.Cache.LocalStorage != nil { providers = append(providers, - storage.NewVolatileStorageProvider(ctx, *e.Config.Cache.LocalStorage)) + storage.NewVolatileStorageProvider(ctx, *cfg.Cache.LocalStorage)) } - if e.Config.Cache.RemoteStorage != nil { + if cfg.Cache.RemoteStorage != nil { providers = append(providers, - storage.NewS3StorageProvider(ctx, *e.Config.Cache.RemoteStorage)) + storage.NewS3StorageProvider(ctx, *cfg.Cache.RemoteStorage)) } options = append(options, cachesrv.WithStorageProvider( storage.NewChainStorageProvider(ctx, providers...), )) - cacheSrv := cachesrv.NewCacheServer(ctx, e.Config.Cache, options...) - srv := servers.NewServer(ctx) + cacheSrv := cachesrv.NewCacheServer(ctx, cfg.Cache, options...) 
- types.RegisterCacheServer(srv, cacheSrv) + types.RegisterCacheServer(e.server, cacheSrv) go cacheSrv.StartMetricsProvider() - go func() { - err := srv.Serve(e.portMapping[e.Config.Cache.ListenAddress]) - if err != nil { - lg.With(zap.Error(err)).Error("GRPC error") - } - }() return ctx, cancel } -func NewDefaultEnvironment() *Environment { - schedulerAddr := "9000" - monitorAddr := "9001" - cacheAddr := "9002" - - return &Environment{ - Config: &config.KubeccSpec{ - Global: config.GlobalSpec{ - LogLevel: "debug", - }, - Agent: config.AgentSpec{ - UsageLimits: config.UsageLimitsSpec{ - ConcurrentProcessLimit: 32, - QueuePressureMultiplier: 1.0, - QueueRejectMultiplier: 2.0, - }, - SchedulerAddress: schedulerAddr, - MonitorAddress: monitorAddr, - }, - Scheduler: config.SchedulerSpec{ - MonitorAddress: monitorAddr, - CacheAddress: cacheAddr, - ListenAddress: schedulerAddr, - }, - Monitor: config.MonitorSpec{ - ListenAddress: monitorAddr, +func DefaultConfig() config.KubeccSpec { + return config.KubeccSpec{ + Global: config.GlobalSpec{ + LogLevel: "debug", + }, + Agent: config.AgentSpec{ + UsageLimits: config.UsageLimitsSpec{ + ConcurrentProcessLimit: 32, + QueuePressureMultiplier: 1.0, + QueueRejectMultiplier: 2.0, }, - Cache: config.CacheSpec{ - ListenAddress: cacheAddr, - MonitorAddress: monitorAddr, - LocalStorage: &config.LocalStorageSpec{ - Limits: config.StorageLimitsSpec{ - Memory: "1Gi", - }, + }, + Cache: config.CacheSpec{ + LocalStorage: &config.LocalStorageSpec{ + Limits: config.StorageLimitsSpec{ + Memory: "1Gi", }, }, - Consumerd: config.ConsumerdSpec{ - SchedulerAddress: schedulerAddr, - MonitorAddress: monitorAddr, - DisableTLS: true, - UsageLimits: config.UsageLimitsSpec{ - ConcurrentProcessLimit: 20, - }, + }, + Consumerd: config.ConsumerdSpec{ + DisableTLS: true, + UsageLimits: config.UsageLimitsSpec{ + ConcurrentProcessLimit: 20, }, }, } } -func (e *Environment) Start() { +func NewEnvironment(cfg config.KubeccSpec) *Environment { ctx := meta.NewContext( meta.WithProvider(identity.Component, meta.WithValue(types.TestComponent)), meta.WithProvider(identity.UUID), @@ -343,54 +322,96 @@ func (e *Environment) Start() { meta.WithProvider(tracing.Tracer), ) ctx, cancel := context.WithCancel(ctx) - e.envContext = ctx - e.envCancel = cancel - - e.agentCount = atomic.NewInt32(0) - e.consumerdCount = atomic.NewInt32(0) - e.portMapping = make(map[string]*bufconn.Listener) - - for _, addr := range []string{ - e.Config.Cache.ListenAddress, - e.Config.Monitor.ListenAddress, - e.Config.Scheduler.ListenAddress, - } { - if addr != "" { - e.portMapping[addr] = bufconn.Listen(bufferSize) + + return &Environment{ + defaultConfig: cfg, + envContext: ctx, + envCancel: cancel, + listener: bufconn.Listen(bufferSize), + server: servers.NewServer(ctx), + agentCount: atomic.NewInt32(0), + consumerdCount: atomic.NewInt32(0), + } +} + +func NewDefaultEnvironment() *Environment { + return NewEnvironment(DefaultConfig()) +} + +func (e *Environment) Serve() { + reflection.Register(e.server) + go func() { + err := e.server.Serve(e.listener) + if err != nil { + meta.Log(e.envContext).Error(err) } + }() +} + +func (e *Environment) WaitForServices(names []string) { + c := reftypes.NewServerReflectionClient(e.Dial(e.envContext)) + stream, err := c.ServerReflectionInfo(e.envContext) + if err != nil { + panic(err) + } + for { + err := stream.Send(&reftypes.ServerReflectionRequest{ + MessageRequest: &reftypes.ServerReflectionRequest_ListServices{ListServices: ""}, + }) + if err != nil { + panic(err) + } + 
response, err := stream.Recv() + if err != nil { + panic(err) + } + list := response.MessageResponse.(*reftypes.ServerReflectionResponse_ListServicesResponse) + services := mapset.NewSet() + for _, svc := range list.ListServicesResponse.Service { + services.Add(svc.GetName()) + } + values := []interface{}{} + for _, name := range names { + values = append(values, name) + } + if services.Contains(values...) { + return + } + meta.Log(e.envContext).With( + zap.Any("have", services), + zap.Any("want", names), + ).Info("Waiting for services") + time.Sleep(250 * time.Millisecond) } - tracer, _ := tracing.Start(e.envContext, types.TestComponent) - opentracing.SetGlobalTracer(tracer) } -func (e *Environment) WaitForServers(count int) { - wg := sync.WaitGroup{} - wg.Add(count) - monClient := e.NewMonitorClient() - listener := clients.NewListener(e.envContext, monClient) - listener.OnProviderAdded(func(pctx context.Context, uuid string) { - wg.Done() - }) - wg.Wait() +func (e *Environment) Dial(ctx context.Context) *grpc.ClientConn { + cc, err := servers.Dial(ctx, "bufconn", servers.WithDialOpts( + grpc.WithContextDialer( + func(context.Context, string) (net.Conn, error) { + return e.listener.Dial() + }), + )) + if err != nil { + panic(err) + } + return cc } -func (e *Environment) Dial(addr string) *grpc.ClientConn { - return dial(e.envContext, e.portMapping[addr]) +func (e *Environment) NewMonitorClient(ctx context.Context) types.MonitorClient { + return types.NewMonitorClient(e.Dial(ctx)) } -func (e *Environment) NewMonitorClient() types.MonitorClient { - return types.NewMonitorClient( - dial(e.envContext, e.portMapping[e.Config.Monitor.ListenAddress])) +func (e *Environment) NewSchedulerClient(ctx context.Context) types.SchedulerClient { + return types.NewSchedulerClient(e.Dial(ctx)) } -func (e *Environment) NewSchedulerClient() types.SchedulerClient { - return types.NewSchedulerClient( - dial(e.envContext, e.portMapping[e.Config.Scheduler.ListenAddress])) +func (e *Environment) NewCacheClient(ctx context.Context) types.CacheClient { + return types.NewCacheClient(e.Dial(ctx)) } -func (e *Environment) NewCacheClient() types.CacheClient { - return types.NewCacheClient( - dial(e.envContext, e.portMapping[e.Config.Cache.ListenAddress])) +func (e *Environment) NewConsumerdClient(ctx context.Context) types.ConsumerdClient { + return types.NewConsumerdClient(e.Dial(ctx)) } func (e *Environment) Shutdown() { diff --git a/test/integration/integration.go b/test/integration/integration.go index c82bdaa..74460d8 100644 --- a/test/integration/integration.go +++ b/test/integration/integration.go @@ -1,332 +1,300 @@ package integration -import ( - "context" - "net" - "sync" - - "github.com/cobalt77/kubecc/internal/logkc" - "github.com/cobalt77/kubecc/internal/testutil" - testtoolchain "github.com/cobalt77/kubecc/internal/testutil/toolchain" - agent "github.com/cobalt77/kubecc/pkg/apps/agent" - "github.com/cobalt77/kubecc/pkg/apps/cachesrv" - consumerd "github.com/cobalt77/kubecc/pkg/apps/consumerd" - "github.com/cobalt77/kubecc/pkg/apps/monitor" - scheduler "github.com/cobalt77/kubecc/pkg/apps/scheduler" - "github.com/cobalt77/kubecc/pkg/clients" - "github.com/cobalt77/kubecc/pkg/config" - "github.com/cobalt77/kubecc/pkg/host" - "github.com/cobalt77/kubecc/pkg/identity" - "github.com/cobalt77/kubecc/pkg/meta" - "github.com/cobalt77/kubecc/pkg/metrics" - "github.com/cobalt77/kubecc/pkg/servers" - "github.com/cobalt77/kubecc/pkg/storage" - "github.com/cobalt77/kubecc/pkg/toolchains" - 
"github.com/cobalt77/kubecc/pkg/tracing" - "github.com/cobalt77/kubecc/pkg/types" - "github.com/google/uuid" - "github.com/opentracing/opentracing-go" - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/test/bufconn" - ctrl "sigs.k8s.io/controller-runtime" -) - -const bufSize = 1024 * 1024 - -type TestController struct { - Consumers []types.ConsumerdClient - ctx context.Context - cancel context.CancelFunc - agentListeners map[string]*bufconn.Listener - agentListenersLock *sync.Mutex - schedListener *bufconn.Listener - monListener *bufconn.Listener - cacheListener *bufconn.Listener -} - -func NewTestController(ctx context.Context) *TestController { - ctx, cancel := context.WithCancel(ctx) - return &TestController{ - ctx: ctx, - cancel: cancel, - agentListeners: make(map[string]*bufconn.Listener), - agentListenersLock: &sync.Mutex{}, - Consumers: []types.ConsumerdClient{}, - } -} - -func dial( - ctx context.Context, - dialer *bufconn.Listener, -) *grpc.ClientConn { - cc, err := servers.Dial(ctx, uuid.NewString(), servers.WithDialOpts( - grpc.WithContextDialer( - func(context.Context, string) (net.Conn, error) { - return dialer.Dial() - }), - )) - if err != nil { - panic(err) - } - return cc -} - -func (tc *TestController) startAgent(cfg *metrics.UsageLimits) { - ctx := meta.NewContext( - meta.WithProvider(identity.Component, meta.WithValue(types.Agent)), - meta.WithProvider(identity.UUID), - meta.WithProvider(logkc.Logger, meta.WithValue( - logkc.New(types.Agent, - logkc.WithName(string(rune('a'+len(tc.agentListeners)))), - ), - )), - meta.WithProvider(tracing.Tracer), - meta.WithProvider(host.SystemInfo), - ) - lg := meta.Log(ctx) - srv := servers.NewServer(ctx) - - listener := bufconn.Listen(bufSize) - tc.agentListeners[meta.UUID(ctx)] = listener - cc := dial(ctx, tc.schedListener) - schedClient := types.NewSchedulerClient(cc) - cc = dial(ctx, tc.monListener) - monClient := types.NewMonitorClient(cc) - agentSrv := agent.NewAgentServer(ctx, - agent.WithSchedulerClient(schedClient), - agent.WithMonitorClient(monClient), - agent.WithUsageLimits(cfg), - agent.WithToolchainFinders(toolchains.FinderWithOptions{ - Finder: testutil.TestToolchainFinder{}, - }), - agent.WithToolchainRunners(testtoolchain.AddToStore), - ) - mgr := servers.NewStreamManager(ctx, agentSrv) - go mgr.Run() - go agentSrv.StartMetricsProvider() - go func() { - if err := srv.Serve(listener); err != nil { - lg.Info(err) - } - }() -} - -func (tc *TestController) startScheduler() { - ctx := meta.NewContext( - meta.WithProvider(identity.Component, meta.WithValue(types.Scheduler)), - meta.WithProvider(identity.UUID), - meta.WithProvider(logkc.Logger, meta.WithValue( - logkc.New(types.Scheduler, - logkc.WithName("a"), - ), - )), - meta.WithProvider(tracing.Tracer), - ) - lg := meta.Log(ctx) - - tc.schedListener = bufconn.Listen(bufSize) - srv := servers.NewServer(ctx) - - cc := dial(ctx, tc.monListener) - monClient := types.NewMonitorClient(cc) - - cc = dial(ctx, tc.cacheListener) - cacheClient := types.NewCacheClient(cc) - - sc := scheduler.NewSchedulerServer(ctx, - scheduler.WithMonitorClient(monClient), - scheduler.WithCacheClient(cacheClient), - ) - types.RegisterSchedulerServer(srv, sc) - go sc.StartMetricsProvider() - go func() { - if err := srv.Serve(tc.schedListener); err != nil { - lg.Info(err) - } - }() -} - -func (tc *TestController) startMonitor() { - ctx := meta.NewContext( - meta.WithProvider(identity.Component, meta.WithValue(types.Monitor)), - meta.WithProvider(identity.UUID), - 
meta.WithProvider(logkc.Logger, meta.WithValue( - logkc.New(types.Monitor, - logkc.WithName("a"), - ), - )), - meta.WithProvider(tracing.Tracer), - ) - lg := meta.Log(ctx) - - tc.monListener = bufconn.Listen(bufSize) - srv := servers.NewServer(ctx) - - mon := monitor.NewMonitorServer(ctx, monitor.InMemoryStoreCreator) - types.RegisterMonitorServer(srv, mon) - - go func() { - if err := srv.Serve(tc.monListener); err != nil { - lg.Info(err) - } - }() -} - -func (tc *TestController) startCache() { - ctx := meta.NewContext( - meta.WithProvider(identity.Component, meta.WithValue(types.Cache)), - meta.WithProvider(identity.UUID), - meta.WithProvider(logkc.Logger, meta.WithValue( - logkc.New(types.Cache, - logkc.WithName("a"), - ), - )), - meta.WithProvider(tracing.Tracer), - ) - lg := meta.Log(ctx) - - cc := dial(ctx, tc.monListener) - internalMonClient := types.NewMonitorClient(cc) - - tc.cacheListener = bufconn.Listen(bufSize) - srv := servers.NewServer(ctx) - cache := cachesrv.NewCacheServer(ctx, config.CacheSpec{}, - cachesrv.WithStorageProvider( - storage.NewChainStorageProvider(ctx, - storage.NewVolatileStorageProvider(ctx, - config.LocalStorageSpec{ - Limits: config.StorageLimitsSpec{ - Memory: "4Gi", - }, - }, - ), - // storage.NewS3StorageProvider(ctx, - // config.RemoteStorageSpec{ - // Endpoint: "192.168.0.84:9000", - // AccessKey: "minioadmin", - // SecretKey: "minioadmin", - // TLS: false, - // Bucket: "kubecc", - // }, - // ), - ), - ), - cachesrv.WithMonitorClient(internalMonClient), - ) - types.RegisterCacheServer(srv, cache) - go cache.StartMetricsProvider() - - go func() { - err := srv.Serve(tc.cacheListener) - if err != nil { - lg.With(zap.Error(err)).Error("GRPC error") - } - }() -} - -func (tc *TestController) startConsumerd(cfg *metrics.UsageLimits) { - ctx := meta.NewContext( - meta.WithProvider(identity.Component, meta.WithValue(types.Consumerd)), - meta.WithProvider(identity.UUID), - meta.WithProvider(logkc.Logger, meta.WithValue( - logkc.New(types.Consumerd, - logkc.WithName(string(rune('a'+len(tc.Consumers)))), - ), - )), - meta.WithProvider(tracing.Tracer), - meta.WithProvider(host.SystemInfo), - ) - lg := meta.Log(ctx) - - listener := bufconn.Listen(bufSize) - srv := servers.NewServer(ctx) - cc := dial(ctx, tc.schedListener) - schedulerClient := types.NewSchedulerClient(cc) - cc = dial(ctx, tc.monListener) - monitorClient := types.NewMonitorClient(cc) - - d := consumerd.NewConsumerdServer(ctx, - consumerd.WithToolchainFinders(toolchains.FinderWithOptions{ - Finder: testutil.TestToolchainFinder{}, - }), - consumerd.WithUsageLimits(cfg), - consumerd.WithToolchainRunners(testtoolchain.AddToStore), - consumerd.WithSchedulerClient(schedulerClient), - consumerd.WithMonitorClient(monitorClient), - ) - types.RegisterConsumerdServer(srv, d) - - // mgr := servers.NewStreamManager(ctx, d)s - // go mgr.Run() - go d.StartMetricsProvider() - cdListener := dial(ctx, listener) - cdClient := types.NewConsumerdClient(cdListener) - tc.Consumers = append(tc.Consumers, cdClient) - go func() { - if err := srv.Serve(listener); err != nil { - lg.Info(err) - } - }() -} - -type TestOptions struct { - Clients []*metrics.UsageLimits - Agents []*metrics.UsageLimits -} - -func (tc *TestController) Start(ops TestOptions) { - tc.agentListenersLock.Lock() - defer tc.agentListenersLock.Unlock() - - tracer, _ := tracing.Start(tc.ctx, types.TestComponent) - opentracing.SetGlobalTracer(tracer) - - tc.startMonitor() - tc.startCache() - tc.startScheduler() - for _, cfg := range ops.Agents { - 
tc.startAgent(cfg) - } - for _, cfg := range ops.Clients { - tc.startConsumerd(cfg) - } - - // Hook into the metrics server and wait for all the components to load up - cc, err := servers.Dial(tc.ctx, "127.0.0.1:9097") - if err != nil { - panic(err) - } - waitCtx, waitCancel := context.WithCancel(context.Background()) - wg := sync.WaitGroup{} - wg.Add( - len(ops.Agents) + - len(ops.Clients) + - 1 /*scheduler*/ + - 1 /*cache*/) - extClient := types.NewMonitorClient(cc) - listener := clients.NewListener(tc.ctx, extClient) - listener.OnProviderAdded(func(pctx context.Context, uuid string) { - resp, _ := extClient.Whois(tc.ctx, &types.WhoisRequest{ - UUID: uuid, - }) - if resp.Component == types.Monitor { - return - } - wg.Done() - select { - case <-pctx.Done(): - case <-waitCtx.Done(): - } - }) - wg.Wait() - waitCancel() -} - -func (tc *TestController) Teardown() { - tc.cancel() -} - -func (tc *TestController) Wait() { - <-ctrl.SetupSignalHandler().Done() -} +// const bufSize = 1024 * 1024 + +// type TestController struct { +// Consumers []types.ConsumerdClient +// ctx context.Context +// cancel context.CancelFunc +// agentListeners map[string]*bufconn.Listener +// agentListenersLock *sync.Mutex +// schedListener *bufconn.Listener +// monListener *bufconn.Listener +// cacheListener *bufconn.Listener +// } + +// func NewTestController(ctx context.Context) *TestController { +// ctx, cancel := context.WithCancel(ctx) +// return &TestController{ +// ctx: ctx, +// cancel: cancel, +// agentListeners: make(map[string]*bufconn.Listener), +// agentListenersLock: &sync.Mutex{}, +// Consumers: []types.ConsumerdClient{}, +// } +// } + +// func dial( +// ctx context.Context, +// dialer *bufconn.Listener, +// ) *grpc.ClientConn { +// cc, err := servers.Dial(ctx, uuid.NewString(), servers.WithDialOpts( +// grpc.WithContextDialer( +// func(context.Context, string) (net.Conn, error) { +// return dialer.Dial() +// }), +// )) +// if err != nil { +// panic(err) +// } +// return cc +// } + +// func (tc *TestController) startAgent(cfg *metrics.UsageLimits) { +// ctx := meta.NewContext( +// meta.WithProvider(identity.Component, meta.WithValue(types.Agent)), +// meta.WithProvider(identity.UUID), +// meta.WithProvider(logkc.Logger, meta.WithValue( +// logkc.New(types.Agent, +// logkc.WithName(string(rune('a'+len(tc.agentListeners)))), +// ), +// )), +// meta.WithProvider(tracing.Tracer), +// meta.WithProvider(host.SystemInfo), +// ) +// lg := meta.Log(ctx) +// srv := servers.NewServer(ctx) + +// listener := bufconn.Listen(bufSize) +// tc.agentListeners[meta.UUID(ctx)] = listener +// cc := dial(ctx, tc.schedListener) +// schedClient := types.NewSchedulerClient(cc) +// cc = dial(ctx, tc.monListener) +// monClient := types.NewMonitorClient(cc) +// agentSrv := agent.NewAgentServer(ctx, +// agent.WithSchedulerClient(schedClient), +// agent.WithMonitorClient(monClient), +// agent.WithUsageLimits(cfg), +// agent.WithToolchainFinders(toolchains.FinderWithOptions{ +// Finder: testutil.TestToolchainFinder{}, +// }), +// agent.WithToolchainRunners(testtoolchain.AddToStore), +// ) +// mgr := servers.NewStreamManager(ctx, agentSrv) +// go mgr.Run() +// go agentSrv.StartMetricsProvider() +// go func() { +// if err := srv.Serve(listener); err != nil { +// lg.Info(err) +// } +// }() +// } + +// func (tc *TestController) startScheduler() { +// ctx := meta.NewContext( +// meta.WithProvider(identity.Component, meta.WithValue(types.Scheduler)), +// meta.WithProvider(identity.UUID), +// meta.WithProvider(logkc.Logger, meta.WithValue( +// 
logkc.New(types.Scheduler, +// logkc.WithName("a"), +// ), +// )), +// meta.WithProvider(tracing.Tracer), +// ) +// lg := meta.Log(ctx) + +// tc.schedListener = bufconn.Listen(bufSize) +// srv := servers.NewServer(ctx) + +// cc := dial(ctx, tc.monListener) +// monClient := types.NewMonitorClient(cc) + +// cc = dial(ctx, tc.cacheListener) +// cacheClient := types.NewCacheClient(cc) + +// sc := scheduler.NewSchedulerServer(ctx, +// scheduler.WithMonitorClient(monClient), +// scheduler.WithCacheClient(cacheClient), +// ) +// types.RegisterSchedulerServer(srv, sc) +// go sc.StartMetricsProvider() +// go func() { +// if err := srv.Serve(tc.schedListener); err != nil { +// lg.Info(err) +// } +// }() +// } + +// func (tc *TestController) startMonitor() { +// ctx := meta.NewContext( +// meta.WithProvider(identity.Component, meta.WithValue(types.Monitor)), +// meta.WithProvider(identity.UUID), +// meta.WithProvider(logkc.Logger, meta.WithValue( +// logkc.New(types.Monitor, +// logkc.WithName("a"), +// ), +// )), +// meta.WithProvider(tracing.Tracer), +// ) +// lg := meta.Log(ctx) + +// tc.monListener = bufconn.Listen(bufSize) +// srv := servers.NewServer(ctx) + +// mon := monitor.NewMonitorServer(ctx, monitor.InMemoryStoreCreator) +// types.RegisterMonitorServer(srv, mon) + +// go func() { +// if err := srv.Serve(tc.monListener); err != nil { +// lg.Info(err) +// } +// }() +// } + +// func (tc *TestController) startCache() { +// ctx := meta.NewContext( +// meta.WithProvider(identity.Component, meta.WithValue(types.Cache)), +// meta.WithProvider(identity.UUID), +// meta.WithProvider(logkc.Logger, meta.WithValue( +// logkc.New(types.Cache, +// logkc.WithName("a"), +// ), +// )), +// meta.WithProvider(tracing.Tracer), +// ) +// lg := meta.Log(ctx) + +// cc := dial(ctx, tc.monListener) +// internalMonClient := types.NewMonitorClient(cc) + +// tc.cacheListener = bufconn.Listen(bufSize) +// srv := servers.NewServer(ctx) +// cache := cachesrv.NewCacheServer(ctx, config.CacheSpec{}, +// cachesrv.WithStorageProvider( +// storage.NewChainStorageProvider(ctx, +// storage.NewVolatileStorageProvider(ctx, +// config.LocalStorageSpec{ +// Limits: config.StorageLimitsSpec{ +// Memory: "4Gi", +// }, +// }, +// ), +// // storage.NewS3StorageProvider(ctx, +// // config.RemoteStorageSpec{ +// // Endpoint: "192.168.0.84:9000", +// // AccessKey: "minioadmin", +// // SecretKey: "minioadmin", +// // TLS: false, +// // Bucket: "kubecc", +// // }, +// // ), +// ), +// ), +// cachesrv.WithMonitorClient(internalMonClient), +// ) +// types.RegisterCacheServer(srv, cache) +// go cache.StartMetricsProvider() + +// go func() { +// err := srv.Serve(tc.cacheListener) +// if err != nil { +// lg.With(zap.Error(err)).Error("GRPC error") +// } +// }() +// } + +// func (tc *TestController) startConsumerd(cfg *metrics.UsageLimits) { +// ctx := meta.NewContext( +// meta.WithProvider(identity.Component, meta.WithValue(types.Consumerd)), +// meta.WithProvider(identity.UUID), +// meta.WithProvider(logkc.Logger, meta.WithValue( +// logkc.New(types.Consumerd, +// logkc.WithName(string(rune('a'+len(tc.Consumers)))), +// ), +// )), +// meta.WithProvider(tracing.Tracer), +// meta.WithProvider(host.SystemInfo), +// ) +// lg := meta.Log(ctx) + +// listener := bufconn.Listen(bufSize) +// srv := servers.NewServer(ctx) +// cc := dial(ctx, tc.schedListener) +// schedulerClient := types.NewSchedulerClient(cc) +// cc = dial(ctx, tc.monListener) +// monitorClient := types.NewMonitorClient(cc) + +// d := consumerd.NewConsumerdServer(ctx, +// 
consumerd.WithToolchainFinders(toolchains.FinderWithOptions{ +// Finder: testutil.TestToolchainFinder{}, +// }), +// consumerd.WithUsageLimits(cfg), +// consumerd.WithToolchainRunners(testtoolchain.AddToStore), +// consumerd.WithSchedulerClient(schedulerClient), +// consumerd.WithMonitorClient(monitorClient), +// ) +// types.RegisterConsumerdServer(srv, d) + +// // mgr := servers.NewStreamManager(ctx, d)s +// // go mgr.Run() +// go d.StartMetricsProvider() +// cdListener := dial(ctx, listener) +// cdClient := types.NewConsumerdClient(cdListener) +// tc.Consumers = append(tc.Consumers, cdClient) +// go func() { +// if err := srv.Serve(listener); err != nil { +// lg.Info(err) +// } +// }() +// } + +// type TestOptions struct { +// Clients []*metrics.UsageLimits +// Agents []*metrics.UsageLimits +// } + +// func (tc *TestController) Start(ops TestOptions) { +// tc.agentListenersLock.Lock() +// defer tc.agentListenersLock.Unlock() + +// tracer, _ := tracing.Start(tc.ctx, types.TestComponent) +// opentracing.SetGlobalTracer(tracer) + +// tc.startMonitor() +// tc.startCache() +// tc.startScheduler() +// for _, cfg := range ops.Agents { +// tc.startAgent(cfg) +// } +// for _, cfg := range ops.Clients { +// tc.startConsumerd(cfg) +// } + +// // Hook into the metrics server and wait for all the components to load up +// cc, err := servers.Dial(tc.ctx, "127.0.0.1:9097") +// if err != nil { +// panic(err) +// } +// waitCtx, waitCancel := context.WithCancel(context.Background()) +// wg := sync.WaitGroup{} +// wg.Add( +// len(ops.Agents) + +// len(ops.Clients) + +// 1 /*scheduler*/ + +// 1 /*cache*/) +// extClient := types.NewMonitorClient(cc) +// listener := clients.NewListener(tc.ctx, extClient) +// listener.OnProviderAdded(func(pctx context.Context, uuid string) { +// resp, _ := extClient.Whois(tc.ctx, &types.WhoisRequest{ +// UUID: uuid, +// }) +// if resp.Component == types.Monitor { +// return +// } +// wg.Done() +// select { +// case <-pctx.Done(): +// case <-waitCtx.Done(): +// } +// }) +// wg.Wait() +// waitCancel() +// } + +// func (tc *TestController) Teardown() { +// tc.cancel() +// } + +// func (tc *TestController) Wait() { +// <-ctrl.SetupSignalHandler().Done() +// } diff --git a/test/integration/integration_suite_test.go b/test/integration/integration_suite_test.go index 946a4ff..7be8680 100644 --- a/test/integration/integration_suite_test.go +++ b/test/integration/integration_suite_test.go @@ -3,10 +3,13 @@ package integration_test import ( "testing" + "github.com/cobalt77/kubecc/pkg/test" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) +var testEnv *test.Environment + func TestIntegration(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Integration Suite") diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 48f71a1..f566ca5 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -7,17 +7,17 @@ import ( "github.com/cobalt77/kubecc/internal/logkc" "github.com/cobalt77/kubecc/internal/testutil" + "github.com/cobalt77/kubecc/pkg/config" "github.com/cobalt77/kubecc/pkg/identity" "github.com/cobalt77/kubecc/pkg/meta" - "github.com/cobalt77/kubecc/pkg/metrics" + "github.com/cobalt77/kubecc/pkg/test" "github.com/cobalt77/kubecc/pkg/tracing" "github.com/cobalt77/kubecc/pkg/types" - "github.com/cobalt77/kubecc/test/integration" . 
"github.com/onsi/ginkgo" - "github.com/opentracing/opentracing-go" ) var _ = Describe("Integration test", func() { + ctx := meta.NewContext( meta.WithProvider(identity.Component, meta.WithValue(types.TestComponent)), meta.WithProvider(identity.UUID), @@ -28,11 +28,6 @@ var _ = Describe("Integration test", func() { ) lg := meta.Log(ctx) - tracer := meta.Tracer(ctx) - span, sctx := opentracing.StartSpanFromContextWithTracer( - ctx, tracer, "integration-test") - defer span.Finish() - numTasks := 400 localJobs := 50 taskPool := make(chan *types.RunRequest, numTasks) @@ -45,63 +40,79 @@ var _ = Describe("Integration test", func() { } } - testOptions := integration.TestOptions{ - Clients: []*metrics.UsageLimits{ - { - ConcurrentProcessLimit: 18, + Specify("Starting components", func() { + testEnv = test.NewDefaultEnvironment() + + testEnv.SpawnMonitor() + testEnv.SpawnCache() + testEnv.SpawnScheduler() + + testEnv.SpawnAgent(test.WithConfig(config.AgentSpec{ + UsageLimits: config.UsageLimitsSpec{ + ConcurrentProcessLimit: 24, QueuePressureMultiplier: 1.5, QueueRejectMultiplier: 2.0, }, - }, - Agents: []*metrics.UsageLimits{ - { - ConcurrentProcessLimit: 24, + })) + + testEnv.SpawnAgent(test.WithConfig(config.AgentSpec{ + UsageLimits: config.UsageLimitsSpec{ + ConcurrentProcessLimit: 32, QueuePressureMultiplier: 1.5, QueueRejectMultiplier: 2.0, }, - { + })) + + testEnv.SpawnAgent(test.WithConfig(config.AgentSpec{ + UsageLimits: config.UsageLimitsSpec{ ConcurrentProcessLimit: 16, QueuePressureMultiplier: 1.5, QueueRejectMultiplier: 2.0, }, - { - ConcurrentProcessLimit: 32, + })) + + testEnv.SpawnConsumerd(test.WithConfig(config.ConsumerdSpec{ + ListenAddress: "1111", + UsageLimits: config.UsageLimitsSpec{ + ConcurrentProcessLimit: 18, QueuePressureMultiplier: 1.5, QueueRejectMultiplier: 2.0, }, - }, - } - PMeasure("Run test", func(b Benchmarker) { - var tc *integration.TestController - b.Time("Start components", func() { - tc = integration.NewTestController(sctx) - tc.Start(testOptions) + })) + + go testEnv.Serve() + + testEnv.WaitForServices([]string{ + types.Monitor_ServiceDesc.ServiceName, + types.Cache_ServiceDesc.ServiceName, + types.Scheduler_ServiceDesc.ServiceName, + types.Consumerd_ServiceDesc.ServiceName, }) - defer tc.Teardown() + }) + Measure("Run test", func(b Benchmarker) { + cd := testEnv.NewConsumerdClient(ctx) wg := sync.WaitGroup{} - wg.Add(len(tc.Consumers) * localJobs) + wg.Add(localJobs) - for _, c := range tc.Consumers { - for i := 0; i < localJobs; i++ { - go func(cd types.ConsumerdClient) { - defer wg.Done() - for { - select { - case task := <-taskPool: - b.Time("Run task", func() { - _, err := cd.Run(sctx, task) - if err != nil { - panic(err) - } - }) - default: - lg.Info("Finished") - return - } + for i := 0; i < localJobs; i++ { + go func(cd types.ConsumerdClient) { + defer wg.Done() + for { + select { + case task := <-taskPool: + b.Time("Run task", func() { + _, err := cd.Run(ctx, task) + if err != nil { + panic(err) + } + }) + default: + lg.Info("Finished") + return } - }(c) - } + } + }(cd) } wg.Wait() }, 1) From 57dbf8ab26e99f1b36e85818523bf634a04f3adc Mon Sep 17 00:00:00 2001 From: cobalt77 <8194899+cobalt77@users.noreply.github.com> Date: Wed, 17 Mar 2021 22:43:52 -0400 Subject: [PATCH 10/12] consumerd testing work in progress --- hack/snippets.yaml | 3 + pkg/apps/consumerd/consumerd_suite_test.go | 116 +++++++++++- pkg/apps/consumerd/queue_test.go | 204 ++++++++------------- pkg/apps/monitor/monitor_test.go | 3 +- pkg/apps/monitor/server.go | 6 +- 
pkg/config/spec.go | 3 +- pkg/test/environment.go | 5 +- 7 files changed, 202 insertions(+), 138 deletions(-) diff --git a/hack/snippets.yaml b/hack/snippets.yaml index c99e727..7af7804 100644 --- a/hack/snippets.yaml +++ b/hack/snippets.yaml @@ -69,6 +69,9 @@ Expect ... To Equal: Expect ... To Receive: prefix: etr body: Expect($1).To(Receive()) +Expect Error Not To Have Occurred: + prefix: eent + body: Expect(err).NotTo(HaveOccurred()) Consistently Should Not Receive: prefix: csnr body: Consistently($1).ShouldNot(Receive() diff --git a/pkg/apps/consumerd/consumerd_suite_test.go b/pkg/apps/consumerd/consumerd_suite_test.go index 03659fd..b55a89b 100644 --- a/pkg/apps/consumerd/consumerd_suite_test.go +++ b/pkg/apps/consumerd/consumerd_suite_test.go @@ -3,22 +3,124 @@ package consumerd_test import ( "testing" + "github.com/cobalt77/kubecc/internal/logkc" + "github.com/cobalt77/kubecc/internal/testutil" + testtoolchain "github.com/cobalt77/kubecc/internal/testutil/toolchain" + "github.com/cobalt77/kubecc/pkg/apps/consumerd" + "github.com/cobalt77/kubecc/pkg/identity" + "github.com/cobalt77/kubecc/pkg/meta" + "github.com/cobalt77/kubecc/pkg/metrics" + "github.com/cobalt77/kubecc/pkg/run" "github.com/cobalt77/kubecc/pkg/test" + "github.com/cobalt77/kubecc/pkg/tracing" + "github.com/cobalt77/kubecc/pkg/types" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "go.uber.org/atomic" ) -var testEnv *test.Environment +var ( + testEnv *test.Environment + testCtx = meta.NewContext( + meta.WithProvider(identity.Component, meta.WithValue(types.TestComponent)), + meta.WithProvider(identity.UUID), + meta.WithProvider(logkc.Logger), + meta.WithProvider(tracing.Tracer), + ) + testToolchainRunner = &testtoolchain.TestToolchainRunner{} + taskArgs = []string{"-duration", "0"} + localExec = newTestExecutor() + remoteExec = newTestExecutor() +) + +func makeTaskPool(numTasks int) chan *consumerd.SplitTask { + taskPool := make(chan *consumerd.SplitTask, numTasks) + tc := &types.Toolchain{ + Kind: types.Gnu, + Lang: types.CXX, + Executable: testutil.TestToolchainExecutable, + TargetArch: "testarch", + Version: "0", + PicDefault: true, + } + + for i := 0; i < numTasks; i++ { + contexts := run.Contexts{ + ServerContext: testCtx, + ClientContext: testCtx, + } + request := &types.RunRequest{ + Compiler: &types.RunRequest_Toolchain{ + Toolchain: tc, + }, + Args: taskArgs, + UID: 1000, + GID: 1000, + } + + taskPool <- &consumerd.SplitTask{ + Local: run.Package( + testToolchainRunner.RunLocal( + testToolchainRunner.NewArgParser(testCtx, taskArgs)), + contexts, + localExec, + request, + ), + Remote: run.Package( + testToolchainRunner.SendRemote( + testToolchainRunner.NewArgParser(testCtx, taskArgs), + nil, + ), + contexts, + remoteExec, + request, + ), + } + } + return taskPool +} func TestConsumerd(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Consumerd Suite") } -var _ = BeforeSuite(func() { - testEnv = test.NewDefaultEnvironment() -}) +type testExecutor struct { + numTasks *atomic.Int32 + completed *atomic.Int32 +} + +func (x *testExecutor) Exec(task *run.Task) error { + x.numTasks.Inc() + defer x.numTasks.Dec() + + go func() { + defer GinkgoRecover() + task.Run() + }() + select { + case <-task.Done(): + case <-task.Context().Done(): + } + x.completed.Inc() + return task.Error() +} -var _ = AfterSuite(func() { - testEnv.Shutdown() -}) +func newTestExecutor() *testExecutor { + return &testExecutor{ + numTasks: atomic.NewInt32(0), + completed: atomic.NewInt32(0), + } +} + +func (x *testExecutor) 
CompleteUsageLimits(*metrics.UsageLimits) { + +} + +func (x *testExecutor) CompleteTaskStatus(s *metrics.TaskStatus) { + s.NumDelegated = x.numTasks.Load() +} + +func (x *testExecutor) ExecAsync(task *run.Task) <-chan error { + panic("not implemented") +} diff --git a/pkg/apps/consumerd/queue_test.go b/pkg/apps/consumerd/queue_test.go index f712af2..05a9624 100644 --- a/pkg/apps/consumerd/queue_test.go +++ b/pkg/apps/consumerd/queue_test.go @@ -1,142 +1,92 @@ package consumerd_test import ( - "context" + "time" - "github.com/cobalt77/kubecc/internal/logkc" - "github.com/cobalt77/kubecc/internal/testutil" - testtoolchain "github.com/cobalt77/kubecc/internal/testutil/toolchain" "github.com/cobalt77/kubecc/pkg/apps/consumerd" - "github.com/cobalt77/kubecc/pkg/identity" - "github.com/cobalt77/kubecc/pkg/meta" - "github.com/cobalt77/kubecc/pkg/metrics" - "github.com/cobalt77/kubecc/pkg/run" - "github.com/cobalt77/kubecc/pkg/tracing" + "github.com/cobalt77/kubecc/pkg/test" "github.com/cobalt77/kubecc/pkg/types" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - "go.uber.org/atomic" ) -type testExecutor struct { - numTasks *atomic.Int32 - completed *atomic.Int32 -} - -func (x *testExecutor) Exec(task *run.Task) error { - x.numTasks.Inc() - defer x.numTasks.Dec() - - go func() { - defer GinkgoRecover() - task.Run() - }() - select { - case <-task.Done(): - case <-task.Context().Done(): - } - x.completed.Inc() - return task.Error() -} - -func newTestExecutor() *testExecutor { - return &testExecutor{ - numTasks: atomic.NewInt32(0), - completed: atomic.NewInt32(0), - } -} - -func (x *testExecutor) CompleteUsageLimits(*metrics.UsageLimits) { - -} - -func (x *testExecutor) CompleteTaskStatus(s *metrics.TaskStatus) { - s.NumDelegated = x.numTasks.Load() -} - -func (x *testExecutor) ExecAsync(task *run.Task) <-chan error { - panic("not implemented") -} - var _ = Describe("Split Queue", func() { - testCtx := meta.NewContext( - meta.WithProvider(identity.Component, meta.WithValue(types.TestComponent)), - meta.WithProvider(identity.UUID), - meta.WithProvider(logkc.Logger), - meta.WithProvider(tracing.Tracer), - ) - numTasks := 100 - taskPool := make(chan *consumerd.SplitTask, numTasks) - cleanup := make(chan context.CancelFunc, 100) - localExec := newTestExecutor() - remoteExec := newTestExecutor() - tc := &types.Toolchain{ - Kind: types.Gnu, - Lang: types.CXX, - Executable: testutil.TestToolchainExecutable, - TargetArch: "testarch", - Version: "0", - PicDefault: true, - } - taskArgs := []string{"-duration", "0"} - rm := &testtoolchain.TestToolchainRunner{} - request := &types.RunRequest{ - Compiler: &types.RunRequest_Toolchain{ - Toolchain: tc, - }, - Args: taskArgs, - UID: 1000, - GID: 1000, - } - - BeforeEach(func() { - schedulerClient := testEnv.NewSchedulerClient(testCtx) - - Expect(len(taskPool)).To(Equal(0)) - Expect(cap(taskPool)).To(Equal(numTasks)) - - for i := 0; i < numTasks; i++ { - contexts := run.Contexts{ - ServerContext: testCtx, - ClientContext: testCtx, - } - taskPool <- &consumerd.SplitTask{ - Local: run.Package( - rm.RunLocal(rm.NewArgParser(testCtx, taskArgs)), - contexts, - localExec, - request, - ), - Remote: run.Package( - rm.SendRemote( - rm.NewArgParser(testCtx, taskArgs), - schedulerClient, - ), - contexts, - remoteExec, - request, - ), - } - } - - _, cf := testEnv.SpawnMonitor() - cleanup <- cf - }) - - AfterEach(func() { - for c := range cleanup { - c() - } + When("when no scheduler is available", func() { + Specify("startup", func() { + testEnv = 
test.NewDefaultEnvironment() + testEnv.SpawnMonitor() + go testEnv.Serve() + testEnv.WaitForServices([]string{ + types.Monitor_ServiceDesc.ServiceName, + }) + }) + Specify("the queue should run all tasks locally", func() { + taskPool := makeTaskPool(numTasks) + sq := consumerd.NewSplitQueue(testCtx, testEnv.NewMonitorClient(testCtx)) + go func() { + defer GinkgoRecover() + for { + select { + case task := <-taskPool: + sq.In() <- task + go func() { + _, err := task.Wait() + Expect(err).NotTo(HaveOccurred()) + + }() + default: + return + } + } + }() + Eventually(func() int32 { + return localExec.completed.Load() + }).Should(Equal(int32(numTasks))) + }) + Specify("shutdown", func() { + testEnv.Shutdown() + }) }) - - PSpecify("when no scheduler is available, the queue should run all tasks locally", func() { - sq := consumerd.NewSplitQueue(testCtx, testEnv.NewMonitorClient(testCtx)) - for task := range taskPool { - sq.In() <- task - } - Eventually(func() int32 { - return localExec.numTasks.Load() - }).Should(Equal(int32(numTasks))) + When("a scheduler is available", func() { + Specify("startup", func() { + testEnv = test.NewDefaultEnvironment() + testEnv.SpawnMonitor() + testEnv.SpawnScheduler() + go testEnv.Serve() + testEnv.WaitForServices([]string{ + types.Monitor_ServiceDesc.ServiceName, + types.Scheduler_ServiceDesc.ServiceName, + }) + }) + Specify("the queue should split tasks between local and remote", func() { + taskPool := makeTaskPool(numTasks) + sq := consumerd.NewSplitQueue(testCtx, testEnv.NewMonitorClient(testCtx)) + time.Sleep(100 * time.Millisecond) + go func() { + defer GinkgoRecover() + for { + select { + case task := <-taskPool: + sq.In() <- task + go func() { + _, err := task.Wait() + Expect(err).NotTo(HaveOccurred()) + + }() + default: + return + } + } + }() + Eventually(func() int32 { + return localExec.completed.Load() + remoteExec.completed.Load() + }).Should(Equal(int32(numTasks))) + Expect(localExec.completed.Load()).To(BeNumerically(">", 0)) + Expect(remoteExec.completed.Load()).To(BeNumerically(">", 0)) + }) + Specify("shutdown", func() { + testEnv.Shutdown() + }) }) }) diff --git a/pkg/apps/monitor/monitor_test.go b/pkg/apps/monitor/monitor_test.go index d952be8..fe2f69c 100644 --- a/pkg/apps/monitor/monitor_test.go +++ b/pkg/apps/monitor/monitor_test.go @@ -11,6 +11,7 @@ import ( "github.com/cobalt77/kubecc/internal/testutil" "github.com/cobalt77/kubecc/pkg/apps/monitor" "github.com/cobalt77/kubecc/pkg/clients" + "github.com/cobalt77/kubecc/pkg/config" "github.com/cobalt77/kubecc/pkg/identity" "github.com/cobalt77/kubecc/pkg/meta" "github.com/cobalt77/kubecc/pkg/metrics" @@ -82,7 +83,7 @@ var _ = Describe("Monitor", func() { logkc.WithLogLevel(zapcore.WarnLevel)))), meta.WithProvider(tracing.Tracer), ) - mon := monitor.NewMonitorServer(monitorCtx, storeCreator) + mon := monitor.NewMonitorServer(monitorCtx, config.MonitorSpec{}, storeCreator) listener = bufconn.Listen(1024 * 1024) srv := servers.NewServer(monitorCtx, servers.WithServerOpts( grpc.NumStreamWorkers(12), diff --git a/pkg/apps/monitor/server.go b/pkg/apps/monitor/server.go index 12fb22d..756b1b3 100644 --- a/pkg/apps/monitor/server.go +++ b/pkg/apps/monitor/server.go @@ -8,6 +8,7 @@ import ( "net" "sync" + "github.com/cobalt77/kubecc/pkg/config" "github.com/cobalt77/kubecc/pkg/meta" "github.com/cobalt77/kubecc/pkg/metrics" "github.com/cobalt77/kubecc/pkg/servers" @@ -42,6 +43,7 @@ type MonitorServer struct { func NewMonitorServer( ctx context.Context, + conf config.MonitorSpec, storeCreator StoreCreator, 
) *MonitorServer { srv := &MonitorServer{ @@ -57,7 +59,9 @@ func NewMonitorServer( srv.buckets[metrics.MetaBucket] = storeCreator.NewStore(ctx) srv.providersUpdated() - go srv.runPrometheusListener() + if conf.ServePrometheusMetrics { + go srv.runPrometheusListener() + } return srv } diff --git a/pkg/config/spec.go b/pkg/config/spec.go index a6020ec..731f92a 100644 --- a/pkg/config/spec.go +++ b/pkg/config/spec.go @@ -75,7 +75,8 @@ type SchedulerSpec struct { type MonitorSpec struct { GlobalSpec - ListenAddress string `json:"listenAddress"` + ListenAddress string `json:"listenAddress"` + ServePrometheusMetrics bool `json:"servePrometheusMetrics"` } type CacheSpec struct { diff --git a/pkg/test/environment.go b/pkg/test/environment.go index d3280f4..f4fe68c 100644 --- a/pkg/test/environment.go +++ b/pkg/test/environment.go @@ -233,7 +233,7 @@ func (e *Environment) SpawnMonitor(opts ...SpawnOption) (context.Context, contex ) ctx, cancel := context.WithCancel(ctx) - mon := monitor.NewMonitorServer(ctx, monitor.InMemoryStoreCreator) + mon := monitor.NewMonitorServer(ctx, cfg.Monitor, monitor.InMemoryStoreCreator) types.RegisterMonitorServer(e.server, mon) return ctx, cancel @@ -305,6 +305,9 @@ func DefaultConfig() config.KubeccSpec { }, }, }, + Monitor: config.MonitorSpec{ + ServePrometheusMetrics: false, + }, Consumerd: config.ConsumerdSpec{ DisableTLS: true, UsageLimits: config.UsageLimitsSpec{ From f2c5d535865a925c344380e868539dfb863b9fe0 Mon Sep 17 00:00:00 2001 From: cobalt77 <8194899+cobalt77@users.noreply.github.com> Date: Wed, 17 Mar 2021 23:15:05 -0400 Subject: [PATCH 11/12] fix compiler error --- cmd/kubecc/components/monitor/monitor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubecc/components/monitor/monitor.go b/cmd/kubecc/components/monitor/monitor.go index 5d39e68..fe53edb 100644 --- a/cmd/kubecc/components/monitor/monitor.go +++ b/cmd/kubecc/components/monitor/monitor.go @@ -37,7 +37,7 @@ func run(cmd *cobra.Command, args []string) { lg.With("addr", listener.Addr().String()).Info("Metrics API listening") srv := servers.NewServer(ctx) - monitorServer := monitor.NewMonitorServer(ctx, monitor.InMemoryStoreCreator) + monitorServer := monitor.NewMonitorServer(ctx, conf, monitor.InMemoryStoreCreator) types.RegisterMonitorServer(srv, monitorServer) err = srv.Serve(listener) From 16f392a40b68e6f5c8d98ec315ec5c106ccc8984 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 18 Mar 2021 03:25:54 +0000 Subject: [PATCH 12/12] Bump github.com/golang/protobuf from 1.4.3 to 1.5.1 Bumps [github.com/golang/protobuf](https://github.com/golang/protobuf) from 1.4.3 to 1.5.1. 
- [Release notes](https://github.com/golang/protobuf/releases) - [Commits](https://github.com/golang/protobuf/compare/v1.4.3...v1.5.1) Signed-off-by: dependabot[bot] --- go.mod | 10 +++------- go.sum | 55 ++++++------------------------------------------------- 2 files changed, 9 insertions(+), 56 deletions(-) diff --git a/go.mod b/go.mod index 18d9255..31b27b7 100644 --- a/go.mod +++ b/go.mod @@ -16,14 +16,13 @@ require ( github.com/go-logr/logr v0.4.0 github.com/go-logr/zapr v0.4.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/gojp/goreportcard v0.0.0-20210127091035-7a8a1ebca86e // indirect - github.com/golang/protobuf v1.4.3 + github.com/golang/protobuf v1.5.1 github.com/google/go-cmp v0.5.5 github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.2.0 github.com/googleapis/gnostic v0.5.4 // indirect github.com/huandu/xstrings v1.3.2 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/imdario/mergo v0.3.12 github.com/karlseguin/ccache/v2 v2.0.8 github.com/mattn/go-runewidth v0.0.10 // indirect github.com/minio/md5-simd v1.1.2 @@ -36,15 +35,12 @@ require ( github.com/prometheus/client_golang v1.9.0 github.com/prometheus/procfs v0.6.0 // indirect github.com/rivo/uniseg v0.2.0 // indirect - github.com/smallnest/weighted v0.0.0-20201102054551-85ac5c79528c github.com/spf13/cobra v1.1.3 github.com/stretchr/testify v1.7.0 - github.com/tinylib/msgp v1.1.5 github.com/uber/jaeger-client-go v2.25.0+incompatible github.com/uber/jaeger-lib v2.4.0+incompatible // indirect github.com/valyala/bytebufferpool v1.0.0 go.uber.org/atomic v1.7.0 - go.uber.org/goleak v1.1.10 // indirect go.uber.org/zap v1.16.0 golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 // indirect golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5 @@ -52,7 +48,7 @@ require ( golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect google.golang.org/grpc v1.36.0 - google.golang.org/protobuf v1.25.0 + google.golang.org/protobuf v1.26.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect k8s.io/api v0.20.4 k8s.io/apiextensions-apiserver v0.20.4 // indirect diff --git a/go.sum b/go.sum index e069982..ba606cc 100644 --- a/go.sum +++ b/go.sum @@ -61,8 +61,6 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bEn0jTI6LJU0mpw= github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= @@ -90,7 +88,6 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= 
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= @@ -100,7 +97,6 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/banzaicloud/k8s-objectmatcher v1.5.1 h1:u3Ic1JzIUQe0pGGjVQJvCWTNa+t9CiW49IPPovYqAss= github.com/banzaicloud/k8s-objectmatcher v1.5.1/go.mod h1:9MWY5HsM/OaTmoTirczhlO8UALbH722WgdpaaR7Y8OE= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -128,9 +124,7 @@ github.com/cobalt77/grpc-opentracing v0.0.0-20210220041601-edf9159a6710/go.mod h github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -138,7 +132,6 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= @@ -148,12 +141,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ= github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= -github.com/dgraph-io/badger/v2 v2.2007.2/go.mod 
h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= -github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= @@ -225,8 +213,6 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/gojp/goreportcard v0.0.0-20210127091035-7a8a1ebca86e h1:dIosri706uqS1jZBZ3E+jroCHY3Q2cl9eiawwExScaw= -github.com/gojp/goreportcard v0.0.0-20210127091035-7a8a1ebca86e/go.mod h1:19GXi0dz4yFthI9Q8VmbvIVKoLLyIe/fvPi2VreSGhk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -254,11 +240,11 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1 h1:jAbXjIeW2ZSW2AwFxlGTDoc2CjI2XujLkV3ArsZFCvc= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -347,8 +333,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod 
h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= @@ -386,7 +370,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -395,7 +378,6 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -469,8 +451,6 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.15.1 h1:DsXNrKujDlkMS9Rsxmd+Fg7S6Kc5lhE+qX8tY6laOxc= -github.com/onsi/ginkgo v1.15.1/go.mod h1:Dd6YFfwBW84ETqqtL0CPyPXillHgY6XhQH3uuCCTr/o= github.com/onsi/ginkgo v1.15.2 h1:l77YT15o814C2qVL47NOyjV/6RbaP7kKdrvZnxQ3Org= github.com/onsi/ginkgo v1.15.2/go.mod h1:Dd6YFfwBW84ETqqtL0CPyPXillHgY6XhQH3uuCCTr/o= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -498,8 +478,6 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= -github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 
v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -511,7 +489,6 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/client_golang v0.9.0-pre1.0.20171201122222-661e31bf844d/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= @@ -527,7 +504,6 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20171117163051-2e54d0b93cba/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -536,7 +512,6 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/procfs v0.0.0-20171221151313-8f918ac9ab4b/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -555,7 +530,6 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod 
h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= @@ -564,8 +538,6 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/smallnest/weighted v0.0.0-20201102054551-85ac5c79528c h1:XBpqxCr2X2HYZMOA+HTDhj8njR4PGhsK+M+geaMAQ20= -github.com/smallnest/weighted v0.0.0-20201102054551-85ac5c79528c/go.mod h1:xc9CoZ+ZBGwajnWto5Aqw/wWg8euy4HtOr6K9Fxp9iw= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= @@ -573,12 +545,10 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= @@ -588,7 +558,6 @@ github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -606,16 +575,12 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tinylib/msgp v1.1.5 h1:2gXmtWueD2HefZHQe1QOy9HVzmFrLOVvsXwXBQ0ayy0= -github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= 
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.0+incompatible h1:fY7QsGQWiCt8pajv4r7JEvmATdCVaWxXbjwyYwsNaLQ= github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= @@ -623,7 +588,6 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ= github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -663,7 +627,6 @@ go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -748,7 +711,6 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200927032502-5d4f70055728/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ 
-785,7 +747,6 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -795,7 +756,6 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -825,7 +785,6 @@ golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -874,7 +833,6 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628034336-212fb13d595e/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -913,7 +871,6 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools 
v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1029,8 +986,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1119,8 +1078,6 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.8.2 h1:SBWmI0b3uzMIUD/BIXWNegrCeZmPJ503pOtwxY0LPHM= -sigs.k8s.io/controller-runtime v0.8.2/go.mod h1:U/l+DUopBc1ecfRZ5aviA9JDmGFQKvLf5YkZNx2e0sU= sigs.k8s.io/controller-runtime v0.8.3 h1:GMHvzjTmaWHQB8HadW+dIvBoJuLvZObYJ5YoZruPRao= sigs.k8s.io/controller-runtime v0.8.3/go.mod h1:U/l+DUopBc1ecfRZ5aviA9JDmGFQKvLf5YkZNx2e0sU= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=