From f0f712126e49048d0404f19bf04c47d8b7860496 Mon Sep 17 00:00:00 2001
From: Nashwan Azhari
Date: Mon, 2 May 2022 14:58:52 +0300
Subject: [PATCH 1/9] Vendor onsi/gomega/gmeasure

Signed-off-by: Nashwan Azhari
---
 .../github.com/onsi/gomega/gmeasure/cache.go  | 202 +++++++
 .../onsi/gomega/gmeasure/enum_support.go      |  43 ++
 .../onsi/gomega/gmeasure/experiment.go        | 526 ++++++++++++++++++
 .../onsi/gomega/gmeasure/measurement.go       | 235 ++++++++
 .../github.com/onsi/gomega/gmeasure/rank.go   | 141 +++++
 .../github.com/onsi/gomega/gmeasure/stats.go  | 153 +++++
 .../onsi/gomega/gmeasure/stopwatch.go         | 117 ++++
 .../onsi/gomega/gmeasure/table/table.go       | 370 ++++++++++++
 vendor/modules.txt                            |   2 +
 9 files changed, 1789 insertions(+)
 create mode 100644 vendor/github.com/onsi/gomega/gmeasure/cache.go
 create mode 100644 vendor/github.com/onsi/gomega/gmeasure/enum_support.go
 create mode 100644 vendor/github.com/onsi/gomega/gmeasure/experiment.go
 create mode 100644 vendor/github.com/onsi/gomega/gmeasure/measurement.go
 create mode 100644 vendor/github.com/onsi/gomega/gmeasure/rank.go
 create mode 100644 vendor/github.com/onsi/gomega/gmeasure/stats.go
 create mode 100644 vendor/github.com/onsi/gomega/gmeasure/stopwatch.go
 create mode 100644 vendor/github.com/onsi/gomega/gmeasure/table/table.go

diff --git a/vendor/github.com/onsi/gomega/gmeasure/cache.go b/vendor/github.com/onsi/gomega/gmeasure/cache.go
new file mode 100644
index 0000000000..27fab63757
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gmeasure/cache.go
@@ -0,0 +1,202 @@
+package gmeasure
+
+import (
+	"crypto/md5"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/onsi/gomega/internal/gutil"
+)
+
+const CACHE_EXT = ".gmeasure-cache"
+
+/*
+ExperimentCache provides a directory-and-file based cache of experiments
+*/
+type ExperimentCache struct {
+	Path string
+}
+
+/*
+NewExperimentCache creates and initializes a new cache. Path must point to a directory (if path does not exist, NewExperimentCache will create a directory at path).
+
+Cached Experiments are stored as separate files in the cache directory - the filename is a hash of the Experiment name. Each file contains two JSON-encoded objects - a CachedExperimentHeader that includes the experiment's name and cache version number, and then the Experiment itself.
+*/
+func NewExperimentCache(path string) (ExperimentCache, error) {
+	stat, err := os.Stat(path)
+	if os.IsNotExist(err) {
+		err := os.MkdirAll(path, 0777)
+		if err != nil {
+			return ExperimentCache{}, err
+		}
+	} else if !stat.IsDir() {
+		return ExperimentCache{}, fmt.Errorf("%s is not a directory", path)
+	}
+
+	return ExperimentCache{
+		Path: path,
+	}, nil
+}
+
+/*
+CachedExperimentHeader captures the name of the Cached Experiment and its Version
+*/
+type CachedExperimentHeader struct {
+	Name    string
+	Version int
+}
+
+func (cache ExperimentCache) hashOf(name string) string {
+	return fmt.Sprintf("%x", md5.Sum([]byte(name)))
+}
+
+func (cache ExperimentCache) readHeader(filename string) (CachedExperimentHeader, error) {
+	out := CachedExperimentHeader{}
+	f, err := os.Open(filepath.Join(cache.Path, filename))
+	if err != nil {
+		return out, err
+	}
+	defer f.Close()
+	err = json.NewDecoder(f).Decode(&out)
+	return out, err
+}
+
+/*
+List returns a list of all Cached Experiments found in the cache.
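+
+A minimal usage sketch (illustrative only - error handling is elided and the cache path is an arbitrary example):
+
+	cache, _ := gmeasure.NewExperimentCache("./gmeasure-cache")
+	headers, _ := cache.List()
+	for _, header := range headers {
+		fmt.Printf("%s (version %d)\n", header.Name, header.Version)
+	}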
+*/
+func (cache ExperimentCache) List() ([]CachedExperimentHeader, error) {
+	var out []CachedExperimentHeader
+	names, err := gutil.ReadDir(cache.Path)
+	if err != nil {
+		return out, err
+	}
+	for _, name := range names {
+		if filepath.Ext(name) != CACHE_EXT {
+			continue
+		}
+		header, err := cache.readHeader(name)
+		if err != nil {
+			return out, err
+		}
+		out = append(out, header)
+	}
+	return out, nil
+}
+
+/*
+Clear empties out the cache - this will delete any and all detected cache files in the cache directory. Use with caution!
+*/
+func (cache ExperimentCache) Clear() error {
+	names, err := gutil.ReadDir(cache.Path)
+	if err != nil {
+		return err
+	}
+	for _, name := range names {
+		if filepath.Ext(name) != CACHE_EXT {
+			continue
+		}
+		err := os.Remove(filepath.Join(cache.Path, name))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+/*
+Load fetches an experiment from the cache. Lookup occurs by name. Load requires that the version number in the cache is equal to or greater than the passed-in version.
+
+If an experiment with corresponding name and version >= the passed-in version is found, it is unmarshaled and returned.
+
+If no experiment is found, or the cached version is smaller than the passed-in version, Load will return nil.
+
+When paired with Ginkgo you can cache experiments and prevent potentially expensive recomputation with this pattern:
+
+	const EXPERIMENT_VERSION = 1 //bump this to bust the cache and recompute _all_ experiments
+
+	Describe("some experiments", func() {
+		var cache gmeasure.ExperimentCache
+		var experiment *gmeasure.Experiment
+
+		BeforeEach(func() {
+			cache, _ = gmeasure.NewExperimentCache("./gmeasure-cache")
+			name := CurrentSpecReport().LeafNodeText
+			experiment = cache.Load(name, EXPERIMENT_VERSION)
+			if experiment != nil {
+				AddReportEntry(experiment)
+				Skip("cached")
+			}
+			experiment = gmeasure.NewExperiment(name)
+			AddReportEntry(experiment)
+		})
+
+		It("foo runtime", func() {
+			experiment.SampleDuration("runtime", func(_ int) {
+				//do stuff
+			}, gmeasure.SamplingConfig{N: 100})
+		})
+
+		It("bar runtime", func() {
+			experiment.SampleDuration("runtime", func(_ int) {
+				//do stuff
+			}, gmeasure.SamplingConfig{N: 100})
+		})
+
+		AfterEach(func() {
+			if !CurrentSpecReport().State.Is(types.SpecStateSkipped) {
+				cache.Save(experiment.Name, EXPERIMENT_VERSION, experiment)
+			}
+		})
+	})
+*/
+func (cache ExperimentCache) Load(name string, version int) *Experiment {
+	path := filepath.Join(cache.Path, cache.hashOf(name)+CACHE_EXT)
+	f, err := os.Open(path)
+	if err != nil {
+		return nil
+	}
+	defer f.Close()
+	dec := json.NewDecoder(f)
+	header := CachedExperimentHeader{}
+	dec.Decode(&header)
+	if header.Version < version {
+		return nil
+	}
+	out := NewExperiment("")
+	err = dec.Decode(out)
+	if err != nil {
+		return nil
+	}
+	return out
+}
+
+/*
+Save stores the passed-in experiment to the cache with the passed-in name and version.
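+
+For example (an illustrative sketch - the version constant is arbitrary and error handling is abbreviated):
+
+	cache, _ := gmeasure.NewExperimentCache("./gmeasure-cache")
+	experiment := gmeasure.NewExperiment("My Experiment")
+	// ... record measurements ...
+	if err := cache.Save(experiment.Name, 1, experiment); err != nil {
+		// handle the error
+	}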
+*/
+func (cache ExperimentCache) Save(name string, version int, experiment *Experiment) error {
+	path := filepath.Join(cache.Path, cache.hashOf(name)+CACHE_EXT)
+	f, err := os.Create(path)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	enc := json.NewEncoder(f)
+	err = enc.Encode(CachedExperimentHeader{
+		Name:    name,
+		Version: version,
+	})
+	if err != nil {
+		return err
+	}
+	return enc.Encode(experiment)
+}
+
+/*
+Delete removes the experiment with the passed-in name from the cache
+*/
+func (cache ExperimentCache) Delete(name string) error {
+	path := filepath.Join(cache.Path, cache.hashOf(name)+CACHE_EXT)
+	return os.Remove(path)
+}
diff --git a/vendor/github.com/onsi/gomega/gmeasure/enum_support.go b/vendor/github.com/onsi/gomega/gmeasure/enum_support.go
new file mode 100644
index 0000000000..b5404f9620
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gmeasure/enum_support.go
@@ -0,0 +1,43 @@
+package gmeasure
+
+import "encoding/json"
+
+type enumSupport struct {
+	toString map[uint]string
+	toEnum   map[string]uint
+	maxEnum  uint
+}
+
+func newEnumSupport(toString map[uint]string) enumSupport {
+	toEnum, maxEnum := map[string]uint{}, uint(0)
+	for k, v := range toString {
+		toEnum[v] = k
+		if maxEnum < k {
+			maxEnum = k
+		}
+	}
+	return enumSupport{toString: toString, toEnum: toEnum, maxEnum: maxEnum}
+}
+
+func (es enumSupport) String(e uint) string {
+	if e > es.maxEnum {
+		return es.toString[0]
+	}
+	return es.toString[e]
+}
+
+func (es enumSupport) UnmarshJSON(b []byte) (uint, error) {
+	var dec string
+	if err := json.Unmarshal(b, &dec); err != nil {
+		return 0, err
+	}
+	out := es.toEnum[dec] // if we miss we get 0 which is what we want anyway
+	return out, nil
+}
+
+func (es enumSupport) MarshJSON(e uint) ([]byte, error) {
+	if e == 0 || e > es.maxEnum {
+		return json.Marshal(nil)
+	}
+	return json.Marshal(es.toString[e])
+}
diff --git a/vendor/github.com/onsi/gomega/gmeasure/experiment.go b/vendor/github.com/onsi/gomega/gmeasure/experiment.go
new file mode 100644
index 0000000000..1c2d300965
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gmeasure/experiment.go
@@ -0,0 +1,526 @@
+/*
+Package gomega/gmeasure provides support for benchmarking and measuring code. It is intended as a more robust replacement for Ginkgo V1's Measure nodes.
+
+gmeasure is organized around the metaphor of an Experiment that can record multiple Measurements. A Measurement is a named collection of data points and gmeasure supports
+measuring Values (of type float64) and Durations (of type time.Duration).
+
+Experiments allow the user to record Measurements directly by passing in Values (i.e. float64) or Durations (i.e. time.Duration),
+or to measure them by passing in functions to measure. When measuring functions Experiments take care of timing the duration of functions (for Duration measurements)
+and/or recording returned values (for Value measurements). Experiments also support sampling functions - when told to sample Experiments will run functions repeatedly
+and measure and record results. The sampling behavior is configured by passing in a SamplingConfig that can control the maximum number of samples, the maximum duration for sampling (or both)
+and the number of concurrent samples to take.
+
+Measurements can be decorated with additional information. This is supported by passing in special typed decorators when recording measurements.
+These include:
+
+- Units("any string") - to attach units to a Value Measurement (Duration Measurements always have units of "duration")
+- Style("any Ginkgo color style string") - to attach styling to a Measurement. This styling is used when rendering console information about the measurement in reports. Color style strings are documented at TODO.
+- Precision(integer or time.Duration) - to attach precision to a Measurement. This controls how many decimal places to show for Value Measurements and how to round Duration Measurements when rendering them to screen.
+
+In addition, individual data points in a Measurement can be annotated with an Annotation("any string"). The annotation is associated with the individual data point and is intended to convey additional context about the data point.
+
+Once measurements are complete, an Experiment can generate a comprehensive report by calling its String() or ColorableString() method.
+
+Users can also access and analyze the resulting Measurements directly. Use Experiment.Get(NAME) to fetch the Measurement named NAME. This returned struct will have fields containing
+all the data points and annotations recorded by the experiment. You can subsequently fetch the Measurement.Stats() to get a Stats struct that contains basic statistical information about the
+Measurement (min, max, median, mean, standard deviation). You can order these Stats objects using RankStats() to identify best/worst performers across multiple experiments or measurements.
+
+gmeasure also supports caching Experiments via an ExperimentCache. The cache supports storing and retrieving experiments by name and version. This allows you to rerun code without
+repeating expensive experiments that may not have changed (which can be controlled by the cache version number). It also enables you to compare new experiment runs with older runs to detect
+variations in performance/behavior.
+
+When used with Ginkgo, you can emit experiment reports and encode them in test reports easily using Ginkgo V2's support for Report Entries.
+Simply pass your experiment to AddReportEntry to get a report every time the tests run. You can also use AddReportEntry with Measurements to emit all the captured data
+and Rankings to emit measurement summaries in rank order.
+
+Finally, Experiments provide an additional mechanism to measure durations called a Stopwatch. The Stopwatch makes it easy to pepper code with statements that measure elapsed time across
+different sections of code and can be useful when debugging or evaluating bottlenecks in a given codepath.
+*/
+package gmeasure
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"sync"
+	"time"
+
+	"github.com/onsi/gomega/gmeasure/table"
+)
+
+/*
+SamplingConfig configures the Sample family of experiment methods.
+These methods invoke passed-in functions repeatedly to sample and record a given measurement.
+SamplingConfig is used to control the maximum number of samples or time spent sampling (or both). When both are specified sampling ends as soon as one of the conditions is met.
+SamplingConfig can also ensure a minimum interval between samples and can enable concurrent sampling.
+*/
+type SamplingConfig struct {
+	// N - the maximum number of samples to record
+	N int
+	// Duration - the maximum amount of time to spend recording samples
+	Duration time.Duration
+	// MinSamplingInterval - the minimum time that must elapse between samplings. It is an error to specify both MinSamplingInterval and NumParallel.
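+	// An illustrative example (values arbitrary): SamplingConfig{N: 100, Duration: time.Minute, MinSamplingInterval: 50 * time.Millisecond} stops after 100 samples or one minute (whichever comes first) and leaves at least 50ms between consecutive samples.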
+	MinSamplingInterval time.Duration
+	// NumParallel - the number of parallel workers to spin up to record samples. It is an error to specify both MinSamplingInterval and NumParallel.
+	NumParallel int
+}
+
+// The Units decorator allows you to specify units (an arbitrary string) when recording values. It is ignored when recording durations.
+//
+//	e := gmeasure.NewExperiment("My Experiment")
+//	e.RecordValue("length", 3.141, gmeasure.Units("inches"))
+//
+// Units are only set the first time a value of a given name is recorded. In the example above any subsequent calls to e.RecordValue("length", X) will maintain the "inches" units even if a new set of Units("UNIT") are passed in later.
+type Units string
+
+// The Annotation decorator allows you to attach an annotation to a given recorded data-point:
+//
+// For example:
+//
+//	e := gmeasure.NewExperiment("My Experiment")
+//	e.RecordValue("length", 3.141, gmeasure.Annotation("bob"))
+//	e.RecordValue("length", 2.71, gmeasure.Annotation("jane"))
+//
+// ...will result in a Measurement named "length" that records two values ([3.141, 2.71]) annotated with (["bob", "jane"])
+type Annotation string
+
+// The Style decorator allows you to associate a style with a measurement. This is used to generate colorful console reports using Ginkgo V2's
+// console formatter. Styles are strings in curly brackets that correspond to a color or style.
+//
+// For example:
+//
+//	e := gmeasure.NewExperiment("My Experiment")
+//	e.RecordValue("length", 3.141, gmeasure.Style("{{blue}}{{bold}}"))
+//	e.RecordValue("length", 2.71)
+//	e.RecordDuration("cooking time", 3 * time.Second, gmeasure.Style("{{red}}{{underline}}"))
+//	e.RecordDuration("cooking time", 2 * time.Second)
+//
+// will emit a report with blue bold entries for the length measurement and red underlined entries for the cooking time measurement.
+//
+// Styles are only set the first time a value or duration of a given name is recorded. In the example above any subsequent calls to e.RecordValue("length", X) will maintain the "{{blue}}{{bold}}" style even if a new Style is passed in later.
+type Style string
+
+// The PrecisionBundle decorator controls the rounding of value and duration measurements. See Precision().
+type PrecisionBundle struct {
+	Duration    time.Duration
+	ValueFormat string
+}
+
+// Precision() allows you to specify the precision of a value or duration measurement - this precision is used when rendering the measurement to screen.
+//
+// To control the precision of Value measurements, pass Precision an integer. This will denote the number of decimal places to render (equivalent to the format string "%.Nf")
+// To control the precision of Duration measurements, pass Precision a time.Duration. Duration measurements will be rounded to the nearest time.Duration when rendered.
+//
+// For example:
+//
+//	e := gmeasure.NewExperiment("My Experiment")
+//	e.RecordValue("length", 3.141, gmeasure.Precision(2))
+//	e.RecordValue("length", 2.71)
+//	e.RecordDuration("cooking time", 3214 * time.Millisecond, gmeasure.Precision(100*time.Millisecond))
+//	e.RecordDuration("cooking time", 2623 * time.Millisecond)
+func Precision(p interface{}) PrecisionBundle {
+	out := DefaultPrecisionBundle
+	switch reflect.TypeOf(p) {
+	case reflect.TypeOf(time.Duration(0)):
+		out.Duration = p.(time.Duration)
+	case reflect.TypeOf(int(0)):
+		out.ValueFormat = fmt.Sprintf("%%.%df", p.(int))
+	default:
+		panic("invalid precision type, must be time.Duration or int")
+	}
+	return out
+}
+
+// DefaultPrecisionBundle captures the default precisions for Value and Duration measurements.
+var DefaultPrecisionBundle = PrecisionBundle{
+	Duration:    100 * time.Microsecond,
+	ValueFormat: "%.3f",
+}
+
+type extractedDecorations struct {
+	annotation      Annotation
+	units           Units
+	precisionBundle PrecisionBundle
+	style           Style
+}
+
+func extractDecorations(args []interface{}) extractedDecorations {
+	var out extractedDecorations
+	out.precisionBundle = DefaultPrecisionBundle
+
+	for _, arg := range args {
+		switch reflect.TypeOf(arg) {
+		case reflect.TypeOf(out.annotation):
+			out.annotation = arg.(Annotation)
+		case reflect.TypeOf(out.units):
+			out.units = arg.(Units)
+		case reflect.TypeOf(out.precisionBundle):
+			out.precisionBundle = arg.(PrecisionBundle)
+		case reflect.TypeOf(out.style):
+			out.style = arg.(Style)
+		default:
+			panic(fmt.Sprintf("unrecognized argument %#v", arg))
+		}
+	}
+
+	return out
+}
+
+/*
+Experiment is gmeasure's core data type. You use experiments to record Measurements and generate reports.
+Experiments are thread-safe and all methods can be called from multiple goroutines.
+*/
+type Experiment struct {
+	Name string
+
+	// Measurements includes all Measurements recorded by this experiment. You should access them by name via Get() and GetStats()
+	Measurements Measurements
+	lock         *sync.Mutex
+}
+
+/*
+NewExperiment creates a new experiment with the passed-in name.
+
+When using Ginkgo we recommend immediately registering the experiment as a ReportEntry:
+
+	experiment = NewExperiment("My Experiment")
+	AddReportEntry(experiment.Name, experiment)
+
+this will ensure an experiment report is emitted as part of the test output and exported with any test reports.
+*/
+func NewExperiment(name string) *Experiment {
+	experiment := &Experiment{
+		Name: name,
+		lock: &sync.Mutex{},
+	}
+	return experiment
+}
+
+func (e *Experiment) report(enableStyling bool) string {
+	t := table.NewTable()
+	t.TableStyle.EnableTextStyling = enableStyling
+	t.AppendRow(table.R(
+		table.C("Name"), table.C("N"), table.C("Min"), table.C("Median"), table.C("Mean"), table.C("StdDev"), table.C("Max"),
+		table.Divider("="),
+		"{{bold}}",
+	))
+
+	for _, measurement := range e.Measurements {
+		r := table.R(measurement.Style)
+		t.AppendRow(r)
+		switch measurement.Type {
+		case MeasurementTypeNote:
+			r.AppendCell(table.C(measurement.Note))
+		case MeasurementTypeValue, MeasurementTypeDuration:
+			name := measurement.Name
+			if measurement.Units != "" {
+				name += " [" + measurement.Units + "]"
+			}
+			r.AppendCell(table.C(name))
+			r.AppendCell(measurement.Stats().cells()...)
+		}
+	}
+
+	out := e.Name + "\n"
+	if enableStyling {
+		out = "{{bold}}" + out + "{{/}}"
+	}
+	out += t.Render()
+	return out
+}
+
+/*
+ColorableString returns a Ginkgo formatted summary of the experiment and all its Measurements.
+It is called automatically by Ginkgo's reporting infrastructure when the Experiment is registered as a ReportEntry via AddReportEntry.
+*/
+func (e *Experiment) ColorableString() string {
+	return e.report(true)
+}
+
+/*
+String returns an unformatted summary of the experiment and all its Measurements.
+*/
+func (e *Experiment) String() string {
+	return e.report(false)
+}
+
+/*
+RecordNote records a Measurement of type MeasurementTypeNote - this is simply a textual note to annotate the experiment. It will be emitted in any experiment reports.
+
+RecordNote supports the Style() decoration.
+*/
+func (e *Experiment) RecordNote(note string, args ...interface{}) {
+	decorations := extractDecorations(args)
+
+	e.lock.Lock()
+	defer e.lock.Unlock()
+	e.Measurements = append(e.Measurements, Measurement{
+		ExperimentName: e.Name,
+		Type:           MeasurementTypeNote,
+		Note:           note,
+		Style:          string(decorations.style),
+	})
+}
+
+/*
+RecordDuration records the passed-in duration on a Duration Measurement with the passed-in name. If the Measurement does not exist it is created.
+
+RecordDuration supports the Style(), Precision(), and Annotation() decorations.
+*/
+func (e *Experiment) RecordDuration(name string, duration time.Duration, args ...interface{}) {
+	decorations := extractDecorations(args)
+	e.recordDuration(name, duration, decorations)
+}
+
+/*
+MeasureDuration runs the passed-in callback and times how long it takes to complete. The resulting duration is recorded on a Duration Measurement with the passed-in name. If the Measurement does not exist it is created.
+
+MeasureDuration supports the Style(), Precision(), and Annotation() decorations.
+*/
+func (e *Experiment) MeasureDuration(name string, callback func(), args ...interface{}) time.Duration {
+	t := time.Now()
+	callback()
+	duration := time.Since(t)
+	e.RecordDuration(name, duration, args...)
+	return duration
+}
+
+/*
+SampleDuration samples the passed-in callback and times how long it takes to complete each sample.
+The resulting durations are recorded on a Duration Measurement with the passed-in name. If the Measurement does not exist it is created.
+
+The callback is given a zero-based index that increments by one between samples. The Sampling is configured via the passed-in SamplingConfig.
+
+SampleDuration supports the Style(), Precision(), and Annotation() decorations. When passed an Annotation() the same annotation is applied to all sample measurements.
+*/
+func (e *Experiment) SampleDuration(name string, callback func(idx int), samplingConfig SamplingConfig, args ...interface{}) {
+	decorations := extractDecorations(args)
+	e.Sample(func(idx int) {
+		t := time.Now()
+		callback(idx)
+		duration := time.Since(t)
+		e.recordDuration(name, duration, decorations)
+	}, samplingConfig)
+}
+
+/*
+SampleAnnotatedDuration samples the passed-in callback and times how long it takes to complete each sample.
+The resulting durations are recorded on a Duration Measurement with the passed-in name. If the Measurement does not exist it is created.
+
+The callback is given a zero-based index that increments by one between samples. The callback must return an Annotation - this annotation is attached to the measured duration.
+
+The Sampling is configured via the passed-in SamplingConfig.
+
+SampleAnnotatedDuration supports the Style() and Precision() decorations.
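+
+A sketch of annotated sampling (illustrative only - urls and doFetch stand in for your own code):
+
+	urls := []string{"https://example.com/a", "https://example.com/b"}
+	experiment.SampleAnnotatedDuration("fetch", func(idx int) gmeasure.Annotation {
+		url := urls[idx%len(urls)]
+		doFetch(url) // doFetch is a hypothetical stand-in for the code being timed
+		return gmeasure.Annotation(url)
+	}, gmeasure.SamplingConfig{N: 10})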
+*/
+func (e *Experiment) SampleAnnotatedDuration(name string, callback func(idx int) Annotation, samplingConfig SamplingConfig, args ...interface{}) {
+	decorations := extractDecorations(args)
+	e.Sample(func(idx int) {
+		t := time.Now()
+		decorations.annotation = callback(idx)
+		duration := time.Since(t)
+		e.recordDuration(name, duration, decorations)
+	}, samplingConfig)
+}
+
+func (e *Experiment) recordDuration(name string, duration time.Duration, decorations extractedDecorations) {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+	idx := e.Measurements.IdxWithName(name)
+	if idx == -1 {
+		measurement := Measurement{
+			ExperimentName:  e.Name,
+			Type:            MeasurementTypeDuration,
+			Name:            name,
+			Units:           "duration",
+			Durations:       []time.Duration{duration},
+			PrecisionBundle: decorations.precisionBundle,
+			Style:           string(decorations.style),
+			Annotations:     []string{string(decorations.annotation)},
+		}
+		e.Measurements = append(e.Measurements, measurement)
+	} else {
+		if e.Measurements[idx].Type != MeasurementTypeDuration {
+			panic(fmt.Sprintf("attempting to record duration with name '%s'. That name is already in-use for recording values.", name))
+		}
+		e.Measurements[idx].Durations = append(e.Measurements[idx].Durations, duration)
+		e.Measurements[idx].Annotations = append(e.Measurements[idx].Annotations, string(decorations.annotation))
+	}
+}
+
+/*
+NewStopwatch() returns a stopwatch configured to record duration measurements with this experiment.
+*/
+func (e *Experiment) NewStopwatch() *Stopwatch {
+	return newStopwatch(e)
+}
+
+/*
+RecordValue records the passed-in value on a Value Measurement with the passed-in name. If the Measurement does not exist it is created.
+
+RecordValue supports the Style(), Units(), Precision(), and Annotation() decorations.
+*/
+func (e *Experiment) RecordValue(name string, value float64, args ...interface{}) {
+	decorations := extractDecorations(args)
+	e.recordValue(name, value, decorations)
+}
+
+/*
+MeasureValue runs the passed-in callback and records the return value on a Value Measurement with the passed-in name. If the Measurement does not exist it is created.
+
+MeasureValue supports the Style(), Units(), Precision(), and Annotation() decorations.
+*/
+func (e *Experiment) MeasureValue(name string, callback func() float64, args ...interface{}) float64 {
+	value := callback()
+	e.RecordValue(name, value, args...)
+	return value
+}
+
+/*
+SampleValue samples the passed-in callback and records the return value on a Value Measurement with the passed-in name. If the Measurement does not exist it is created.
+
+The callback is given a zero-based index that increments by one between samples. The callback must return a float64. The Sampling is configured via the passed-in SamplingConfig.
+
+SampleValue supports the Style(), Units(), Precision(), and Annotation() decorations. When passed an Annotation() the same annotation is applied to all sample measurements.
+*/
+func (e *Experiment) SampleValue(name string, callback func(idx int) float64, samplingConfig SamplingConfig, args ...interface{}) {
+	decorations := extractDecorations(args)
+	e.Sample(func(idx int) {
+		value := callback(idx)
+		e.recordValue(name, value, decorations)
+	}, samplingConfig)
+}
+
+/*
+SampleAnnotatedValue samples the passed-in callback and records the return value on a Value Measurement with the passed-in name. If the Measurement does not exist it is created.
+
+The callback is given a zero-based index that increments by one between samples.
+The callback must return a float64 and an Annotation - the annotation is attached to the recorded value.
+
+The Sampling is configured via the passed-in SamplingConfig.
+
+SampleAnnotatedValue supports the Style(), Units(), and Precision() decorations.
+*/
+func (e *Experiment) SampleAnnotatedValue(name string, callback func(idx int) (float64, Annotation), samplingConfig SamplingConfig, args ...interface{}) {
+	decorations := extractDecorations(args)
+	e.Sample(func(idx int) {
+		var value float64
+		value, decorations.annotation = callback(idx)
+		e.recordValue(name, value, decorations)
+	}, samplingConfig)
+}
+
+func (e *Experiment) recordValue(name string, value float64, decorations extractedDecorations) {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+	idx := e.Measurements.IdxWithName(name)
+	if idx == -1 {
+		measurement := Measurement{
+			ExperimentName:  e.Name,
+			Type:            MeasurementTypeValue,
+			Name:            name,
+			Style:           string(decorations.style),
+			Units:           string(decorations.units),
+			PrecisionBundle: decorations.precisionBundle,
+			Values:          []float64{value},
+			Annotations:     []string{string(decorations.annotation)},
+		}
+		e.Measurements = append(e.Measurements, measurement)
+	} else {
+		if e.Measurements[idx].Type != MeasurementTypeValue {
+			panic(fmt.Sprintf("attempting to record value with name '%s'. That name is already in-use for recording durations.", name))
+		}
+		e.Measurements[idx].Values = append(e.Measurements[idx].Values, value)
+		e.Measurements[idx].Annotations = append(e.Measurements[idx].Annotations, string(decorations.annotation))
+	}
+}
+
+/*
+Sample samples the passed-in callback repeatedly. The sampling is governed by the passed-in SamplingConfig.
+
+The SamplingConfig can limit the total number of samples and/or the total time spent sampling the callback.
+The SamplingConfig can also instruct Sample to run with multiple concurrent workers.
+
+The callback is called with a zero-based index that increments by one between samples.
+*/
+func (e *Experiment) Sample(callback func(idx int), samplingConfig SamplingConfig) {
+	if samplingConfig.N == 0 && samplingConfig.Duration == 0 {
+		panic("you must specify at least one of SamplingConfig.N and SamplingConfig.Duration")
+	}
+	if samplingConfig.MinSamplingInterval > 0 && samplingConfig.NumParallel > 1 {
+		panic("you cannot specify both SamplingConfig.MinSamplingInterval and SamplingConfig.NumParallel")
+	}
+	maxTime := time.Now().Add(100000 * time.Hour)
+	if samplingConfig.Duration > 0 {
+		maxTime = time.Now().Add(samplingConfig.Duration)
+	}
+	maxN := math.MaxInt32
+	if samplingConfig.N > 0 {
+		maxN = samplingConfig.N
+	}
+	numParallel := 1
+	if samplingConfig.NumParallel > numParallel {
+		numParallel = samplingConfig.NumParallel
+	}
+	minSamplingInterval := samplingConfig.MinSamplingInterval
+
+	work := make(chan int)
+	if numParallel > 1 {
+		for worker := 0; worker < numParallel; worker++ {
+			go func() {
+				for idx := range work {
+					callback(idx)
+				}
+			}()
+		}
+	}
+
+	idx := 0
+	var avgDt time.Duration
+	for {
+		t := time.Now()
+		if numParallel > 1 {
+			work <- idx
+		} else {
+			callback(idx)
+		}
+		dt := time.Since(t)
+		if numParallel == 1 && dt < minSamplingInterval {
+			time.Sleep(minSamplingInterval - dt)
+			dt = time.Since(t)
+		}
+		if idx >= numParallel {
+			avgDt = (avgDt*time.Duration(idx-numParallel) + dt) / time.Duration(idx-numParallel+1)
+		}
+		idx += 1
+		if idx >= maxN {
+			return
+		}
+		if time.Now().Add(avgDt).After(maxTime) {
+			return
+		}
+	}
+}
+
+/*
+Get returns the Measurement with the associated name.
+If no Measurement is found a zero Measurement{} is returned.
+*/
+func (e *Experiment) Get(name string) Measurement {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+	idx := e.Measurements.IdxWithName(name)
+	if idx == -1 {
+		return Measurement{}
+	}
+	return e.Measurements[idx]
+}
+
+/*
+GetStats returns the Stats for the Measurement with the associated name. If no Measurement is found a zero Stats{} is returned.
+
+experiment.GetStats(name) is equivalent to experiment.Get(name).Stats()
+*/
+func (e *Experiment) GetStats(name string) Stats {
+	measurement := e.Get(name)
+	e.lock.Lock()
+	defer e.lock.Unlock()
+	return measurement.Stats()
+}
diff --git a/vendor/github.com/onsi/gomega/gmeasure/measurement.go b/vendor/github.com/onsi/gomega/gmeasure/measurement.go
new file mode 100644
index 0000000000..103d3ea9d0
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gmeasure/measurement.go
@@ -0,0 +1,235 @@
+package gmeasure
+
+import (
+	"fmt"
+	"math"
+	"sort"
+	"time"
+
+	"github.com/onsi/gomega/gmeasure/table"
+)
+
+type MeasurementType uint
+
+const (
+	MeasurementTypeInvalid MeasurementType = iota
+	MeasurementTypeNote
+	MeasurementTypeDuration
+	MeasurementTypeValue
+)
+
+var letEnumSupport = newEnumSupport(map[uint]string{uint(MeasurementTypeInvalid): "INVALID LOG ENTRY TYPE", uint(MeasurementTypeNote): "Note", uint(MeasurementTypeDuration): "Duration", uint(MeasurementTypeValue): "Value"})
+
+func (s MeasurementType) String() string { return letEnumSupport.String(uint(s)) }
+func (s *MeasurementType) UnmarshalJSON(b []byte) error {
+	out, err := letEnumSupport.UnmarshJSON(b)
+	*s = MeasurementType(out)
+	return err
+}
+func (s MeasurementType) MarshalJSON() ([]byte, error) { return letEnumSupport.MarshJSON(uint(s)) }
+
+/*
+Measurement records all captured data for a given measurement. You generally don't make Measurements directly - but you can fetch them from Experiments using Get().
+
+When using Ginkgo, you can register Measurements as Report Entries via AddReportEntry. This will emit all the captured data points when Ginkgo generates the report.
+*/
+type Measurement struct {
+	// Type is the MeasurementType - one of MeasurementTypeNote, MeasurementTypeDuration, or MeasurementTypeValue
+	Type MeasurementType
+
+	// ExperimentName is the name of the experiment that this Measurement is associated with
+	ExperimentName string
+
+	// If Type is MeasurementTypeNote, Note is populated with the note text.
+	Note string
+
+	// If Type is MeasurementTypeDuration or MeasurementTypeValue, Name is the name of the recorded measurement
+	Name string
+
+	// Style captures the styling information (if any) for this Measurement
+	Style string
+
+	// Units capture the units (if any) for this Measurement. Units is set to "duration" if the Type is MeasurementTypeDuration
+	Units string
+
+	// PrecisionBundle captures the precision to use when rendering data for this Measurement.
+	// If Type is MeasurementTypeDuration then PrecisionBundle.Duration is used to round any durations before presentation.
+	// If Type is MeasurementTypeValue then PrecisionBundle.ValueFormat is used to format any values before presentation
+	PrecisionBundle PrecisionBundle
+
+	// If Type is MeasurementTypeDuration, Durations will contain all durations recorded for this measurement
+	Durations []time.Duration
+
+	// If Type is MeasurementTypeValue, Values will contain all float64s recorded for this measurement
+	Values []float64
+
+	// If Type is MeasurementTypeDuration or MeasurementTypeValue then Annotations will include string annotations for all recorded Durations or Values.
+	// If the user does not pass-in an Annotation() decoration for a particular value or duration, the corresponding entry in the Annotations slice will be the empty string ""
+	Annotations []string
+}
+
+type Measurements []Measurement
+
+func (m Measurements) IdxWithName(name string) int {
+	for idx, measurement := range m {
+		if measurement.Name == name {
+			return idx
+		}
+	}
+
+	return -1
+}
+
+func (m Measurement) report(enableStyling bool) string {
+	out := ""
+	style := m.Style
+	if !enableStyling {
+		style = ""
+	}
+	switch m.Type {
+	case MeasurementTypeNote:
+		out += fmt.Sprintf("%s - Note\n%s\n", m.ExperimentName, m.Note)
+		if style != "" {
+			out = style + out + "{{/}}"
+		}
+		return out
+	case MeasurementTypeValue, MeasurementTypeDuration:
+		out += fmt.Sprintf("%s - %s", m.ExperimentName, m.Name)
+		if m.Units != "" {
+			out += " [" + m.Units + "]"
+		}
+		if style != "" {
+			out = style + out + "{{/}}"
+		}
+		out += "\n"
+		out += m.Stats().String() + "\n"
+	}
+	t := table.NewTable()
+	t.TableStyle.EnableTextStyling = enableStyling
+	switch m.Type {
+	case MeasurementTypeValue:
+		t.AppendRow(table.R(table.C("Value", table.AlignTypeCenter), table.C("Annotation", table.AlignTypeCenter), table.Divider("="), style))
+		for idx := range m.Values {
+			t.AppendRow(table.R(
+				table.C(fmt.Sprintf(m.PrecisionBundle.ValueFormat, m.Values[idx]), table.AlignTypeRight),
+				table.C(m.Annotations[idx], "{{gray}}", table.AlignTypeLeft),
+			))
+		}
+	case MeasurementTypeDuration:
+		t.AppendRow(table.R(table.C("Duration", table.AlignTypeCenter), table.C("Annotation", table.AlignTypeCenter), table.Divider("="), style))
+		for idx := range m.Durations {
+			t.AppendRow(table.R(
+				table.C(m.Durations[idx].Round(m.PrecisionBundle.Duration).String(), style, table.AlignTypeRight),
+				table.C(m.Annotations[idx], "{{gray}}", table.AlignTypeLeft),
+			))
+		}
+	}
+	out += t.Render()
+	return out
+}
+
+/*
+ColorableString generates a styled report that includes all the data points for this Measurement.
+It is called automatically by Ginkgo's reporting infrastructure when the Measurement is registered as a ReportEntry via AddReportEntry.
+*/
+func (m Measurement) ColorableString() string {
+	return m.report(true)
+}
+
+/*
+String generates an unstyled report that includes all the data points for this Measurement.
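+
+For example (sketch; assumes the experiment recorded a measurement named "runtime"):
+
+	m := experiment.Get("runtime")
+	fmt.Println(m.String())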
+*/
+func (m Measurement) String() string {
+	return m.report(false)
+}
+
+/*
+Stats returns a Stats struct summarizing the statistics of this measurement.
+*/
+func (m Measurement) Stats() Stats {
+	if m.Type == MeasurementTypeInvalid || m.Type == MeasurementTypeNote {
+		return Stats{}
+	}
+
+	out := Stats{
+		ExperimentName:  m.ExperimentName,
+		MeasurementName: m.Name,
+		Style:           m.Style,
+		Units:           m.Units,
+		PrecisionBundle: m.PrecisionBundle,
+	}
+
+	switch m.Type {
+	case MeasurementTypeValue:
+		out.Type = StatsTypeValue
+		out.N = len(m.Values)
+		if out.N == 0 {
+			return out
+		}
+		indices, sum := make([]int, len(m.Values)), 0.0
+		for idx, v := range m.Values {
+			indices[idx] = idx
+			sum += v
+		}
+		sort.Slice(indices, func(i, j int) bool {
+			return m.Values[indices[i]] < m.Values[indices[j]]
+		})
+		out.ValueBundle = map[Stat]float64{
+			StatMin:    m.Values[indices[0]],
+			StatMax:    m.Values[indices[out.N-1]],
+			StatMean:   sum / float64(out.N),
+			StatStdDev: 0.0,
+		}
+		out.AnnotationBundle = map[Stat]string{
+			StatMin: m.Annotations[indices[0]],
+			StatMax: m.Annotations[indices[out.N-1]],
+		}
+
+		if out.N%2 == 0 {
+			out.ValueBundle[StatMedian] = (m.Values[indices[out.N/2]] + m.Values[indices[out.N/2-1]]) / 2.0
+		} else {
+			out.ValueBundle[StatMedian] = m.Values[indices[(out.N-1)/2]]
+		}
+
+		for _, v := range m.Values {
+			out.ValueBundle[StatStdDev] += (v - out.ValueBundle[StatMean]) * (v - out.ValueBundle[StatMean])
+		}
+		out.ValueBundle[StatStdDev] = math.Sqrt(out.ValueBundle[StatStdDev] / float64(out.N))
+	case MeasurementTypeDuration:
+		out.Type = StatsTypeDuration
+		out.N = len(m.Durations)
+		if out.N == 0 {
+			return out
+		}
+		indices, sum := make([]int, len(m.Durations)), time.Duration(0)
+		for idx, v := range m.Durations {
+			indices[idx] = idx
+			sum += v
+		}
+		sort.Slice(indices, func(i, j int) bool {
+			return m.Durations[indices[i]] < m.Durations[indices[j]]
+		})
+		out.DurationBundle = map[Stat]time.Duration{
+			StatMin:  m.Durations[indices[0]],
+			StatMax:  m.Durations[indices[out.N-1]],
+			StatMean: sum / time.Duration(out.N),
+		}
+		out.AnnotationBundle = map[Stat]string{
+			StatMin: m.Annotations[indices[0]],
+			StatMax: m.Annotations[indices[out.N-1]],
+		}
+
+		if out.N%2 == 0 {
+			out.DurationBundle[StatMedian] = (m.Durations[indices[out.N/2]] + m.Durations[indices[out.N/2-1]]) / 2
+		} else {
+			out.DurationBundle[StatMedian] = m.Durations[indices[(out.N-1)/2]]
+		}
+		stdDev := 0.0
+		for _, v := range m.Durations {
+			stdDev += float64(v-out.DurationBundle[StatMean]) * float64(v-out.DurationBundle[StatMean])
+		}
+		out.DurationBundle[StatStdDev] = time.Duration(math.Sqrt(stdDev / float64(out.N)))
+	}
+
+	return out
+}
diff --git a/vendor/github.com/onsi/gomega/gmeasure/rank.go b/vendor/github.com/onsi/gomega/gmeasure/rank.go
new file mode 100644
index 0000000000..1544cd8f4d
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gmeasure/rank.go
@@ -0,0 +1,141 @@
+package gmeasure
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/onsi/gomega/gmeasure/table"
+)
+
+/*
+RankingCriteria is an enum representing the criteria by which Stats should be ranked. The enum names should be self-explanatory. e.g. LowerMeanIsBetter means that Stats with lower mean values are considered more beneficial, with the lowest mean being declared the "winner".
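+
+For example (illustrative; statsA and statsB are assumed to be Stats fetched via GetStats()):
+
+	ranking := gmeasure.RankStats(gmeasure.LowerMeanIsBetter, statsA, statsB)
+	winner := ranking.Winner() // the Stats with the lowest mean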
+*/
+type RankingCriteria uint
+
+const (
+	LowerMeanIsBetter RankingCriteria = iota
+	HigherMeanIsBetter
+	LowerMedianIsBetter
+	HigherMedianIsBetter
+	LowerMinIsBetter
+	HigherMinIsBetter
+	LowerMaxIsBetter
+	HigherMaxIsBetter
+)
+
+var rcEnumSupport = newEnumSupport(map[uint]string{uint(LowerMeanIsBetter): "Lower Mean is Better", uint(HigherMeanIsBetter): "Higher Mean is Better", uint(LowerMedianIsBetter): "Lower Median is Better", uint(HigherMedianIsBetter): "Higher Median is Better", uint(LowerMinIsBetter): "Lower Mins is Better", uint(HigherMinIsBetter): "Higher Min is Better", uint(LowerMaxIsBetter): "Lower Max is Better", uint(HigherMaxIsBetter): "Higher Max is Better"})
+
+func (s RankingCriteria) String() string { return rcEnumSupport.String(uint(s)) }
+func (s *RankingCriteria) UnmarshalJSON(b []byte) error {
+	out, err := rcEnumSupport.UnmarshJSON(b)
+	*s = RankingCriteria(out)
+	return err
+}
+func (s RankingCriteria) MarshalJSON() ([]byte, error) { return rcEnumSupport.MarshJSON(uint(s)) }
+
+/*
+Ranking ranks a set of Stats by a specified RankingCriteria. Use RankStats to create a Ranking.
+
+When using Ginkgo, you can register Rankings as Report Entries via AddReportEntry. This will emit a formatted table representing the Stats in rank-order when Ginkgo generates the report.
+*/
+type Ranking struct {
+	Criteria RankingCriteria
+	Stats    []Stats
+}
+
+/*
+RankStats creates a new ranking of the passed-in stats according to the passed-in criteria.
+*/
+func RankStats(criteria RankingCriteria, stats ...Stats) Ranking {
+	sort.Slice(stats, func(i int, j int) bool {
+		switch criteria {
+		case LowerMeanIsBetter:
+			return stats[i].FloatFor(StatMean) < stats[j].FloatFor(StatMean)
+		case HigherMeanIsBetter:
+			return stats[i].FloatFor(StatMean) > stats[j].FloatFor(StatMean)
+		case LowerMedianIsBetter:
+			return stats[i].FloatFor(StatMedian) < stats[j].FloatFor(StatMedian)
+		case HigherMedianIsBetter:
+			return stats[i].FloatFor(StatMedian) > stats[j].FloatFor(StatMedian)
+		case LowerMinIsBetter:
+			return stats[i].FloatFor(StatMin) < stats[j].FloatFor(StatMin)
+		case HigherMinIsBetter:
+			return stats[i].FloatFor(StatMin) > stats[j].FloatFor(StatMin)
+		case LowerMaxIsBetter:
+			return stats[i].FloatFor(StatMax) < stats[j].FloatFor(StatMax)
+		case HigherMaxIsBetter:
+			return stats[i].FloatFor(StatMax) > stats[j].FloatFor(StatMax)
+		}
+		return false
+	})
+
+	out := Ranking{
+		Criteria: criteria,
+		Stats:    stats,
+	}
+
+	return out
+}
+
+/*
+Winner returns the Stats with the most optimal rank based on the specified ranking criteria.
+For example, if the RankingCriteria is LowerMaxIsBetter then the Stats with the lowest value or duration for StatMax will be returned as the "winner".
+*/
+func (c Ranking) Winner() Stats {
+	if len(c.Stats) == 0 {
+		return Stats{}
+	}
+	return c.Stats[0]
+}
+
+func (c Ranking) report(enableStyling bool) string {
+	if len(c.Stats) == 0 {
+		return "Empty Ranking"
+	}
+	t := table.NewTable()
+	t.TableStyle.EnableTextStyling = enableStyling
+	t.AppendRow(table.R(
+		table.C("Experiment"), table.C("Name"), table.C("N"), table.C("Min"), table.C("Median"), table.C("Mean"), table.C("StdDev"), table.C("Max"),
+		table.Divider("="),
+		"{{bold}}",
+	))
+
+	for idx, stats := range c.Stats {
+		name := stats.MeasurementName
+		if stats.Units != "" {
+			name = name + " [" + stats.Units + "]"
+		}
+		experimentName := stats.ExperimentName
+		style := stats.Style
+		if idx == 0 {
+			style = "{{bold}}" + style
+			name += "\n*Winner*"
+			experimentName += "\n*Winner*"
+		}
+		r := table.R(style)
+		t.AppendRow(r)
+		r.AppendCell(table.C(experimentName), table.C(name))
+		r.AppendCell(stats.cells()...)
+
+	}
+	out := fmt.Sprintf("Ranking Criteria: %s\n", c.Criteria)
+	if enableStyling {
+		out = "{{bold}}" + out + "{{/}}"
+	}
+	out += t.Render()
+	return out
+}
+
+/*
+ColorableString generates a styled report that includes a table of the rank-ordered Stats
+It is called automatically by Ginkgo's reporting infrastructure when the Ranking is registered as a ReportEntry via AddReportEntry.
+*/
+func (c Ranking) ColorableString() string {
+	return c.report(true)
+}
+
+/*
+String generates an unstyled report that includes a table of the rank-ordered Stats
+*/
+func (c Ranking) String() string {
+	return c.report(false)
+}
diff --git a/vendor/github.com/onsi/gomega/gmeasure/stats.go b/vendor/github.com/onsi/gomega/gmeasure/stats.go
new file mode 100644
index 0000000000..8c02e1bdf1
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gmeasure/stats.go
@@ -0,0 +1,153 @@
+package gmeasure
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/onsi/gomega/gmeasure/table"
+)
+
+/*
+Stat is an enum representing the statistics you can request of a Stats struct
+*/
+type Stat uint
+
+const (
+	StatInvalid Stat = iota
+	StatMin
+	StatMax
+	StatMean
+	StatMedian
+	StatStdDev
+)
+
+var statEnumSupport = newEnumSupport(map[uint]string{uint(StatInvalid): "INVALID STAT", uint(StatMin): "Min", uint(StatMax): "Max", uint(StatMean): "Mean", uint(StatMedian): "Median", uint(StatStdDev): "StdDev"})
+
+func (s Stat) String() string { return statEnumSupport.String(uint(s)) }
+func (s *Stat) UnmarshalJSON(b []byte) error {
+	out, err := statEnumSupport.UnmarshJSON(b)
+	*s = Stat(out)
+	return err
+}
+func (s Stat) MarshalJSON() ([]byte, error) { return statEnumSupport.MarshJSON(uint(s)) }
+
+type StatsType uint
+
+const (
+	StatsTypeInvalid StatsType = iota
+	StatsTypeValue
+	StatsTypeDuration
+)
+
+var statsTypeEnumSupport = newEnumSupport(map[uint]string{uint(StatsTypeInvalid): "INVALID STATS TYPE", uint(StatsTypeValue): "StatsTypeValue", uint(StatsTypeDuration): "StatsTypeDuration"})
+
+func (s StatsType) String() string { return statsTypeEnumSupport.String(uint(s)) }
+func (s *StatsType) UnmarshalJSON(b []byte) error {
+	out, err := statsTypeEnumSupport.UnmarshJSON(b)
+	*s = StatsType(out)
+	return err
+}
+func (s StatsType) MarshalJSON() ([]byte, error) { return statsTypeEnumSupport.MarshJSON(uint(s)) }
+
+/*
+Stats records the key statistics for a given measurement.
+You generally don't make Stats directly - but you can fetch them from Experiments using GetStats() and from Measurements using Stats().
+
+When using Ginkgo, you can register Measurements as Report Entries via AddReportEntry. This will emit all the captured data points when Ginkgo generates the report.
+*/
+type Stats struct {
+	// Type is the StatsType - one of StatsTypeDuration or StatsTypeValue
+	Type StatsType
+
+	// ExperimentName is the name of the Experiment that recorded the Measurement from which this Stat is derived
+	ExperimentName string
+
+	// MeasurementName is the name of the Measurement from which this Stat is derived
+	MeasurementName string
+
+	// Units captures the Units of the Measurement from which this Stat is derived
+	Units string
+
+	// Style captures the Style of the Measurement from which this Stat is derived
+	Style string
+
+	// PrecisionBundle captures the precision to use when rendering data for this Measurement.
+	// If Type is StatsTypeDuration then PrecisionBundle.Duration is used to round any durations before presentation.
+	// If Type is StatsTypeValue then PrecisionBundle.ValueFormat is used to format any values before presentation
+	PrecisionBundle PrecisionBundle
+
+	// N represents the total number of data points in the Measurement from which this Stat is derived
+	N int
+
+	// If Type is StatsTypeValue, ValueBundle will be populated with float64s representing this Stat's statistics
+	ValueBundle map[Stat]float64
+
+	// If Type is StatsTypeDuration, DurationBundle will be populated with time.Durations representing this Stat's statistics
+	DurationBundle map[Stat]time.Duration
+
+	// AnnotationBundle is populated with Annotations corresponding to the data points that can be associated with a Stat.
+	// For example AnnotationBundle[StatMin] will return the Annotation for the data point that has the minimum value/duration.
+	AnnotationBundle map[Stat]string
+}
+
+// String returns a minimal summary of the stats of the form "MIN < [MEDIAN] | <MEAN> ±STDDEV < MAX"
+func (s Stats) String() string {
+	return fmt.Sprintf("%s < [%s] | <%s> ±%s < %s", s.StringFor(StatMin), s.StringFor(StatMedian), s.StringFor(StatMean), s.StringFor(StatStdDev), s.StringFor(StatMax))
+}
+
+// ValueFor returns the float64 value for a particular Stat. You should only use this if the Stats has Type StatsTypeValue
+// For example:
+//
+//	median := experiment.GetStats("length").ValueFor(gmeasure.StatMedian)
+//
+// will return the median data point for the "length" Measurement.
+func (s Stats) ValueFor(stat Stat) float64 {
+	return s.ValueBundle[stat]
+}
+
+// DurationFor returns the time.Duration for a particular Stat. You should only use this if the Stats has Type StatsTypeDuration
+// For example:
+//
+//	mean := experiment.GetStats("runtime").DurationFor(gmeasure.StatMean)
+//
+// will return the mean duration for the "runtime" Measurement.
+func (s Stats) DurationFor(stat Stat) time.Duration {
+	return s.DurationBundle[stat]
+}
+
+// FloatFor returns a float64 representation of the passed-in Stat.
+// When Type is StatsTypeValue this is equivalent to s.ValueFor(stat).
+// When Type is StatsTypeDuration this is equivalent to float64(s.DurationFor(stat))
+func (s Stats) FloatFor(stat Stat) float64 {
+	switch s.Type {
+	case StatsTypeValue:
+		return s.ValueFor(stat)
+	case StatsTypeDuration:
+		return float64(s.DurationFor(stat))
+	}
+	return 0
+}
+
+// StringFor returns a formatted string representation of the passed-in Stat.
+// The formatting honors the precision directives provided in stats.PrecisionBundle
+func (s Stats) StringFor(stat Stat) string {
+	switch s.Type {
+	case StatsTypeValue:
+		return fmt.Sprintf(s.PrecisionBundle.ValueFormat, s.ValueFor(stat))
+	case StatsTypeDuration:
+		return s.DurationFor(stat).Round(s.PrecisionBundle.Duration).String()
+	}
+	return ""
+}
+
+func (s Stats) cells() []table.Cell {
+	out := []table.Cell{}
+	out = append(out, table.C(fmt.Sprintf("%d", s.N)))
+	for _, stat := range []Stat{StatMin, StatMedian, StatMean, StatStdDev, StatMax} {
+		content := s.StringFor(stat)
+		if s.AnnotationBundle[stat] != "" {
+			content += "\n" + s.AnnotationBundle[stat]
+		}
+		out = append(out, table.C(content))
+	}
+	return out
+}
diff --git a/vendor/github.com/onsi/gomega/gmeasure/stopwatch.go b/vendor/github.com/onsi/gomega/gmeasure/stopwatch.go
new file mode 100644
index 0000000000..634f11f2a4
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gmeasure/stopwatch.go
@@ -0,0 +1,117 @@
+package gmeasure
+
+import "time"
+
+/*
+Stopwatch provides a convenient abstraction for recording durations. There are two ways to make a Stopwatch:
+
+You can make a Stopwatch from an Experiment via experiment.NewStopwatch(). This is how you first get a hold of a Stopwatch.
+
+You can subsequently call stopwatch.NewStopwatch() to get a fresh Stopwatch.
+This is only necessary if you need to record durations on a different goroutine as a single Stopwatch is not considered thread-safe.
+
+The Stopwatch starts as soon as it is created. You can Pause() the stopwatch and Reset() it as needed.
+
+Stopwatches refer back to their parent Experiment. They use this reference to record any measured durations back with the Experiment.
+*/
+type Stopwatch struct {
+	Experiment    *Experiment
+	t             time.Time
+	pauseT        time.Time
+	pauseDuration time.Duration
+	running       bool
+}
+
+func newStopwatch(experiment *Experiment) *Stopwatch {
+	return &Stopwatch{
+		Experiment: experiment,
+		t:          time.Now(),
+		running:    true,
+	}
+}
+
+/*
+NewStopwatch returns a new Stopwatch pointing to the same Experiment as this Stopwatch
+*/
+func (s *Stopwatch) NewStopwatch() *Stopwatch {
+	return newStopwatch(s.Experiment)
+}
+
+/*
+Record captures the amount of time that has passed since the Stopwatch was created or most recently Reset(). It records the duration on its associated Experiment in a Measurement with the passed-in name.
+
+Record takes all the decorators that experiment.RecordDuration takes (e.g. Annotation("...") can be used to annotate this duration)
+
+Note that Record does not Reset the Stopwatch. It does, however, return the Stopwatch so the following pattern is common:
+
+	stopwatch := experiment.NewStopwatch()
+	// first expensive operation
+	stopwatch.Record("first operation").Reset() //records the duration of the first operation and resets the stopwatch.
+	// second expensive operation
+	stopwatch.Record("second operation").Reset() //records the duration of the second operation and resets the stopwatch.
+
+omitting the Reset() after the first operation would cause the duration recorded for the second operation to include the time elapsed by both the first _and_ second operations.
+
+The Stopwatch must be running (i.e. not paused) when Record is called.
+*/
+func (s *Stopwatch) Record(name string, args ...interface{}) *Stopwatch {
+	if !s.running {
+		panic("stopwatch is not running - call Resume or Reset before calling Record")
+	}
+	duration := time.Since(s.t) - s.pauseDuration
+	s.Experiment.RecordDuration(name, duration, args...)
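+	// the stopwatch keeps running after recording; chain .Reset() if the next leg should be timed from zero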
+	return s
+}
+
+/*
+Reset resets the Stopwatch. Subsequent recorded durations will measure the time elapsed from the moment Reset was called.
+If the Stopwatch was Paused it is unpaused after calling Reset.
+*/
+func (s *Stopwatch) Reset() *Stopwatch {
+	s.running = true
+	s.t = time.Now()
+	s.pauseDuration = 0
+	return s
+}
+
+/*
+Pause pauses the Stopwatch. While paused the Stopwatch does not accumulate elapsed time. This is useful for ignoring expensive operations that are incidental to the behavior you are attempting to characterize.
+Note: You must call Resume() before you can Record() subsequent measurements.
+
+For example:
+
+	stopwatch := experiment.NewStopwatch()
+	// first expensive operation
+	stopwatch.Record("first operation").Reset()
+	// second expensive operation - part 1
+	stopwatch.Pause()
+	// something expensive that we don't care about
+	stopwatch.Resume()
+	// second expensive operation - part 2
+	stopwatch.Record("second operation").Reset() // the recorded duration captures the time elapsed during parts 1 and 2 of the second expensive operation, but not the bit in between
+
+The Stopwatch must be running when Pause is called.
+*/
+func (s *Stopwatch) Pause() *Stopwatch {
+	if !s.running {
+		panic("stopwatch is not running - call Resume or Reset before calling Pause")
+	}
+	s.running = false
+	s.pauseT = time.Now()
+	return s
+}
+
+/*
+Resume resumes a paused Stopwatch. Any time that elapses after Resume is called will be accumulated as elapsed time when a subsequent duration is Recorded.
+
+The Stopwatch must be Paused when Resume is called
+*/
+func (s *Stopwatch) Resume() *Stopwatch {
+	if s.running {
+		panic("stopwatch is running - call Pause before calling Resume")
+	}
+	s.running = true
+	s.pauseDuration = s.pauseDuration + time.Since(s.pauseT)
+	return s
+}
diff --git a/vendor/github.com/onsi/gomega/gmeasure/table/table.go b/vendor/github.com/onsi/gomega/gmeasure/table/table.go
new file mode 100644
index 0000000000..f980b9c7aa
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gmeasure/table/table.go
@@ -0,0 +1,370 @@
+package table
+
+// This is a temporary package - Table will move to github.com/onsi/consolable once some more dust settles
+
+import (
+	"reflect"
+	"strings"
+	"unicode/utf8"
+)
+
+type AlignType uint
+
+const (
+	AlignTypeLeft AlignType = iota
+	AlignTypeCenter
+	AlignTypeRight
+)
+
+type Divider string
+
+type Row struct {
+	Cells   []Cell
+	Divider string
+	Style   string
+}
+
+func R(args ...interface{}) *Row {
+	r := &Row{
+		Divider: "-",
+	}
+	for _, arg := range args {
+		switch reflect.TypeOf(arg) {
+		case reflect.TypeOf(Divider("")):
+			r.Divider = string(arg.(Divider))
+		case reflect.TypeOf(r.Style):
+			r.Style = arg.(string)
+		case reflect.TypeOf(Cell{}):
+			r.Cells = append(r.Cells, arg.(Cell))
+		}
+	}
+	return r
+}
+
+func (r *Row) AppendCell(cells ...Cell) *Row {
+	r.Cells = append(r.Cells, cells...)
+	return r
+}
+
+func (r *Row) Render(widths []int, totalWidth int, tableStyle TableStyle, isLastRow bool) string {
+	out := ""
+	if len(r.Cells) == 1 {
+		out += strings.Join(r.Cells[0].render(totalWidth, r.Style, tableStyle), "\n") + "\n"
+	} else {
+		if len(r.Cells) != len(widths) {
+			panic("row vs width mismatch")
+		}
+		renderedCells := make([][]string, len(r.Cells))
+		maxHeight := 0
+		for colIdx, cell := range r.Cells {
+			renderedCells[colIdx] = cell.render(widths[colIdx], r.Style, tableStyle)
+			if len(renderedCells[colIdx]) > maxHeight {
+				maxHeight = len(renderedCells[colIdx])
+			}
+		}
+		for colIdx := range r.Cells {
+			for len(renderedCells[colIdx]) < maxHeight {
+				renderedCells[colIdx] = append(renderedCells[colIdx], strings.Repeat(" ", widths[colIdx]))
+			}
+		}
+		border := strings.Repeat(" ", tableStyle.Padding)
+		if tableStyle.VerticalBorders {
+			border += "|" + border
+		}
+		for lineIdx := 0; lineIdx < maxHeight; lineIdx++ {
+			for colIdx := range r.Cells {
+				out += renderedCells[colIdx][lineIdx]
+				if colIdx < len(r.Cells)-1 {
+					out += border
+				}
+			}
+			out += "\n"
+		}
+	}
+	if tableStyle.HorizontalBorders && !isLastRow && r.Divider != "" {
+		out += strings.Repeat(string(r.Divider), totalWidth) + "\n"
+	}
+
+	return out
+}
+
+type Cell struct {
+	Contents []string
+	Style    string
+	Align    AlignType
+}
+
+func C(contents string, args ...interface{}) Cell {
+	c := Cell{
+		Contents: strings.Split(contents, "\n"),
+	}
+	for _, arg := range args {
+		switch reflect.TypeOf(arg) {
+		case reflect.TypeOf(c.Style):
+			c.Style = arg.(string)
+		case reflect.TypeOf(c.Align):
+			c.Align = arg.(AlignType)
+		}
+	}
+	return c
+}
+
+func (c Cell) Width() (int, int) {
+	w, minW := 0, 0
+	for _, line := range c.Contents {
+		lineWidth := utf8.RuneCountInString(line)
+		if lineWidth > w {
+			w = lineWidth
+		}
+		for _, word := range strings.Split(line, " ") {
+			wordWidth := utf8.RuneCountInString(word)
+			if wordWidth > minW {
+				minW = wordWidth
+			}
+		}
+	}
+	return w, minW
+}
+
+func (c Cell) alignLine(line string, width int) string {
+	lineWidth := utf8.RuneCountInString(line)
+	if lineWidth == width {
+		return line
+	}
+	if lineWidth < width {
+		gap := width - lineWidth
+		switch c.Align {
+		case AlignTypeLeft:
+			return line + strings.Repeat(" ", gap)
+		case AlignTypeRight:
+			return strings.Repeat(" ", gap) + line
+		case AlignTypeCenter:
+			leftGap := gap / 2
+			rightGap := gap - leftGap
+			return strings.Repeat(" ", leftGap) + line + strings.Repeat(" ", rightGap)
+		}
+	}
+	return line
+}
+
+func (c Cell) splitWordToWidth(word string, width int) []string {
+	out := []string{}
+	n, subWord := 0, ""
+	for _, c := range word {
+		subWord += string(c)
+		n += 1
+		if n == width-1 {
+			out = append(out, subWord+"-")
+			n, subWord = 0, ""
+		}
+	}
+	return out
+}
+
+func (c Cell) splitToWidth(line string, width int) []string {
+	lineWidth := utf8.RuneCountInString(line)
+	if lineWidth <= width {
+		return []string{line}
+	}
+
+	outLines := []string{}
+	words := strings.Split(line, " ")
+	outWords := []string{words[0]}
+	length := utf8.RuneCountInString(words[0])
+	if length > width {
+		splitWord := c.splitWordToWidth(words[0], width)
+		lastIdx := len(splitWord) - 1
+		outLines = append(outLines, splitWord[:lastIdx]...)
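+		// carry the last fragment forward as the start of the next output line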
+ outWords = []string{splitWord[lastIdx]} + length = utf8.RuneCountInString(splitWord[lastIdx]) + } + + for _, word := range words[1:] { + wordLength := utf8.RuneCountInString(word) + if length+wordLength+1 <= width { + length += wordLength + 1 + outWords = append(outWords, word) + continue + } + outLines = append(outLines, strings.Join(outWords, " ")) + + outWords = []string{word} + length = wordLength + if length > width { + splitWord := c.splitWordToWidth(word, width) + lastIdx := len(splitWord) - 1 + outLines = append(outLines, splitWord[:lastIdx]...) + outWords = []string{splitWord[lastIdx]} + length = utf8.RuneCountInString(splitWord[lastIdx]) + } + } + if len(outWords) > 0 { + outLines = append(outLines, strings.Join(outWords, " ")) + } + + return outLines +} + +func (c Cell) render(width int, style string, tableStyle TableStyle) []string { + out := []string{} + for _, line := range c.Contents { + out = append(out, c.splitToWidth(line, width)...) + } + for idx := range out { + out[idx] = c.alignLine(out[idx], width) + } + + if tableStyle.EnableTextStyling { + style = style + c.Style + if style != "" { + for idx := range out { + out[idx] = style + out[idx] + "{{/}}" + } + } + } + + return out +} + +type TableStyle struct { + Padding int + VerticalBorders bool + HorizontalBorders bool + MaxTableWidth int + MaxColWidth int + EnableTextStyling bool +} + +var DefaultTableStyle = TableStyle{ + Padding: 1, + VerticalBorders: true, + HorizontalBorders: true, + MaxTableWidth: 120, + MaxColWidth: 40, + EnableTextStyling: true, +} + +type Table struct { + Rows []*Row + + TableStyle TableStyle +} + +func NewTable() *Table { + return &Table{ + TableStyle: DefaultTableStyle, + } +} + +func (t *Table) AppendRow(row *Row) *Table { + t.Rows = append(t.Rows, row) + return t +} + +func (t *Table) Render() string { + out := "" + totalWidth, widths := t.computeWidths() + for rowIdx, row := range t.Rows { + out += row.Render(widths, totalWidth, t.TableStyle, rowIdx == len(t.Rows)-1) + } + return out +} + +func (t *Table) computeWidths() (int, []int) { + nCol := 0 + for _, row := range t.Rows { + if len(row.Cells) > nCol { + nCol = len(row.Cells) + } + } + + // lets compute the contribution to width from the borders + padding + borderWidth := t.TableStyle.Padding + if t.TableStyle.VerticalBorders { + borderWidth += 1 + t.TableStyle.Padding + } + totalBorderWidth := borderWidth * (nCol - 1) + + // lets compute the width of each column + widths := make([]int, nCol) + minWidths := make([]int, nCol) + for colIdx := range widths { + for _, row := range t.Rows { + if colIdx >= len(row.Cells) { + // ignore rows with fewer columns + continue + } + w, minWid := row.Cells[colIdx].Width() + if w > widths[colIdx] { + widths[colIdx] = w + } + if minWid > minWidths[colIdx] { + minWidths[colIdx] = minWid + } + } + } + + // do we already fit? + if sum(widths)+totalBorderWidth <= t.TableStyle.MaxTableWidth { + // yes! we're done + return sum(widths) + totalBorderWidth, widths + } + + // clamp the widths and minWidths to MaxColWidth + for colIdx := range widths { + widths[colIdx] = min(widths[colIdx], t.TableStyle.MaxColWidth) + minWidths[colIdx] = min(minWidths[colIdx], t.TableStyle.MaxColWidth) + } + + // do we fit now? + if sum(widths)+totalBorderWidth <= t.TableStyle.MaxTableWidth { + // yes! we're done + return sum(widths) + totalBorderWidth, widths + } + + // hmm... still no... can we possibly squeeze the table in without violating minWidths? 
+ if sum(minWidths)+totalBorderWidth >= t.TableStyle.MaxTableWidth { + // nope - we're just going to have to exceed MaxTableWidth + return sum(minWidths) + totalBorderWidth, minWidths + } + + // looks like we don't fit yet, but we should be able to fit without violating minWidths + // lets start scaling down + n := 0 + for sum(widths)+totalBorderWidth > t.TableStyle.MaxTableWidth { + budget := t.TableStyle.MaxTableWidth - totalBorderWidth + baseline := sum(widths) + + for colIdx := range widths { + widths[colIdx] = max((widths[colIdx]*budget)/baseline, minWidths[colIdx]) + } + n += 1 + if n > 100 { + break // in case we somehow fail to converge + } + } + + return sum(widths) + totalBorderWidth, widths +} + +func sum(s []int) int { + out := 0 + for _, v := range s { + out += v + } + return out +} + +func min(a int, b int) int { + if a < b { + return a + } + return b +} + +func max(a int, b int) int { + if a > b { + return a + } + return b +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 4fd3769a2e..565a0ceb41 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -92,6 +92,8 @@ github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/gbytes github.com/onsi/gomega/gexec +github.com/onsi/gomega/gmeasure +github.com/onsi/gomega/gmeasure/table github.com/onsi/gomega/internal github.com/onsi/gomega/internal/gutil github.com/onsi/gomega/matchers From 52e0c3dca8084f8a29fefb2565bae6a6beb23c71 Mon Sep 17 00:00:00 2001 From: Adelina Tuvenie Date: Mon, 11 Oct 2021 19:31:29 +0300 Subject: [PATCH 2/9] Rewrite pod.go --- pkg/benchmark/container.go | 135 +++++++++++++++++-------------------- pkg/benchmark/pod.go | 116 +++++++++++++++---------------- 2 files changed, 122 insertions(+), 129 deletions(-) diff --git a/pkg/benchmark/container.go b/pkg/benchmark/container.go index 0b53d98671..0d4481f8d8 100644 --- a/pkg/benchmark/container.go +++ b/pkg/benchmark/container.go @@ -1,5 +1,8 @@ +//go:build container +// +build container + /* -Copyright 2017 The Kubernetes Authors. +Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,14 +20,22 @@ limitations under the License. package benchmark import ( + "encoding/json" + "fmt" + "io/ioutil" + "github.com/kubernetes-sigs/cri-tools/pkg/framework" internalapi "k8s.io/cri-api/pkg/apis" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gmeasure" ) +type ContainerExperimentData struct { + CreateContainer, StatusContainer, StopContainer, RemoveContainer, StartContainer string +} + var _ = framework.KubeDescribe("Container", func() { f := framework.NewDefaultCRIFramework() @@ -37,84 +48,64 @@ var _ = framework.KubeDescribe("Container", func() { }) Context("benchmark about operations on Container", func() { - var podID string - var podConfig *runtimeapi.PodSandboxConfig - - BeforeEach(func() { - podID, podConfig = framework.CreatePodSandboxForContainer(rc) - }) - - AfterEach(func() { - By("stop PodSandbox") - rc.StopPodSandbox(podID) - By("delete PodSandbox") - rc.RemovePodSandbox(podID) - }) - - Measure("benchmark about basic operations on Container", func(b Benchmarker) { - var containerID string - var err error - - operation := b.Time("create Container", func() { - By("benchmark about creating Container") - containerID = framework.CreateDefaultContainer(rc, ic, podID, podConfig, "Container-for-creating-benchmark-") - }) - Expect(operation.Seconds()).Should(BeNumerically("<", 2), "create Container shouldn't take too long.") - - operation = b.Time("start Container", func() { - By("benchmark about starting Container") + It("benchmark about basic operations on Container", func() { + experiment := gmeasure.NewExperiment("ContainerOps") + experiment.Sample(func(idx int) { + var podID string + var podConfig *runtimeapi.PodSandboxConfig + var containerID string + var err error + + podID, podConfig = framework.CreatePodSandboxForContainer(rc) + + By("CreatingContainer") + stopwatch := experiment.NewStopwatch() + stopwatch.Reset() + containerID = framework.CreateDefaultContainer(rc, ic, podID, podConfig, "Benchmark-container-") + stopwatch.Record("CreateContainer") + + By("StartingContainer") + stopwatch.Reset() err = rc.StartContainer(containerID) - }) + stopwatch.Record("StartContainer") + framework.ExpectNoError(err, "failed to start Container: %v", err) - framework.ExpectNoError(err, "failed to start Container: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", 2), "start Container shouldn't take too long.") + By("ContainerStatus") + stopwatch.Reset() + _, err = rc.ContainerStatus(containerID) + stopwatch.Record("StatusContainer") + framework.ExpectNoError(err, "failed to get Container status: %v", err) - operation = b.Time("Container status", func() { - By("benchmark about getting Container status") - _, err = rc.ContainerStatus(containerID, false) - }) - - framework.ExpectNoError(err, "failed to get Container status: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", 2), "get container status shouldn't take too long.") - - operation = b.Time("stop Container", func() { - By("benchmark about stoping Container") + By("ContainerStop") + stopwatch.Reset() err = rc.StopContainer(containerID, framework.DefaultStopContainerTimeout) - }) - - framework.ExpectNoError(err, "failed to stop Container: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", 2), "stop Container shouldn't take too long.") + stopwatch.Record("StopContainer") + framework.ExpectNoError(err, "failed to stop Container: %v", err) - operation = b.Time("remove Container", func() { - By("benchmark about removing Container") + By("ContainerRemove") + stopwatch.Reset() err = rc.RemoveContainer(containerID) - }) - - framework.ExpectNoError(err, "failed to remove Container: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", 2), "remove Container shouldn't take too 
long.") - - }, defaultOperationTimes) - - Measure("benchmark about listing Container", func(b Benchmarker) { - containerList := make([]string, 0, framework.TestContext.Number) - var err error - - for i := 0; i < framework.TestContext.Number; i++ { - containerID := framework.CreateDefaultContainer(rc, ic, podID, podConfig, "Container-for-listing-benchmark-") - containerList = append(containerList, containerID) + stopwatch.Record("RemoveContainer") + framework.ExpectNoError(err, "failed to remove Container: %v", err) + + By("stop PodSandbox") + rc.StopPodSandbox(podID) + By("delete PodSandbox") + rc.RemovePodSandbox(podID) + + }, gmeasure.SamplingConfig{N: 200, NumParallel: 1}) + + data := ContainerExperimentData{ + CreateContainer: fmt.Sprintf("%v", experiment.Get("CreateContainer").Durations), + StatusContainer: fmt.Sprintf("%v", experiment.Get("StatusContainer").Durations), + StopContainer: fmt.Sprintf("%v", experiment.Get("StopContainer").Durations), + RemoveContainer: fmt.Sprintf("%v", experiment.Get("RemoveContainer").Durations), + StartContainer: fmt.Sprintf("%v", experiment.Get("StartContainer").Durations), } - operation := b.Time("list Container", func() { - _, err = rc.ListContainers(nil) - }) - - framework.ExpectNoError(err, "failed to list Container: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", 2), "list Container shouldn't take too long.") + file, _ := json.MarshalIndent(data, "", " ") + _ = ioutil.WriteFile("c:/experiment_container.json", file, 0644) + }) - for _, containerID := range containerList { - rc.StopContainer(containerID, framework.DefaultStopContainerTimeout) - rc.RemoveContainer(containerID) - } - }, defaultOperationTimes) }) }) diff --git a/pkg/benchmark/pod.go b/pkg/benchmark/pod.go index a93a7b269b..238d3fbc0a 100644 --- a/pkg/benchmark/pod.go +++ b/pkg/benchmark/pod.go @@ -1,5 +1,8 @@ +//go:build pod +// +build pod + /* -Copyright 2017 The Kubernetes Authors. +Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,7 +20,12 @@ limitations under the License. 
package benchmark import ( + "encoding/json" + "fmt" + "io/ioutil" + "github.com/kubernetes-sigs/cri-tools/pkg/framework" + "github.com/onsi/gomega/gmeasure" internalapi "k8s.io/cri-api/pkg/apis" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" @@ -29,6 +37,10 @@ const ( defaultOperationTimes int = 20 ) +type ExperimentData struct { + CreatePod, StatusPod, StopPod, RemovePod string +} + var _ = framework.KubeDescribe("PodSandbox", func() { f := framework.NewDefaultCRIFramework() @@ -39,70 +51,60 @@ var _ = framework.KubeDescribe("PodSandbox", func() { }) Context("benchmark about operations on PodSandbox", func() { - Measure("benchmark about lifecycle of PodSandbox", func(b Benchmarker) { - var podID string - var err error - - podSandboxName := "PodSandbox-for-creating-performance-test-" + framework.NewUUID() - uid := framework.DefaultUIDPrefix + framework.NewUUID() - namespace := framework.DefaultNamespacePrefix + framework.NewUUID() - - config := &runtimeapi.PodSandboxConfig{ - Metadata: framework.BuildPodSandboxMetadata(podSandboxName, uid, namespace, framework.DefaultAttempt), - Linux: &runtimeapi.LinuxPodSandboxConfig{}, - Labels: framework.DefaultPodLabels, - } + It("benchmark about lifecycle of PodSandbox", func() { - // TODO: add support of runtime - operation := b.Time("create PodSandbox", func() { - podID, err = c.RunPodSandbox(config, framework.TestContext.RuntimeHandler) - }) - - framework.ExpectNoError(err, "failed to create PodSandbox: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", 5), "create PodSandbox shouldn't take too long.") + experiment := gmeasure.NewExperiment("PodLifecycle") + experiment.Sample(func(idx int) { + var podID string + var err error - operation = b.Time("PodSandbox status", func() { - _, err = c.PodSandboxStatus(podID, false) - }) + podSandboxName := "PodSandbox-for-creating-performance-test-" + framework.NewUUID() + uid := framework.DefaultUIDPrefix + framework.NewUUID() + namespace := framework.DefaultNamespacePrefix + framework.NewUUID() - framework.ExpectNoError(err, "failed to get PodSandbox status: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", 5), "get PodSandbox status shouldn't take too long.") - - operation = b.Time("stop PodSandbox", func() { - err = c.StopPodSandbox(podID) - }) + config := &runtimeapi.PodSandboxConfig{ + Metadata: framework.BuildPodSandboxMetadata(podSandboxName, uid, namespace, framework.DefaultAttempt), + Linux: &runtimeapi.LinuxPodSandboxConfig{}, + Labels: framework.DefaultPodLabels, + } - framework.ExpectNoError(err, "failed to stop PodSandbox: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", 5), "stop PodSandbox shouldn't take too long.") + By("Creating a pod") + stopwatch := experiment.NewStopwatch() - operation = b.Time("remove PodSandbox", func() { - c.RemovePodSandbox(podID) - }) - - framework.ExpectNoError(err, "failed to remove PodSandbox: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", 5), "remove PodSandbox shouldn't take too long.") - }, defaultOperationTimes) + podID, err = c.RunPodSandbox(config, framework.TestContext.RuntimeHandler) + stopwatch.Record("CreatePod") + framework.ExpectNoError(err, "failed to create PodSandbox: %v", err) - Measure("benchmark about listing PodSandbox", func(b Benchmarker) { - podList := make([]string, 0, framework.TestContext.Number) - var err error + By("Get Pod status") + stopwatch.Reset() + _, err = c.PodSandboxStatus(podID) + stopwatch.Record("StatusPod") + framework.ExpectNoError(err, "failed to get 
PodStatus: %v", err) - for i := 0; i < framework.TestContext.Number; i++ { - podID := framework.RunDefaultPodSandbox(c, "PodSandbox-for-list-benchmark-") - podList = append(podList, podID) + By("Stop PodSandbox") + stopwatch.Reset() + err = c.StopPodSandbox(podID) + stopwatch.Record("StopPod") + framework.ExpectNoError(err, "failed to stop PodSandbox: %v", err) + + By("Remove PodSandbox") + stopwatch.Reset() + err = c.RemovePodSandbox(podID) + stopwatch.Record("RemovePod") + framework.ExpectNoError(err, "failed to remove PodSandbox: %v", err) + + }, gmeasure.SamplingConfig{N: 1000, NumParallel: 1}) + + data := ExperimentData{ + CreatePod: fmt.Sprintf("%v", experiment.Get("CreatePod").Durations), + StatusPod: fmt.Sprintf("%v", experiment.Get("StatusPod").Durations), + StopPod: fmt.Sprintf("%v", experiment.Get("StopPod").Durations), + RemovePod: fmt.Sprintf("%v", experiment.Get("RemovePod").Durations), } - operation := b.Time("list PodSandbox", func() { - _, err = c.ListPodSandbox(nil) - }) - - framework.ExpectNoError(err, "failed to list PodSandbox: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", 5), "list PodSandbox shouldn't take too long.") - - for _, podID := range podList { - c.StopPodSandbox(podID) - c.RemovePodSandbox(podID) - } - }, defaultOperationTimes) + file, _ := json.MarshalIndent(data, "", " ") + _ = ioutil.WriteFile("c:/experiment_old_hcsshim.json", file, 0644) + }) }) + }) From 2dd37ab89e16ab9aabefc03f64c82b446e9a3f17 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Fri, 7 Jan 2022 23:56:41 +0200 Subject: [PATCH 3/9] Parametrize benchmark settings. Signed-off-by: Nashwan Azhari --- cmd/critest/cri_test.go | 1 - pkg/benchmark/benchmark.go | 9 ++++++ pkg/benchmark/container.go | 38 ++++++++++++++-------- pkg/benchmark/pod.go | 37 +++++++++++++-------- pkg/framework/test_context.go | 60 +++++++++++++++++++++++++---------- pkg/framework/util.go | 21 +++++++++++- 6 files changed, 120 insertions(+), 46 deletions(-) diff --git a/cmd/critest/cri_test.go b/cmd/critest/cri_test.go index a1bc513d79..189ae8858a 100644 --- a/cmd/critest/cri_test.go +++ b/cmd/critest/cri_test.go @@ -174,7 +174,6 @@ func TestCRISuite(t *testing.T) { flag.Set("ginkgo.focus", "benchmark") flag.Set("ginkgo.succinct", "true") } else { - // Skip benchmark measurements for validation tests. flag.Set("ginkgo.skipMeasurements", "true") } if *parallel > 1 { diff --git a/pkg/benchmark/benchmark.go b/pkg/benchmark/benchmark.go index ce65f5ef1a..43a5260514 100644 --- a/pkg/benchmark/benchmark.go +++ b/pkg/benchmark/benchmark.go @@ -33,6 +33,15 @@ import ( . "github.com/onsi/gomega" ) +// Transforms a slice of `time.Duration`s into their `int64` nanosecond representations. +func getNanosecondsForDurations(durations []time.Duration) []int64 { + var ns []int64 + for _, duration := range durations { + ns = append(ns, duration.Nanoseconds()) + } + return ns +} + // TestPerformance checks configuration parameters (specified through flags) and then runs // benchmark tests using the Ginkgo runner. // If a "report directory" is specified, one or more JUnit test reports will be diff --git a/pkg/benchmark/container.go b/pkg/benchmark/container.go index 0d4481f8d8..76893988e9 100644 --- a/pkg/benchmark/container.go +++ b/pkg/benchmark/container.go @@ -1,6 +1,3 @@ -//go:build container -// +build container - /* Copyright 2021 The Kubernetes Authors. 
@@ -21,19 +18,20 @@ package benchmark
 
 import (
 	"encoding/json"
-	"fmt"
 	"io/ioutil"
+	"path"
 
+	"github.com/golang/glog"
 	"github.com/kubernetes-sigs/cri-tools/pkg/framework"
 	internalapi "k8s.io/cri-api/pkg/apis"
-	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
 
 	. "github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega/gmeasure"
 )
 
 type ContainerExperimentData struct {
-	CreateContainer, StatusContainer, StopContainer, RemoveContainer, StartContainer string
+	CreateContainer, StatusContainer, StopContainer, RemoveContainer, StartContainer []int64
 }
 
 var _ = framework.KubeDescribe("Container", func() {
@@ -93,18 +91,30 @@ var _ = framework.KubeDescribe("Container", func() {
 				By("delete PodSandbox")
 				rc.RemovePodSandbox(podID)
 
-			}, gmeasure.SamplingConfig{N: 200, NumParallel: 1})
+			}, gmeasure.SamplingConfig{N: framework.TestContext.BenchmarkingParams.ContainersNumber, NumParallel: framework.TestContext.BenchmarkingParams.ContainersNumberParallel})
 
 			data := ContainerExperimentData{
-				CreateContainer: fmt.Sprintf("%v", experiment.Get("CreateContainer").Durations),
-				StatusContainer: fmt.Sprintf("%v", experiment.Get("StatusContainer").Durations),
-				StopContainer:   fmt.Sprintf("%v", experiment.Get("StopContainer").Durations),
-				RemoveContainer: fmt.Sprintf("%v", experiment.Get("RemoveContainer").Durations),
-				StartContainer:  fmt.Sprintf("%v", experiment.Get("StartContainer").Durations),
+				CreateContainer: getNanosecondsForDurations(experiment.Get("CreateContainer").Durations),
+				StartContainer:  getNanosecondsForDurations(experiment.Get("StartContainer").Durations),
+				StatusContainer: getNanosecondsForDurations(experiment.Get("StatusContainer").Durations),
+				StopContainer:   getNanosecondsForDurations(experiment.Get("StopContainer").Durations),
+				RemoveContainer: getNanosecondsForDurations(experiment.Get("RemoveContainer").Durations),
 			}
 
-			file, _ := json.MarshalIndent(data, "", " ")
-			_ = ioutil.WriteFile("c:/experiment_container.json", file, 0644)
+			if framework.TestContext.BenchmarkingOutputDir != "" {
+				filepath := path.Join(framework.TestContext.BenchmarkingOutputDir, "container_benchmark_data.json")
+				data, err := json.MarshalIndent(data, "", " ")
+				if err == nil {
+					err = ioutil.WriteFile(filepath, data, 0644)
+					if err != nil {
+						glog.Errorf("Failed to write container benchmark data: %v", filepath)
+					}
+				} else {
+					glog.Errorf("Failed to serialize benchmark data: %v", err)
+				}
+			} else {
+				glog.Infof("No benchmarking output dir provided, skipping writing benchmarking results.")
+			}
 		})
 	})
diff --git a/pkg/benchmark/pod.go b/pkg/benchmark/pod.go
index 238d3fbc0a..a825a5f231 100644
--- a/pkg/benchmark/pod.go
+++ b/pkg/benchmark/pod.go
@@ -1,6 +1,3 @@
-//go:build pod
-// +build pod
-
 /*
 Copyright 2021 The Kubernetes Authors.
 
@@ -21,16 +18,16 @@ package benchmark
 
 import (
 	"encoding/json"
-	"fmt"
 	"io/ioutil"
+	"path"
 
+	"github.com/golang/glog"
 	"github.com/kubernetes-sigs/cri-tools/pkg/framework"
-	"github.com/onsi/gomega/gmeasure"
 	internalapi "k8s.io/cri-api/pkg/apis"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
 
 	. "github.com/onsi/ginkgo/v2"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/gomega/gmeasure"
 )
 
 const (
@@ -38,7 +35,7 @@ const (
 )
 
 type ExperimentData struct {
-	CreatePod, StatusPod, StopPod, RemovePod string
+	CreatePod, StatusPod, StopPod, RemovePod []int64
 }
 
 var _ = framework.KubeDescribe("PodSandbox", func() {
@@ -93,17 +90,29 @@ var _ = framework.KubeDescribe("PodSandbox", func() {
 			stopwatch.Record("RemovePod")
 			framework.ExpectNoError(err, "failed to remove PodSandbox: %v", err)
 
-		}, gmeasure.SamplingConfig{N: 1000, NumParallel: 1})
+		}, gmeasure.SamplingConfig{N: framework.TestContext.BenchmarkingParams.PodsNumber, NumParallel: framework.TestContext.BenchmarkingParams.PodsNumberParallel})
 
 		data := ExperimentData{
-			CreatePod: fmt.Sprintf("%v", experiment.Get("CreatePod").Durations),
-			StatusPod: fmt.Sprintf("%v", experiment.Get("StatusPod").Durations),
-			StopPod:   fmt.Sprintf("%v", experiment.Get("StopPod").Durations),
-			RemovePod: fmt.Sprintf("%v", experiment.Get("RemovePod").Durations),
+			CreatePod: getNanosecondsForDurations(experiment.Get("CreatePod").Durations),
+			StatusPod: getNanosecondsForDurations(experiment.Get("StatusPod").Durations),
+			StopPod:   getNanosecondsForDurations(experiment.Get("StopPod").Durations),
+			RemovePod: getNanosecondsForDurations(experiment.Get("RemovePod").Durations),
 		}
 
-		file, _ := json.MarshalIndent(data, "", " ")
-		_ = ioutil.WriteFile("c:/experiment_old_hcsshim.json", file, 0644)
+		if framework.TestContext.BenchmarkingOutputDir != "" {
+			filepath := path.Join(framework.TestContext.BenchmarkingOutputDir, "pod_benchmark_data.json")
+			data, err := json.MarshalIndent(data, "", " ")
+			if err == nil {
+				err = ioutil.WriteFile(filepath, data, 0644)
+				if err != nil {
+					glog.Errorf("Failed to write pod benchmark data: %v", filepath)
+				}
+			} else {
+				glog.Errorf("Failed to serialize benchmark data: %v", err)
+			}
+		} else {
+			glog.Infof("No benchmarking output dir provided, skipping writing benchmarking results.")
+		}
 	})
 })
 
diff --git a/pkg/framework/test_context.go b/pkg/framework/test_context.go
index 3a5c431516..d545ae7878 100644
--- a/pkg/framework/test_context.go
+++ b/pkg/framework/test_context.go
@@ -25,7 +25,11 @@ import (
 	"time"
 
 	"github.com/onsi/ginkgo/v2"
-	"gopkg.in/yaml.v3"
+)
+
+var (
+	testImagesFilePath       string
+	benchmarkSettingFilePath string
 )
 
 // TestImageList aggregates references to the images used in tests.
@@ -34,6 +38,24 @@ type TestImageList struct {
 	WebServerTestImage string `yaml:"webServerTestImage"`
 }
 
+// BenchmarkingParamsType is the type of benchmarking-related params.
+type BenchmarkingParamsType struct {
+	// ContainersNumber is the number of Containers to run as part of
+	// the container-related benchmarks.
+	ContainersNumber int `yaml:"containersNumber"`
+
+	// ContainersNumberParallel is the maximum number of container-related benchmarks
+	// to run in parallel.
+	ContainersNumberParallel int `yaml:"containersNumberParallel"`
+
+	// PodsNumber is the number of Pods to run as part of the pod-related benchmarks.
+	PodsNumber int `yaml:"podsNumber"`
+
+	// PodsNumberParallel is the maximum number of pod-related benchmarks
+	// to run in parallel.
+	PodsNumberParallel int `yaml:"podsNumberParallel"`
+}
+
 // TestContextType is the type of test context.
 type TestContextType struct {
 	// Report related settings.
@@ -49,11 +71,11 @@ type TestContextType struct {
 	RuntimeHandler string
 
 	// Test images-related settings.
-	TestImagesFilePath string
-	TestImageList      TestImageList
+	TestImageList TestImageList
 
-	// Benchmark setting.
-	Number int
+	// Benchmarking settings.
+	BenchmarkingOutputDir string
+	BenchmarkingParams    BenchmarkingParamsType
 
 	// Test configuration.
 	IsLcow bool
@@ -83,7 +105,7 @@ func RegisterFlags() {
 	flag.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
 	flag.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
 	flag.StringVar(&TestContext.ImageServiceAddr, "image-endpoint", "", "Image service socket for client to connect.")
-	flag.StringVar(&TestContext.TestImagesFilePath, "test-images-file", "", "Optional path to a YAML file containing references to custom container images to be used in tests.")
+	flag.StringVar(&testImagesFilePath, "test-images-file", "", "Optional path to a YAML file containing references to custom container images to be used in tests.")
 	flag.DurationVar(&TestContext.ImageServiceTimeout, "image-service-timeout", 300*time.Second, "Timeout when trying to connect to image service.")
 
 	svcaddr := "unix:///var/run/dockershim.sock"
@@ -96,7 +118,9 @@ func RegisterFlags() {
 	flag.StringVar(&TestContext.RuntimeServiceAddr, "runtime-endpoint", svcaddr, "Runtime service socket for client to connect.")
 	flag.DurationVar(&TestContext.RuntimeServiceTimeout, "runtime-service-timeout", 300*time.Second, "Timeout when trying to connect to a runtime service.")
 	flag.StringVar(&TestContext.RuntimeHandler, "runtime-handler", "", "Runtime handler to use in the test.")
-	flag.IntVar(&TestContext.Number, "number", 5, "Number of PodSandbox/container in listing benchmark test.")
+
+	flag.StringVar(&benchmarkSettingFilePath, "benchmarking-params-file", "", "Optional path to a YAML file specifying benchmarking configuration options.")
+	flag.StringVar(&TestContext.BenchmarkingOutputDir, "benchmarking-output-dir", "", "Optional path to a directory in which benchmarking data should be placed.")
 
 	if runtime.GOOS == "windows" {
 		flag.BoolVar(&TestContext.IsLcow, "lcow", false, "Run Linux container on Windows tests instead of Windows container tests")
@@ -106,21 +130,25 @@ func RegisterFlags() {
 	flag.StringVar(&TestContext.RegistryPrefix, "registry-prefix", DefaultRegistryPrefix, "A possible registry prefix added to all images, like 'localhost:5000/'")
 }
 
-// Loads the custom images mapping file (if defined) into the TestContextType.
-func (tc TestContextType) LoadCustomImagesFileIntoTestingContext() error {
-	Logf("Testing context container image list file: %s", TestContext.TestImagesFilePath)
-	if TestContext.TestImagesFilePath != "" {
-		fileContent, err := os.ReadFile(TestContext.TestImagesFilePath)
+// Loads any external file-based parameters into the TestContextType.
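+//
+// As a purely illustrative (hypothetical) example, a benchmarking params file
+// using the keys declared on BenchmarkingParamsType above could look like:
+//
+//	containersNumber: 100
+//	containersNumberParallel: 2
+//	podsNumber: 1000
+//	podsNumberParallel: 1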
+func (tc TestContextType) LoadYamlConfigFiles() error {
+	// Attempt to load custom images file:
+	if testImagesFilePath != "" {
+		err := LoadYamlFile(testImagesFilePath, &TestContext.TestImageList)
 		if err != nil {
-			return fmt.Errorf("error reading '%v' file contents: %v", TestContext.TestImagesFilePath, err)
+			return fmt.Errorf("Error loading custom test images file: %v", err)
 		}
+	}
+	Logf("Testing context container image list: %+v", TestContext.TestImageList)
 
-		err = yaml.Unmarshal(fileContent, &TestContext.TestImageList)
+	// Attempt to load benchmark settings file:
+	if benchmarkSettingFilePath != "" {
+		err := LoadYamlFile(benchmarkSettingFilePath, &TestContext.BenchmarkingParams)
 		if err != nil {
-			return fmt.Errorf("error unmarshalling '%v' YAML file: %v", TestContext.TestImagesFilePath, err)
+			return err
 		}
 	}
+	Logf("Testing context benchmarking params: %+v", TestContext.BenchmarkingParams)
 
-	Logf("Testing context container image list: %+v", TestContext.TestImageList)
 	return nil
 }
diff --git a/pkg/framework/util.go b/pkg/framework/util.go
index bce8cdb98d..3160f1266e 100644
--- a/pkg/framework/util.go
+++ b/pkg/framework/util.go
@@ -18,6 +18,7 @@ package framework
 
 import (
 	"fmt"
+	"os"
 	"runtime"
 	"strings"
 	"sync"
@@ -25,6 +26,7 @@ import (
 
 	"github.com/docker/distribution/reference"
 	"github.com/pborman/uuid"
+	"gopkg.in/yaml.v3"
 	internalapi "k8s.io/cri-api/pkg/apis"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cri/remote"
@@ -115,7 +117,7 @@ var _ = BeforeSuite(func() {
 	}
 
 	// Load any custom image definitions:
-	err := TestContext.LoadCustomImagesFileIntoTestingContext()
+	err := TestContext.LoadYamlConfigFiles()
 	if err != nil {
 		panic(err)
 	}
@@ -355,3 +357,20 @@ func PullPublicImage(c internalapi.ImageManagerService, imageName string, podCon
 	ExpectNoError(err, "failed to pull image: %v", err)
 	return id
 }
+
+// LoadYamlFile attempts to load the given YAML file into the given struct.
+func LoadYamlFile(filepath string, obj interface{}) error {
+	Logf("Attempting to load YAML file %q into %+v", filepath, obj)
+	fileContent, err := os.ReadFile(filepath)
+	if err != nil {
+		return fmt.Errorf("error reading %q file contents: %v", filepath, err)
+	}
+
+	err = yaml.Unmarshal(fileContent, obj)
+	if err != nil {
+		return fmt.Errorf("error unmarshalling %q YAML file: %v", filepath, err)
+	}
+
+	Logf("Successfully loaded YAML file %q into %+v", filepath, obj)
+	return nil
+}

From b40d492e6233e038f22505d518bac22c98bc4650 Mon Sep 17 00:00:00 2001
From: Nashwan Azhari
Date: Tue, 15 Feb 2022 15:03:42 +0200
Subject: [PATCH 4/9] Formalize mechanism for recording/managing benchmark
 results.

This patch defines new types and mechanisms for managing benchmark
results using a channel-based approach, as the previous
gmeasure.Stopwatch-based approach did not provide a mechanism for
associating operations which are part of a larger lifecycle being
benchmarked. (e.g.
container CRUD operations)

Signed-off-by: Nashwan Azhari
---
 cmd/critest/cri_test.go    |   1 +
 docs/benchmark.md          |  26 ++++++
 pkg/benchmark/benchmark.go |  11 +--
 pkg/benchmark/container.go |  99 +++++++++++++--------
 pkg/benchmark/pod.go       |  96 ++++++++++++--------
 pkg/benchmark/util.go      | 177 +++++++++++++++++++++++++++++++++++++
 6 files changed, 331 insertions(+), 79 deletions(-)
 create mode 100644 pkg/benchmark/util.go

diff --git a/cmd/critest/cri_test.go b/cmd/critest/cri_test.go
index 189ae8858a..a1bc513d79 100644
--- a/cmd/critest/cri_test.go
+++ b/cmd/critest/cri_test.go
@@ -174,6 +174,7 @@ func TestCRISuite(t *testing.T) {
 		flag.Set("ginkgo.focus", "benchmark")
 		flag.Set("ginkgo.succinct", "true")
 	} else {
+		// Skip benchmark measurements for validation tests.
 		flag.Set("ginkgo.skipMeasurements", "true")
 	}
 	if *parallel > 1 {
diff --git a/docs/benchmark.md b/docs/benchmark.md
index 22d9bc537d..50b8fdba4b 100644
--- a/docs/benchmark.md
+++ b/docs/benchmark.md
@@ -26,10 +26,32 @@ git clone https://github.com/kubernetes-sigs/cri-tools -b release-1.9 $GOPATH/sr
 
 Before running the test, you need to _ensure that the CRI server under test is running and listening on a Unix socket_ or a Windows tcp socket. Because the benchmark tests are designed to request changes (e.g., create/delete) to the containers and verify that correct status is reported, it expects to be the only user of the CRI server. Please make sure that 1) there are no existing CRI-managed containers running on the node, and 2) no other processes (e.g., Kubelet) will interfere with the tests.
 
+### Defining benchmarking parameters
+
+You can optionally specify some parameters detailing how benchmarks should be run.
+
+```yaml
+# The number of container lifecycle benchmarks to run:
+containersNumber: 100
+
+# The number of container lifecycle benchmarks to run in parallel.
+# The total number of samples will be floor(containersNumber / containersNumberParallel)
+containersNumberParallel: 2
+
+
+# The number of pod lifecycle benchmarks to run:
+podsNumber: 1000
+# The number of pod lifecycle benchmarks to run in parallel.
+# The total number of samples will be floor(podsNumber / podsNumberParallel)
+podsNumberParallel: 1
+```
+
 ### Run
 
 ```sh
 critest -benchmark
+  [--benchmarking-params-file /path/to/params.yml]
+  [--benchmarking-output-dir /path/to/outdir/]
 ```
 
 This will
@@ -45,5 +67,9 @@ critest connects to Unix: `unix:///var/run/dockershim.sock` or Windows: `tcp://l
 - `-ginkgo.focus`: Only run the tests that match the regular expression.
 - `-image-endpoint`: Set the endpoint of image service. Same with runtime-endpoint if not specified.
 - `-runtime-endpoint`: Set the endpoint of runtime service. Default to Unix: `unix:///var/run/dockershim.sock` or Windows: `tcp://localhost:3735`.
+- `-benchmarking-params-file`: Optional path to a YAML file containing parameters describing which
+  benchmarks should be run.
+- `-benchmarking-output-dir`: Optional path to a pre-existing directory in which to write JSON
+  files detailing the results of the benchmarks.
 - `-ginkgo.skip`: Skip the tests that match the regular expression.
 - `-h`: Should help and all supported options.
diff --git a/pkg/benchmark/benchmark.go b/pkg/benchmark/benchmark.go
index 43a5260514..a5e9feb68b 100644
--- a/pkg/benchmark/benchmark.go
+++ b/pkg/benchmark/benchmark.go
@@ -33,14 +33,9 @@ import (
 	. "github.com/onsi/gomega"
 )
 
-// Transforms a slice of `time.Duration`s into their `int64` nanosecond representations.
-func getNanosecondsForDurations(durations []time.Duration) []int64 { - var ns []int64 - for _, duration := range durations { - ns = append(ns, duration.Nanoseconds()) - } - return ns -} +const ( + defaultOperationTimes int = 20 +) // TestPerformance checks configuration parameters (specified through flags) and then runs // benchmark tests using the Ginkgo runner. diff --git a/pkg/benchmark/container.go b/pkg/benchmark/container.go index 76893988e9..fbea4145a4 100644 --- a/pkg/benchmark/container.go +++ b/pkg/benchmark/container.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,9 +17,8 @@ limitations under the License. package benchmark import ( - "encoding/json" - "io/ioutil" "path" + "time" "github.com/golang/glog" "github.com/kubernetes-sigs/cri-tools/pkg/framework" @@ -30,10 +29,6 @@ import ( "github.com/onsi/gomega/gmeasure" ) -type ContainerExperimentData struct { - CreateContainer, StatusContainer, StopContainer, RemoveContainer, StartContainer []int64 -} - var _ = framework.KubeDescribe("Container", func() { f := framework.NewDefaultCRIFramework() @@ -47,75 +42,109 @@ var _ = framework.KubeDescribe("Container", func() { Context("benchmark about operations on Container", func() { It("benchmark about basic operations on Container", func() { + // Setup sampling config from TestContext: + samplingConfig := gmeasure.SamplingConfig{ + N: framework.TestContext.BenchmarkingParams.ContainersNumber, + NumParallel: framework.TestContext.BenchmarkingParams.ContainersNumberParallel, + } + if samplingConfig.N < 1 { + samplingConfig.N = 1 + } + if samplingConfig.NumParallel < 1 { + samplingConfig.NumParallel = 1 + } + + // Setup results reporting channel: + resultsSet := LifecycleBenchmarksResultsSet{ + OperationsNames: []string{"CreateContainer", "StartContainer", "StatusContainer", "StopContainer", "RemoveContainer"}, + NumParallel: samplingConfig.NumParallel, + Datapoints: make([]LifecycleBenchmarkDatapoint, 0), + } + resultsManager := NewLifecycleBenchmarksResultsManager( + resultsSet, + 60, + ) + resultsChannel := resultsManager.StartResultsConsumer() + experiment := gmeasure.NewExperiment("ContainerOps") experiment.Sample(func(idx int) { var podID string var podConfig *runtimeapi.PodSandboxConfig var containerID string + var lastStartTime, lastEndTime int64 var err error + durations := make([]int64, len(resultsSet.OperationsNames)) podID, podConfig = framework.CreatePodSandboxForContainer(rc) By("CreatingContainer") - stopwatch := experiment.NewStopwatch() - stopwatch.Reset() + startTime := time.Now().UnixNano() + lastStartTime = startTime containerID = framework.CreateDefaultContainer(rc, ic, podID, podConfig, "Benchmark-container-") - stopwatch.Record("CreateContainer") + lastEndTime = time.Now().UnixNano() + durations[0] = lastEndTime - lastStartTime By("StartingContainer") - stopwatch.Reset() + lastStartTime = time.Now().UnixNano() err = rc.StartContainer(containerID) - stopwatch.Record("StartContainer") + lastEndTime = time.Now().UnixNano() + durations[1] = lastEndTime - lastStartTime framework.ExpectNoError(err, "failed to start Container: %v", err) By("ContainerStatus") - stopwatch.Reset() + lastStartTime = time.Now().UnixNano() _, err = rc.ContainerStatus(containerID) - stopwatch.Record("StatusContainer") + lastEndTime = time.Now().UnixNano() + durations[2] = lastEndTime - lastStartTime 
 				framework.ExpectNoError(err, "failed to get Container status: %v", err)
 
 				By("ContainerStop")
-				stopwatch.Reset()
+				lastStartTime = time.Now().UnixNano()
 				err = rc.StopContainer(containerID, framework.DefaultStopContainerTimeout)
-				stopwatch.Record("StopContainer")
+				lastEndTime = time.Now().UnixNano()
+				durations[3] = lastEndTime - lastStartTime
 				framework.ExpectNoError(err, "failed to stop Container: %v", err)
 
 				By("ContainerRemove")
-				stopwatch.Reset()
+				lastStartTime = time.Now().UnixNano()
 				err = rc.RemoveContainer(containerID)
-				stopwatch.Record("RemoveContainer")
+				lastEndTime = time.Now().UnixNano()
+				durations[4] = lastEndTime - lastStartTime
 				framework.ExpectNoError(err, "failed to remove Container: %v", err)
 
+				res := LifecycleBenchmarkDatapoint{
+					SampleIndex:           idx,
+					StartTime:             startTime,
+					EndTime:               lastEndTime,
+					OperationsDurationsNs: durations,
+					MetaInfo:              map[string]string{"podId": podID, "containerId": containerID},
+				}
+				resultsChannel <- &res
+
 				By("stop PodSandbox")
 				rc.StopPodSandbox(podID)
 				By("delete PodSandbox")
 				rc.RemovePodSandbox(podID)
 
-			}, gmeasure.SamplingConfig{N: framework.TestContext.BenchmarkingParams.ContainersNumber, NumParallel: framework.TestContext.BenchmarkingParams.ContainersNumberParallel})
+			}, samplingConfig)
 
-			data := ContainerExperimentData{
-				CreateContainer: getNanosecondsForDurations(experiment.Get("CreateContainer").Durations),
-				StartContainer:  getNanosecondsForDurations(experiment.Get("StartContainer").Durations),
-				StatusContainer: getNanosecondsForDurations(experiment.Get("StatusContainer").Durations),
-				StopContainer:   getNanosecondsForDurations(experiment.Get("StopContainer").Durations),
-				RemoveContainer: getNanosecondsForDurations(experiment.Get("RemoveContainer").Durations),
+			// Send nil and give the manager a minute to process any already-queued results:
+			resultsChannel <- nil
+			err := resultsManager.AwaitAllResults(60)
+			if err != nil {
+				glog.Errorf("Results manager failed to await all results: %s", err)
 			}
 
 			if framework.TestContext.BenchmarkingOutputDir != "" {
 				filepath := path.Join(framework.TestContext.BenchmarkingOutputDir, "container_benchmark_data.json")
-				data, err := json.MarshalIndent(data, "", " ")
-				if err == nil {
-					err = ioutil.WriteFile(filepath, data, 0644)
-					if err != nil {
-						glog.Errorf("Failed to write container benchmark data: %v", filepath)
-					}
-				} else {
-					glog.Errorf("Failed to serialize benchmark data: %v", err)
+				err = resultsManager.WriteResultsFile(filepath)
+				if err != nil {
+					glog.Errorf("Error occurred while writing benchmark results to file %s: %s", filepath, err)
 				}
 			} else {
-				glog.Infof("No benchmarking output dir provided, skipping writing benchmarking results.")
+				glog.Infof("No benchmarking output dir provided, skipping writing benchmarking results file.")
+				glog.Infof("Benchmark results were: %+v", resultsManager.resultsSet)
 			}
 		})
-	})
 })
diff --git a/pkg/benchmark/pod.go b/pkg/benchmark/pod.go
index a825a5f231..dd6c9229ea 100644
--- a/pkg/benchmark/pod.go
+++ b/pkg/benchmark/pod.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -17,9 +17,8 @@ limitations under the License.
package benchmark import ( - "encoding/json" - "io/ioutil" "path" + "time" "github.com/golang/glog" "github.com/kubernetes-sigs/cri-tools/pkg/framework" @@ -30,14 +29,6 @@ import ( "github.com/onsi/gomega/gmeasure" ) -const ( - defaultOperationTimes int = 20 -) - -type ExperimentData struct { - CreatePod, StatusPod, StopPod, RemovePod []int64 -} - var _ = framework.KubeDescribe("PodSandbox", func() { f := framework.NewDefaultCRIFramework() @@ -49,11 +40,36 @@ var _ = framework.KubeDescribe("PodSandbox", func() { Context("benchmark about operations on PodSandbox", func() { It("benchmark about lifecycle of PodSandbox", func() { + // Setup sampling config from TestContext: + samplingConfig := gmeasure.SamplingConfig{ + N: framework.TestContext.BenchmarkingParams.PodsNumber, + NumParallel: framework.TestContext.BenchmarkingParams.PodsNumberParallel, + } + if samplingConfig.N < 1 { + samplingConfig.N = 1 + } + if samplingConfig.NumParallel < 1 { + samplingConfig.NumParallel = 1 + } + + // Setup results reporting channel: + resultsSet := LifecycleBenchmarksResultsSet{ + OperationsNames: []string{"CreatePod", "StatusPod", "StopPod", "RemovePod"}, + NumParallel: samplingConfig.NumParallel, + Datapoints: make([]LifecycleBenchmarkDatapoint, 0), + } + resultsManager := NewLifecycleBenchmarksResultsManager( + resultsSet, + 60, + ) + resultsChannel := resultsManager.StartResultsConsumer() experiment := gmeasure.NewExperiment("PodLifecycle") experiment.Sample(func(idx int) { + var lastStartTime, lastEndTime int64 var podID string var err error + durations := make([]int64, len(resultsSet.OperationsNames)) podSandboxName := "PodSandbox-for-creating-performance-test-" + framework.NewUUID() uid := framework.DefaultUIDPrefix + framework.NewUUID() @@ -66,54 +82,62 @@ var _ = framework.KubeDescribe("PodSandbox", func() { } By("Creating a pod") - stopwatch := experiment.NewStopwatch() - + startTime := time.Now().UnixNano() + lastStartTime = startTime podID, err = c.RunPodSandbox(config, framework.TestContext.RuntimeHandler) - stopwatch.Record("CreatePod") + lastEndTime = time.Now().UnixNano() + durations[0] = lastEndTime - lastStartTime framework.ExpectNoError(err, "failed to create PodSandbox: %v", err) By("Get Pod status") - stopwatch.Reset() + lastStartTime = time.Now().UnixNano() _, err = c.PodSandboxStatus(podID) - stopwatch.Record("StatusPod") + lastEndTime = time.Now().UnixNano() + durations[1] = lastEndTime - lastStartTime framework.ExpectNoError(err, "failed to get PodStatus: %v", err) By("Stop PodSandbox") - stopwatch.Reset() + lastStartTime = time.Now().UnixNano() err = c.StopPodSandbox(podID) - stopwatch.Record("StopPod") + lastEndTime = time.Now().UnixNano() + durations[2] = lastEndTime - lastStartTime framework.ExpectNoError(err, "failed to stop PodSandbox: %v", err) By("Remove PodSandbox") - stopwatch.Reset() + lastStartTime = time.Now().UnixNano() err = c.RemovePodSandbox(podID) - stopwatch.Record("RemovePod") + lastEndTime = time.Now().UnixNano() + durations[3] = lastEndTime - lastStartTime framework.ExpectNoError(err, "failed to remove PodSandbox: %v", err) - }, gmeasure.SamplingConfig{N: framework.TestContext.BenchmarkingParams.PodsNumber, NumParallel: framework.TestContext.BenchmarkingParams.PodsNumberParallel}) + res := LifecycleBenchmarkDatapoint{ + SampleIndex: idx, + StartTime: startTime, + EndTime: lastEndTime, + OperationsDurationsNs: durations, + MetaInfo: map[string]string{"podId": podID, "podSandboxName": podSandboxName}, + } + resultsChannel <- &res + + }, samplingConfig) - data := 
ExperimentData{
-				CreatePod: getNanosecondsForDurations(experiment.Get("CreatePod").Durations),
-				StatusPod: getNanosecondsForDurations(experiment.Get("StatusPod").Durations),
-				StopPod:   getNanosecondsForDurations(experiment.Get("StopPod").Durations),
-				RemovePod: getNanosecondsForDurations(experiment.Get("RemovePod").Durations),
+			// Send nil and give the manager a minute to process any already-queued results:
+			resultsChannel <- nil
+			err := resultsManager.AwaitAllResults(60)
+			if err != nil {
+				glog.Errorf("Results manager failed to await all results: %s", err)
 			}
 
 			if framework.TestContext.BenchmarkingOutputDir != "" {
 				filepath := path.Join(framework.TestContext.BenchmarkingOutputDir, "pod_benchmark_data.json")
-				data, err := json.MarshalIndent(data, "", " ")
-				if err == nil {
-					err = ioutil.WriteFile(filepath, data, 0644)
-					if err != nil {
-						glog.Errorf("Failed to write pod benchmark data: %v", filepath)
-					}
-				} else {
-					glog.Errorf("Failed to serialize benchmark data: %v", err)
+				err = resultsManager.WriteResultsFile(filepath)
+				if err != nil {
+					glog.Errorf("Error occurred while writing benchmark results to file %s: %s", filepath, err)
 				}
 			} else {
 				glog.Infof("No benchmarking output dir provided, skipping writing benchmarking results.")
+				glog.Infof("Benchmark results were: %+v", resultsManager.resultsSet)
 			}
 		})
 	})
-
 })
diff --git a/pkg/benchmark/util.go b/pkg/benchmark/util.go
new file mode 100644
index 0000000000..8ff01268f6
--- /dev/null
+++ b/pkg/benchmark/util.go
@@ -0,0 +1,177 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package benchmark
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"time"
+
+	"github.com/golang/glog"
+)
+
+// LifecycleBenchmarkDatapoint encodes a single benchmark for a lifecycle operation.
+// Contains a slice of int64s which represent the duration in nanoseconds of the
+// operations which comprise a lifecycle being benchmarked.
+// (e.g. the individual CRUD operations which are cycled through during the benchmark)
+type LifecycleBenchmarkDatapoint struct {
+	// Index of the sample.
+	SampleIndex int `json:"sampleIndex"`
+
+	// int64 nanosecond timestamp of the start of the result.
+	StartTime int64 `json:"startTime"`
+
+	// int64 nanosecond timestamp of the end of the result.
+	EndTime int64 `json:"endTime"`
+
+	// Slice of int64s representing the durations of individual operations.
+	// The operation durations should be in the order they were executed in.
+	// Note that the sum of OperationsDurationsNs need not be exactly equal to the duration
+	// determined by subtracting StartTime from EndTime, as there may be additional steps
+	// (e.g. timer setup) performed between the individual operations.
+	OperationsDurationsNs []int64 `json:"operationsDurationsNs"`
+
+	// String mapping for adding arbitrary meta-info for the lifecycle result:
+	MetaInfo map[string]string `json:"metaInfo"`
+}
+
+// LifecycleBenchmarksResultsSet houses results for benchmarks involving resource lifecycles
+// which include multiple benchmarked iterations of the cycle.
+type LifecycleBenchmarksResultsSet struct {
+	// Slice of string operation names which represent one cycle.
+	OperationsNames []string `json:"operationsNames"`
+
+	// The maximum number of lifecycles which were benchmarked in parallel.
+	// Anything <= 1 should be considered sequential.
+	NumParallel int `json:"numParallel"`
+
+	// List of datapoints for each lifecycle benchmark.
+	Datapoints []LifecycleBenchmarkDatapoint `json:"datapoints"`
+}
+
+// Type which tracks lifecycle benchmark results through channels.
+type LifecycleBenchmarksResultsManager struct {
+	// The LifecycleBenchmarksResultsSet where results are added.
+	resultsSet LifecycleBenchmarksResultsSet
+
+	// Channel for sending results to the manager.
+	resultsChannel chan *LifecycleBenchmarkDatapoint
+
+	// Channel to indicate when the results consumer goroutine has ended.
+	resultsOverChannel chan bool
+
+	// Flag to indicate whether the results consumer goroutine is running.
+	resultsConsumerRunning bool
+
+	// The maximum timeout in seconds to wait between individual results being received.
+	resultsChannelTimeoutSeconds int
+}
+
+// Instantiates a new LifecycleBenchmarksResultsManager and its internal channels/structures.
+func NewLifecycleBenchmarksResultsManager(initialResultsSet LifecycleBenchmarksResultsSet, resultsChannelTimeoutSeconds int) *LifecycleBenchmarksResultsManager {
+	lbrm := LifecycleBenchmarksResultsManager{
+		resultsSet:                   initialResultsSet,
+		resultsChannelTimeoutSeconds: resultsChannelTimeoutSeconds,
+		resultsChannel:               make(chan *LifecycleBenchmarkDatapoint),
+		resultsOverChannel:           make(chan bool),
+	}
+	if lbrm.resultsSet.Datapoints == nil {
+		lbrm.resultsSet.Datapoints = make([]LifecycleBenchmarkDatapoint, 0)
+	}
+	return &lbrm
+}
+
+// Function which continuously consumes results from the resultsChannel until receiving a nil.
+func (lbrm *LifecycleBenchmarksResultsManager) awaitResult() {
+	numOperations := len(lbrm.resultsSet.OperationsNames)
+	for {
+		var res *LifecycleBenchmarkDatapoint
+		timeout := time.After(time.Duration(lbrm.resultsChannelTimeoutSeconds) * time.Second)
+
+		select {
+		case res = <-lbrm.resultsChannel:
+			// Receiving nil indicates results are over:
+			if res == nil {
+				glog.Info("Results ended.")
+				lbrm.resultsConsumerRunning = false
+				lbrm.resultsOverChannel <- true
+				return
+			}
+
+			// Warn if an improper number of results was received:
+			if len(res.OperationsDurationsNs) != numOperations {
+				glog.Warningf("Received improper number of datapoints for operations %+v: %+v", lbrm.resultsSet.OperationsNames, res.OperationsDurationsNs)
+			}
+
+			// Register the result:
+			lbrm.resultsSet.Datapoints = append(lbrm.resultsSet.Datapoints, *res)
+
+		case <-timeout:
+			err := fmt.Errorf("Timed out after waiting %d seconds for new results.", lbrm.resultsChannelTimeoutSeconds)
+			glog.Error(err)
+			panic(err)
+		}
+	}
+}
+
+// Starts the results consumer goroutine and returns the channel to write results to.
+// A nil value must be sent after all other results were sent to indicate the end of the result
+// stream.
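+//
+// A minimal usage sketch (illustrative only; the operation names and duration
+// values below are hypothetical):
+//
+//	mgr := NewLifecycleBenchmarksResultsManager(LifecycleBenchmarksResultsSet{
+//		OperationsNames: []string{"CreatePod", "RemovePod"},
+//		NumParallel:     1,
+//	}, 60)
+//	ch := mgr.StartResultsConsumer()
+//	ch <- &LifecycleBenchmarkDatapoint{SampleIndex: 0, OperationsDurationsNs: []int64{10, 20}}
+//	ch <- nil // signal the end of the results stream
+//	_ = mgr.AwaitAllResults(60)
+//	_ = mgr.WriteResultsFile("results.json")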
+func (lbrm *LifecycleBenchmarksResultsManager) StartResultsConsumer() chan *LifecycleBenchmarkDatapoint {
+	if !lbrm.resultsConsumerRunning {
+		lbrm.resultsConsumerRunning = true
+		go lbrm.awaitResult()
+	}
+	return lbrm.resultsChannel
+}
+
+// Waits for the result consumer goroutine and returns all the results registered so far.
+func (lbrm *LifecycleBenchmarksResultsManager) AwaitAllResults(timeoutSeconds int) error {
+	if !lbrm.resultsConsumerRunning {
+		return nil
+	}
+
+	timeout := time.After(time.Duration(timeoutSeconds) * time.Second)
+	select {
+	case <-lbrm.resultsOverChannel:
+		lbrm.resultsConsumerRunning = false
+		return nil
+	case <-timeout:
+		glog.Warningf("Failed to await all results. Results registered so far were: %+v", lbrm.resultsSet)
+		return fmt.Errorf("Benchmark results waiting timed out after %d seconds.", timeoutSeconds)
+	}
+}
+
+// Saves the results gathered so far as JSON under the given filepath.
+func (lbrm *LifecycleBenchmarksResultsManager) WriteResultsFile(filepath string) error {
+	if lbrm.resultsConsumerRunning {
+		return fmt.Errorf("Results consumer is still running and expecting results.")
+	}
+
+	data, err := json.MarshalIndent(lbrm.resultsSet, "", " ")
+	if err == nil {
+		err = ioutil.WriteFile(filepath, data, 0644)
+		if err != nil {
+			return fmt.Errorf("Failed to write benchmark results to file %s: %v", filepath, err)
+		}
+	} else {
+		return fmt.Errorf("Failed to serialize benchmark data: %v", err)
+	}
+
+	return nil
+}

From 4355d842b05ed15915cbffb06c972367c997b7b2 Mon Sep 17 00:00:00 2001
From: Nashwan Azhari
Date: Mon, 21 Mar 2022 13:01:35 +0200
Subject: [PATCH 5/9] Pass verbose flag when querying Container/Pod statuses.

Signed-off-by: Nashwan Azhari
---
 pkg/benchmark/container.go | 2 +-
 pkg/benchmark/pod.go       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkg/benchmark/container.go b/pkg/benchmark/container.go
index fbea4145a4..0019010b19 100644
--- a/pkg/benchmark/container.go
+++ b/pkg/benchmark/container.go
@@ -93,7 +93,7 @@ var _ = framework.KubeDescribe("Container", func() {
 
 				By("ContainerStatus")
 				lastStartTime = time.Now().UnixNano()
-				_, err = rc.ContainerStatus(containerID)
+				_, err = rc.ContainerStatus(containerID, true)
 				lastEndTime = time.Now().UnixNano()
 				durations[2] = lastEndTime - lastStartTime
 				framework.ExpectNoError(err, "failed to get Container status: %v", err)
diff --git a/pkg/benchmark/pod.go b/pkg/benchmark/pod.go
index dd6c9229ea..ed165050ee 100644
--- a/pkg/benchmark/pod.go
+++ b/pkg/benchmark/pod.go
@@ -91,7 +91,7 @@ var _ = framework.KubeDescribe("PodSandbox", func() {
 
 			By("Get Pod status")
 			lastStartTime = time.Now().UnixNano()
-			_, err = c.PodSandboxStatus(podID)
+			_, err = c.PodSandboxStatus(podID, true)
 			lastEndTime = time.Now().UnixNano()
 			durations[1] = lastEndTime - lastStartTime
 			framework.ExpectNoError(err, "failed to get PodStatus: %v", err)

From 64263891929a30f2828b232286a12facae97caf9 Mon Sep 17 00:00:00 2001
From: Nashwan Azhari
Date: Mon, 21 Mar 2022 15:19:59 +0200
Subject: [PATCH 6/9] Parametrize benchmarking-related timeouts.
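
The new timeouts are read from the same benchmarking params YAML file as
the other settings. A hypothetical sketch (keys as declared on
BenchmarkingParamsType in this patch, values purely illustrative):

```yaml
containerBenchmarkTimeoutSeconds: 60
podBenchmarkTimeoutSeconds: 60
imageBenchmarkTimeoutSeconds: 10
podContainerStartBenchmarkTimeoutSeconds: 5
```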
Signed-off-by: Nashwan Azhari --- pkg/benchmark/container.go | 13 +++++++++++-- pkg/benchmark/image.go | 26 ++++++++++++++++++++++---- pkg/benchmark/pod.go | 11 ++++++++++- pkg/benchmark/pod_container.go | 14 +++++++++++++- pkg/framework/test_context.go | 16 ++++++++++++++++ 5 files changed, 72 insertions(+), 8 deletions(-) diff --git a/pkg/benchmark/container.go b/pkg/benchmark/container.go index 0019010b19..331094daa0 100644 --- a/pkg/benchmark/container.go +++ b/pkg/benchmark/container.go @@ -23,12 +23,16 @@ import ( "github.com/golang/glog" "github.com/kubernetes-sigs/cri-tools/pkg/framework" internalapi "k8s.io/cri-api/pkg/apis" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" . "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega/gmeasure" ) +const ( + defaultContainerBenchmarkTimeoutSeconds = 60 +) + var _ = framework.KubeDescribe("Container", func() { f := framework.NewDefaultCRIFramework() @@ -42,6 +46,11 @@ var _ = framework.KubeDescribe("Container", func() { Context("benchmark about operations on Container", func() { It("benchmark about basic operations on Container", func() { + timeout := defaultContainerBenchmarkTimeoutSeconds + if framework.TestContext.BenchmarkingParams.ContainerBenchmarkTimeoutSeconds > 0 { + timeout = framework.TestContext.BenchmarkingParams.ContainerBenchmarkTimeoutSeconds + } + // Setup sampling config from TestContext: samplingConfig := gmeasure.SamplingConfig{ N: framework.TestContext.BenchmarkingParams.ContainersNumber, @@ -62,7 +71,7 @@ var _ = framework.KubeDescribe("Container", func() { } resultsManager := NewLifecycleBenchmarksResultsManager( resultsSet, - 60, + timeout, ) resultsChannel := resultsManager.StartResultsConsumer() diff --git a/pkg/benchmark/image.go b/pkg/benchmark/image.go index 93367a2a28..9c6ad7f980 100644 --- a/pkg/benchmark/image.go +++ b/pkg/benchmark/image.go @@ -27,6 +27,13 @@ import ( . 
"github.com/onsi/gomega" ) +const ( + defaultImagePullTimeoutSeconds = 1 + defaultImageStatusTimeoutSeconds = 2 + defaultImageRemoveTimeoutSeconds = 2 + defaultImageListTimeoutSeconds = 2 +) + var _ = framework.KubeDescribe("Container", func() { f := framework.NewDefaultCRIFramework() @@ -65,6 +72,17 @@ var _ = framework.KubeDescribe("Container", func() { } }) + imagePullTimeoutSeconds := defaultImagePullTimeoutSeconds + imageStatusTimeoutSeconds := defaultImageStatusTimeoutSeconds + imageRemoveTimeoutSeconds := defaultImageRemoveTimeoutSeconds + imageListTimeoutSeconds := defaultImageListTimeoutSeconds + if framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds > 0 { + imagePullTimeoutSeconds = framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds + imageStatusTimeoutSeconds = framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds + imageRemoveTimeoutSeconds = framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds + imageListTimeoutSeconds = framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds + } + Measure("benchmark about basic operations on Image", func(b Benchmarker) { imageSpec := &runtimeapi.ImageSpec{ Image: testImageList[0], @@ -73,21 +91,21 @@ var _ = framework.KubeDescribe("Container", func() { operation := b.Time("pull Image", func() { framework.PullPublicImage(ic, testImageList[0], nil) }) - Expect(operation.Minutes()).Should(BeNumerically("<", 1), "pull Image shouldn't take too long.") + Expect(operation.Minutes()).Should(BeNumerically("<", imagePullTimeoutSeconds), "pull Image shouldn't take too long.") operation = b.Time("Image status", func() { _, err = ic.ImageStatus(imageSpec, false) }) framework.ExpectNoError(err, "failed to get image status: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", 2), "get image status shouldn't take too long.") + Expect(operation.Seconds()).Should(BeNumerically("<", imageStatusTimeoutSeconds), "get image status shouldn't take too long.") operation = b.Time("remove Image", func() { err = ic.RemoveImage(imageSpec) }) framework.ExpectNoError(err, "failed to remove image: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", 2), "remove Image shouldn't take too long.") + Expect(operation.Seconds()).Should(BeNumerically("<", imageRemoveTimeoutSeconds), "remove Image shouldn't take too long.") }, defaultOperationTimes) @@ -101,7 +119,7 @@ var _ = framework.KubeDescribe("Container", func() { }) framework.ExpectNoError(err, "failed to list Image: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", 2), "list Image shouldn't take too long.") + Expect(operation.Seconds()).Should(BeNumerically("<", imageListTimeoutSeconds), "list Image shouldn't take too long.") }, defaultOperationTimes) }) }) diff --git a/pkg/benchmark/pod.go b/pkg/benchmark/pod.go index ed165050ee..a029831c36 100644 --- a/pkg/benchmark/pod.go +++ b/pkg/benchmark/pod.go @@ -29,6 +29,10 @@ import ( "github.com/onsi/gomega/gmeasure" ) +const ( + defaultPodBenchmarkTimeoutSeconds = 60 +) + var _ = framework.KubeDescribe("PodSandbox", func() { f := framework.NewDefaultCRIFramework() @@ -40,6 +44,11 @@ var _ = framework.KubeDescribe("PodSandbox", func() { Context("benchmark about operations on PodSandbox", func() { It("benchmark about lifecycle of PodSandbox", func() { + timeout := defaultPodBenchmarkTimeoutSeconds + if framework.TestContext.BenchmarkingParams.ContainerBenchmarkTimeoutSeconds > 0 { + timeout = 
framework.TestContext.BenchmarkingParams.PodBenchmarkTimeoutSeconds + } + // Setup sampling config from TestContext: samplingConfig := gmeasure.SamplingConfig{ N: framework.TestContext.BenchmarkingParams.PodsNumber, @@ -60,7 +69,7 @@ var _ = framework.KubeDescribe("PodSandbox", func() { } resultsManager := NewLifecycleBenchmarksResultsManager( resultsSet, - 60, + timeout, ) resultsChannel := resultsManager.StartResultsConsumer() diff --git a/pkg/benchmark/pod_container.go b/pkg/benchmark/pod_container.go index 45688369c5..07a3fa9d53 100644 --- a/pkg/benchmark/pod_container.go +++ b/pkg/benchmark/pod_container.go @@ -25,6 +25,18 @@ import ( . "github.com/onsi/gomega" ) +const ( + defaultPodContainerBenchmarkTimeoutSeconds = 5 +) + +func getPodContainerBenchmarkTimeoutSeconds() int { + timeout := defaultPodContainerBenchmarkTimeoutSeconds + if framework.TestContext.BenchmarkingParams.PodContainerStartBenchmarkTimeoutSeconds > 0 { + timeout = framework.TestContext.BenchmarkingParams.PodContainerStartBenchmarkTimeoutSeconds + } + return timeout +} + var _ = framework.KubeDescribe("PodSandbox", func() { f := framework.NewDefaultCRIFramework() @@ -68,7 +80,7 @@ var _ = framework.KubeDescribe("PodSandbox", func() { }) framework.ExpectNoError(err, "failed to start Container: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", 5), "create PodSandbox shouldn't take too long.") + Expect(operation.Seconds()).Should(BeNumerically("<", getPodContainerBenchmarkTimeoutSeconds()), "create PodSandbox shouldn't take too long.") }, defaultOperationTimes) }) }) diff --git a/pkg/framework/test_context.go b/pkg/framework/test_context.go index d545ae7878..36e3d68af2 100644 --- a/pkg/framework/test_context.go +++ b/pkg/framework/test_context.go @@ -48,12 +48,28 @@ type BenchmarkingParamsType struct { // to run in parallel. ContainersNumberParallel int `yaml:"containersNumberParallel"` + // ContainerBenchmarkTimeoutSeconds is the maximum number of seconds acceptable + // for a Container lifecycle benchmark to take. + ContainerBenchmarkTimeoutSeconds int `yaml:"containerBenchmarkTimeoutSeconds"` + // PodsNumber is the number of Pods to run as part of the pod-related benchmarks. PodsNumber int `yaml:"podsNumber"` // PodsNumberParallel is the maximum number of pod -related benchmarks // to run in parallel. PodsNumberParallel int `yaml:"podsNumberParallel"` + + // PodBenchmarkTimeoutSeconds is the maximum number of seconds acceptable + // for a Pod lifecycle benchmark to take. + PodBenchmarkTimeoutSeconds int `yaml:"podBenchmarkTimeoutSeconds"` + + // ImageBenchmarkTimeoutSeconds is the maximum number of seconds acceptable for + // image-related benchmarks. + ImageBenchmarkTimeoutSeconds int `yaml:"imageBenchmarkTimeoutSeconds"` + + // PodContainerStartBenchmarkTimeoutSeconds is the maximum number of seconds acceptable for + // benchmarks focused on Pod+Container start performance. + PodContainerStartBenchmarkTimeoutSeconds int `yaml:"podContainerStartBenchmarkTimeoutSeconds"` } // TestContextType is the type of test context. From 2ba1d0e3e264a07b7e252f0f13e981ab0819dcf1 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Tue, 19 Apr 2022 15:34:35 +0300 Subject: [PATCH 7/9] Standardize image lifecycle and listing benchmarks. Bring the image-related benchmarks in line with the container and pod benchmarks by parametrizing the benchmark settings and switching to `gmeasure.Experiment` for running the benchmarks. 
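The patches converge on one shared shape for all lifecycle benchmarks: build a gmeasure.SamplingConfig from the TestContext benchmarking params, stream one LifecycleBenchmarkDatapoint per sample to a LifecycleBenchmarksResultsManager, and drive the samples through a gmeasure.Experiment. A condensed sketch of that pattern as applied in the diff below (per-operation timing and error handling elided):

    // Sampling bounds come straight from the benchmarking params.
    samplingConfig := gmeasure.SamplingConfig{
        N:           framework.TestContext.BenchmarkingParams.ImagesNumber,
        NumParallel: framework.TestContext.BenchmarkingParams.ImagesNumberParallel,
    }

    // The results manager consumes one datapoint per sample from a channel.
    resultsSet := LifecycleBenchmarksResultsSet{
        OperationsNames: []string{"PullImage", "StatusImage", "RemoveImage"},
        NumParallel:     samplingConfig.NumParallel,
        Datapoints:      make([]LifecycleBenchmarkDatapoint, 0),
    }
    resultsManager := NewLifecycleBenchmarksResultsManager(resultsSet, imageBenchmarkTimeoutSeconds)
    resultsChannel := resultsManager.StartResultsConsumer()

    experiment := gmeasure.NewExperiment("ImageLifecycle")
    experiment.Sample(func(idx int) {
        durations := make([]int64, len(resultsSet.OperationsNames))
        startTime := time.Now().UnixNano()
        // ... run each operation, recording time.Now().UnixNano() deltas in durations ...
        resultsChannel <- &LifecycleBenchmarkDatapoint{
            SampleIndex:           idx,
            StartTime:             startTime,
            EndTime:               time.Now().UnixNano(),
            OperationsDurationsNs: durations,
        }
    }, samplingConfig)

    // A nil datapoint terminates the consumer; results can then be written out as JSON.
    resultsChannel <- nil
    if err := resultsManager.AwaitAllResults(60); err != nil {
        glog.Errorf("Results manager failed to await all results: %s", err)
    }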
Signed-off-by: Nashwan Azhari --- pkg/benchmark/image.go | 257 +++++++++++++++++++++++++--------- pkg/framework/test_context.go | 15 ++ 2 files changed, 207 insertions(+), 65 deletions(-) diff --git a/pkg/benchmark/image.go b/pkg/benchmark/image.go index 9c6ad7f980..3340c79415 100644 --- a/pkg/benchmark/image.go +++ b/pkg/benchmark/image.go @@ -17,109 +17,236 @@ limitations under the License. package benchmark import ( + "path" "runtime" + "time" + "github.com/golang/glog" "github.com/kubernetes-sigs/cri-tools/pkg/framework" internalapi "k8s.io/cri-api/pkg/apis" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" + "github.com/onsi/gomega/gmeasure" ) const ( - defaultImagePullTimeoutSeconds = 1 - defaultImageStatusTimeoutSeconds = 2 - defaultImageRemoveTimeoutSeconds = 2 - defaultImageListTimeoutSeconds = 2 + defaultImageBenchmarkTimeoutSeconds = 10 ) -var _ = framework.KubeDescribe("Container", func() { +var defaultImageListingBenchmarkImagesAmd64 = []string{ + "busybox:1.26.2-glibc", + "busybox:1-uclibc", + "busybox:1", + "busybox:1-glibc", + "busybox:1-musl", +} +var defaultImageListingBenchmarkImages = []string{ + "busybox:1", + "busybox:1-glibc", + "busybox:1-musl", +} + +var _ = framework.KubeDescribe("Image", func() { + var ic internalapi.ImageManagerService f := framework.NewDefaultCRIFramework() - var ic internalapi.ImageManagerService + var testImageList []string = framework.TestContext.BenchmarkingParams.ImageListingBenchmarkImages + if len(testImageList) == 0 { + if runtime.GOARCH == "amd64" { + testImageList = defaultImageListingBenchmarkImagesAmd64 + } else { + testImageList = defaultImageListingBenchmarkImages + } + } BeforeEach(func() { ic = f.CRIClient.CRIImageClient }) + AfterEach(func() { + for _, imageName := range testImageList { + imageSpec := &runtimeapi.ImageSpec{ + Image: imageName, + } + ic.RemoveImage(imageSpec) + } + }) + Context("benchmark about operations on Image", func() { - var err error + It("benchmark about basic operations on Image", func() { + var err error - var testImageList []string - if runtime.GOARCH == "amd64" { - testImageList = []string{ - "busybox:1.26.2-glibc", - "busybox:1-uclibc", - "busybox:1", - "busybox:1-glibc", - "busybox:1-musl", + imageBenchmarkTimeoutSeconds := defaultImageBenchmarkTimeoutSeconds + if framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds > 0 { + imageBenchmarkTimeoutSeconds = framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds } - } else { - testImageList = []string{ - "busybox:1", - "busybox:1-glibc", - "busybox:1-musl", - } - } - AfterEach(func() { - for _, imageName := range testImageList { - imageSpec := &runtimeapi.ImageSpec{ - Image: imageName, - } - ic.RemoveImage(imageSpec) + imagePullingBenchmarkImage := framework.TestContext.BenchmarkingParams.ImagePullingBenchmarkImage + // NOTE(aznashwan): default to using first test image from listing benchmark images: + if imagePullingBenchmarkImage == "" { + imagePullingBenchmarkImage = testImageList[0] + glog.Infof("Defaulting to using following image: %s", imagePullingBenchmarkImage) } - }) - imagePullTimeoutSeconds := defaultImagePullTimeoutSeconds - imageStatusTimeoutSeconds := defaultImageStatusTimeoutSeconds - imageRemoveTimeoutSeconds := defaultImageRemoveTimeoutSeconds - imageListTimeoutSeconds := defaultImageListTimeoutSeconds - if framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds > 0 { - imagePullTimeoutSeconds = 
framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds - imageStatusTimeoutSeconds = framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds - imageRemoveTimeoutSeconds = framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds - imageListTimeoutSeconds = framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds - } + // Setup shared sampling config from TestContext: + samplingConfig := gmeasure.SamplingConfig{ + N: framework.TestContext.BenchmarkingParams.ImagesNumber, + NumParallel: framework.TestContext.BenchmarkingParams.ImagesNumberParallel, + } + if samplingConfig.N < 1 { + samplingConfig.N = 1 + } + if samplingConfig.NumParallel < 1 { + samplingConfig.NumParallel = 1 + } - Measure("benchmark about basic operations on Image", func(b Benchmarker) { - imageSpec := &runtimeapi.ImageSpec{ - Image: testImageList[0], + // Setup image lifecycle results reporting channel: + lifecycleResultsSet := LifecycleBenchmarksResultsSet{ + OperationsNames: []string{"PullImage", "StatusImage", "RemoveImage"}, + NumParallel: samplingConfig.NumParallel, + Datapoints: make([]LifecycleBenchmarkDatapoint, 0), } + lifecycleResultsManager := NewLifecycleBenchmarksResultsManager( + lifecycleResultsSet, + imageBenchmarkTimeoutSeconds, + ) + lifecycleResultsChannel := lifecycleResultsManager.StartResultsConsumer() + + // Image lifecycle benchmark experiment: + experiment := gmeasure.NewExperiment("ImageLifecycle") + experiment.Sample(func(idx int) { + var err error + var lastStartTime, lastEndTime int64 + durations := make([]int64, len(lifecycleResultsSet.OperationsNames)) - operation := b.Time("pull Image", func() { - framework.PullPublicImage(ic, testImageList[0], nil) - }) - Expect(operation.Minutes()).Should(BeNumerically("<", imagePullTimeoutSeconds), "pull Image shouldn't take too long.") + imageSpec := &runtimeapi.ImageSpec{ + Image: imagePullingBenchmarkImage, + } - operation = b.Time("Image status", func() { - _, err = ic.ImageStatus(imageSpec, false) - }) + By("Pull Image") + startTime := time.Now().UnixNano() + lastStartTime = startTime + imageId := framework.PullPublicImage(ic, imagePullingBenchmarkImage, nil) + lastEndTime = time.Now().UnixNano() + durations[0] = lastEndTime - lastStartTime - framework.ExpectNoError(err, "failed to get image status: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", imageStatusTimeoutSeconds), "get image status shouldn't take too long.") + By("Status Image") + lastStartTime = time.Now().UnixNano() + _, err = ic.ImageStatus(imageSpec, false) + lastEndTime = time.Now().UnixNano() + durations[1] = lastEndTime - lastStartTime + framework.ExpectNoError(err, "failed to status Image: %v", err) - operation = b.Time("remove Image", func() { + By("Remove Image") + lastStartTime = time.Now().UnixNano() err = ic.RemoveImage(imageSpec) - }) + lastEndTime = time.Now().UnixNano() + durations[2] = lastEndTime - lastStartTime + framework.ExpectNoError(err, "failed to remove Image: %v", err) + + res := LifecycleBenchmarkDatapoint{ + SampleIndex: idx, + StartTime: startTime, + EndTime: lastEndTime, + OperationsDurationsNs: durations, + MetaInfo: map[string]string{"imageId": imageId}, + } + lifecycleResultsChannel <- &res + + }, samplingConfig) - framework.ExpectNoError(err, "failed to remove image: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", imageRemoveTimeoutSeconds), "remove Image shouldn't take too long.") + // Send nil and give the manager a minute to process any already-queued results: + 
lifecycleResultsChannel <- nil + err = lifecycleResultsManager.AwaitAllResults(60) + if err != nil { + glog.Errorf("Results manager failed to await all results: %s", err) + } + + if framework.TestContext.BenchmarkingOutputDir != "" { + filepath := path.Join(framework.TestContext.BenchmarkingOutputDir, "image_lifecycle_benchmark_data.json") + err = lifecycleResultsManager.WriteResultsFile(filepath) + if err != nil { + glog.Errorf("Error occurred while writing benchmark results to file %s: %s", filepath, err) + } + } else { + glog.Infof("No benchmarking out dir provided, skipping writing benchmarking results.") + glog.Infof("Image lifecycle results were: %+v", lifecycleResultsManager.resultsSet) + } + }) - }, defaultOperationTimes) + It("benchmark about listing Image", func() { + var err error - Measure("benchmark about listing Image", func(b Benchmarker) { - for _, imageName := range testImageList { - framework.PullPublicImage(ic, imageName, nil) + imageBenchmarkTimeoutSeconds := defaultImageBenchmarkTimeoutSeconds + if framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds > 0 { + imageBenchmarkTimeoutSeconds = framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds } - operation := b.Time("list Container", func() { + // Setup shared sampling config from TestContext: + samplingConfig := gmeasure.SamplingConfig{ + N: framework.TestContext.BenchmarkingParams.ImagesNumber, + NumParallel: framework.TestContext.BenchmarkingParams.ImagesNumberParallel, + } + if samplingConfig.N < 1 { + samplingConfig.N = 1 + } + if samplingConfig.NumParallel < 1 { + samplingConfig.NumParallel = 1 + } + // Setup image lifecycle results reporting channel: + imageListResultsSet := LifecycleBenchmarksResultsSet{ + OperationsNames: []string{"ListImages"}, + NumParallel: samplingConfig.NumParallel, + Datapoints: make([]LifecycleBenchmarkDatapoint, 0), + } + imageListResultsManager := NewLifecycleBenchmarksResultsManager( + imageListResultsSet, + imageBenchmarkTimeoutSeconds, + ) + imagesResultsChannel := imageListResultsManager.StartResultsConsumer() + + // Image listing benchmark experiment: + experiment := gmeasure.NewExperiment("ImageListing") + experiment.Sample(func(idx int) { + var err error + durations := make([]int64, len(imageListResultsSet.OperationsNames)) + + By("List Images") + startTime := time.Now().UnixNano() _, err = ic.ListImages(nil) - }) + endTime := time.Now().UnixNano() + durations[0] = endTime - startTime + framework.ExpectNoError(err, "failed to List images: %v", err) + + res := LifecycleBenchmarkDatapoint{ + SampleIndex: idx, + StartTime: startTime, + EndTime: endTime, + OperationsDurationsNs: durations, + MetaInfo: nil, + } + imagesResultsChannel <- &res + + }, samplingConfig) - framework.ExpectNoError(err, "failed to list Image: %v", err) - Expect(operation.Seconds()).Should(BeNumerically("<", imageListTimeoutSeconds), "list Image shouldn't take too long.") - }, defaultOperationTimes) + // Send nil and give the manager a minute to process any already-queued results: + imagesResultsChannel <- nil + err = imageListResultsManager.AwaitAllResults(60) + if err != nil { + glog.Errorf("Results manager failed to await all results: %s", err) + } + + if framework.TestContext.BenchmarkingOutputDir != "" { + filepath := path.Join(framework.TestContext.BenchmarkingOutputDir, "image_listing_benchmark_data.json") + err = imageListResultsManager.WriteResultsFile(filepath) + if err != nil { + glog.Errorf("Error occurred while writing benchmark results to file %s: %s", filepath, 
err) + } + } else { + glog.Infof("No benchmarking out dir provided, skipping writing benchmarking results.") + glog.Infof("Image listing results were: %+v", imageListResultsManager.resultsSet) + } + }) }) }) diff --git a/pkg/framework/test_context.go b/pkg/framework/test_context.go index 36e3d68af2..5e8f530714 100644 --- a/pkg/framework/test_context.go +++ b/pkg/framework/test_context.go @@ -63,10 +63,25 @@ type BenchmarkingParamsType struct { // for a Pod lifecycle benchmark to take. PodBenchmarkTimeoutSeconds int `yaml:"podBenchmarkTimeoutSeconds"` + // ImagesNumber is the number of Images to run tests on in image-related benchmarks. + ImagesNumber int `yaml:"imagesNumber"` + + // ImagesNumberParallel is the maximum number of image-related benchmarks + // to run in parallel. + ImagesNumberParallel int `yaml:"imagesNumberParallel"` + // ImageBenchmarkTimeoutSeconds is the maximum of seconds acceptable for // image-related benchmarks. ImageBenchmarkTimeoutSeconds int `yaml:"imageBenchmarkTimeoutSeconds"` + // ImagePullingBenchmarkImage is the string ref to the image to be used in + // image pulling benchmarks. Internally defaults to BusyBox. + ImagePullingBenchmarkImage string `yaml:"imagePullingBenchmarkImage"` + + // ImageListingBenchmarkImages is a list of string image refs to query + // during image listing benchmarks. + ImageListingBenchmarkImages []string `yaml:"imageListingBenchmarkImages"` + // ImageBenchmarkTimeoutSeconds is the maximum of seconds acceptable for // benchmarks focused on Pod+Container start performance. PodContainerStartBenchmarkTimeoutSeconds int `yaml:"podContainerStartBenchmarkTimeoutSeconds"` From 8ba9205ddea3df8e82afe6e1112cb45a796714a5 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Tue, 26 Apr 2022 15:52:06 +0300 Subject: [PATCH 8/9] Skip benchmarks if associated resource number is not set. 
Signed-off-by: Nashwan Azhari --- pkg/benchmark/container.go | 4 ++-- pkg/benchmark/image.go | 8 ++++---- pkg/benchmark/pod.go | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/benchmark/container.go b/pkg/benchmark/container.go index 331094daa0..559bb7f26a 100644 --- a/pkg/benchmark/container.go +++ b/pkg/benchmark/container.go @@ -56,8 +56,8 @@ var _ = framework.KubeDescribe("Container", func() { N: framework.TestContext.BenchmarkingParams.ContainersNumber, NumParallel: framework.TestContext.BenchmarkingParams.ContainersNumberParallel, } - if samplingConfig.N < 1 { - samplingConfig.N = 1 + if samplingConfig.N <= 0 { + Skip("skipping container lifecycle benchmarks since container number option was not set") } if samplingConfig.NumParallel < 1 { samplingConfig.NumParallel = 1 diff --git a/pkg/benchmark/image.go b/pkg/benchmark/image.go index 3340c79415..bb9e76573d 100644 --- a/pkg/benchmark/image.go +++ b/pkg/benchmark/image.go @@ -94,8 +94,8 @@ var _ = framework.KubeDescribe("Image", func() { N: framework.TestContext.BenchmarkingParams.ImagesNumber, NumParallel: framework.TestContext.BenchmarkingParams.ImagesNumberParallel, } - if samplingConfig.N < 1 { - samplingConfig.N = 1 + if samplingConfig.N <= 0 { + Skip("skipping image lifecycle benchmarks since image number option was not set") } if samplingConfig.NumParallel < 1 { samplingConfig.NumParallel = 1 @@ -188,8 +188,8 @@ var _ = framework.KubeDescribe("Image", func() { N: framework.TestContext.BenchmarkingParams.ImagesNumber, NumParallel: framework.TestContext.BenchmarkingParams.ImagesNumberParallel, } - if samplingConfig.N < 1 { - samplingConfig.N = 1 + if samplingConfig.N <= 0 { + Skip("skipping image listing benchmarks since image listing number option was not set") } if samplingConfig.NumParallel < 1 { samplingConfig.NumParallel = 1 diff --git a/pkg/benchmark/pod.go b/pkg/benchmark/pod.go index a029831c36..35c05985ca 100644 --- a/pkg/benchmark/pod.go +++ b/pkg/benchmark/pod.go @@ -54,8 +54,8 @@ var _ = framework.KubeDescribe("PodSandbox", func() { N: framework.TestContext.BenchmarkingParams.PodsNumber, NumParallel: framework.TestContext.BenchmarkingParams.PodsNumberParallel, } - if samplingConfig.N < 1 { - samplingConfig.N = 1 + if samplingConfig.N <= 0 { + Skip("skipping pod lifecycle benchmarks since pod number option was not set") } if samplingConfig.NumParallel < 1 { samplingConfig.NumParallel = 1 From 4ee0c36761dbbd73edd19b977e884920600e5f71 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Thu, 28 Apr 2022 19:19:39 +0300 Subject: [PATCH 9/9] Add lifecycle index in benchmark update messages. Signed-off-by: Nashwan Azhari --- pkg/benchmark/container.go | 15 ++++++++------- pkg/benchmark/image.go | 9 +++++---- pkg/benchmark/pod.go | 9 +++++---- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/pkg/benchmark/container.go b/pkg/benchmark/container.go index 559bb7f26a..553ade8570 100644 --- a/pkg/benchmark/container.go +++ b/pkg/benchmark/container.go @@ -17,6 +17,7 @@ limitations under the License. 
package benchmark import ( + "fmt" "path" "time" @@ -86,35 +87,35 @@ var _ = framework.KubeDescribe("Container", func() { podID, podConfig = framework.CreatePodSandboxForContainer(rc) - By("CreatingContainer") + By(fmt.Sprintf("CreatingContainer %d", idx)) startTime := time.Now().UnixNano() lastStartTime = startTime containerID = framework.CreateDefaultContainer(rc, ic, podID, podConfig, "Benchmark-container-") lastEndTime = time.Now().UnixNano() durations[0] = lastEndTime - lastStartTime - By("StartingContainer") + By(fmt.Sprintf("StartingContainer %d", idx)) lastStartTime = time.Now().UnixNano() err = rc.StartContainer(containerID) lastEndTime = time.Now().UnixNano() durations[1] = lastEndTime - lastStartTime framework.ExpectNoError(err, "failed to start Container: %v", err) - By("ContainerStatus") + By(fmt.Sprintf("ContainerStatus %d", idx)) lastStartTime = time.Now().UnixNano() _, err = rc.ContainerStatus(containerID, true) lastEndTime = time.Now().UnixNano() durations[2] = lastEndTime - lastStartTime framework.ExpectNoError(err, "failed to get Container status: %v", err) - By("ContainerStop") + By(fmt.Sprintf("ContainerStop %d", idx)) lastStartTime = time.Now().UnixNano() err = rc.StopContainer(containerID, framework.DefaultStopContainerTimeout) lastEndTime = time.Now().UnixNano() durations[3] = lastEndTime - lastStartTime framework.ExpectNoError(err, "failed to stop Container: %v", err) - By("ContainerRemove") + By(fmt.Sprintf("ContainerRemove %d", idx)) lastStartTime = time.Now().UnixNano() err = rc.RemoveContainer(containerID) lastEndTime = time.Now().UnixNano() @@ -130,9 +131,9 @@ var _ = framework.KubeDescribe("Container", func() { } resultsChannel <- &res - By("stop PodSandbox") + By(fmt.Sprintf("stop PodSandbox %d", idx)) rc.StopPodSandbox(podID) - By("delete PodSandbox") + By(fmt.Sprintf("delete PodSandbox %d", idx)) rc.RemovePodSandbox(podID) }, samplingConfig) diff --git a/pkg/benchmark/image.go b/pkg/benchmark/image.go index bb9e76573d..c68953e604 100644 --- a/pkg/benchmark/image.go +++ b/pkg/benchmark/image.go @@ -17,6 +17,7 @@ limitations under the License. package benchmark import ( + "fmt" "path" "runtime" "time" @@ -124,21 +125,21 @@ var _ = framework.KubeDescribe("Image", func() { Image: imagePullingBenchmarkImage, } - By("Pull Image") + By(fmt.Sprintf("Pull Image %d", idx)) startTime := time.Now().UnixNano() lastStartTime = startTime imageId := framework.PullPublicImage(ic, imagePullingBenchmarkImage, nil) lastEndTime = time.Now().UnixNano() durations[0] = lastEndTime - lastStartTime - By("Status Image") + By(fmt.Sprintf("Status Image %d", idx)) lastStartTime = time.Now().UnixNano() _, err = ic.ImageStatus(imageSpec, false) lastEndTime = time.Now().UnixNano() durations[1] = lastEndTime - lastStartTime framework.ExpectNoError(err, "failed to status Image: %v", err) - By("Remove Image") + By(fmt.Sprintf("Remove Image %d", idx)) lastStartTime = time.Now().UnixNano() err = ic.RemoveImage(imageSpec) lastEndTime = time.Now().UnixNano() @@ -212,7 +213,7 @@ var _ = framework.KubeDescribe("Image", func() { var err error durations := make([]int64, len(imageListResultsSet.OperationsNames)) - By("List Images") + By(fmt.Sprintf("List Images %d", idx)) startTime := time.Now().UnixNano() _, err = ic.ListImages(nil) endTime := time.Now().UnixNano() diff --git a/pkg/benchmark/pod.go b/pkg/benchmark/pod.go index 35c05985ca..85f1a0a35a 100644 --- a/pkg/benchmark/pod.go +++ b/pkg/benchmark/pod.go @@ -17,6 +17,7 @@ limitations under the License. 
package benchmark import ( + "fmt" "path" "time" @@ -90,7 +91,7 @@ var _ = framework.KubeDescribe("PodSandbox", func() { Labels: framework.DefaultPodLabels, } - By("Creating a pod") + By(fmt.Sprintf("Creating a pod %d", idx)) startTime := time.Now().UnixNano() lastStartTime = startTime podID, err = c.RunPodSandbox(config, framework.TestContext.RuntimeHandler) @@ -98,21 +99,21 @@ var _ = framework.KubeDescribe("PodSandbox", func() { durations[0] = lastEndTime - lastStartTime framework.ExpectNoError(err, "failed to create PodSandbox: %v", err) - By("Get Pod status") + By(fmt.Sprintf("Get Pod status %d", idx)) lastStartTime = time.Now().UnixNano() _, err = c.PodSandboxStatus(podID, true) lastEndTime = time.Now().UnixNano() durations[1] = lastEndTime - lastStartTime framework.ExpectNoError(err, "failed to get PodStatus: %v", err) - By("Stop PodSandbox") + By(fmt.Sprintf("Stop PodSandbox %d", idx)) lastStartTime = time.Now().UnixNano() err = c.StopPodSandbox(podID) lastEndTime = time.Now().UnixNano() durations[2] = lastEndTime - lastStartTime framework.ExpectNoError(err, "failed to stop PodSandbox: %v", err) - By("Remove PodSandbox") + By(fmt.Sprintf("Remove PodSandbox %d", idx)) lastStartTime = time.Now().UnixNano() err = c.RemovePodSandbox(podID) lastEndTime = time.Now().UnixNano()
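With NumParallel greater than 1, By() progress messages from concurrent samples interleave in the spec output; threading the sample index passed to Experiment.Sample into every message makes each step attributable to its lifecycle, e.g.:

    experiment.Sample(func(idx int) {
        // idx uniquely identifies this sample in the interleaved output.
        By(fmt.Sprintf("Creating a pod %d", idx))
        // ... run and time the RunPodSandbox call for this sample ...
    }, samplingConfig)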