This repository has been archived by the owner on Jul 31, 2023. It is now read-only.

exclude zero bucket from aggregation_data (#1183)
* exclude zero bucket from aggregation_data

* fix error string in test file.

* add one more testcase.
rghetia committed Nov 9, 2019
1 parent 3b5a343 commit aad2c52
Showing 5 changed files with 150 additions and 10 deletions.
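In effect, after this commit a Distribution aggregation whose bounds include a zero (or out-of-order) bound no longer surfaces that zero bucket in the exported metric: the new test below expects Distribution(0, 5, 10) to come out with bounds {5, 10}. The following is a minimal end-to-end Go sketch of that behavior, using only the public APIs the new test itself exercises; the exporter type printerExp, the measure/view name zero_bucket_demo, and the one-second sleep are illustrative choices, not part of the commit.

package main

import (
    "context"
    "fmt"
    "time"

    "go.opencensus.io/metric/metricdata"
    "go.opencensus.io/metric/metricexport"
    "go.opencensus.io/stats"
    "go.opencensus.io/stats/view"
)

// printerExp is a throwaway exporter that prints the distribution bounds it receives.
type printerExp struct{}

func (printerExp) ExportMetrics(ctx context.Context, ms []*metricdata.Metric) error {
    for _, m := range ms {
        for _, ts := range m.TimeSeries {
            for _, p := range ts.Points {
                if d, ok := p.Value.(*metricdata.Distribution); ok {
                    fmt.Println(m.Descriptor.Name, "bounds:", d.BucketOptions.Bounds)
                }
            }
        }
    }
    return nil
}

func main() {
    m := stats.Int64("zero_bucket_demo", "", "")
    v := &view.View{
        Name:        "zero_bucket_demo",
        Measure:     m,
        Aggregation: view.Distribution(0, 5, 10), // note the leading 0 bound
    }
    if err := view.Register(v); err != nil {
        panic(err)
    }
    stats.Record(context.Background(), m.M(1), m.M(3), m.M(5))
    time.Sleep(time.Second) // give the stats worker time to aggregate

    metricexport.NewReader().ReadAndExport(printerExp{})
    // Per the test added in this commit, the exported bounds are [5 10]:
    // the zero bucket is excluded from the exported distribution.
}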
1 change: 0 additions & 1 deletion go.mod
@@ -8,7 +8,6 @@ require (
    golang.org/x/net v0.0.0-20190620200207-3b0461eec859
    golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect
    golang.org/x/text v0.3.2 // indirect
-   google.golang.org/appengine v1.4.0 // indirect
    google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect
    google.golang.org/grpc v1.20.1
)
9 changes: 5 additions & 4 deletions stats/view/aggregation.go
@@ -99,13 +99,14 @@ func Sum() *Aggregation {
// If len(bounds) is 1 then there are no finite buckets, and that single
// element is the common boundary of the overflow and underflow buckets.
func Distribution(bounds ...float64) *Aggregation {
-   return &Aggregation{
+   agg := &Aggregation{
        Type:    AggTypeDistribution,
        Buckets: bounds,
-       newData: func() AggregationData {
-           return newDistributionData(bounds)
-       },
    }
+   agg.newData = func() AggregationData {
+       return newDistributionData(agg)
+   }
+   return agg
}

// LastValue only reports the last value recorded using this
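The point of routing newData through the Aggregation value rather than closing over the original bounds slice is that agg.Buckets can be rewritten after Distribution returns (this is where the zero bucket gets dropped during view registration; that code is not part of this hunk), and every DistributionData created afterwards picks up the rewritten bounds. The following hypothetical in-package test, not part of the commit, illustrates the closure semantics; the function name and the hand-rolled normalization are made up for illustration.

package view

import "testing"

// TestNewDataSeesRewrittenBuckets illustrates why newData captures the
// *Aggregation: data created after agg.Buckets is rewritten uses the new bounds.
func TestNewDataSeesRewrittenBuckets(t *testing.T) {
    agg := Distribution(0, 5, 10)
    // Stand-in for the normalization performed at view-registration time,
    // which drops the zero bound.
    agg.Buckets = []float64{5, 10}

    dd := agg.newData().(*DistributionData)
    if got, want := len(dd.CountPerBucket), 3; got != want {
        t.Errorf("len(CountPerBucket) = %d, want %d (len(bounds)+1)", got, want)
    }
    if len(dd.bounds) != 2 || dd.bounds[0] != 5 || dd.bounds[1] != 10 {
        t.Errorf("bounds = %v, want [5 10]", dd.bounds)
    }
}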
6 changes: 3 additions & 3 deletions stats/view/aggregation_data.go
@@ -128,12 +128,12 @@ type DistributionData struct {
    bounds []float64 // histogram distribution of the values
}

-func newDistributionData(bounds []float64) *DistributionData {
-   bucketCount := len(bounds) + 1
+func newDistributionData(agg *Aggregation) *DistributionData {
+   bucketCount := len(agg.Buckets) + 1
    return &DistributionData{
        CountPerBucket:     make([]int64, bucketCount),
        ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount),
-       bounds:             bounds,
+       bounds:             agg.Buckets,
        Min:                math.MaxFloat64,
        Max:                math.SmallestNonzeroFloat64,
    }
10 changes: 8 additions & 2 deletions stats/view/aggregation_data_test.go
@@ -26,7 +26,10 @@ import (
)

func TestDataClone(t *testing.T) {
-   dist := newDistributionData([]float64{1, 2, 3, 4})
+   agg := &Aggregation{
+       Buckets: []float64{1, 2, 3, 4},
+   }
+   dist := newDistributionData(agg)
    dist.Count = 7
    dist.Max = 11
    dist.Min = 1
@@ -66,7 +69,10 @@ func TestDataClone(t *testing.T) {
}

func TestDistributionData_addSample(t *testing.T) {
-   dd := newDistributionData([]float64{1, 2})
+   agg := &Aggregation{
+       Buckets: []float64{1, 2},
+   }
+   dd := newDistributionData(agg)
    attachments1 := map[string]interface{}{"key1": "value1"}
    t1 := time.Now()
    dd.addSample(0.5, attachments1, t1)
134 changes: 134 additions & 0 deletions stats/view/view_to_metric_test.go
@@ -24,6 +24,7 @@ import (

    "github.com/google/go-cmp/cmp"
    "go.opencensus.io/metric/metricdata"
+   "go.opencensus.io/metric/metricexport"
    "go.opencensus.io/stats"
    "go.opencensus.io/tag"
)
@@ -516,6 +517,139 @@ func TestUnitConversionForAggCount(t *testing.T) {
}
}

type mockExp struct {
    metrics []*metricdata.Metric
}

func (me *mockExp) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error {
    me.metrics = append(me.metrics, metrics...)
    return nil
}

var _ metricexport.Exporter = (*mockExp)(nil)

func TestViewToMetric_OutOfOrderWithZeroBuckets(t *testing.T) {
    m := stats.Int64("OutOfOrderWithZeroBuckets", "", "")
    now := time.Now()
    tts := []struct {
        v *View
        m *metricdata.Metric
    }{
        {
            v: &View{
                Name:        m.Name() + "_order1",
                Measure:     m,
                Aggregation: Distribution(10, 0, 2),
            },
            m: &metricdata.Metric{
                Descriptor: metricdata.Descriptor{
                    Name:      "OutOfOrderWithZeroBuckets_order1",
                    Unit:      metricdata.UnitDimensionless,
                    Type:      metricdata.TypeCumulativeDistribution,
                    LabelKeys: []metricdata.LabelKey{},
                },
                TimeSeries: []*metricdata.TimeSeries{
                    {Points: []metricdata.Point{
                        {Value: &metricdata.Distribution{
                            Count:                 3,
                            Sum:                   9.0,
                            SumOfSquaredDeviation: 8,
                            BucketOptions: &metricdata.BucketOptions{
                                Bounds: []float64{2, 10},
                            },
                            Buckets: []metricdata.Bucket{
                                {Count: 1, Exemplar: nil},
                                {Count: 2, Exemplar: nil},
                                {Count: 0, Exemplar: nil},
                            },
                        },
                            Time: now,
                        },
                    },
                        StartTime:   now,
                        LabelValues: []metricdata.LabelValue{},
                    },
                },
            },
        },
        {
            v: &View{
                Name:        m.Name() + "_order2",
                Measure:     m,
                Aggregation: Distribution(0, 5, 10),
            },
            m: &metricdata.Metric{
                Descriptor: metricdata.Descriptor{
                    Name:      "OutOfOrderWithZeroBuckets_order2",
                    Unit:      metricdata.UnitDimensionless,
                    Type:      metricdata.TypeCumulativeDistribution,
                    LabelKeys: []metricdata.LabelKey{},
                },
                TimeSeries: []*metricdata.TimeSeries{
                    {Points: []metricdata.Point{
                        {Value: &metricdata.Distribution{
                            Count:                 3,
                            Sum:                   9.0,
                            SumOfSquaredDeviation: 8,
                            BucketOptions: &metricdata.BucketOptions{
                                Bounds: []float64{5, 10},
                            },
                            Buckets: []metricdata.Bucket{
                                {Count: 2, Exemplar: nil},
                                {Count: 1, Exemplar: nil},
                                {Count: 0, Exemplar: nil},
                            },
                        },
                            Time: now,
                        },
                    },
                        StartTime:   now,
                        LabelValues: []metricdata.LabelValue{},
                    },
                },
            },
        },
    }
    for _, tt := range tts {
        err := Register(tt.v)
        if err != nil {
            t.Fatalf("error registering view %v, err: %v", tt.v, err)
        }

    }

    stats.Record(context.Background(), m.M(5), m.M(1), m.M(3))
    time.Sleep(1 * time.Second)

    me := &mockExp{}
    reader := metricexport.NewReader()
    reader.ReadAndExport(me)

    var got *metricdata.Metric
    lookup := func(vname string, metrics []*metricdata.Metric) *metricdata.Metric {
        for _, m := range metrics {
            if m.Descriptor.Name == vname {
                return m
            }
        }
        return nil
    }

    for _, tt := range tts {
        got = lookup(tt.v.Name, me.metrics)
        if got == nil {
            t.Fatalf("metric %s not found in %v\n", tt.v.Name, me.metrics)
        }
        got.TimeSeries[0].Points[0].Time = now
        got.TimeSeries[0].StartTime = now

        want := tt.m
        if diff := cmp.Diff(got, want); diff != "" {
            t.Errorf("buckets differ -got +want: %s \n Serialized got %v\n, Serialized want %v\n", diff, serializeAsJSON(got), serializeAsJSON(want))
        }
    }
}

func serializeAsJSON(v interface{}) string {
    blob, _ := json.MarshalIndent(v, "", " ")
    return string(blob)
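For reference, the expected values in both new cases follow directly from the three recorded samples 5, 1 and 3, once the zero bound has been excluded and the remaining bounds sorted (which is what the expected BucketOptions above encode):

Count = 3
Sum   = 5 + 1 + 3 = 9
Mean  = 3, so SumOfSquaredDeviation = (5-3)^2 + (1-3)^2 + (3-3)^2 = 8

Distribution(10, 0, 2) exports bounds {2, 10}:
  (-inf, 2):  {1}    -> Count 1
  [2, 10):    {3, 5} -> Count 2
  [10, +inf): {}     -> Count 0

Distribution(0, 5, 10) exports bounds {5, 10}:
  (-inf, 5):  {1, 3} -> Count 2
  [5, 10):    {5}    -> Count 1
  [10, +inf): {}     -> Count 0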
