Commit 11bdfa3
rename and further optimization.
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
bwplotka committed Jan 26, 2022
1 parent 870e237 commit 11bdfa3
Showing 2 changed files with 18 additions and 17 deletions.
33 changes: 17 additions & 16 deletions prometheus/cache/cache.go
@@ -43,13 +43,13 @@ var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with
 // Use CachedTGatherer with classic Registry using NewMultiTRegistry and ToTransactionalGatherer helpers.
 // NOTE(bwplotka): Experimental, API and behaviour can change.
 type CachedTGatherer struct {
-	metricFamilyByName map[string]*family
-	mMu                sync.RWMutex
+	metricFamiliesByName map[string]*family
+	mMu                  sync.RWMutex
 }
 
 func NewCachedTGatherer() *CachedTGatherer {
 	return &CachedTGatherer{
-		metricFamilyByName: map[string]*family{},
+		metricFamiliesByName: map[string]*family{},
 	}
 }
 
@@ -69,6 +69,7 @@ type metric struct {
 // MetricFamilies pruned and the remaining MetricFamilies sorted by name within
 // the slice, with the contained Metrics sorted within each MetricFamily.
 func normalizeMetricFamilies(metricFamiliesByName map[string]*family) []*dto.MetricFamily {
+	// TODO(bwplotka): We could optimize this further by bookkeeping this slice in place.
 	for _, mf := range metricFamiliesByName {
 		if cap(mf.Metric) < len(mf.metricsByHash) {
 			mf.Metric = make([]*dto.Metric, 0, len(mf.metricsByHash))
@@ -80,9 +81,6 @@ func normalizeMetricFamilies(metricFamiliesByName map[string]*family) []*dto.MetricFamily {
 		sort.Sort(internal.MetricSorter(mf.Metric))
 	}
 
-	for _, mf := range metricFamiliesByName {
-		sort.Sort(internal.MetricSorter(mf.Metric))
-	}
 	names := make([]string, 0, len(metricFamiliesByName))
 	for name, mf := range metricFamiliesByName {
 		if len(mf.Metric) > 0 {
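
Note: the hunks above leave normalizeMetricFamilies reusing the existing mf.Metric backing array (reallocating only when its capacity is too small) and now sort each family exactly once, inside the same loop, instead of in a second pass. A small stand-alone sketch of that reuse-then-sort pattern, not the library code itself:

```go
package main

import (
	"fmt"
	"sort"
)

// fill reuses dst's backing array when it is large enough, allocating a new
// one only when the capacity is insufficient, then returns the sorted result.
func fill(dst []int, src map[string]int) []int {
	if cap(dst) < len(src) {
		dst = make([]int, 0, len(src))
	}
	dst = dst[:0] // keep the backing array, drop the old contents
	for _, v := range src {
		dst = append(dst, v)
	}
	sort.Ints(dst) // sort once, after the slice is rebuilt
	return dst
}

func main() {
	buf := make([]int, 0, 8)
	buf = fill(buf, map[string]int{"a": 3, "b": 1, "c": 2})
	fmt.Println(buf, cap(buf)) // [1 2 3] 8 – the original backing array was reused
}
```

The point of the capacity check is that repeated gathers stop allocating once the slice has grown to its steady-state size.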
@@ -102,8 +100,8 @@ func (c *CachedTGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) {
 	c.mMu.RLock()
 
 	// BenchmarkCachedTGatherer_Update shows, even for 1 million metrics among 1000 families
-	// this is efficient enough (~300µs and ~50 kB per op), no need to cache it for now.
-	return normalizeMetricFamilies(c.metricFamilyByName), c.mMu.RUnlock, nil
+	// this is efficient enough (~400ms and ~50 kB per op), no need to cache it for now.
+	return normalizeMetricFamilies(c.metricFamiliesByName), c.mMu.RUnlock, nil
 }
 
 type Key struct {
@@ -174,7 +172,7 @@ func (c *CachedTGatherer) Update(reset bool, inserts []Insert, deletions []Key)
 		}
 
 		// Update metric family.
-		mf, ok := c.metricFamilyByName[inserts[i].FQName]
+		mf, ok := c.metricFamiliesByName[inserts[i].FQName]
 		if !ok {
 			mf = &family{
 				MetricFamily: &dto.MetricFamily{},
@@ -186,7 +184,7 @@ func (c *CachedTGatherer) Update(reset bool, inserts []Insert, deletions []Key)
 		mf.Type = inserts[i].ValueType.ToDTO()
 		mf.Help = &inserts[i].Help
 
-		c.metricFamilyByName[inserts[i].FQName] = mf
+		c.metricFamiliesByName[inserts[i].FQName] = mf
 
 		// Update metric pointer.
 		hSum := inserts[i].hash()
@@ -250,7 +248,7 @@ func (c *CachedTGatherer) Update(reset bool, inserts []Insert, deletions []Key)
 			continue
 		}
 
-		mf, ok := c.metricFamilyByName[del.FQName]
+		mf, ok := c.metricFamiliesByName[del.FQName]
 		if !ok {
 			continue
 		}
@@ -261,17 +259,18 @@ func (c *CachedTGatherer) Update(reset bool, inserts []Insert, deletions []Key)
 		}
 
 		if len(mf.metricsByHash) == 1 {
-			delete(c.metricFamilyByName, del.FQName)
+			delete(c.metricFamiliesByName, del.FQName)
 			continue
 		}
 
 		delete(mf.metricsByHash, hSum)
 	}
 
 	if reset {
-		for name, mf := range c.metricFamilyByName {
+		// Trading off-time instead of memory allocated for otherwise needed replacement map.
+		for name, mf := range c.metricFamiliesByName {
 			if !mf.touched {
-				delete(c.metricFamilyByName, name)
+				delete(c.metricFamiliesByName, name)
 				continue
 			}
 			for hash, m := range mf.metricsByHash {
@@ -281,12 +280,14 @@ func (c *CachedTGatherer) Update(reset bool, inserts []Insert, deletions []Key)
 				}
 			}
 			if len(mf.metricsByHash) == 0 {
-				delete(c.metricFamilyByName, name)
+				delete(c.metricFamiliesByName, name)
 			}
 		}
 	}
 
-	for _, mf := range c.metricFamilyByName {
+	// TODO(bwplotka): Potentially move this only for reset, but then code would assume
+	// you either only update or only reset update. For now we can live with small overhead.
+	for _, mf := range c.metricFamiliesByName {
 		mf.touched = false
 		for _, m := range mf.metricsByHash {
 			m.touched = false
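
Note: the reset path above is a mark-and-sweep pass over the touched flags: every insert in the batch marks its family and metric, reset deletes whatever was not marked, and the final loop clears all flags whether or not reset was requested (the new TODO notes this could be limited to resets). A minimal, self-contained sketch of that pattern, using a hypothetical entry type rather than the real family/metric structs:

```go
package main

import "fmt"

type entry struct {
	value   float64
	touched bool
}

// update inserts or refreshes entries, optionally pruning everything that was
// not part of this batch (reset), and always clears the flags at the end.
func update(cache map[string]*entry, inserts map[string]float64, reset bool) {
	for name, v := range inserts {
		e, ok := cache[name]
		if !ok {
			e = &entry{}
			cache[name] = e
		}
		e.value = v
		e.touched = true // mark: this entry is part of the current batch
	}

	if reset {
		// Sweep: anything not marked in this batch is stale and dropped.
		for name, e := range cache {
			if !e.touched {
				delete(cache, name) // deleting while ranging over a map is safe in Go
			}
		}
	}

	// Clear marks so the next batch starts from a clean slate.
	for _, e := range cache {
		e.touched = false
	}
}

func main() {
	cache := map[string]*entry{}
	update(cache, map[string]float64{"a": 1, "b": 2}, false)
	update(cache, map[string]float64{"a": 3}, true) // "b" is not in this batch
	fmt.Println(len(cache))                         // 1: only "a" survives the reset
}
```

This is the trade the added comment describes: spending one sweep pass per Update instead of allocating a fresh replacement map.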
2 changes: 1 addition & 1 deletion prometheus/cache/cache_test.go
@@ -220,7 +220,7 @@ func BenchmarkCachedTGatherer_Update(b *testing.B) {
 		b.Error("update:", err)
 	}
 
-	if len(c.metricFamilyByName) != 1e3 || len(c.metricFamilyByName["realistic_longer_name_123"].metricsByHash) != 1e3 {
+	if len(c.metricFamiliesByName) != 1e3 || len(c.metricFamiliesByName["realistic_longer_name_123"].metricsByHash) != 1e3 {
 		// Ensure we did not generate duplicates.
 		panic("generated data set gave wrong numbers")
 	}
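
Note: per the doc comment in cache.go, CachedTGatherer is meant to be combined with a classic Registry via the NewMultiTRegistry and ToTransactionalGatherer helpers. A rough wiring sketch, under the assumption that this experimental cache package ships alongside client_golang's transactional gatherer support (promhttp.HandlerForTransactional); the Insert and Key values passed to Update are elided since their full field set is not shown in this diff:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/cache"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Classic registry for ordinary collectors.
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewGoCollector())

	// Experimental cached gatherer; producers push metrics into it with
	// c.Update(reset, inserts, deletions) instead of registering collectors.
	c := cache.NewCachedTGatherer()

	// Expose both behind a single transactional gatherer, as the
	// CachedTGatherer doc comment suggests.
	all := prometheus.NewMultiTRegistry(prometheus.ToTransactionalGatherer(reg), c)

	http.Handle("/metrics", promhttp.HandlerForTransactional(all, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```

The done callback returned by Gather (c.mMu.RUnlock in the diff above) is what lets such a handler read the cached families without copying them, releasing the read lock only once the scrape is written out.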
