profiler: don't upload full profiles if delta profiling is enabled
The full heap, mutex, and block profiles are not needed when delta
profiling is enabled. Removing them (especially the full heap profile)
results in a significant bandwidth and storage cost reduction.
nsrip-dd committed Feb 28, 2022
1 parent 41a7cd6 commit 70354df
Showing 3 changed files with 21 additions and 23 deletions.
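
For context, a minimal usage sketch of how an application might opt in to delta profiling when starting the profiler. It assumes the WithDeltaProfiles option and the usual Start/Stop API in gopkg.in/DataDog/dd-trace-go.v1/profiler; this is an illustration of the behavior described in the commit message, not part of the change itself.

package main

import (
	"log"

	"gopkg.in/DataDog/dd-trace-go.v1/profiler"
)

func main() {
	// With delta profiling enabled, only the delta heap/mutex/block
	// profiles are uploaded after this change; the full cumulative
	// profiles are no longer included in the batch.
	err := profiler.Start(
		profiler.WithService("my-service"),
		profiler.WithProfileTypes(
			profiler.HeapProfile,
			profiler.MutexProfile,
			profiler.BlockProfile,
		),
		profiler.WithDeltaProfiles(true), // assumed option name
	)
	if err != nil {
		log.Fatal(err)
	}
	defer profiler.Stop()

	// ... application code ...
}
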
19 changes: 11 additions & 8 deletions profiler/profile.go
@@ -214,22 +214,25 @@ func (p *profiler) runProfile(pt ProfileType) ([]*profile, error) {
 	if err != nil {
 		return nil, err
 	}
-	profs := []*profile{{
-		name: t.Filename,
-		data: data,
-	}}
 	// Compute the deltaProf (will be nil if not enabled for this profile type).
 	deltaStart := time.Now()
 	deltaProf, err := p.deltaProfile(t, data)
 	if err != nil {
 		return nil, fmt.Errorf("delta profile error: %s", err)
 	}
-	// Report metrics and append deltaProf if not nil.
 	end := now()
 	tags := append(p.cfg.tags, pt.Tag())
-	// TODO(fg) stop uploading non-delta profiles in the next version of
-	// dd-trace-go after delta profiles are released.
-	if deltaProf != nil {
+	var profs []*profile
+	if !p.cfg.deltaProfiles || deltaProf == nil {
+		profs = append(profs, &profile{
+			name: t.Filename,
+			data: data,
+		})
+	} else {
+		// If the user has enabled delta profiles, then the delta
+		// profile is all we need for the backend and we can save
+		// significant storage and bandwidth by not including the full,
+		// non-delta profile.
 		profs = append(profs, deltaProf)
 		p.cfg.statsd.Timing("datadog.profiler.go.delta_time", end.Sub(deltaStart), tags, 1)
 	}
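
Conceptually, the delta profile that replaces the full profile here is the difference between two consecutive cumulative profiles. A minimal sketch of that idea using the github.com/google/pprof/profile package (imported as pprofile in the tests below); this illustrates the concept only and is not necessarily how p.deltaProfile is implemented internally.

package deltaexample

import (
	pprofile "github.com/google/pprof/profile"
)

// deltaSketch computes curr - prev for cumulative pprof data by negating
// the previous profile's sample values and merging it with the current one.
func deltaSketch(prevData, currData []byte) (*pprofile.Profile, error) {
	prev, err := pprofile.ParseData(prevData)
	if err != nil {
		return nil, err
	}
	curr, err := pprofile.ParseData(currData)
	if err != nil {
		return nil, err
	}
	prev.Scale(-1) // negate previous sample values
	return pprofile.Merge([]*pprofile.Profile{curr, prev})
}
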
16 changes: 6 additions & 10 deletions profiler/profile_test.go
@@ -139,23 +139,19 @@ main;bar 0 0 8 16
 	// additional profile type to ease the transition)
 	profs, err := p.runProfile(profType)
 	require.NoError(t, err)
-	require.Equal(t, 2, len(profs))
-	require.Equal(t, profType.Filename(), profs[0].name)
+	require.Equal(t, 1, len(profs))
+	require.Equal(t, "delta-"+profType.Filename(), profs[0].name)
 	require.Equal(t, prof1, profs[0].data)
-	require.Equal(t, "delta-"+profType.Filename(), profs[1].name)
-	require.Equal(t, prof1, profs[1].data)
 
 	// second run, should produce p1 profile and delta profile
 	profs, err = p.runProfile(profType)
 	require.NoError(t, err)
-	require.Equal(t, 2, len(profs))
-	require.Equal(t, profType.Filename(), profs[0].name)
-	require.Equal(t, prof2, profs[0].data)
-	require.Equal(t, "delta-"+profType.Filename(), profs[1].name)
-	require.Equal(t, test.WantDelta.String(), protobufToText(profs[1].data))
+	require.Equal(t, 1, len(profs))
+	require.Equal(t, "delta-"+profType.Filename(), profs[0].name)
+	require.Equal(t, test.WantDelta.String(), protobufToText(profs[0].data))
 
 	// check delta prof details like timestamps and duration
-	deltaProf, err := pprofile.ParseData(profs[1].data)
+	deltaProf, err := pprofile.ParseData(profs[0].data)
 	require.NoError(t, err)
 	require.Equal(t, test.Prof2.Time.UnixNano(), deltaProf.TimeNanos)
 	require.Equal(t, deltaPeriod.Nanoseconds(), deltaProf.DurationNanos)
9 changes: 4 additions & 5 deletions profiler/profiler_test.go
@@ -265,8 +265,8 @@ func TestProfilerInternal(t *testing.T) {
 	assert.EqualValues(1, startCPU)
 	assert.EqualValues(1, stopCPU)
 
-	// should contain cpu.pprof, metrics.json, heap.pprof, delta-heap.pprof
-	assert.Equal(4, len(bat.profiles))
+	// should contain cpu.pprof, metrics.json, delta-heap.pprof
+	assert.Equal(3, len(bat.profiles))
 
 	p.exit <- struct{}{}
 	<-wait
@@ -319,11 +319,10 @@ func TestProfilerPassthrough(t *testing.T) {
 	}
 
 	assert := assert.New(t)
-	// should contain cpu.pprof, heap.pprof, delta-heap.pprof
-	assert.Equal(3, len(bat.profiles))
+	// should contain cpu.pprof, delta-heap.pprof
+	assert.Equal(2, len(bat.profiles))
 	assert.NotEmpty(bat.profiles[0].data)
 	assert.NotEmpty(bat.profiles[1].data)
-	assert.NotEmpty(bat.profiles[2].data)
 }
 
 func unstartedProfiler(opts ...Option) (*profiler, error) {
