From b10a13ed61883649cec71c073aaee465708e32e1 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Mon, 25 Apr 2022 13:58:22 -0400 Subject: [PATCH 01/47] harvest log events Adds the ability to ingest and harvest log events that have been structured and formatted to the proper json spec. This is meant to be used by wrapper libraries for popular logging frameworks, and should not be called by users. --- v3/examples/short-lived-process/main.go | 7 +- v3/internal/connect_reply.go | 2 + v3/internal/connect_reply_test.go | 6 +- v3/internal/expect.go | 10 + v3/internal/limits.go | 3 + v3/newrelic/analytics_events.go | 2 +- v3/newrelic/app_run.go | 6 + v3/newrelic/app_run_test.go | 34 ++- v3/newrelic/application.go | 20 ++ v3/newrelic/collector.go | 1 + v3/newrelic/config.go | 28 ++ v3/newrelic/config_options.go | 23 ++ v3/newrelic/config_test.go | 22 ++ v3/newrelic/expect_implementation.go | 47 ++- v3/newrelic/harvest.go | 25 +- v3/newrelic/harvest_test.go | 118 ++++++- v3/newrelic/internal_app.go | 54 ++++ v3/newrelic/log_event.go | 105 +++++++ v3/newrelic/log_events.go | 206 +++++++++++++ v3/newrelic/log_events_test.go | 391 ++++++++++++++++++++++++ 20 files changed, 1080 insertions(+), 30 deletions(-) create mode 100644 v3/newrelic/log_event.go create mode 100644 v3/newrelic/log_events.go create mode 100644 v3/newrelic/log_events_test.go diff --git a/v3/examples/short-lived-process/main.go b/v3/examples/short-lived-process/main.go index 61687488d..eb4d2acf0 100644 --- a/v3/examples/short-lived-process/main.go +++ b/v3/examples/short-lived-process/main.go @@ -4,6 +4,7 @@ package main import ( + "context" "fmt" "os" "time" @@ -13,7 +14,9 @@ import ( func main() { app, err := newrelic.NewApplication( - newrelic.ConfigAppName("Short Lived App"), + newrelic.ConfigAppName("Logs in context testing"), + newrelic.ConfigAppLogForwardingEnabled(true), + newrelic.ConfigDistributedTracerEnabled(true), newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), newrelic.ConfigDebugLogger(os.Stdout), ) @@ -39,6 +42,8 @@ func main() { }) } + app.RecordFormattedLogEvent(context.Background(), "App Executed Succesfully", "INFO", time.Now().UnixMilli()) + // Shut down the application to flush data to New Relic. 
app.Shutdown(10 * time.Second) } diff --git a/v3/internal/connect_reply.go b/v3/internal/connect_reply.go index 84b5d00b6..19cd4b721 100644 --- a/v3/internal/connect_reply.go +++ b/v3/internal/connect_reply.go @@ -118,6 +118,7 @@ type EventHarvestConfig struct { Limits struct { TxnEvents *uint `json:"analytic_event_data,omitempty"` CustomEvents *uint `json:"custom_event_data,omitempty"` + LogEvents *uint `json:"log_event_data,omitempty"` ErrorEvents *uint `json:"error_event_data,omitempty"` SpanEvents *uint `json:"span_event_data,omitempty"` } `json:"harvest_limits"` @@ -141,6 +142,7 @@ func DefaultEventHarvestConfig(maxTxnEvents int) EventHarvestConfig { cfg.ReportPeriodMs = DefaultConfigurableEventHarvestMs cfg.Limits.TxnEvents = uintPtr(uint(maxTxnEvents)) cfg.Limits.CustomEvents = uintPtr(uint(MaxCustomEvents)) + cfg.Limits.LogEvents = uintPtr(uint(MaxLogEvents)) cfg.Limits.ErrorEvents = uintPtr(uint(MaxErrorEvents)) return cfg } diff --git a/v3/internal/connect_reply_test.go b/v3/internal/connect_reply_test.go index 64312007a..a2be63a17 100644 --- a/v3/internal/connect_reply_test.go +++ b/v3/internal/connect_reply_test.go @@ -177,7 +177,9 @@ func TestDefaultEventHarvestConfigJSON(t *testing.T) { if err != nil { t.Error(err) } - if string(js) != `{"report_period_ms":60000,"harvest_limits":{"analytic_event_data":10000,"custom_event_data":10000,"error_event_data":100}}` { - t.Error(string(js)) + + expect := `{"report_period_ms":60000,"harvest_limits":{"analytic_event_data":10000,"custom_event_data":10000,"log_event_data":10000,"error_event_data":100}}` + if string(js) != expect { + t.Errorf("DefaultEventHarvestConfig does not match expected valued:\nExpected:\t%s\nActual:\t\t%s", expect, string(js)) } } diff --git a/v3/internal/expect.go b/v3/internal/expect.go index 3dff6e475..638cb224b 100644 --- a/v3/internal/expect.go +++ b/v3/internal/expect.go @@ -26,6 +26,15 @@ type WantError struct { AgentAttributes map[string]interface{} } +// WantLog is a traced log event expectation +type WantLog struct { + Severity string + Message string + SpanID string + TraceID string + Timestamp int64 +} + func uniquePointer() *struct{} { s := struct{}{} return &s @@ -112,6 +121,7 @@ type WantTxn struct { // captured. 
type Expect interface { ExpectCustomEvents(t Validator, want []WantEvent) + ExpectLogEvents(t Validator, want []WantLog) ExpectErrors(t Validator, want []WantError) ExpectErrorEvents(t Validator, want []WantEvent) diff --git a/v3/internal/limits.go b/v3/internal/limits.go index 7dcb96785..95fcebe6c 100644 --- a/v3/internal/limits.go +++ b/v3/internal/limits.go @@ -17,6 +17,9 @@ const ( // MaxCustomEvents is the maximum number of Transaction Events that can be captured // per 60-second harvest cycle MaxCustomEvents = 10 * 1000 + // MaxLogEvents is the maximum number of Log Events that can be captured per + // 60-second harvest cycle + MaxLogEvents = 10 * 1000 // MaxTxnEvents is the maximum number of Transaction Events that can be captured // per 60-second harvest cycle MaxTxnEvents = 10 * 1000 diff --git a/v3/newrelic/analytics_events.go b/v3/newrelic/analytics_events.go index aa751ceab..6c1fdb63b 100644 --- a/v3/newrelic/analytics_events.go +++ b/v3/newrelic/analytics_events.go @@ -19,8 +19,8 @@ type analyticsEventHeap []analyticsEvent type analyticsEvents struct { numSeen int - events analyticsEventHeap failedHarvests int + events analyticsEventHeap } func (events *analyticsEvents) NumSeen() float64 { return float64(events.numSeen) } diff --git a/v3/newrelic/app_run.go b/v3/newrelic/app_run.go index 3df42ea55..640c544bc 100644 --- a/v3/newrelic/app_run.go +++ b/v3/newrelic/app_run.go @@ -120,6 +120,7 @@ func newAppRun(config config, reply *internal.ConnectReply) *appRun { ReportPeriods: run.ReportPeriods(), MaxTxnEvents: run.MaxTxnEvents(), MaxCustomEvents: run.MaxCustomEvents(), + MaxLogEvents: run.MaxLogEvents(), MaxErrorEvents: run.MaxErrorEvents(), MaxSpanEvents: run.MaxSpanEvents(), } @@ -187,6 +188,7 @@ func (run *appRun) txnTraceThreshold(apdexThreshold time.Duration) time.Duration func (run *appRun) ptrTxnEvents() *uint { return run.Reply.EventData.Limits.TxnEvents } func (run *appRun) ptrCustomEvents() *uint { return run.Reply.EventData.Limits.CustomEvents } +func (run *appRun) ptrLogEvents() *uint { return run.Reply.EventData.Limits.LogEvents } func (run *appRun) ptrErrorEvents() *uint { return run.Reply.EventData.Limits.ErrorEvents } func (run *appRun) ptrSpanEvents() *uint { return run.Reply.EventData.Limits.SpanEvents } @@ -194,6 +196,9 @@ func (run *appRun) MaxTxnEvents() int { return run.limit(run.Config.maxTxnEvents func (run *appRun) MaxCustomEvents() int { return run.limit(internal.MaxCustomEvents, run.ptrCustomEvents) } +func (run *appRun) MaxLogEvents() int { + return run.limit(internal.MaxLogEvents, run.ptrLogEvents) +} func (run *appRun) MaxErrorEvents() int { return run.limit(internal.MaxErrorEvents, run.ptrErrorEvents) } @@ -219,6 +224,7 @@ func (run *appRun) ReportPeriods() map[harvestTypes]time.Duration { for tp, fn := range map[harvestTypes]func() *uint{ harvestTxnEvents: run.ptrTxnEvents, harvestCustomEvents: run.ptrCustomEvents, + harvestLogEvents: run.ptrLogEvents, harvestErrorEvents: run.ptrErrorEvents, harvestSpanEvents: run.ptrSpanEvents, } { diff --git a/v3/newrelic/app_run_test.go b/v3/newrelic/app_run_test.go index 56bd33c2f..289b31848 100644 --- a/v3/newrelic/app_run_test.go +++ b/v3/newrelic/app_run_test.go @@ -130,6 +130,8 @@ func TestEmptyReplyEventHarvestDefaults(t *testing.T) { maxCustomEvents: internal.MaxCustomEvents, maxErrorEvents: internal.MaxErrorEvents, maxSpanEvents: run.Config.DistributedTracer.ReservoirLimit, + maxLogEvents: internal.MaxLogEvents, + periods: map[harvestTypes]time.Duration{ harvestTypesAll: 60 * time.Second, 0: 60 * 
time.Second, @@ -144,8 +146,9 @@ func TestEventHarvestFieldsAllPopulated(t *testing.T) { "harvest_limits": { "analytic_event_data": 1, "custom_event_data": 2, - "span_event_data": 3, - "error_event_data": 4 + "log_event_data": 3, + "span_event_data": 4, + "error_event_data": 5 } } }}`), internal.PreconnectReply{}) @@ -156,8 +159,9 @@ func TestEventHarvestFieldsAllPopulated(t *testing.T) { assertHarvestConfig(t, run.harvestConfig, expectHarvestConfig{ maxTxnEvents: 1, maxCustomEvents: 2, - maxErrorEvents: 4, - maxSpanEvents: 3, + maxLogEvents: 3, + maxSpanEvents: 4, + maxErrorEvents: 5, periods: map[harvestTypes]time.Duration{ harvestMetricsTraces: 60 * time.Second, harvestTypesEvents: 5 * time.Second, @@ -178,6 +182,7 @@ func TestZeroReportPeriod(t *testing.T) { assertHarvestConfig(t, run.harvestConfig, expectHarvestConfig{ maxTxnEvents: internal.MaxTxnEvents, maxCustomEvents: internal.MaxCustomEvents, + maxLogEvents: internal.MaxLogEvents, maxErrorEvents: internal.MaxErrorEvents, maxSpanEvents: run.Config.DistributedTracer.ReservoirLimit, periods: map[harvestTypes]time.Duration{ @@ -200,6 +205,7 @@ func TestEventHarvestFieldsOnlySpanEvents(t *testing.T) { assertHarvestConfig(t, run.harvestConfig, expectHarvestConfig{ maxTxnEvents: internal.MaxTxnEvents, maxCustomEvents: internal.MaxCustomEvents, + maxLogEvents: internal.MaxLogEvents, maxErrorEvents: internal.MaxErrorEvents, maxSpanEvents: 3, periods: map[harvestTypes]time.Duration{ @@ -224,6 +230,7 @@ func TestEventHarvestFieldsOnlyTxnEvents(t *testing.T) { maxCustomEvents: internal.MaxCustomEvents, maxErrorEvents: internal.MaxErrorEvents, maxSpanEvents: run.Config.DistributedTracer.ReservoirLimit, + maxLogEvents: internal.MaxLogEvents, periods: map[harvestTypes]time.Duration{ harvestTypesAll ^ harvestTxnEvents: 60 * time.Second, harvestTxnEvents: 5 * time.Second, @@ -244,6 +251,7 @@ func TestEventHarvestFieldsOnlyErrorEvents(t *testing.T) { assertHarvestConfig(t, run.harvestConfig, expectHarvestConfig{ maxTxnEvents: internal.MaxTxnEvents, maxCustomEvents: internal.MaxCustomEvents, + maxLogEvents: internal.MaxLogEvents, maxErrorEvents: 3, maxSpanEvents: run.Config.DistributedTracer.ReservoirLimit, periods: map[harvestTypes]time.Duration{ @@ -266,6 +274,7 @@ func TestEventHarvestFieldsOnlyCustomEvents(t *testing.T) { assertHarvestConfig(t, run.harvestConfig, expectHarvestConfig{ maxTxnEvents: internal.MaxTxnEvents, maxCustomEvents: 3, + maxLogEvents: internal.MaxLogEvents, maxErrorEvents: internal.MaxErrorEvents, maxSpanEvents: run.Config.DistributedTracer.ReservoirLimit, periods: map[harvestTypes]time.Duration{ @@ -366,9 +375,13 @@ type expectHarvestConfig struct { maxCustomEvents int maxErrorEvents int maxSpanEvents int + maxLogEvents int periods map[harvestTypes]time.Duration } +func errorExpectNotEqualActual(value string, expect, actual interface{}) error { + return fmt.Errorf("Expected %s value does not match actual; expected: %+v actual: %+v", value, expect, actual) +} func assertHarvestConfig(t testing.TB, hc harvestConfig, expect expectHarvestConfig) { if h, ok := t.(interface { Helper() @@ -376,19 +389,22 @@ func assertHarvestConfig(t testing.TB, hc harvestConfig, expect expectHarvestCon h.Helper() } if max := hc.MaxTxnEvents; max != expect.maxTxnEvents { - t.Error(max, expect.maxTxnEvents) + t.Error(errorExpectNotEqualActual("maxTxnEvents", max, expect.maxTxnEvents)) } if max := hc.MaxCustomEvents; max != expect.maxCustomEvents { - t.Error(max, expect.maxCustomEvents) + t.Error(errorExpectNotEqualActual("MaxCustomEvents", max, 
expect.maxCustomEvents)) } if max := hc.MaxSpanEvents; max != expect.maxSpanEvents { - t.Error(max, expect.maxSpanEvents) + t.Error(errorExpectNotEqualActual("MaxSpanEvents", max, expect.maxSpanEvents)) } if max := hc.MaxErrorEvents; max != expect.maxErrorEvents { - t.Error(max, expect.maxErrorEvents) + t.Error(errorExpectNotEqualActual("MaxErrorEvents", max, expect.maxErrorEvents)) + } + if max := hc.MaxLogEvents; max != expect.maxLogEvents { + t.Error(errorExpectNotEqualActual("MaxLogEvents", max, expect.maxErrorEvents)) } if periods := hc.ReportPeriods; !reflect.DeepEqual(periods, expect.periods) { - t.Error(periods, expect.periods) + t.Error(errorExpectNotEqualActual("ReportPeriods", periods, expect.periods)) } } diff --git a/v3/newrelic/application.go b/v3/newrelic/application.go index 7a8db268f..584ba30ed 100644 --- a/v3/newrelic/application.go +++ b/v3/newrelic/application.go @@ -4,6 +4,7 @@ package newrelic import ( + "context" "os" "time" ) @@ -51,6 +52,25 @@ func (app *Application) RecordCustomEvent(eventType string, params map[string]in } } +// RecordLogEvent adds a log event from a newrelic context, a log message, a log severity, and a timestamp +// formatted as UnixMilliseconds. +// +// The severity should be either a single word, number, or an empty string. +func (app *Application) RecordLogEvent(context context.Context, message, severity string, timestamp int64) { + if nil == app { + return + } + if nil == app.app { + return + } + err := app.app.RecordLogEvent(context, message, severity, timestamp) + if err != nil { + app.app.Error("unable to record log event", map[string]interface{}{ + "reason": err.Error(), + }) + } +} + // RecordCustomMetric records a custom metric. The metric name you // provide will be prefixed by "Custom/". Custom metrics are not // currently supported in serverless mode. diff --git a/v3/newrelic/collector.go b/v3/newrelic/collector.go index 5ed11dcef..ea61e146c 100644 --- a/v3/newrelic/collector.go +++ b/v3/newrelic/collector.go @@ -30,6 +30,7 @@ const ( cmdConnect = "connect" cmdMetrics = "metric_data" cmdCustomEvents = "custom_event_data" + cmdLogEvents = "log_event_data" cmdTxnEvents = "analytic_event_data" cmdErrorEvents = "error_event_data" cmdErrorData = "error_data" diff --git a/v3/newrelic/config.go b/v3/newrelic/config.go index fe0f7c7f7..e7eeb3464 100644 --- a/v3/newrelic/config.go +++ b/v3/newrelic/config.go @@ -145,6 +145,29 @@ type Config struct { } } + // ApplicationLogging contains settings which control the capture and sending + // of log event data + ApplicationLogging struct { + // If this is disabled, all sub-features are disabled; + // if it is enabled, the individual sub-feature configurations take effect. + // MAY accomplish this by not installing instrumentation, or by early-return/no-op as necessary for an agent. + Enabled bool + // Forwarding controls log forwarding to New Relic One + Forwarding struct { + // Toggles whether the agent gathers log records for sending to New Relic. + Enabled bool + // Number of log records to send per minute to New Relic. + // Controls the overall memory consumption when using log forwarding. + // SHOULD be sent as part of the harvest_limits on Connect. + MaxSamplesStored int + } + Metrics struct { + // Toggles whether the agent gathers the the user facing Logging/lines and Logging/lines/{SEVERITY} + // Logging Metrics used in the Logs chart on the APM Summary page. + Enabled bool + } + } + // BrowserMonitoring contains settings which control the behavior of // Transaction.BrowserTimingHeader. 
BrowserMonitoring struct { @@ -412,6 +435,11 @@ func defaultConfig() Config { c.TransactionTracer.Attributes.Enabled = true c.TransactionTracer.Segments.Attributes.Enabled = true + c.ApplicationLogging.Enabled = true + c.ApplicationLogging.Forwarding.Enabled = false + c.ApplicationLogging.Forwarding.MaxSamplesStored = internal.MaxLogEvents + c.ApplicationLogging.Metrics.Enabled = true + c.BrowserMonitoring.Enabled = true // browser monitoring attributes are disabled by default c.BrowserMonitoring.Attributes.Enabled = false diff --git a/v3/newrelic/config_options.go b/v3/newrelic/config_options.go index 3ba323924..9ba7c32f6 100644 --- a/v3/newrelic/config_options.go +++ b/v3/newrelic/config_options.go @@ -44,6 +44,29 @@ func ConfigDistributedTracerReservoirLimit(limit int) ConfigOption { return func(cfg *Config) { cfg.DistributedTracer.ReservoirLimit = limit } } +// ConfigAppLogForwadringEnabled enables or disables the collection +// of logs from a users application by the agent +// Defaults: enabled=false, maxSamplesStored=10,000 +func ConfigAppLogForwardingEnabled(enabled bool) ConfigOption { + return func(cfg *Config) { + if enabled == true { + cfg.ApplicationLogging.Enabled = true + cfg.ApplicationLogging.Forwarding.Enabled = true + } else { + cfg.ApplicationLogging.Forwarding.Enabled = false + cfg.ApplicationLogging.Forwarding.MaxSamplesStored = 0 + } + } +} + +// ConfigAppLogForwardingMaxSamplesStored allows users to set the maximium number of +// log events the agent is allowed to collect and store in a given harvest cycle. +func ConfigAppLogForwardingMaxSamplesStored(maxSamplesStored int) ConfigOption { + return func(cfg *Config) { + cfg.ApplicationLogging.Forwarding.MaxSamplesStored = maxSamplesStored + } +} + // ConfigLogger populates the Config's Logger. 
func ConfigLogger(l Logger) ConfigOption { return func(cfg *Config) { cfg.Logger = l } diff --git a/v3/newrelic/config_test.go b/v3/newrelic/config_test.go index b6df1731b..7ad8e77d4 100644 --- a/v3/newrelic/config_test.go +++ b/v3/newrelic/config_test.go @@ -129,6 +129,16 @@ func TestCopyConfigReferenceFieldsPresent(t *testing.T) { "host":"my-hostname", "settings":{ "AppName":"my appname", + "ApplicationLogging": { + "Enabled": true, + "Forwarding": { + "Enabled": false, + "MaxSamplesStored": 10000 + }, + "Metrics": { + "Enabled": true + } + }, "Attributes":{"Enabled":true,"Exclude":["2"],"Include":["1"]}, "BrowserMonitoring":{ "Attributes":{"Enabled":false,"Exclude":["10"],"Include":["9"]}, @@ -250,6 +260,7 @@ func TestCopyConfigReferenceFieldsPresent(t *testing.T) { "harvest_limits": { "analytic_event_data": 10000, "custom_event_data": 10000, + "log_event_data": 10000, "error_event_data": 100, "span_event_data": 2000 } @@ -302,6 +313,16 @@ func TestCopyConfigReferenceFieldsAbsent(t *testing.T) { "host":"my-hostname", "settings":{ "AppName":"my appname", + "ApplicationLogging": { + "Enabled": true, + "Forwarding": { + "Enabled": false, + "MaxSamplesStored": 10000 + }, + "Metrics": { + "Enabled": true + } + }, "Attributes":{"Enabled":true,"Exclude":null,"Include":null}, "BrowserMonitoring":{ "Attributes":{ @@ -415,6 +436,7 @@ func TestCopyConfigReferenceFieldsAbsent(t *testing.T) { "harvest_limits": { "analytic_event_data": 10000, "custom_event_data": 10000, + "log_event_data": 10000, "error_event_data": 100, "span_event_data": 2000 } diff --git a/v3/newrelic/expect_implementation.go b/v3/newrelic/expect_implementation.go index 5edcc3033..aafaf08a0 100644 --- a/v3/newrelic/expect_implementation.go +++ b/v3/newrelic/expect_implementation.go @@ -4,6 +4,7 @@ package newrelic import ( + "bytes" "encoding/json" "fmt" "time" @@ -159,19 +160,31 @@ func expectAttributes(v internal.Validator, exists map[string]interface{}, expec if len(exists) != len(expect) { v.Error("attributes length difference", len(exists), len(expect)) } - for key, val := range expect { - found, ok := exists[key] + for key, expectVal := range expect { + actualVal, ok := exists[key] if !ok { v.Error("expected attribute not found: ", key) continue } - if val == internal.MatchAnything || val == "*" { + if expectVal == internal.MatchAnything || expectVal == "*" { continue } - v1 := fmt.Sprint(found) - v2 := fmt.Sprint(val) - if v1 != v2 { - v.Error("value difference", fmt.Sprintf("key=%s", key), v1, v2) + + actualString := fmt.Sprint(actualVal) + expectString := fmt.Sprint(expectVal) + switch expectVal.(type) { + case float64: + // json.Number type objects need to be converted into float64 strings + // when compared against a float64 or the comparison will fail due to + // the number formatting being different + if number, ok := actualVal.(json.Number); ok { + numString, _ := number.Float64() + actualString = fmt.Sprint(numString) + } + } + + if expectString != actualString { + v.Error(fmt.Sprintf("Values of key \"%s\" do not match; Expect: %s Actual: %s", key, expectString, actualString)) } } for key, val := range exists { @@ -188,18 +201,36 @@ func expectCustomEvents(v internal.Validator, cs *customEvents, expect []interna expectEvents(v, cs.analyticsEvents, expect, nil) } +func expectLogEvents(v internal.Validator, logEvents *logEvents, expect []internal.WantLog) { + //TODO(egarcia): implement this + return +} + func expectEvent(v internal.Validator, e json.Marshaler, expect internal.WantEvent) { js, err := e.MarshalJSON() if 
nil != err { v.Error("unable to marshal event", err) return } + + // Because we are unmarshaling into a generic struct without types + // JSON numbers will be set to the float64 type by default, causing + // errors when comparing to the expected integer timestamp value. + decoder := json.NewDecoder(bytes.NewReader(js)) + decoder.UseNumber() var event []map[string]interface{} - err = json.Unmarshal(js, &event) + err = decoder.Decode(&event) if nil != err { v.Error("unable to parse event json", err) return } + + // avoid nil pointer errors or index out of bounds errors + if event == nil || len(event) == 0 { + v.Error("Event can not be nil or empty") + return + } + intrinsics := event[0] userAttributes := event[1] agentAttributes := event[2] diff --git a/v3/newrelic/harvest.go b/v3/newrelic/harvest.go index b92865d4f..08a921c05 100644 --- a/v3/newrelic/harvest.go +++ b/v3/newrelic/harvest.go @@ -22,13 +22,14 @@ const ( harvestMetricsTraces harvestTypes = 1 << iota harvestSpanEvents harvestCustomEvents + harvestLogEvents harvestTxnEvents harvestErrorEvents ) const ( // harvestTypesEvents includes all Event types - harvestTypesEvents = harvestSpanEvents | harvestCustomEvents | harvestTxnEvents | harvestErrorEvents + harvestTypesEvents = harvestSpanEvents | harvestCustomEvents | harvestTxnEvents | harvestErrorEvents | harvestLogEvents // harvestTypesAll includes all harvest types harvestTypesAll = harvestMetricsTraces | harvestTypesEvents ) @@ -66,6 +67,7 @@ type harvest struct { SlowSQLs *slowQueries SpanEvents *spanEvents CustomEvents *customEvents + LogEvents *logEvents TxnEvents *txnEvents ErrorEvents *errorEvents } @@ -92,6 +94,10 @@ func (h *harvest) Ready(now time.Time) *harvest { ready.CustomEvents = h.CustomEvents h.CustomEvents = newCustomEvents(h.CustomEvents.capacity()) } + if 0 != types&harvestLogEvents { + ready.LogEvents = h.LogEvents + h.LogEvents = newLogEvents(h.LogEvents.commonAttributes, h.LogEvents.capacity()) + } if 0 != types&harvestTxnEvents { h.Metrics.addCount(txnEventsSeen, h.TxnEvents.NumSeen(), forced) h.Metrics.addCount(txnEventsSent, h.TxnEvents.NumSaved(), forced) @@ -133,6 +139,9 @@ func (h *harvest) Payloads(splitLargeTxnEvents bool) (ps []payloadCreator) { if nil != h.CustomEvents { ps = append(ps, h.CustomEvents) } + if nil != h.LogEvents { + ps = append(ps, h.LogEvents) + } if nil != h.ErrorEvents { ps = append(ps, h.ErrorEvents) } @@ -162,11 +171,13 @@ func (h *harvest) Payloads(splitLargeTxnEvents bool) (ps []payloadCreator) { } type harvestConfig struct { - ReportPeriods map[harvestTypes]time.Duration - MaxSpanEvents int - MaxCustomEvents int - MaxErrorEvents int - MaxTxnEvents int + ReportPeriods map[harvestTypes]time.Duration + CommonAttributes commonAttributes + MaxSpanEvents int + MaxCustomEvents int + MaxLogEvents int + MaxErrorEvents int + MaxTxnEvents int } // newHarvest returns a new Harvest. 
@@ -179,6 +190,7 @@ func newHarvest(now time.Time, configurer harvestConfig) *harvest { SlowSQLs: newSlowQueries(maxHarvestSlowSQLs), SpanEvents: newSpanEvents(configurer.MaxSpanEvents), CustomEvents: newCustomEvents(configurer.MaxCustomEvents), + LogEvents: newLogEvents(configurer.CommonAttributes, configurer.MaxLogEvents), TxnEvents: newTxnEvents(configurer.MaxTxnEvents), ErrorEvents: newErrorEvents(configurer.MaxErrorEvents), } @@ -326,6 +338,7 @@ var ( MaxTxnEvents: internal.MaxTxnEvents, MaxSpanEvents: defaultMaxSpanEvents, MaxCustomEvents: internal.MaxCustomEvents, + MaxLogEvents: internal.MaxLogEvents, MaxErrorEvents: internal.MaxErrorEvents, } ) diff --git a/v3/newrelic/harvest_test.go b/v3/newrelic/harvest_test.go index 76317ed4d..fc5ec4d73 100644 --- a/v3/newrelic/harvest_test.go +++ b/v3/newrelic/harvest_test.go @@ -181,7 +181,7 @@ func TestCreateFinalMetricsTraceObserver(t *testing.T) { func TestEmptyPayloads(t *testing.T) { h := newHarvest(time.Now(), dfltHarvestCfgr) payloads := h.Payloads(true) - if len(payloads) != 8 { + if len(payloads) != 9 { t.Error(len(payloads)) } for _, p := range payloads { @@ -260,6 +260,66 @@ func TestHarvestCustomEventsReady(t *testing.T) { }) } +func TestHarvestLogEventsReady(t *testing.T) { + now := time.Now() + fixedHarvestTypes := harvestMetricsTraces & harvestTxnEvents & harvestSpanEvents & harvestLogEvents + h := newHarvest(now, harvestConfig{ + ReportPeriods: map[harvestTypes]time.Duration{ + fixedHarvestTypes: fixedHarvestPeriod, + harvestLogEvents: time.Second * 5, + }, + MaxLogEvents: 3, + }) + timestamp := timeToIntMillis(now) + severity := "INFO" + message := "User 'xyz' logged in" + spanID := "123456789ADF" + traceID := "ADF09876565" + + logEvent := logEvent{ + 0.9, + severity, + message, + spanID, + traceID, + 123456, + } + + h.LogEvents.Add(&logEvent) + ready := h.Ready(now.Add(10 * time.Second)) + payloads := ready.Payloads(true) + if len(payloads) == 0 { + t.Fatal("no payloads generated") + } else if len(payloads) > 1 { + t.Fatalf("too many payloads: %d", len(payloads)) + } + p := payloads[0] + if m := p.EndpointMethod(); m != "log_event_data" { + t.Error(m) + } + data, err := p.Data("agentRunID", now) + if nil != err || nil == data { + t.Error(err, data) + } + if h.LogEvents.capacity() != 3 || h.LogEvents.NumSaved() != 0 { + t.Fatal("log events not correctly reset") + } + + expectLogEvents(t, ready.LogEvents, []internal.WantLog{ + internal.WantLog{ + severity, + message, + spanID, + traceID, + timestamp, + }, + }) + /* expectMetrics(t, h.Metrics, []internal.WantMetric{ + {Name: logEventsSeen, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: logEventsSent, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + }) */ +} + func TestHarvestTxnEventsReady(t *testing.T) { now := time.Now() fixedHarvestTypes := harvestMetricsTraces & harvestCustomEvents & harvestSpanEvents & harvestErrorEvents @@ -473,6 +533,22 @@ func TestMergeFailedHarvest(t *testing.T) { Duration: 1 * time.Second, TotalTime: 2 * time.Second, }, 0) + // timestamp := timeToIntMillis(now) + logLevel := "INFO" + message := "User 'xyz' logged in" + spanID := "123456789ADF" + traceID := "ADF09876565" + + logEvent := logEvent{ + 0.9, + logLevel, + message, + spanID, + traceID, + 123456, + } + + h.LogEvents.Add(&logEvent) customEventParams := map[string]interface{}{"zip": 1} ce, err := createCustomEvent("myEvent", customEventParams, time.Now()) if nil != err { @@ -513,6 +589,9 @@ func TestMergeFailedHarvest(t *testing.T) { if 0 != 
h.CustomEvents.analyticsEvents.failedHarvests { t.Error(h.CustomEvents.analyticsEvents.failedHarvests) } + if 0 != h.LogEvents.failedHarvests { + t.Error(h.LogEvents.failedHarvests) + } if 0 != h.TxnEvents.analyticsEvents.failedHarvests { t.Error(h.TxnEvents.analyticsEvents.failedHarvests) } @@ -532,6 +611,15 @@ func TestMergeFailedHarvest(t *testing.T) { }, UserAttributes: customEventParams, }}) + /* expectLogEvents(t, h.LogEvents, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "timestamp": timestamp, + "log.level": logLevel, + "message": message, + "span.id": spanID, + "trace.id": traceID, + }, + }})*/ expectErrorEvents(t, h.ErrorEvents, []internal.WantEvent{{ Intrinsics: map[string]interface{}{ "error.class": "klass", @@ -582,6 +670,9 @@ func TestMergeFailedHarvest(t *testing.T) { if 1 != nextHarvest.CustomEvents.analyticsEvents.failedHarvests { t.Error(nextHarvest.CustomEvents.analyticsEvents.failedHarvests) } + if 1 != nextHarvest.LogEvents.failedHarvests { + t.Error(nextHarvest.LogEvents.failedHarvests) + } if 1 != nextHarvest.TxnEvents.analyticsEvents.failedHarvests { t.Error(nextHarvest.TxnEvents.analyticsEvents.failedHarvests) } @@ -601,6 +692,15 @@ func TestMergeFailedHarvest(t *testing.T) { }, UserAttributes: customEventParams, }}) + /* expectLogEvents(t, nextHarvest.LogEvents, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "timestamp": timestamp, + "log.level": logLevel, + "message": message, + "span.id": spanID, + "trace.id": traceID, + }, + }}) */ expectErrorEvents(t, nextHarvest.ErrorEvents, []internal.WantEvent{{ Intrinsics: map[string]interface{}{ "error.class": "klass", @@ -730,10 +830,10 @@ func TestHarvestSplitTxnEvents(t *testing.T) { payloadsWithSplit := h.Payloads(true) payloadsWithoutSplit := h.Payloads(false) - if len(payloadsWithSplit) != 9 { + if len(payloadsWithSplit) != 10 { t.Error(len(payloadsWithSplit)) } - if len(payloadsWithoutSplit) != 8 { + if len(payloadsWithoutSplit) != 9 { t.Error(len(payloadsWithoutSplit)) } } @@ -826,6 +926,9 @@ func TestNewHarvestSetsDefaultValues(t *testing.T) { if cp := h.CustomEvents.capacity(); cp != internal.MaxCustomEvents { t.Error("wrong custom event capacity", cp) } + if cp := h.LogEvents.capacity(); cp != internal.MaxLogEvents { + t.Error("wrong log event capacity", cp) + } if cp := h.ErrorEvents.capacity(); cp != internal.MaxErrorEvents { t.Error("wrong error event capacity", cp) } @@ -845,6 +948,7 @@ func TestNewHarvestUsesConnectReply(t *testing.T) { MaxCustomEvents: 2, MaxErrorEvents: 3, MaxSpanEvents: 4, + MaxLogEvents: 5, }) if cp := h.TxnEvents.capacity(); cp != 1 { @@ -859,6 +963,9 @@ func TestNewHarvestUsesConnectReply(t *testing.T) { if cp := h.SpanEvents.capacity(); cp != 4 { t.Error("wrong span event capacity", cp) } + if cp := h.LogEvents.capacity(); cp != 5 { + t.Error("wrong log event capacity", cp) + } } func TestConfigurableHarvestZeroHarvestLimits(t *testing.T) { @@ -871,6 +978,7 @@ func TestConfigurableHarvestZeroHarvestLimits(t *testing.T) { }, MaxTxnEvents: 0, MaxCustomEvents: 0, + MaxLogEvents: 0, MaxErrorEvents: 0, MaxSpanEvents: 0, }) @@ -880,6 +988,9 @@ func TestConfigurableHarvestZeroHarvestLimits(t *testing.T) { if cp := h.CustomEvents.capacity(); cp != 0 { t.Error("wrong custom event capacity", cp) } + if cp := h.LogEvents.capacity(); cp != 0 { + t.Error("wrong log event capacity", cp) + } if cp := h.ErrorEvents.capacity(); cp != 0 { t.Error("wrong error event capacity", cp) } @@ -891,6 +1002,7 @@ func TestConfigurableHarvestZeroHarvestLimits(t *testing.T) { // 
safe. h.TxnEvents.AddTxnEvent(&txnEvent{}, 1.0) h.CustomEvents.Add(&customEvent{}) + h.LogEvents.Add(&logEvent{}) h.ErrorEvents.Add(&errorEvent{}, 1.0) h.SpanEvents.addEventPopulated(&sampleSpanEvent) diff --git a/v3/newrelic/internal_app.go b/v3/newrelic/internal_app.go index 767f1782e..6d1b48f63 100644 --- a/v3/newrelic/internal_app.go +++ b/v3/newrelic/internal_app.go @@ -4,6 +4,7 @@ package newrelic import ( + "context" "errors" "fmt" "io" @@ -291,6 +292,13 @@ func (app *app) process() { "server-SpanEvents.Enabled": run.Config.SpanEvents.Enabled, }) } + + run.harvestConfig.CommonAttributes = commonAttributes{ + hostname: app.config.hostname, + entityName: app.config.AppName, + entityGUID: run.Reply.EntityGUID, + } + h = newHarvest(time.Now(), run.harvestConfig) app.setState(run, nil) @@ -542,6 +550,48 @@ func (app *app) RecordCustomEvent(eventType string, params map[string]interface{ return nil } +var ( + errApplicationLoggingDisabled = errors.New("application logging disabled") + errLogForwardingDisabled = errors.New("log forwarding disabled") + + // making a function for this because this huge if statement is an eyesore + isAppLogFowardingDisabled = func(app *app) bool { + return !(app.config.ApplicationLogging.Forwarding.Enabled && + app.config.ApplicationLogging.Forwarding.MaxSamplesStored > 0) + } +) + +func (app *app) RecordLogEvent(context context.Context, message, severity string, timestamp int64) error { + if app.config.Config.HighSecurity { + return errHighSecurityEnabled + } + + if !app.config.ApplicationLogging.Enabled { + return errApplicationLoggingDisabled + } + if isAppLogFowardingDisabled(app) { + return errLogForwardingDisabled + } + + txn := FromContext(context) + traceMetadata := txn.GetTraceMetadata() + logEvent := logEvent{ + severity: severity, + message: message, + traceID: traceMetadata.TraceID, + spanID: traceMetadata.SpanID, + } + err := logEvent.Validate() + if err != nil { + return err + } + + run, _ := app.getState() + + app.Consume(run.Reply.RunID, &logEvent) + return nil +} + var ( errMetricInf = errors.New("invalid metric value: inf") errMetricNaN = errors.New("invalid metric value: NaN") @@ -605,6 +655,10 @@ func (app *app) ExpectCustomEvents(t internal.Validator, want []internal.WantEve expectCustomEvents(extendValidator(t, "custom events"), app.testHarvest.CustomEvents, want) } +func (app *app) ExpectLogEvents(t internal.Validator, want []internal.WantLog) { + expectLogEvents(extendValidator(t, "log events"), app.testHarvest.LogEvents, want) +} + func (app *app) ExpectErrors(t internal.Validator, want []internal.WantError) { t = extendValidator(t, "traced errors") expectErrors(t, app.testHarvest.ErrorTraces, want) diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go new file mode 100644 index 000000000..d0f8fd805 --- /dev/null +++ b/v3/newrelic/log_event.go @@ -0,0 +1,105 @@ +// Copyright 2020 New Relic Corporation. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package newrelic + +import ( + "bytes" + "errors" + "fmt" + "regexp" +) + +var ( + // regex allows a single word, or number + severityRegexRaw = `^[a-zA-Z]+$|^[0-9]+$` + severityRegex = regexp.MustCompile(severityRegexRaw) + severityUnknown = "UNKNOWN" + + errNilLogEvent = errors.New("log event can not be nil") + errEmptySeverity = errors.New("severity can not be an empty string") + errSeverityTooLarge = fmt.Errorf("severity exceeds length limit of %d", attributeKeyLengthLimit) + errSeverityRegex = fmt.Errorf("severity must match %s", severityRegexRaw) + errMessageSizeZero = errors.New("message must be a non empty string") +) + +type logEvent struct { + priority priority + severity string + message string + spanID string + traceID string + timestamp int64 +} + +// ValidateAndRender validates inputs, and creates a rendered log event with +// a jsonWriter buffer populated by rendered json +func (event *logEvent) Validate() error { + if event == nil { + return errNilLogEvent + } + + // Default severity to "UNKNOWN" if no severity is passed. + if len(event.severity) == 0 { + event.severity = severityUnknown + } + + if ok, err := validateSeverity(event.severity); !ok { + return fmt.Errorf("invalid severity: %s", err) + } + + if len(event.message) == 0 { + return errMessageSizeZero + } + + return nil +} + +// writeJSON prepares JSON in the format expected by the collector. +func (e *logEvent) WriteJSON(buf *bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('{') + w.stringField("severity", e.severity) + w.stringField("message", e.message) + + if len(e.spanID) > 0 { + w.stringField("span.id", e.spanID) + } + if len(e.traceID) > 0 { + w.stringField("trace.id", e.traceID) + } + + w.needsComma = false + buf.WriteByte(',') + w.intField("timestamp", e.timestamp) + buf.WriteByte('}') +} + +// MarshalJSON is used for testing. +func (e *logEvent) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, 256)) + + e.WriteJSON(buf) + + return buf.Bytes(), nil +} + +// must be a single word or number. If unknown, should be "UNKNOWN" +func validateSeverity(severity string) (bool, error) { + size := len(severity) + if size == 0 { + return false, errEmptySeverity + } + if size > attributeKeyLengthLimit { + return false, errSeverityTooLarge + } + + if !severityRegex.MatchString(severity) { + return false, errSeverityRegex + } + return true, nil +} + +func (e *logEvent) MergeIntoHarvest(h *harvest) { + h.LogEvents.Add(e) +} diff --git a/v3/newrelic/log_events.go b/v3/newrelic/log_events.go new file mode 100644 index 000000000..0493b54a7 --- /dev/null +++ b/v3/newrelic/log_events.go @@ -0,0 +1,206 @@ +// Copyright 2020 New Relic Corporation. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package newrelic + +import ( + "bytes" + "container/heap" + "time" + + "github.com/newrelic/go-agent/v3/internal/jsonx" +) + +type commonAttributes struct { + entityGUID string + entityName string + hostname string +} + +type logEventHeap []logEvent + +type logEvents struct { + numSeen int + failedHarvests int + severityCount map[string]int + commonAttributes + logs logEventHeap +} + +// NumSeen returns the number of events seen +// if a severity is passed, it returns number of events seen by severity +func (events *logEvents) NumSeen(severity ...string) float64 { + if len(severity) == 0 { + return float64(events.numSeen) + } else { + return float64(events.severityCount[severity[0]]) + } +} + +func (events *logEvents) NumSaved() float64 { return float64(len(events.logs)) } + +func (h logEventHeap) Len() int { return len(h) } +func (h logEventHeap) Less(i, j int) bool { return h[i].priority.isLowerPriority(h[j].priority) } +func (h logEventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +// Push and Pop are unused: only heap.Init and heap.Fix are used. +func (h logEventHeap) Push(x interface{}) {} +func (h logEventHeap) Pop() interface{} { return nil } + +func newLogEvents(ca commonAttributes, max int) *logEvents { + return &logEvents{ + commonAttributes: ca, + severityCount: map[string]int{}, + logs: make(logEventHeap, 0, max), + } +} + +func (events *logEvents) capacity() int { + return cap(events.logs) +} + +func (events *logEvents) Add(e *logEvent) { + events.numSeen++ + events.severityCount[e.severity] += 1 + + if events.capacity() == 0 { + // Configurable event harvest limits may be zero. + return + } + + if len(events.logs) < cap(events.logs) { + // copy log event onto event heap + events.logs = append(events.logs, *e) + if len(events.logs) == cap(events.logs) { + // Delay heap initialization so that we can have + // deterministic ordering for integration tests (the max + // is not being reached). + heap.Init(events.logs) + } + return + } + + if e.priority.isLowerPriority((events.logs)[0].priority) { + return + } + + events.logs[0] = *e + heap.Fix(events.logs, 0) +} + +func (events *logEvents) mergeFailed(other *logEvents) { + fails := other.failedHarvests + 1 + if fails >= failedEventsAttemptsLimit { + return + } + events.failedHarvests = fails + events.Merge(other) +} + +func (events *logEvents) Merge(other *logEvents) { + allSeen := events.numSeen + other.numSeen + + for _, e := range other.logs { + events.Add(&e) + } + events.numSeen = allSeen +} + +func (events *logEvents) CollectorJSON(agentRunID string) ([]byte, error) { + if 0 == len(events.logs) { + return nil, nil + } + + estimate := 256 * len(events.logs) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + + if events.numSeen == 0 { + return nil, nil + } + + buf.WriteByte('[') + buf.WriteByte('{') + buf.WriteString(`"common":`) + buf.WriteByte('{') + buf.WriteString(`"attributes":`) + buf.WriteByte('{') + buf.WriteString(`"entity.guid":`) + jsonx.AppendString(buf, events.entityGUID) + buf.WriteByte(',') + buf.WriteString(`"entity.name":`) + jsonx.AppendString(buf, events.entityName) + buf.WriteByte(',') + buf.WriteString(`"hostname":`) + jsonx.AppendString(buf, events.hostname) + buf.WriteByte('}') + buf.WriteByte('}') + buf.WriteByte(',') + buf.WriteString(`"logs":`) + buf.WriteByte('[') + for i, e := range events.logs { + // If severity is empty string, then this is not a user provided entry, and is empty. + // Do not write json to buffer in this case. 
+ if e.severity != "" { + e.WriteJSON(buf) + if i != len(events.logs)-1 { + buf.WriteByte(',') + } + } + + } + buf.WriteByte(']') + buf.WriteByte('}') + buf.WriteByte(']') + return buf.Bytes(), nil + +} + +// split splits the events into two. NOTE! The two event pools are not valid +// priority queues, and should only be used to create JSON, not for adding any +// events. +func (events *logEvents) split() (*logEvents, *logEvents) { + // numSeen is conserved: e1.numSeen + e2.numSeen == events.numSeen. + sc1, sc2 := splitSeverityCount(events.severityCount) + e1 := &logEvents{ + numSeen: len(events.logs) / 2, + failedHarvests: events.failedHarvests / 2, + severityCount: sc1, + commonAttributes: events.commonAttributes, + logs: make([]logEvent, len(events.logs)/2), + } + e2 := &logEvents{ + numSeen: events.numSeen - e1.numSeen, + failedHarvests: events.failedHarvests - e1.failedHarvests, + severityCount: sc2, + commonAttributes: events.commonAttributes, + logs: make([]logEvent, len(events.logs)-len(e1.logs)), + } + // Note that slicing is not used to ensure that length == capacity for + // e1.events and e2.events. + copy(e1.logs, events.logs) + copy(e2.logs, events.logs[len(events.logs)/2:]) + + return e1, e2 +} + +// splits the contents and counts of the severity map +func splitSeverityCount(severityCount map[string]int) (map[string]int, map[string]int) { + var count1, count2 map[string]int + for k, v := range severityCount { + count1[k] = v / 2 + count2[k] = v - count1[k] + } + return count1, count2 +} + +func (events *logEvents) MergeIntoHarvest(h *harvest) { + h.LogEvents.mergeFailed(events) +} + +func (events *logEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return events.CollectorJSON(agentRunID) +} + +func (events *logEvents) EndpointMethod() string { + return cmdLogEvents +} diff --git a/v3/newrelic/log_events_test.go b/v3/newrelic/log_events_test.go new file mode 100644 index 000000000..8676ca166 --- /dev/null +++ b/v3/newrelic/log_events_test.go @@ -0,0 +1,391 @@ +// Copyright 2020 New Relic Corporation. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package newrelic + +import ( + "testing" +) + +var ( + testGUID = "testGUID" + testEntityName = "testEntityName" + testHostname = "testHostname" + testCommonAttributes = commonAttributes{ + entityGUID: testGUID, + entityName: testEntityName, + hostname: testHostname, + } + commonJSON = `[{"common":{"attributes":{"entity.guid":"testGUID","entity.name":"testEntityName","hostname":"testHostname"}},"logs":[` + + infoLevel = "INFO" + unknownLevel = "UNKNOWN" +) + +func sampleLogEvent(priority priority, severity, message string) *logEvent { + return &logEvent{ + priority, + severity, + message, + "AF02332", + "0024483", + 123456, + } +} + +// NOTE: this is going to make the tests run really slow due to heap allocation +func sampleLogEventNoParent(priority priority, severity, message string) *logEvent { + return &logEvent{ + priority, + severity, + message, + "", + "", + 123456, + } +} + +func TestBasicLogEvents(t *testing.T) { + events := newLogEvents(testCommonAttributes, 5) + events.Add(sampleLogEvent(0.5, infoLevel, "message1")) + events.Add(sampleLogEventNoParent(0.1, infoLevel, "message2")) + + json, err := events.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + + expected := commonJSON + + `{"severity":"INFO","message":"message1","span.id":"AF02332","trace.id":"0024483","timestamp":123456},` + + `{"severity":"INFO","message":"message2","timestamp":123456}]}` + + `]` + + if string(json) != expected { + t.Error(string(json), expected) + } + if 2 != events.numSeen { + t.Error(events.numSeen) + } + if 2 != events.NumSaved() { + t.Error(events.NumSaved()) + } +} + +func TestEmptyLogEvents(t *testing.T) { + events := newLogEvents(testCommonAttributes, 10) + json, err := events.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + if nil != json { + t.Error(string(json)) + } + if 0 != events.numSeen { + t.Error(events.numSeen) + } + if 0 != events.NumSaved() { + t.Error(events.NumSaved()) + } +} + +// The events with the highest priority should make it: a, c, e +func TestSamplingLogEvents(t *testing.T) { + events := newLogEvents(testCommonAttributes, 3) + + events.Add(sampleLogEvent(0.999999, infoLevel, "a")) + events.Add(sampleLogEvent(0.1, infoLevel, "b")) + events.Add(sampleLogEvent(0.9, infoLevel, "c")) + events.Add(sampleLogEvent(0.2, infoLevel, "d")) + events.Add(sampleLogEvent(0.8, infoLevel, "e")) + events.Add(sampleLogEvent(0.3, infoLevel, "f")) + + json, err := events.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + expect := commonJSON + + `{"severity":"INFO","message":"e","span.id":"AF02332","trace.id":"0024483","timestamp":123456},` + + `{"severity":"INFO","message":"a","span.id":"AF02332","trace.id":"0024483","timestamp":123456},` + + `{"severity":"INFO","message":"c","span.id":"AF02332","trace.id":"0024483","timestamp":123456}]}` + + `]` + if string(json) != expect { + t.Error(string(json), expect) + } + if 6 != events.numSeen { + t.Error(events.numSeen) + } + if 3 != events.NumSaved() { + t.Error(events.NumSaved()) + } +} + +func TestMergeEmptyLogEvents(t *testing.T) { + e1 := newLogEvents(testCommonAttributes, 10) + e2 := newLogEvents(testCommonAttributes, 10) + e1.Merge(e2) + json, err := e1.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + if nil != json { + t.Error(string(json)) + } + if 0 != e1.numSeen { + t.Error(e1.numSeen) + } + if 0 != e1.NumSaved() { + t.Error(e1.NumSaved()) + } +} + +func TestMergeFullLogEvents(t *testing.T) { + e1 := newLogEvents(testCommonAttributes, 
2) + e2 := newLogEvents(testCommonAttributes, 3) + + e1.Add(sampleLogEvent(0.1, infoLevel, "a")) + e1.Add(sampleLogEvent(0.15, infoLevel, "b")) + e1.Add(sampleLogEvent(0.25, infoLevel, "c")) + e2.Add(sampleLogEvent(0.06, infoLevel, "d")) + e2.Add(sampleLogEvent(0.12, infoLevel, "e")) + e2.Add(sampleLogEvent(0.18, infoLevel, "f")) + e2.Add(sampleLogEvent(0.24, infoLevel, "g")) + + e1.Merge(e2) + json, err := e1.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + + // expect the highest priority events: c, g + expect := commonJSON + + `{"severity":"INFO","message":"g","span.id":"AF02332","trace.id":"0024483","timestamp":123456},` + + `{"severity":"INFO","message":"c","span.id":"AF02332","trace.id":"0024483","timestamp":123456}]}]` + + if string(json) != expect { + t.Error(string(json)) + } + if 7 != e1.numSeen { + t.Error(e1.numSeen) + } + if 2 != e1.NumSaved() { + t.Error(e1.NumSaved()) + } +} + +func TestLogEventMergeFailedSuccess(t *testing.T) { + e1 := newLogEvents(testCommonAttributes, 2) + e2 := newLogEvents(testCommonAttributes, 3) + + e1.Add(sampleLogEvent(0.1, infoLevel, "a")) + e1.Add(sampleLogEvent(0.15, infoLevel, "b")) + e1.Add(sampleLogEvent(0.25, infoLevel, "c")) + + e2.Add(sampleLogEvent(0.06, infoLevel, "d")) + e2.Add(sampleLogEvent(0.12, infoLevel, "e")) + e2.Add(sampleLogEvent(0.18, infoLevel, "f")) + e2.Add(sampleLogEvent(0.24, infoLevel, "g")) + + e1.mergeFailed(e2) + + json, err := e1.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + // expect the highest priority events: c, g + expect := commonJSON + + `{"severity":"INFO","message":"g","span.id":"AF02332","trace.id":"0024483","timestamp":123456},` + + `{"severity":"INFO","message":"c","span.id":"AF02332","trace.id":"0024483","timestamp":123456}]}]` + + if string(json) != expect { + t.Error(string(json)) + } + if 7 != e1.numSeen { + t.Error(e1.numSeen) + } + if 2 != e1.NumSaved() { + t.Error(e1.NumSaved()) + } + if 1 != e1.failedHarvests { + t.Error(e1.failedHarvests) + } +} + +func TestLogEventMergeFailedLimitReached(t *testing.T) { + e1 := newLogEvents(testCommonAttributes, 2) + e2 := newLogEvents(testCommonAttributes, 3) + + e1.Add(sampleLogEvent(0.1, infoLevel, "a")) + e1.Add(sampleLogEvent(0.15, infoLevel, "b")) + e1.Add(sampleLogEvent(0.25, infoLevel, "c")) + + e2.Add(sampleLogEvent(0.06, infoLevel, "d")) + e2.Add(sampleLogEvent(0.12, infoLevel, "e")) + e2.Add(sampleLogEvent(0.18, infoLevel, "f")) + e2.Add(sampleLogEvent(0.24, infoLevel, "g")) + + e2.failedHarvests = failedEventsAttemptsLimit + + e1.mergeFailed(e2) + + json, err := e1.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + expect := commonJSON + + `{"severity":"INFO","message":"b","span.id":"AF02332","trace.id":"0024483","timestamp":123456},` + + `{"severity":"INFO","message":"c","span.id":"AF02332","trace.id":"0024483","timestamp":123456}]}]` + + if string(json) != expect { + t.Error(string(json)) + } + if 3 != e1.numSeen { + t.Error(e1.numSeen) + } + if 2 != e1.NumSaved() { + t.Error(e1.NumSaved()) + } + if 0 != e1.failedHarvests { + t.Error(e1.failedHarvests) + } +} + +/* +func logEventBenchmarkHelper(b *testing.B, w jsonWriter) { + events := newLogEvents(testCommonAttributes, internal.MaxTxnEvents) + event := logEvent{0, w} + for n := 0; n < internal.MaxTxnEvents; n++ { + events.addEvent(event) + } + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + js, err := events.CollectorJSON(agentRunID) + if nil != err { + b.Fatal(err, js) + } + } +} + + +func BenchmarkTxnEventsCollectorJSON(b 
*testing.B) { + event := &txnEvent{ + FinalName: "WebTransaction/Go/zip/zap", + Start: time.Now(), + Duration: 2 * time.Second, + Queuing: 1 * time.Second, + Zone: apdexSatisfying, + Attrs: nil, + } + analyticsEventBenchmarkHelper(b, event) +} + +func BenchmarkCustomEventsCollectorJSON(b *testing.B) { + now := time.Now() + ce, err := createCustomEvent("myEventType", map[string]interface{}{ + "string": "myString", + "bool": true, + "int64": int64(123), + }, now) + if nil != err { + b.Fatal(err) + } + analyticsEventBenchmarkHelper(b, ce) +} + +func BenchmarkErrorEventsCollectorJSON(b *testing.B) { + e := txnErrorFromResponseCode(time.Now(), 503) + e.Stack = getStackTrace() + + txnName := "WebTransaction/Go/zip/zap" + event := &errorEvent{ + errorData: e, + txnEvent: txnEvent{ + FinalName: txnName, + Duration: 3 * time.Second, + Attrs: nil, + }, + } + analyticsEventBenchmarkHelper(b, event) +} + + +func TestSplitFull(t *testing.T) { + events := newLogEvents(testCommonAttributes, 10) + for i := 0; i < 15; i++ { + events.addEvent(sampleLogEvent(priority(float32(i) / 10.0))) + } + // Test that the capacity cannot exceed the max. + if 10 != events.capacity() { + t.Error(events.capacity()) + } + e1, e2 := events.split() + j1, err1 := e1.CollectorJSON(agentRunID) + j2, err2 := e2.CollectorJSON(agentRunID) + if err1 != nil || err2 != nil { + t.Fatal(err1, err2) + } + if string(j1) != `["12345",{"reservoir_size":5,"events_seen":5},[0.5,0.7,0.6,0.8,0.9]]` { + t.Error(string(j1)) + } + if string(j2) != `["12345",{"reservoir_size":5,"events_seen":10},[1.1,1.4,1,1.3,1.2]]` { + t.Error(string(j2)) + } +} + +func TestSplitNotFullOdd(t *testing.T) { + events := newLogEvents(testCommonAttributes, 10) + for i := 0; i < 7; i++ { + events.addEvent(sampleLogEvent(priority(float32(i) / 10.0))) + } + e1, e2 := events.split() + j1, err1 := e1.CollectorJSON(agentRunID) + j2, err2 := e2.CollectorJSON(agentRunID) + if err1 != nil || err2 != nil { + t.Fatal(err1, err2) + } + if string(j1) != `["12345",{"reservoir_size":3,"events_seen":3},[0,0.1,0.2]]` { + t.Error(string(j1)) + } + if string(j2) != `["12345",{"reservoir_size":4,"events_seen":4},[0.3,0.4,0.5,0.6]]` { + t.Error(string(j2)) + } +} + +func TestSplitNotFullEven(t *testing.T) { + events := newLogEvents(testCommonAttributes, 10) + for i := 0; i < 8; i++ { + events.addEvent(sampleLogEvent(priority(float32(i) / 10.0))) + } + e1, e2 := events.split() + j1, err1 := e1.CollectorJSON(agentRunID) + j2, err2 := e2.CollectorJSON(agentRunID) + if err1 != nil || err2 != nil { + t.Fatal(err1, err2) + } + if string(j1) != `["12345",{"reservoir_size":4,"events_seen":4},[0,0.1,0.2,0.3]]` { + t.Error(string(j1)) + } + if string(j2) != `["12345",{"reservoir_size":4,"events_seen":4},[0.4,0.5,0.6,0.7]]` { + t.Error(string(j2)) + } +} + +func TestLogEventsZeroCapacity(t *testing.T) { + // Analytics events methods should be safe when configurable harvest + // settings have an event limit of zero. 
+ events := newLogEvents(testCommonAttributes, 0) + if 0 != events.NumSeen() || 0 != events.NumSaved() || 0 != events.capacity() { + t.Error(events.NumSeen(), events.NumSaved(), events.capacity()) + } + events.addEvent(sampleLogEvent(0.5)) + if 1 != events.NumSeen() || 0 != events.NumSaved() || 0 != events.capacity() { + t.Error(events.NumSeen(), events.NumSaved(), events.capacity()) + } + js, err := events.CollectorJSON("agentRunID") + if err != nil || js != nil { + t.Error(err, string(js)) + } +} +*/ From 9565130ffd1cf06955bff7bd11f49914a41019c0 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Mon, 9 May 2022 15:06:48 -0400 Subject: [PATCH 02/47] added logging in context metrics --- v3/newrelic/expect_implementation.go | 10 ++-- v3/newrelic/harvest.go | 4 ++ v3/newrelic/harvest_test.go | 68 ++++++++++++++++------------ v3/newrelic/log_events.go | 22 +++++---- v3/newrelic/metric_names.go | 9 ++++ 5 files changed, 70 insertions(+), 43 deletions(-) diff --git a/v3/newrelic/expect_implementation.go b/v3/newrelic/expect_implementation.go index aafaf08a0..76319791b 100644 --- a/v3/newrelic/expect_implementation.go +++ b/v3/newrelic/expect_implementation.go @@ -95,9 +95,9 @@ func expectTxnMetrics(t internal.Validator, mt *metricTable, want internal.WantT expectMetrics(t, mt, metrics) } -func expectMetricField(t internal.Validator, id metricID, v1, v2 float64, fieldName string) { - if v1 != v2 { - t.Error("metric fields do not match", id, v1, v2, fieldName) +func expectMetricField(t internal.Validator, id metricID, expect, want float64, fieldName string) { + if expect != want { + t.Error("incorrect value for metric", fieldName, id, "expect:", expect, "want: ", want) } } @@ -114,7 +114,7 @@ func expectMetrics(t internal.Validator, mt *metricTable, expect []internal.Want func expectMetricsInternal(t internal.Validator, mt *metricTable, expect []internal.WantMetric, exactMatch bool) { if exactMatch { if len(mt.metrics) != len(expect) { - t.Error("metric counts do not match expectations", len(mt.metrics), len(expect)) + t.Error("incorrect number of metrics stored, expected:", len(expect), "got:", len(mt.metrics)) } } expectedIds := make(map[metricID]struct{}) @@ -123,7 +123,7 @@ func expectMetricsInternal(t internal.Validator, mt *metricTable, expect []inter expectedIds[id] = struct{}{} m := mt.metrics[id] if nil == m { - t.Error("unable to find metric", id) + t.Error("expected metric not found", id) continue } diff --git a/v3/newrelic/harvest.go b/v3/newrelic/harvest.go index 08a921c05..51e40fdc4 100644 --- a/v3/newrelic/harvest.go +++ b/v3/newrelic/harvest.go @@ -95,6 +95,9 @@ func (h *harvest) Ready(now time.Time) *harvest { h.CustomEvents = newCustomEvents(h.CustomEvents.capacity()) } if 0 != types&harvestLogEvents { + h.Metrics.addCount(logsSeen, h.LogEvents.NumSeen(), forced) + h.Metrics.addCount(logsDropped, h.LogEvents.NumSeen()-h.LogEvents.NumSaved(), forced) + h.LogEvents.RecordSeverityMetrics(h.Metrics, forced) ready.LogEvents = h.LogEvents h.LogEvents = newLogEvents(h.LogEvents.commonAttributes, h.LogEvents.capacity()) } @@ -232,6 +235,7 @@ func (h *harvest) CreateFinalMetrics(reply *internal.ConnectReply, hc harvestCon h.Metrics.addValue(supportCustomEventLimit, "", float64(hc.MaxCustomEvents), forced) h.Metrics.addValue(supportErrorEventLimit, "", float64(hc.MaxErrorEvents), forced) h.Metrics.addValue(supportSpanEventLimit, "", float64(hc.MaxSpanEvents), forced) + h.Metrics.addValue(supportLogEventLimit, "", float64(hc.MaxLogEvents), forced) createTraceObserverMetrics(to, h.Metrics) 
createTrackUsageMetrics(h.Metrics) diff --git a/v3/newrelic/harvest_test.go b/v3/newrelic/harvest_test.go index fc5ec4d73..7524eea65 100644 --- a/v3/newrelic/harvest_test.go +++ b/v3/newrelic/harvest_test.go @@ -84,7 +84,8 @@ func TestCreateFinalMetrics(t *testing.T) { "analytic_event_data": 22, "custom_event_data": 33, "error_event_data": 44, - "span_event_data": 55 + "span_event_data": 55, + "log_event_data":66 } } }}`) @@ -101,6 +102,7 @@ func TestCreateFinalMetrics(t *testing.T) { MaxCustomEvents: 33, MaxErrorEvents: 44, MaxSpanEvents: 55, + MaxLogEvents: 66, } h := newHarvest(now, cfgr) h.Metrics.addCount("rename_me", 1.0, unforced) @@ -113,6 +115,7 @@ func TestCreateFinalMetrics(t *testing.T) { {Name: "Supportability/EventHarvest/CustomEventData/HarvestLimit", Scope: "", Forced: true, Data: []float64{1, 33, 33, 33, 33, 33 * 33}}, {Name: "Supportability/EventHarvest/ErrorEventData/HarvestLimit", Scope: "", Forced: true, Data: []float64{1, 44, 44, 44, 44, 44 * 44}}, {Name: "Supportability/EventHarvest/SpanEventData/HarvestLimit", Scope: "", Forced: true, Data: []float64{1, 55, 55, 55, 55, 55 * 55}}, + {Name: "Supportability/EventHarvest/LogEventData/HarvestLimit", Scope: "", Forced: true, Data: []float64{1, 66, 66, 66, 66, 66 * 66}}, {Name: "Supportability/Go/Version/" + Version, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Go/Runtime/Version/" + goVersionSimple, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Go/gRPC/Version/" + grpcVersion, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, @@ -137,6 +140,7 @@ func TestCreateFinalMetrics(t *testing.T) { {Name: "Supportability/EventHarvest/CustomEventData/HarvestLimit", Scope: "", Forced: true, Data: []float64{1, 10 * 1000, 10 * 1000, 10 * 1000, 10 * 1000, 10 * 1000 * 10 * 1000}}, {Name: "Supportability/EventHarvest/ErrorEventData/HarvestLimit", Scope: "", Forced: true, Data: []float64{1, 100, 100, 100, 100, 100 * 100}}, {Name: "Supportability/EventHarvest/SpanEventData/HarvestLimit", Scope: "", Forced: true, Data: []float64{1, 2000, 2000, 2000, 2000, 2000 * 2000}}, + {Name: "Supportability/EventHarvest/LogEventData/HarvestLimit", Scope: "", Forced: true, Data: []float64{1, 10000, 10000, 10000, 10000, 10000 * 10000}}, {Name: "Supportability/Go/Version/" + Version, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Go/Runtime/Version/" + goVersionSimple, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Go/gRPC/Version/" + grpcVersion, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, @@ -170,6 +174,7 @@ func TestCreateFinalMetricsTraceObserver(t *testing.T) { {Name: "Supportability/EventHarvest/CustomEventData/HarvestLimit", Scope: "", Forced: true, Data: nil}, {Name: "Supportability/EventHarvest/ErrorEventData/HarvestLimit", Scope: "", Forced: true, Data: nil}, {Name: "Supportability/EventHarvest/SpanEventData/HarvestLimit", Scope: "", Forced: true, Data: nil}, + {Name: "Supportability/EventHarvest/LogEventData/HarvestLimit", Scope: "", Forced: true, Data: nil}, {Name: "Supportability/Go/Version/" + Version, Scope: "", Forced: true, Data: nil}, {Name: "Supportability/Go/Runtime/Version/" + goVersionSimple, Scope: "", Forced: true, Data: nil}, {Name: "Supportability/Go/gRPC/Version/" + grpcVersion, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, @@ -305,19 +310,20 @@ func TestHarvestLogEventsReady(t *testing.T) { t.Fatal("log events not correctly reset") } - 
expectLogEvents(t, ready.LogEvents, []internal.WantLog{ - internal.WantLog{ - severity, - message, - spanID, - traceID, - timestamp, - }, + sampleLogEvent := internal.WantLog{ + Severity: severity, + Message: message, + SpanID: spanID, + TraceID: traceID, + Timestamp: timestamp, + } + + expectLogEvents(t, ready.LogEvents, []internal.WantLog{sampleLogEvent}) + expectMetrics(t, h.Metrics, []internal.WantMetric{ + {Name: logsSeen, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: logsSeen + "/" + severity, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: logsDropped, Scope: "", Forced: true, Data: []float64{0, 0, 0, 0, 0, 0}}, }) - /* expectMetrics(t, h.Metrics, []internal.WantMetric{ - {Name: logEventsSeen, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, - {Name: logEventsSent, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, - }) */ } func TestHarvestTxnEventsReady(t *testing.T) { @@ -464,6 +470,7 @@ func TestHarvestMetricsTracesReady(t *testing.T) { MaxCustomEvents: 1, MaxErrorEvents: 1, MaxSpanEvents: 1, + MaxLogEvents: 1, }) h.Metrics.addCount("zip", 1, forced) @@ -538,6 +545,7 @@ func TestMergeFailedHarvest(t *testing.T) { message := "User 'xyz' logged in" spanID := "123456789ADF" traceID := "ADF09876565" + logTimestamp := int64(123456) logEvent := logEvent{ 0.9, @@ -545,7 +553,7 @@ func TestMergeFailedHarvest(t *testing.T) { message, spanID, traceID, - 123456, + logTimestamp, } h.LogEvents.Add(&logEvent) @@ -611,15 +619,15 @@ func TestMergeFailedHarvest(t *testing.T) { }, UserAttributes: customEventParams, }}) - /* expectLogEvents(t, h.LogEvents, []internal.WantEvent{{ - Intrinsics: map[string]interface{}{ - "timestamp": timestamp, - "log.level": logLevel, - "message": message, - "span.id": spanID, - "trace.id": traceID, + expectLogEvents(t, h.LogEvents, []internal.WantLog{ + { + Timestamp: logTimestamp, + Severity: logLevel, + Message: message, + SpanID: spanID, + TraceID: traceID, }, - }})*/ + }) expectErrorEvents(t, h.ErrorEvents, []internal.WantEvent{{ Intrinsics: map[string]interface{}{ "error.class": "klass", @@ -692,15 +700,15 @@ func TestMergeFailedHarvest(t *testing.T) { }, UserAttributes: customEventParams, }}) - /* expectLogEvents(t, nextHarvest.LogEvents, []internal.WantEvent{{ - Intrinsics: map[string]interface{}{ - "timestamp": timestamp, - "log.level": logLevel, - "message": message, - "span.id": spanID, - "trace.id": traceID, + expectLogEvents(t, nextHarvest.LogEvents, []internal.WantLog{ + { + Timestamp: logTimestamp, + Severity: logLevel, + Message: message, + SpanID: spanID, + TraceID: traceID, }, - }}) */ + }) expectErrorEvents(t, nextHarvest.ErrorEvents, []internal.WantEvent{{ Intrinsics: map[string]interface{}{ "error.class": "klass", diff --git a/v3/newrelic/log_events.go b/v3/newrelic/log_events.go index 0493b54a7..bf1b0460c 100644 --- a/v3/newrelic/log_events.go +++ b/v3/newrelic/log_events.go @@ -28,17 +28,23 @@ type logEvents struct { } // NumSeen returns the number of events seen -// if a severity is passed, it returns number of events seen by severity -func (events *logEvents) NumSeen(severity ...string) float64 { - if len(severity) == 0 { - return float64(events.numSeen) - } else { - return float64(events.severityCount[severity[0]]) - } -} +func (events *logEvents) NumSeen() float64 { return float64(events.numSeen) } +// NumSaved returns the number of events that will be harvested for this cycle func (events *logEvents) NumSaved() float64 { return float64(len(events.logs)) } +func (events 
*logEvents) RecordSeverityMetrics(metrics *metricTable, forced metricForce) { + if metrics == nil { + return + } + + for k, v := range events.severityCount { + metricName := logsSeen + "/" + k + metrics.addCount(metricName, float64(v), forced) + } +} + +//func (events *logEvents) func (h logEventHeap) Len() int { return len(h) } func (h logEventHeap) Less(i, j int) bool { return h[i].priority.isLowerPriority(h[j].priority) } func (h logEventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } diff --git a/v3/newrelic/metric_names.go b/v3/newrelic/metric_names.go index e7a6856fb..9530cf276 100644 --- a/v3/newrelic/metric_names.go +++ b/v3/newrelic/metric_names.go @@ -61,6 +61,15 @@ const ( supportCustomEventLimit = "Supportability/EventHarvest/CustomEventData/HarvestLimit" supportErrorEventLimit = "Supportability/EventHarvest/ErrorEventData/HarvestLimit" supportSpanEventLimit = "Supportability/EventHarvest/SpanEventData/HarvestLimit" + supportLogEventLimit = "Supportability/EventHarvest/LogEventData/HarvestLimit" + + // Logging Metrics https://source.datanerd.us/agents/agent-specs/pull/570/files + logsSeen = "Logging/lines" + logsDropped = "Logging/Forwarding/Dropped" + + supportLoggingMetrics = "Supportability/Logging/Metrics/Golang/" + supportLogForwarding = "Supportability/Logging/Forwarding/Golang/" + supportLocalLogDecorating = "Supportability/Logging/Decorating/Golang/" ) // distributedTracingSupport is used to track distributed tracing activity for From 8be71667124085727524a781bb83c28e79b09abf Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 10 May 2022 12:36:07 -0400 Subject: [PATCH 03/47] More Configurability and Testing --- v3/examples/short-lived-process/main.go | 10 +- v3/examples/stress-tests/main.go | 123 ++++++++++ v3/newrelic/app_run.go | 6 +- v3/newrelic/app_run_test.go | 2 +- v3/newrelic/application.go | 20 -- v3/newrelic/config.go | 47 ++-- v3/newrelic/config_options.go | 21 ++ v3/newrelic/harvest.go | 38 ++- v3/newrelic/harvest_test.go | 74 +++--- v3/newrelic/internal_app.go | 43 ---- v3/newrelic/log_event.go | 121 +++++----- v3/newrelic/log_event_test.go | 108 +++++++++ v3/newrelic/log_events.go | 39 ++- v3/newrelic/log_events_test.go | 309 +++++++++++++++--------- v3/newrelic/log_writer.go | 42 ++++ v3/newrelic/log_writer_test.go | 25 ++ v3/newrelic/sampler_test.go | 4 +- 17 files changed, 701 insertions(+), 331 deletions(-) create mode 100644 v3/examples/stress-tests/main.go create mode 100644 v3/newrelic/log_event_test.go create mode 100644 v3/newrelic/log_writer.go create mode 100644 v3/newrelic/log_writer_test.go diff --git a/v3/examples/short-lived-process/main.go b/v3/examples/short-lived-process/main.go index eb4d2acf0..b234c79c5 100644 --- a/v3/examples/short-lived-process/main.go +++ b/v3/examples/short-lived-process/main.go @@ -14,9 +14,10 @@ import ( func main() { app, err := newrelic.NewApplication( - newrelic.ConfigAppName("Logs in context testing"), + newrelic.ConfigAppName("Example Short Lived Process"), newrelic.ConfigAppLogForwardingEnabled(true), newrelic.ConfigDistributedTracerEnabled(true), + newrelic.ConfigAppLogMetricsEnabled(true), newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), newrelic.ConfigDebugLogger(os.Stdout), ) @@ -30,6 +31,8 @@ func main() { fmt.Println(err) } + app.RecordLogEvent(context.Background(), "App Started", "INFO", time.Now().UnixMilli()) + // Do the tasks at hand. Perhaps record them using transactions and/or // custom events. 
tasks := []string{"white", "black", "red", "blue", "green", "yellow"} @@ -42,7 +45,10 @@ func main() { }) } - app.RecordFormattedLogEvent(context.Background(), "App Executed Succesfully", "INFO", time.Now().UnixMilli()) + app.RecordLogEvent(context.Background(), "A warning log occured!", "WARN", time.Now().UnixMilli()) + app.RecordLogEvent(context.Background(), "App Executed Succesfully", "INFO", time.Now().UnixMilli()) + + time.Sleep(60 * time.Second) // Shut down the application to flush data to New Relic. app.Shutdown(10 * time.Second) diff --git a/v3/examples/stress-tests/main.go b/v3/examples/stress-tests/main.go new file mode 100644 index 000000000..d8c662d96 --- /dev/null +++ b/v3/examples/stress-tests/main.go @@ -0,0 +1,123 @@ +// Copyright 2020 New Relic Corporation. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/newrelic/go-agent/v3/newrelic" +) + +var ( + applicationLoggingEvents = "Application Logging Events" + customEvents = "Custom Events" +) + +func main() { + app, err := newrelic.NewApplication( + newrelic.ConfigAppName("ApplicationLogging Stress Test Golang"), + newrelic.ConfigAppLogForwardingEnabled(true), + newrelic.ConfigDistributedTracerEnabled(true), + newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), + newrelic.ConfigInfoLogger(os.Stdout), + ) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + // Wait for the application to connect. + if err := app.WaitForConnection(5 * time.Second); nil != err { + fmt.Println(err) + } + + tests := []Benchmark{ + NewLogBenchmark(10, 6), + NewLogBenchmark(100, 6), + NewLogBenchmark(1000, 6), + + NewCustomEventBenchmark(10, 6), + NewCustomEventBenchmark(100, 6), + NewCustomEventBenchmark(1000, 6), + } + + for _, test := range tests { + test.Benchmark(app) + } + + var metrics string + for _, test := range tests { + metrics += test.Sprint() + } + + // Wait for the application to connect. + if err := app.WaitForConnection(5 * time.Second); nil != err { + fmt.Println(err) + } + + // Shut down the application to flush data to New Relic. 
+ app.Shutdown(10 * time.Second) + + fmt.Println(metrics) +} + +type Benchmark struct { + eventType string + numEvents int + sets int + runTimes []int64 +} + +func NewLogBenchmark(numEvents, numRuns int) Benchmark { + return Benchmark{ + applicationLoggingEvents, + numEvents, + numRuns, + make([]int64, numRuns), + } +} + +func NewCustomEventBenchmark(numEvents, numRuns int) Benchmark { + return Benchmark{ + customEvents, + numEvents, + numRuns, + make([]int64, numRuns), + } +} + +func (bench *Benchmark) Sprint() string { + sum := int64(0) + output := fmt.Sprintf("Time taken to record %d %s:\n", bench.numEvents, bench.eventType) + for _, time := range bench.runTimes { + output += fmt.Sprintf("\t\tMicroseconds: %d\n", time) + sum += time + } + + average := sum / int64(len(bench.runTimes)) + output += fmt.Sprintf("\t\tAverage Microseconds: %d\n", average) + return output +} + +func (bench *Benchmark) Benchmark(app *newrelic.Application) { + for set := 0; set < bench.sets; set++ { + start := time.Now() + for i := 0; i < bench.numEvents; i++ { + switch bench.eventType { + case applicationLoggingEvents: + message := "Message " + fmt.Sprint(i) + app.RecordLogEvent(context.Background(), message, "INFO", time.Now().UnixMilli()) + case customEvents: + message := "Message " + fmt.Sprint(i) + app.RecordCustomEvent("TEST EVENT", map[string]interface{}{ + "Message": message, + }) + } + } + bench.runTimes[set] = time.Since(start).Microseconds() + } +} diff --git a/v3/newrelic/app_run.go b/v3/newrelic/app_run.go index 640c544bc..a8e3f175e 100644 --- a/v3/newrelic/app_run.go +++ b/v3/newrelic/app_run.go @@ -120,9 +120,13 @@ func newAppRun(config config, reply *internal.ConnectReply) *appRun { ReportPeriods: run.ReportPeriods(), MaxTxnEvents: run.MaxTxnEvents(), MaxCustomEvents: run.MaxCustomEvents(), - MaxLogEvents: run.MaxLogEvents(), MaxErrorEvents: run.MaxErrorEvents(), MaxSpanEvents: run.MaxSpanEvents(), + LoggingConfig: configLogHarvest{ + config.ApplicationLogging.Forwarding.Enabled, + config.ApplicationLogging.Metrics.Enabled, + run.MaxLogEvents(), + }, } return run diff --git a/v3/newrelic/app_run_test.go b/v3/newrelic/app_run_test.go index 289b31848..308128550 100644 --- a/v3/newrelic/app_run_test.go +++ b/v3/newrelic/app_run_test.go @@ -400,7 +400,7 @@ func assertHarvestConfig(t testing.TB, hc harvestConfig, expect expectHarvestCon if max := hc.MaxErrorEvents; max != expect.maxErrorEvents { t.Error(errorExpectNotEqualActual("MaxErrorEvents", max, expect.maxErrorEvents)) } - if max := hc.MaxLogEvents; max != expect.maxLogEvents { + if max := hc.LoggingConfig.maxLogEvents; max != expect.maxLogEvents { t.Error(errorExpectNotEqualActual("MaxLogEvents", max, expect.maxErrorEvents)) } if periods := hc.ReportPeriods; !reflect.DeepEqual(periods, expect.periods) { diff --git a/v3/newrelic/application.go b/v3/newrelic/application.go index 584ba30ed..7a8db268f 100644 --- a/v3/newrelic/application.go +++ b/v3/newrelic/application.go @@ -4,7 +4,6 @@ package newrelic import ( - "context" "os" "time" ) @@ -52,25 +51,6 @@ func (app *Application) RecordCustomEvent(eventType string, params map[string]in } } -// RecordLogEvent adds a log event from a newrelic context, a log message, a log severity, and a timestamp -// formatted as UnixMilliseconds. -// -// The severity should be either a single word, number, or an empty string. 
-func (app *Application) RecordLogEvent(context context.Context, message, severity string, timestamp int64) { - if nil == app { - return - } - if nil == app.app { - return - } - err := app.app.RecordLogEvent(context, message, severity, timestamp) - if err != nil { - app.app.Error("unable to record log event", map[string]interface{}{ - "reason": err.Error(), - }) - } -} - // RecordCustomMetric records a custom metric. The metric name you // provide will be prefixed by "Custom/". Custom metrics are not // currently supported in serverless mode. diff --git a/v3/newrelic/config.go b/v3/newrelic/config.go index e7eeb3464..d5d369e0a 100644 --- a/v3/newrelic/config.go +++ b/v3/newrelic/config.go @@ -145,29 +145,6 @@ type Config struct { } } - // ApplicationLogging contains settings which control the capture and sending - // of log event data - ApplicationLogging struct { - // If this is disabled, all sub-features are disabled; - // if it is enabled, the individual sub-feature configurations take effect. - // MAY accomplish this by not installing instrumentation, or by early-return/no-op as necessary for an agent. - Enabled bool - // Forwarding controls log forwarding to New Relic One - Forwarding struct { - // Toggles whether the agent gathers log records for sending to New Relic. - Enabled bool - // Number of log records to send per minute to New Relic. - // Controls the overall memory consumption when using log forwarding. - // SHOULD be sent as part of the harvest_limits on Connect. - MaxSamplesStored int - } - Metrics struct { - // Toggles whether the agent gathers the the user facing Logging/lines and Logging/lines/{SEVERITY} - // Logging Metrics used in the Logs chart on the APM Summary page. - Enabled bool - } - } - // BrowserMonitoring contains settings which control the behavior of // Transaction.BrowserTimingHeader. BrowserMonitoring struct { @@ -323,6 +300,29 @@ type Config struct { } } + // ApplicationLogging contains settings which control the capture and sending + // of log event data + ApplicationLogging struct { + // If this is disabled, all sub-features are disabled; + // if it is enabled, the individual sub-feature configurations take effect. + // MAY accomplish this by not installing instrumentation, or by early-return/no-op as necessary for an agent. + Enabled bool + // Forwarding controls log forwarding to New Relic One + Forwarding struct { + // Toggles whether the agent gathers log records for sending to New Relic. + Enabled bool + // Number of log records to send per minute to New Relic. + // Controls the overall memory consumption when using log forwarding. + // SHOULD be sent as part of the harvest_limits on Connect. + MaxSamplesStored int + } + Metrics struct { + // Toggles whether the agent gathers the the user facing Logging/lines and Logging/lines/{SEVERITY} + // Logging Metrics used in the Logs chart on the APM Summary page. + Enabled bool + } + } + // Attributes controls which attributes are enabled and disabled globally. 
// This setting affects all attribute destinations: Transaction Events, // Error Events, Transaction Traces and segments, Traced Errors, Span @@ -435,6 +435,7 @@ func defaultConfig() Config { c.TransactionTracer.Attributes.Enabled = true c.TransactionTracer.Segments.Attributes.Enabled = true + // Application Logging Settings c.ApplicationLogging.Enabled = true c.ApplicationLogging.Forwarding.Enabled = false c.ApplicationLogging.Forwarding.MaxSamplesStored = internal.MaxLogEvents diff --git a/v3/newrelic/config_options.go b/v3/newrelic/config_options.go index 9ba7c32f6..147d05fed 100644 --- a/v3/newrelic/config_options.go +++ b/v3/newrelic/config_options.go @@ -59,6 +59,27 @@ func ConfigAppLogForwardingEnabled(enabled bool) ConfigOption { } } +func ConfigAppLogMetricsEnabled(enabled bool) ConfigOption { + return func(cfg *Config) { + if enabled == true { + cfg.ApplicationLogging.Enabled = true + cfg.ApplicationLogging.Metrics.Enabled = true + } else { + cfg.ApplicationLogging.Metrics.Enabled = false + } + } +} + +func ConfigAppLogEnabled(enabled bool) ConfigOption { + return func(cfg *Config) { + if enabled == true { + cfg.ApplicationLogging.Enabled = true + } else { + cfg.ApplicationLogging.Enabled = false + } + } +} + // ConfigAppLogForwardingMaxSamplesStored allows users to set the maximium number of // log events the agent is allowed to collect and store in a given harvest cycle. func ConfigAppLogForwardingMaxSamplesStored(maxSamplesStored int) ConfigOption { diff --git a/v3/newrelic/harvest.go b/v3/newrelic/harvest.go index 51e40fdc4..9760ef38d 100644 --- a/v3/newrelic/harvest.go +++ b/v3/newrelic/harvest.go @@ -57,8 +57,24 @@ func (timer *harvestTimer) ready(now time.Time) (ready harvestTypes) { return } +type knownPriorities map[string]priority + +func (kp knownPriorities) get(uuid string) (priority, bool) { + priority, ok := kp[uuid] + return priority, ok +} + +func (kp knownPriorities) add(uuid string, p priority) { + kp[uuid] = p +} + +func (kp knownPriorities) drop(uuid string) { + delete(kp, uuid) +} + // harvest contains collected data. 
type harvest struct { + knownPriorities timer *harvestTimer Metrics *metricTable @@ -95,11 +111,9 @@ func (h *harvest) Ready(now time.Time) *harvest { h.CustomEvents = newCustomEvents(h.CustomEvents.capacity()) } if 0 != types&harvestLogEvents { - h.Metrics.addCount(logsSeen, h.LogEvents.NumSeen(), forced) - h.Metrics.addCount(logsDropped, h.LogEvents.NumSeen()-h.LogEvents.NumSaved(), forced) - h.LogEvents.RecordSeverityMetrics(h.Metrics, forced) + h.LogEvents.RecordLoggingMetrics(h.Metrics, forced) ready.LogEvents = h.LogEvents - h.LogEvents = newLogEvents(h.LogEvents.commonAttributes, h.LogEvents.capacity()) + h.LogEvents = newLogEvents(h.LogEvents.commonAttributes, h.LogEvents.config) } if 0 != types&harvestTxnEvents { h.Metrics.addCount(txnEventsSeen, h.TxnEvents.NumSeen(), forced) @@ -176,9 +190,9 @@ func (h *harvest) Payloads(splitLargeTxnEvents bool) (ps []payloadCreator) { type harvestConfig struct { ReportPeriods map[harvestTypes]time.Duration CommonAttributes commonAttributes + LoggingConfig configLogHarvest MaxSpanEvents int MaxCustomEvents int - MaxLogEvents int MaxErrorEvents int MaxTxnEvents int } @@ -193,7 +207,7 @@ func newHarvest(now time.Time, configurer harvestConfig) *harvest { SlowSQLs: newSlowQueries(maxHarvestSlowSQLs), SpanEvents: newSpanEvents(configurer.MaxSpanEvents), CustomEvents: newCustomEvents(configurer.MaxCustomEvents), - LogEvents: newLogEvents(configurer.CommonAttributes, configurer.MaxLogEvents), + LogEvents: newLogEvents(configurer.CommonAttributes, configurer.LoggingConfig), TxnEvents: newTxnEvents(configurer.MaxTxnEvents), ErrorEvents: newErrorEvents(configurer.MaxErrorEvents), } @@ -235,7 +249,7 @@ func (h *harvest) CreateFinalMetrics(reply *internal.ConnectReply, hc harvestCon h.Metrics.addValue(supportCustomEventLimit, "", float64(hc.MaxCustomEvents), forced) h.Metrics.addValue(supportErrorEventLimit, "", float64(hc.MaxErrorEvents), forced) h.Metrics.addValue(supportSpanEventLimit, "", float64(hc.MaxSpanEvents), forced) - h.Metrics.addValue(supportLogEventLimit, "", float64(hc.MaxLogEvents), forced) + h.Metrics.addValue(supportLogEventLimit, "", float64(hc.LoggingConfig.maxLogEvents), forced) createTraceObserverMetrics(to, h.Metrics) createTrackUsageMetrics(h.Metrics) @@ -335,14 +349,18 @@ func createTxnMetrics(args *txnData, metrics *metricTable) { } var ( - // dfltHarvestCfgr is use in internal test cases, and for situations - // where we don't have a ConnectReply, such as for serverless harvests + + // This should only be used by harvests in cases where a connect response is unavailable dfltHarvestCfgr = harvestConfig{ ReportPeriods: map[harvestTypes]time.Duration{harvestTypesAll: fixedHarvestPeriod}, MaxTxnEvents: internal.MaxTxnEvents, MaxSpanEvents: defaultMaxSpanEvents, MaxCustomEvents: internal.MaxCustomEvents, - MaxLogEvents: internal.MaxLogEvents, MaxErrorEvents: internal.MaxErrorEvents, + LoggingConfig: configLogHarvest{ + false, + true, + internal.MaxLogEvents, + }, } ) diff --git a/v3/newrelic/harvest_test.go b/v3/newrelic/harvest_test.go index 7524eea65..bb6f82c52 100644 --- a/v3/newrelic/harvest_test.go +++ b/v3/newrelic/harvest_test.go @@ -11,9 +11,24 @@ import ( "github.com/newrelic/go-agent/v3/internal/logger" ) +var ( + testHarvestCfgr = harvestConfig{ + ReportPeriods: map[harvestTypes]time.Duration{harvestTypesAll: fixedHarvestPeriod}, + MaxTxnEvents: internal.MaxTxnEvents, + MaxSpanEvents: maxSpanEvents, + MaxCustomEvents: internal.MaxCustomEvents, + MaxErrorEvents: internal.MaxErrorEvents, + LoggingConfig: configLogHarvest{ + 
true, + true, + internal.MaxLogEvents, + }, + } +) + func TestHarvestTimerAllFixed(t *testing.T) { now := time.Now() - harvest := newHarvest(now, dfltHarvestCfgr) + harvest := newHarvest(now, testHarvestCfgr) timer := harvest.timer for _, tc := range []struct { Elapsed time.Duration @@ -69,9 +84,9 @@ func TestCreateFinalMetrics(t *testing.T) { // If the harvest or metrics is nil then CreateFinalMetrics should // not panic. var nilHarvest *harvest - nilHarvest.CreateFinalMetrics(nil, dfltHarvestCfgr, nil) + nilHarvest.CreateFinalMetrics(nil, testHarvestCfgr, nil) emptyHarvest := &harvest{} - emptyHarvest.CreateFinalMetrics(nil, dfltHarvestCfgr, nil) + emptyHarvest.CreateFinalMetrics(nil, testHarvestCfgr, nil) replyJSON := []byte(`{"return_value":{ "metric_name_rules":[{ @@ -102,7 +117,7 @@ func TestCreateFinalMetrics(t *testing.T) { MaxCustomEvents: 33, MaxErrorEvents: 44, MaxSpanEvents: 55, - MaxLogEvents: 66, + LoggingConfig: loggingConfigEnabled(66), } h := newHarvest(now, cfgr) h.Metrics.addCount("rename_me", 1.0, unforced) @@ -129,9 +144,9 @@ func TestCreateFinalMetrics(t *testing.T) { if err != nil { t.Fatal(err) } - h = newHarvest(now, dfltHarvestCfgr) + h = newHarvest(now, testHarvestCfgr) h.Metrics.addCount("rename_me", 1.0, unforced) - h.CreateFinalMetrics(reply, dfltHarvestCfgr, nil) + h.CreateFinalMetrics(reply, testHarvestCfgr, nil) expectMetrics(t, h.Metrics, []internal.WantMetric{ {Name: instanceReporting, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "rename_me", Scope: "", Forced: false, Data: []float64{1.0, 0, 0, 0, 0, 0}}, @@ -165,8 +180,8 @@ func TestCreateFinalMetricsTraceObserver(t *testing.T) { }, ) - h := newHarvest(now, dfltHarvestCfgr) - h.CreateFinalMetrics(reply, dfltHarvestCfgr, to) + h := newHarvest(now, testHarvestCfgr) + h.CreateFinalMetrics(reply, testHarvestCfgr, to) expectMetrics(t, h.Metrics, []internal.WantMetric{ {Name: instanceReporting, Scope: "", Forced: true, Data: nil}, {Name: "Supportability/EventHarvest/ReportPeriod", Scope: "", Forced: true, Data: nil}, @@ -184,7 +199,7 @@ func TestCreateFinalMetricsTraceObserver(t *testing.T) { } func TestEmptyPayloads(t *testing.T) { - h := newHarvest(time.Now(), dfltHarvestCfgr) + h := newHarvest(time.Now(), testHarvestCfgr) payloads := h.Payloads(true) if len(payloads) != 9 { t.Error(len(payloads)) @@ -214,7 +229,7 @@ func TestPayloadsEmptyHarvest(t *testing.T) { func TestHarvestNothingReady(t *testing.T) { now := time.Now() - h := newHarvest(now, dfltHarvestCfgr) + h := newHarvest(now, testHarvestCfgr) ready := h.Ready(now.Add(10 * time.Second)) if ready != nil { t.Error("harvest should be nil") @@ -273,7 +288,7 @@ func TestHarvestLogEventsReady(t *testing.T) { fixedHarvestTypes: fixedHarvestPeriod, harvestLogEvents: time.Second * 5, }, - MaxLogEvents: 3, + LoggingConfig: loggingConfigEnabled(3), }) timestamp := timeToIntMillis(now) severity := "INFO" @@ -281,13 +296,10 @@ func TestHarvestLogEventsReady(t *testing.T) { spanID := "123456789ADF" traceID := "ADF09876565" - logEvent := logEvent{ - 0.9, - severity, - message, - spanID, - traceID, - 123456, + log := writeLog(severity, message, spanID, traceID, timestamp) + logEvent, err := CreateLogEvent(log) + if err != nil { + t.Error(err) } h.LogEvents.Add(&logEvent) @@ -470,7 +482,7 @@ func TestHarvestMetricsTracesReady(t *testing.T) { MaxCustomEvents: 1, MaxErrorEvents: 1, MaxSpanEvents: 1, - MaxLogEvents: 1, + LoggingConfig: loggingConfigEnabled(1), }) h.Metrics.addCount("zip", 1, forced) @@ -532,7 +544,7 @@ func 
TestMergeFailedHarvest(t *testing.T) { start1 := time.Now() start2 := start1.Add(1 * time.Minute) - h := newHarvest(start1, dfltHarvestCfgr) + h := newHarvest(start1, testHarvestCfgr) h.Metrics.addCount("zip", 1, forced) h.TxnEvents.AddTxnEvent(&txnEvent{ FinalName: "finalName", @@ -547,15 +559,11 @@ func TestMergeFailedHarvest(t *testing.T) { traceID := "ADF09876565" logTimestamp := int64(123456) - logEvent := logEvent{ - 0.9, - logLevel, - message, - spanID, - traceID, - logTimestamp, + log := writeLog(logLevel, message, spanID, traceID, logTimestamp) + logEvent, err := CreateLogEvent(log) + if err != nil { + t.Error(err) } - h.LogEvents.Add(&logEvent) customEventParams := map[string]interface{}{"zip": 1} ce, err := createCustomEvent("myEvent", customEventParams, time.Now()) @@ -660,7 +668,7 @@ func TestMergeFailedHarvest(t *testing.T) { Klass: "klass", }}) - nextHarvest := newHarvest(start2, dfltHarvestCfgr) + nextHarvest := newHarvest(start2, testHarvestCfgr) if start2 != nextHarvest.Metrics.metricPeriodStart { t.Error(nextHarvest.Metrics.metricPeriodStart) } @@ -830,7 +838,7 @@ func TestCreateTxnMetrics(t *testing.T) { func TestHarvestSplitTxnEvents(t *testing.T) { now := time.Now() - h := newHarvest(now, dfltHarvestCfgr) + h := newHarvest(now, testHarvestCfgr) for i := 0; i < internal.MaxTxnEvents; i++ { h.TxnEvents.AddTxnEvent(&txnEvent{}, priority(float32(i))) } @@ -926,7 +934,7 @@ func TestCreateTxnMetricsOldCAT(t *testing.T) { func TestNewHarvestSetsDefaultValues(t *testing.T) { now := time.Now() - h := newHarvest(now, dfltHarvestCfgr) + h := newHarvest(now, testHarvestCfgr) if cp := h.TxnEvents.capacity(); cp != internal.MaxTxnEvents { t.Error("wrong txn event capacity", cp) @@ -956,7 +964,7 @@ func TestNewHarvestUsesConnectReply(t *testing.T) { MaxCustomEvents: 2, MaxErrorEvents: 3, MaxSpanEvents: 4, - MaxLogEvents: 5, + LoggingConfig: loggingConfigEnabled(5), }) if cp := h.TxnEvents.capacity(); cp != 1 { @@ -986,9 +994,9 @@ func TestConfigurableHarvestZeroHarvestLimits(t *testing.T) { }, MaxTxnEvents: 0, MaxCustomEvents: 0, - MaxLogEvents: 0, MaxErrorEvents: 0, MaxSpanEvents: 0, + LoggingConfig: loggingConfigEnabled(0), }) if cp := h.TxnEvents.capacity(); cp != 0 { t.Error("wrong txn event capacity", cp) diff --git a/v3/newrelic/internal_app.go b/v3/newrelic/internal_app.go index 6d1b48f63..4456ea70e 100644 --- a/v3/newrelic/internal_app.go +++ b/v3/newrelic/internal_app.go @@ -4,7 +4,6 @@ package newrelic import ( - "context" "errors" "fmt" "io" @@ -550,48 +549,6 @@ func (app *app) RecordCustomEvent(eventType string, params map[string]interface{ return nil } -var ( - errApplicationLoggingDisabled = errors.New("application logging disabled") - errLogForwardingDisabled = errors.New("log forwarding disabled") - - // making a function for this because this huge if statement is an eyesore - isAppLogFowardingDisabled = func(app *app) bool { - return !(app.config.ApplicationLogging.Forwarding.Enabled && - app.config.ApplicationLogging.Forwarding.MaxSamplesStored > 0) - } -) - -func (app *app) RecordLogEvent(context context.Context, message, severity string, timestamp int64) error { - if app.config.Config.HighSecurity { - return errHighSecurityEnabled - } - - if !app.config.ApplicationLogging.Enabled { - return errApplicationLoggingDisabled - } - if isAppLogFowardingDisabled(app) { - return errLogForwardingDisabled - } - - txn := FromContext(context) - traceMetadata := txn.GetTraceMetadata() - logEvent := logEvent{ - severity: severity, - message: message, - traceID: 
traceMetadata.TraceID, - spanID: traceMetadata.SpanID, - } - err := logEvent.Validate() - if err != nil { - return err - } - - run, _ := app.getState() - - app.Consume(run.Reply.RunID, &logEvent) - return nil -} - var ( errMetricInf = errors.New("invalid metric value: inf") errMetricNaN = errors.New("invalid metric value: NaN") diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go index d0f8fd805..1d5e00cf3 100644 --- a/v3/newrelic/log_event.go +++ b/v3/newrelic/log_event.go @@ -5,74 +5,31 @@ package newrelic import ( "bytes" + "encoding/json" "errors" "fmt" - "regexp" ) -var ( - // regex allows a single word, or number - severityRegexRaw = `^[a-zA-Z]+$|^[0-9]+$` - severityRegex = regexp.MustCompile(severityRegexRaw) - severityUnknown = "UNKNOWN" - - errNilLogEvent = errors.New("log event can not be nil") - errEmptySeverity = errors.New("severity can not be an empty string") - errSeverityTooLarge = fmt.Errorf("severity exceeds length limit of %d", attributeKeyLengthLimit) - errSeverityRegex = fmt.Errorf("severity must match %s", severityRegexRaw) - errMessageSizeZero = errors.New("message must be a non empty string") +const ( + LogSeverityFieldName = "level" + LogMessageFieldName = "message" + LogTimestampFieldName = "timestamp" + LogSpanIDFieldName = "span.id" + LogTraceIDFieldName = "trace.id" + + maxLogBytes = 32768 ) type logEvent struct { - priority priority - severity string - message string - spanID string - traceID string - timestamp int64 -} - -// ValidateAndRender validates inputs, and creates a rendered log event with -// a jsonWriter buffer populated by rendered json -func (event *logEvent) Validate() error { - if event == nil { - return errNilLogEvent - } - - // Default severity to "UNKNOWN" if no severity is passed. - if len(event.severity) == 0 { - event.severity = severityUnknown - } - - if ok, err := validateSeverity(event.severity); !ok { - return fmt.Errorf("invalid severity: %s", err) - } - - if len(event.message) == 0 { - return errMessageSizeZero - } - - return nil + priority priority + traceID string + severity string + log string } // writeJSON prepares JSON in the format expected by the collector. func (e *logEvent) WriteJSON(buf *bytes.Buffer) { - w := jsonFieldsWriter{buf: buf} - buf.WriteByte('{') - w.stringField("severity", e.severity) - w.stringField("message", e.message) - - if len(e.spanID) > 0 { - w.stringField("span.id", e.spanID) - } - if len(e.traceID) > 0 { - w.stringField("trace.id", e.traceID) - } - - w.needsComma = false - buf.WriteByte(',') - w.intField("timestamp", e.timestamp) - buf.WriteByte('}') + buf.WriteString(e.log) } // MarshalJSON is used for testing. @@ -84,22 +41,52 @@ func (e *logEvent) MarshalJSON() ([]byte, error) { return buf.Bytes(), nil } -// must be a single word or number. 
If unknown, should be "UNKNOWN" -func validateSeverity(severity string) (bool, error) { - size := len(severity) - if size == 0 { - return false, errEmptySeverity +type logJson struct { + Timestamp float64 `json:"timestamp"` + Severity string `json:"level"` + Message string `json:"message"` + SpanID string `json:"span.id"` + TraceID string `json:"trace.id"` +} + +var ( + // regex allows a single word, or number + severityUnknown = "UNKNOWN" + errEmptyLog = errors.New("log event can not be empty") + errLogTooLarge = fmt.Errorf("log can not exceed %d bytes", maxLogBytes) +) + +func CreateLogEvent(log []byte) (logEvent, error) { + if len(log) > maxLogBytes { + return logEvent{}, errLogTooLarge } - if size > attributeKeyLengthLimit { - return false, errSeverityTooLarge + if len(log) == 0 { + return logEvent{}, errEmptyLog + } + + l := &logJson{} + err := json.Unmarshal(log, l) + if err != nil { + return logEvent{}, err } - if !severityRegex.MatchString(severity) { - return false, errSeverityRegex + logEvent := logEvent{ + log: string(log), + severity: l.Severity, + traceID: l.TraceID, } - return true, nil + + return logEvent, nil } func (e *logEvent) MergeIntoHarvest(h *harvest) { + // Inherit priority from traces or spans if possible + if e.traceID != "" { + priority, known := h.knownPriorities.get(e.traceID) + if known { + e.priority = priority + } + } + h.LogEvents.Add(e) } diff --git a/v3/newrelic/log_event_test.go b/v3/newrelic/log_event_test.go new file mode 100644 index 000000000..5eb997c03 --- /dev/null +++ b/v3/newrelic/log_event_test.go @@ -0,0 +1,108 @@ +package newrelic + +import ( + "testing" + "time" +) + +func TestCreateLogEvent(t *testing.T) { + tests := []struct { + Timestamp int64 + Severity string + Message string + SpanID string + TraceID string + Attributes map[string]interface{} + }{ + { + Timestamp: 123456, + Severity: "debug", + Message: "test", + SpanID: "123Ifker1", + TraceID: "23000L343", + }, + { + Timestamp: 123456, + Severity: "debug", + Message: "test", + }, + { + Timestamp: 123456, + Severity: "debug", + Message: "test", + }, + { + Timestamp: 123456, + Severity: "debug", + Message: "test", + SpanID: "123Ifker1", + TraceID: "23000L343", + Attributes: map[string]interface{}{ + "one": "attributeOne", + "two": "attributeTwo", + }, + }, + } + + for _, test := range tests { + var l []byte + if len(test.Attributes) > 0 { + l = writeLogWithAttributes(test.Severity, test.Message, test.SpanID, test.TraceID, int64(test.Timestamp), test.Attributes) + } else { + l = writeLog(test.Severity, test.Message, test.SpanID, test.TraceID, int64(test.Timestamp)) + } + + logEvent, err := CreateLogEvent(l) + if err != nil { + t.Error(err) + } + + if logEvent.traceID != test.TraceID { + t.Errorf("invalid traceID: expect \"%s\", got \"%s\"", test.TraceID, logEvent.traceID) + } + if logEvent.severity != test.Severity { + t.Errorf("invalid severity: expect \"%s\", got \"%s\"", test.Severity, logEvent.severity) + } + } +} + +func TestLogTooLarge(t *testing.T) { + l := make([]byte, maxLogBytes+1) + _, err := CreateLogEvent(l) + if err == nil { + t.Error("Failed to catch log too large error") + } + if err != errLogTooLarge { + t.Error(err) + } +} + +func TestLogTooSmall(t *testing.T) { + l := []byte{} + _, err := CreateLogEvent(l) + if err == nil { + t.Error("Failed to catch log too large error") + } + if err != errEmptyLog { + t.Error(err) + } +} + +func BenchmarkCreateLogEvent(b *testing.B) { + b.ReportAllocs() + json := writeLog("debug", "test message", "", "", time.Now().UnixMilli()) + _, 
err := CreateLogEvent(json) + if err != nil { + b.Error(err) + } +} + +func BenchmarkCreateLogEvent100(b *testing.B) { + json := writeLog("debug", "test message", "", "", time.Now().UnixMilli()) + for i := 0; i < 100; i++ { + _, err := CreateLogEvent(json) + if err != nil { + b.Error(err) + } + } +} diff --git a/v3/newrelic/log_events.go b/v3/newrelic/log_events.go index bf1b0460c..6a5942a38 100644 --- a/v3/newrelic/log_events.go +++ b/v3/newrelic/log_events.go @@ -11,6 +11,12 @@ import ( "github.com/newrelic/go-agent/v3/internal/jsonx" ) +type configLogHarvest struct { + collectEvents bool + collectMetrics bool + maxLogEvents int +} + type commonAttributes struct { entityGUID string entityName string @@ -24,7 +30,8 @@ type logEvents struct { failedHarvests int severityCount map[string]int commonAttributes - logs logEventHeap + config configLogHarvest + logs logEventHeap } // NumSeen returns the number of events seen @@ -33,17 +40,27 @@ func (events *logEvents) NumSeen() float64 { return float64(events.numSeen) } // NumSaved returns the number of events that will be harvested for this cycle func (events *logEvents) NumSaved() float64 { return float64(len(events.logs)) } -func (events *logEvents) RecordSeverityMetrics(metrics *metricTable, forced metricForce) { +func (events *logEvents) RecordLoggingMetrics(metrics *metricTable, forced metricForce) { + // Allows us to disable the reporting of metrics for logs + if !events.config.collectMetrics { + return + } + // avoid nil pointers during tests if metrics == nil { return } + metrics.addCount(logsSeen, events.NumSeen(), forced) + metrics.addCount(logsDropped, events.NumSeen()-events.NumSaved(), forced) + for k, v := range events.severityCount { - metricName := logsSeen + "/" + k - metrics.addCount(metricName, float64(v), forced) + severitySeen := logsSeen + "/" + k + metrics.addCount(severitySeen, float64(v), forced) } } +// TODO: when go 1.18 becomes the minimum supported version, re-write to make a generic heap implementation +// for all event heaps, to de-duplicate this code //func (events *logEvents) func (h logEventHeap) Len() int { return len(h) } func (h logEventHeap) Less(i, j int) bool { return h[i].priority.isLowerPriority(h[j].priority) } @@ -53,23 +70,26 @@ func (h logEventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } func (h logEventHeap) Push(x interface{}) {} func (h logEventHeap) Pop() interface{} { return nil } -func newLogEvents(ca commonAttributes, max int) *logEvents { +func newLogEvents(ca commonAttributes, loggingConfig configLogHarvest) *logEvents { return &logEvents{ commonAttributes: ca, + config: loggingConfig, severityCount: map[string]int{}, - logs: make(logEventHeap, 0, max), + logs: make(logEventHeap, 0, loggingConfig.maxLogEvents), } } func (events *logEvents) capacity() int { - return cap(events.logs) + return events.config.maxLogEvents } func (events *logEvents) Add(e *logEvent) { events.numSeen++ events.severityCount[e.severity] += 1 - if events.capacity() == 0 { + // Do not collect log events when the harvest capacity is intentionally set to 0 + // or the collection of events is explicitly disabled + if events.capacity() == 0 || !events.config.collectEvents { // Configurable event harvest limits may be zero. 
return } @@ -191,7 +211,8 @@ func (events *logEvents) split() (*logEvents, *logEvents) { // splits the contents and counts of the severity map func splitSeverityCount(severityCount map[string]int) (map[string]int, map[string]int) { - var count1, count2 map[string]int + count1 := map[string]int{} + count2 := map[string]int{} for k, v := range severityCount { count1[k] = v / 2 count2[k] = v - count1[k] diff --git a/v3/newrelic/log_events_test.go b/v3/newrelic/log_events_test.go index 8676ca166..befd78921 100644 --- a/v3/newrelic/log_events_test.go +++ b/v3/newrelic/log_events_test.go @@ -4,7 +4,12 @@ package newrelic import ( + "bytes" + "fmt" "testing" + "time" + + "github.com/newrelic/go-agent/v3/internal" ) var ( @@ -16,39 +21,83 @@ var ( entityName: testEntityName, hostname: testHostname, } + commonJSON = `[{"common":{"attributes":{"entity.guid":"testGUID","entity.name":"testEntityName","hostname":"testHostname"}},"logs":[` infoLevel = "INFO" unknownLevel = "UNKNOWN" ) -func sampleLogEvent(priority priority, severity, message string) *logEvent { - return &logEvent{ - priority, - severity, - message, - "AF02332", - "0024483", - 123456, +func loggingConfigEnabled(limit int) configLogHarvest { + return configLogHarvest{ + collectEvents: true, + collectMetrics: true, + maxLogEvents: limit, + } +} + +func writeLogWithAttributes(severity, message, spanID, traceID string, timestamp int64, attributes map[string]interface{}) []byte { + buf := &bytes.Buffer{} + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('{') + w.stringField(LogSeverityFieldName, severity) + w.stringField(LogMessageFieldName, message) + + if len(spanID) > 0 { + w.stringField(LogSpanIDFieldName, spanID) + } + if len(traceID) > 0 { + w.stringField(LogTraceIDFieldName, traceID) + } + + w.needsComma = false + buf.WriteByte(',') + w.intField(LogTimestampFieldName, timestamp) + if len(attributes) > 0 { + for key, val := range attributes { + writeAttributeValueJSON(&w, key, val) + } + } + + buf.WriteByte('}') + + return w.buf.Bytes() +} + +func writeLog(severity, message, spanID, traceID string, timestamp int64) []byte { + buf := &bytes.Buffer{} + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('{') + w.stringField(LogSeverityFieldName, severity) + w.stringField(LogMessageFieldName, message) + + if len(spanID) > 0 { + w.stringField(LogSpanIDFieldName, spanID) } + if len(traceID) > 0 { + w.stringField(LogTraceIDFieldName, traceID) + } + + w.needsComma = false + buf.WriteByte(',') + w.intField(LogTimestampFieldName, timestamp) + buf.WriteByte('}') + + return w.buf.Bytes() } -// NOTE: this is going to make the tests run really slow due to heap allocation -func sampleLogEventNoParent(priority priority, severity, message string) *logEvent { +func sampleLogEvent(priority priority, severity, message string) *logEvent { return &logEvent{ - priority, - severity, - message, - "", - "", - 123456, + priority: priority, + severity: severity, + log: string(writeLog(severity, message, "", "", 123456)), } } func TestBasicLogEvents(t *testing.T) { - events := newLogEvents(testCommonAttributes, 5) + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(5)) events.Add(sampleLogEvent(0.5, infoLevel, "message1")) - events.Add(sampleLogEventNoParent(0.1, infoLevel, "message2")) + events.Add(sampleLogEvent(0.5, infoLevel, "message2")) json, err := events.CollectorJSON(agentRunID) if nil != err { @@ -56,9 +105,8 @@ func TestBasicLogEvents(t *testing.T) { } expected := commonJSON + - 
`{"severity":"INFO","message":"message1","span.id":"AF02332","trace.id":"0024483","timestamp":123456},` + - `{"severity":"INFO","message":"message2","timestamp":123456}]}` + - `]` + `{"level":"INFO","message":"message1","timestamp":123456},` + + `{"level":"INFO","message":"message2","timestamp":123456}]}]` if string(json) != expected { t.Error(string(json), expected) @@ -72,7 +120,7 @@ func TestBasicLogEvents(t *testing.T) { } func TestEmptyLogEvents(t *testing.T) { - events := newLogEvents(testCommonAttributes, 10) + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) json, err := events.CollectorJSON(agentRunID) if nil != err { t.Fatal(err) @@ -90,7 +138,7 @@ func TestEmptyLogEvents(t *testing.T) { // The events with the highest priority should make it: a, c, e func TestSamplingLogEvents(t *testing.T) { - events := newLogEvents(testCommonAttributes, 3) + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(3)) events.Add(sampleLogEvent(0.999999, infoLevel, "a")) events.Add(sampleLogEvent(0.1, infoLevel, "b")) @@ -104,9 +152,9 @@ func TestSamplingLogEvents(t *testing.T) { t.Fatal(err) } expect := commonJSON + - `{"severity":"INFO","message":"e","span.id":"AF02332","trace.id":"0024483","timestamp":123456},` + - `{"severity":"INFO","message":"a","span.id":"AF02332","trace.id":"0024483","timestamp":123456},` + - `{"severity":"INFO","message":"c","span.id":"AF02332","trace.id":"0024483","timestamp":123456}]}` + + `{"level":"INFO","message":"e","timestamp":123456},` + + `{"level":"INFO","message":"a","timestamp":123456},` + + `{"level":"INFO","message":"c","timestamp":123456}]}` + `]` if string(json) != expect { t.Error(string(json), expect) @@ -120,8 +168,8 @@ func TestSamplingLogEvents(t *testing.T) { } func TestMergeEmptyLogEvents(t *testing.T) { - e1 := newLogEvents(testCommonAttributes, 10) - e2 := newLogEvents(testCommonAttributes, 10) + e1 := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) + e2 := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) e1.Merge(e2) json, err := e1.CollectorJSON(agentRunID) if nil != err { @@ -139,8 +187,8 @@ func TestMergeEmptyLogEvents(t *testing.T) { } func TestMergeFullLogEvents(t *testing.T) { - e1 := newLogEvents(testCommonAttributes, 2) - e2 := newLogEvents(testCommonAttributes, 3) + e1 := newLogEvents(testCommonAttributes, loggingConfigEnabled(2)) + e2 := newLogEvents(testCommonAttributes, loggingConfigEnabled(3)) e1.Add(sampleLogEvent(0.1, infoLevel, "a")) e1.Add(sampleLogEvent(0.15, infoLevel, "b")) @@ -158,8 +206,8 @@ func TestMergeFullLogEvents(t *testing.T) { // expect the highest priority events: c, g expect := commonJSON + - `{"severity":"INFO","message":"g","span.id":"AF02332","trace.id":"0024483","timestamp":123456},` + - `{"severity":"INFO","message":"c","span.id":"AF02332","trace.id":"0024483","timestamp":123456}]}]` + `{"level":"INFO","message":"g","timestamp":123456},` + + `{"level":"INFO","message":"c","timestamp":123456}]}]` if string(json) != expect { t.Error(string(json)) @@ -173,8 +221,8 @@ func TestMergeFullLogEvents(t *testing.T) { } func TestLogEventMergeFailedSuccess(t *testing.T) { - e1 := newLogEvents(testCommonAttributes, 2) - e2 := newLogEvents(testCommonAttributes, 3) + e1 := newLogEvents(testCommonAttributes, loggingConfigEnabled(2)) + e2 := newLogEvents(testCommonAttributes, loggingConfigEnabled(3)) e1.Add(sampleLogEvent(0.1, infoLevel, "a")) e1.Add(sampleLogEvent(0.15, infoLevel, "b")) @@ -193,8 +241,8 @@ func TestLogEventMergeFailedSuccess(t *testing.T) { } // expect 
the highest priority events: c, g expect := commonJSON + - `{"severity":"INFO","message":"g","span.id":"AF02332","trace.id":"0024483","timestamp":123456},` + - `{"severity":"INFO","message":"c","span.id":"AF02332","trace.id":"0024483","timestamp":123456}]}]` + `{"level":"INFO","message":"g","timestamp":123456},` + + `{"level":"INFO","message":"c","timestamp":123456}]}]` if string(json) != expect { t.Error(string(json)) @@ -211,8 +259,8 @@ func TestLogEventMergeFailedSuccess(t *testing.T) { } func TestLogEventMergeFailedLimitReached(t *testing.T) { - e1 := newLogEvents(testCommonAttributes, 2) - e2 := newLogEvents(testCommonAttributes, 3) + e1 := newLogEvents(testCommonAttributes, loggingConfigEnabled(2)) + e2 := newLogEvents(testCommonAttributes, loggingConfigEnabled(3)) e1.Add(sampleLogEvent(0.1, infoLevel, "a")) e1.Add(sampleLogEvent(0.15, infoLevel, "b")) @@ -232,8 +280,8 @@ func TestLogEventMergeFailedLimitReached(t *testing.T) { t.Fatal(err) } expect := commonJSON + - `{"severity":"INFO","message":"b","span.id":"AF02332","trace.id":"0024483","timestamp":123456},` + - `{"severity":"INFO","message":"c","span.id":"AF02332","trace.id":"0024483","timestamp":123456}]}]` + `{"level":"INFO","message":"b","timestamp":123456},` + + `{"level":"INFO","message":"c","timestamp":123456}]}]` if string(json) != expect { t.Error(string(json)) @@ -249,72 +297,11 @@ func TestLogEventMergeFailedLimitReached(t *testing.T) { } } -/* -func logEventBenchmarkHelper(b *testing.B, w jsonWriter) { - events := newLogEvents(testCommonAttributes, internal.MaxTxnEvents) - event := logEvent{0, w} - for n := 0; n < internal.MaxTxnEvents; n++ { - events.addEvent(event) - } - - b.ReportAllocs() - b.ResetTimer() - - for n := 0; n < b.N; n++ { - js, err := events.CollectorJSON(agentRunID) - if nil != err { - b.Fatal(err, js) - } - } -} - - -func BenchmarkTxnEventsCollectorJSON(b *testing.B) { - event := &txnEvent{ - FinalName: "WebTransaction/Go/zip/zap", - Start: time.Now(), - Duration: 2 * time.Second, - Queuing: 1 * time.Second, - Zone: apdexSatisfying, - Attrs: nil, - } - analyticsEventBenchmarkHelper(b, event) -} - -func BenchmarkCustomEventsCollectorJSON(b *testing.B) { - now := time.Now() - ce, err := createCustomEvent("myEventType", map[string]interface{}{ - "string": "myString", - "bool": true, - "int64": int64(123), - }, now) - if nil != err { - b.Fatal(err) - } - analyticsEventBenchmarkHelper(b, ce) -} - -func BenchmarkErrorEventsCollectorJSON(b *testing.B) { - e := txnErrorFromResponseCode(time.Now(), 503) - e.Stack = getStackTrace() - - txnName := "WebTransaction/Go/zip/zap" - event := &errorEvent{ - errorData: e, - txnEvent: txnEvent{ - FinalName: txnName, - Duration: 3 * time.Second, - Attrs: nil, - }, - } - analyticsEventBenchmarkHelper(b, event) -} - - -func TestSplitFull(t *testing.T) { - events := newLogEvents(testCommonAttributes, 10) +func TestLogEventsSplitFull(t *testing.T) { + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) for i := 0; i < 15; i++ { - events.addEvent(sampleLogEvent(priority(float32(i) / 10.0))) + priority := priority(float32(i) / 10.0) + events.Add(sampleLogEvent(priority, "INFO", fmt.Sprint(priority))) } // Test that the capacity cannot exceed the max. 
if 10 != events.capacity() { @@ -326,18 +313,34 @@ func TestSplitFull(t *testing.T) { if err1 != nil || err2 != nil { t.Fatal(err1, err2) } - if string(j1) != `["12345",{"reservoir_size":5,"events_seen":5},[0.5,0.7,0.6,0.8,0.9]]` { + expect1 := commonJSON + + `{"level":"INFO","message":"0.5","timestamp":123456},` + + `{"level":"INFO","message":"0.7","timestamp":123456},` + + `{"level":"INFO","message":"0.6","timestamp":123456},` + + `{"level":"INFO","message":"0.8","timestamp":123456},` + + `{"level":"INFO","message":"0.9","timestamp":123456}]}]` + if string(j1) != expect1 { t.Error(string(j1)) } - if string(j2) != `["12345",{"reservoir_size":5,"events_seen":10},[1.1,1.4,1,1.3,1.2]]` { + + expect2 := commonJSON + + `{"level":"INFO","message":"1.1","timestamp":123456},` + + `{"level":"INFO","message":"1.4","timestamp":123456},` + + `{"level":"INFO","message":"1","timestamp":123456},` + + `{"level":"INFO","message":"1.3","timestamp":123456},` + + `{"level":"INFO","message":"1.2","timestamp":123456}]}]` + if string(j2) != expect2 { t.Error(string(j2)) } } -func TestSplitNotFullOdd(t *testing.T) { - events := newLogEvents(testCommonAttributes, 10) +// TODO: When miniumu supported go version is 1.18, make an event heap in GO generics and remove all this duplicate code +// interfaces are too slow :( +func TestLogEventsSplitNotFullOdd(t *testing.T) { + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) for i := 0; i < 7; i++ { - events.addEvent(sampleLogEvent(priority(float32(i) / 10.0))) + priority := priority(float32(i) / 10.0) + events.Add(sampleLogEvent(priority, "INFO", fmt.Sprint(priority))) } e1, e2 := events.split() j1, err1 := e1.CollectorJSON(agentRunID) @@ -345,18 +348,29 @@ func TestSplitNotFullOdd(t *testing.T) { if err1 != nil || err2 != nil { t.Fatal(err1, err2) } - if string(j1) != `["12345",{"reservoir_size":3,"events_seen":3},[0,0.1,0.2]]` { + expect1 := commonJSON + + `{"level":"INFO","message":"0","timestamp":123456},` + + `{"level":"INFO","message":"0.1","timestamp":123456},` + + `{"level":"INFO","message":"0.2","timestamp":123456}]}]` + if string(j1) != expect1 { t.Error(string(j1)) } - if string(j2) != `["12345",{"reservoir_size":4,"events_seen":4},[0.3,0.4,0.5,0.6]]` { + + expect2 := commonJSON + + `{"level":"INFO","message":"0.3","timestamp":123456},` + + `{"level":"INFO","message":"0.4","timestamp":123456},` + + `{"level":"INFO","message":"0.5","timestamp":123456},` + + `{"level":"INFO","message":"0.6","timestamp":123456}]}]` + if string(j2) != expect2 { t.Error(string(j2)) } } -func TestSplitNotFullEven(t *testing.T) { - events := newLogEvents(testCommonAttributes, 10) +func TestLogEventsSplitNotFullEven(t *testing.T) { + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) for i := 0; i < 8; i++ { - events.addEvent(sampleLogEvent(priority(float32(i) / 10.0))) + priority := priority(float32(i) / 10.0) + events.Add(sampleLogEvent(priority, "INFO", fmt.Sprint(priority))) } e1, e2 := events.split() j1, err1 := e1.CollectorJSON(agentRunID) @@ -364,10 +378,21 @@ func TestSplitNotFullEven(t *testing.T) { if err1 != nil || err2 != nil { t.Fatal(err1, err2) } - if string(j1) != `["12345",{"reservoir_size":4,"events_seen":4},[0,0.1,0.2,0.3]]` { + expect1 := commonJSON + + `{"level":"INFO","message":"0","timestamp":123456},` + + `{"level":"INFO","message":"0.1","timestamp":123456},` + + `{"level":"INFO","message":"0.2","timestamp":123456},` + + `{"level":"INFO","message":"0.3","timestamp":123456}]}]` + if string(j1) != expect1 { 
t.Error(string(j1)) } - if string(j2) != `["12345",{"reservoir_size":4,"events_seen":4},[0.4,0.5,0.6,0.7]]` { + + expect2 := commonJSON + + `{"level":"INFO","message":"0.4","timestamp":123456},` + + `{"level":"INFO","message":"0.5","timestamp":123456},` + + `{"level":"INFO","message":"0.6","timestamp":123456},` + + `{"level":"INFO","message":"0.7","timestamp":123456}]}]` + if string(j2) != expect2 { t.Error(string(j2)) } } @@ -375,11 +400,11 @@ func TestSplitNotFullEven(t *testing.T) { func TestLogEventsZeroCapacity(t *testing.T) { // Analytics events methods should be safe when configurable harvest // settings have an event limit of zero. - events := newLogEvents(testCommonAttributes, 0) + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(0)) if 0 != events.NumSeen() || 0 != events.NumSaved() || 0 != events.capacity() { t.Error(events.NumSeen(), events.NumSaved(), events.capacity()) } - events.addEvent(sampleLogEvent(0.5)) + events.Add(sampleLogEvent(0.5, "INFO", "TEST")) if 1 != events.NumSeen() || 0 != events.NumSaved() || 0 != events.capacity() { t.Error(events.NumSeen(), events.NumSaved(), events.capacity()) } @@ -388,4 +413,48 @@ func TestLogEventsZeroCapacity(t *testing.T) { t.Error(err, string(js)) } } -*/ + +func TestLogEventCollectionDisabled(t *testing.T) { + // Analytics events methods should be safe when configurable harvest + // settings have an event limit of zero. + config := loggingConfigEnabled(5) + config.collectEvents = false + events := newLogEvents(testCommonAttributes, config) + if 0 != events.NumSeen() || 0 != len(events.severityCount) || 0 != events.NumSaved() || 5 != events.capacity() { + t.Error(events.NumSeen(), len(events.severityCount), events.NumSaved(), events.capacity()) + } + events.Add(sampleLogEvent(0.5, "INFO", "TEST")) + if 1 != events.NumSeen() || 1 != len(events.severityCount) || 0 != events.NumSaved() || 5 != events.capacity() { + t.Error(events.NumSeen(), len(events.severityCount), events.NumSaved(), events.capacity()) + } + js, err := events.CollectorJSON("agentRunID") + if err != nil || js != nil { + t.Error(err, string(js)) + } +} + +func BenchmarkCollectLogEvent(b *testing.B) { + json := writeLog("debug", "test message", "", "", time.Now().UnixMilli()) + logEvent, err := CreateLogEvent(json) + if err != nil { + b.Error(err) + } + logEventBenchmarkHelper(b, &logEvent) +} + +func logEventBenchmarkHelper(b *testing.B, event *logEvent) { + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) + for n := 0; n < internal.MaxTxnEvents; n++ { + events.Add(event) + } + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + js, err := events.CollectorJSON(agentRunID) + if nil != err { + b.Fatal(err, js) + } + } +} diff --git a/v3/newrelic/log_writer.go b/v3/newrelic/log_writer.go new file mode 100644 index 000000000..6eb131847 --- /dev/null +++ b/v3/newrelic/log_writer.go @@ -0,0 +1,42 @@ +package newrelic + +import ( + "errors" + "io" +) + +type logWriter struct { + app *Application + out io.Writer +} + +func NewLogWriter(app *Application, out io.Writer) (logWriter, error) { + if app == nil || app.app == nil { + return logWriter{}, errors.New("app must not be nil") + } + + return logWriter{app, out}, nil +} + +func (writer logWriter) Write(p []byte) (n int, err error) { + internalApp := writer.app.app + if internalApp.config.ApplicationLogging.Enabled && !internalApp.config.Config.HighSecurity { + logEvent, err := CreateLogEvent(p) + if err != nil { + return 0, err + } + run, _ := 
internalApp.getState() + + // Run reply is unable to exlpicitly disable logging features, so we do not check it. + // If the user wants to disable logging on the server side, they can only set the + // log event limit to zero, which will set the harvest limit for log events to zero. + + internalApp.Consume(run.Reply.RunID, &logEvent) + } + + if writer.out != nil { + return writer.out.Write(p) + } else { + return len(p), nil + } +} diff --git a/v3/newrelic/log_writer_test.go b/v3/newrelic/log_writer_test.go new file mode 100644 index 000000000..51aca4431 --- /dev/null +++ b/v3/newrelic/log_writer_test.go @@ -0,0 +1,25 @@ +package newrelic + +import ( + "testing" + "time" +) + +func BenchmarkWrite(b *testing.B) { + app, err := NewApplication( + ConfigAppLogForwardingEnabled(true), + ) + if err != nil { + b.Error(err) + } + + json := writeLog("debug", "test message", "", "", time.Now().UnixMilli()) + writer, err := NewLogWriter(app, nil) + if err != nil { + b.Error(err) + } + + b.ReportAllocs() + b.ResetTimer() + writer.Write(json) +} diff --git a/v3/newrelic/sampler_test.go b/v3/newrelic/sampler_test.go index 43ca42eb5..4a4a2ce8d 100644 --- a/v3/newrelic/sampler_test.go +++ b/v3/newrelic/sampler_test.go @@ -33,7 +33,7 @@ func TestGetSample(t *testing.T) { func TestMetricsCreated(t *testing.T) { now := time.Now() - h := newHarvest(now, dfltHarvestCfgr) + h := newHarvest(now, testHarvestCfgr) stats := systemStats{ heapObjects: 5 * 1000, @@ -71,7 +71,7 @@ func TestMetricsCreated(t *testing.T) { func TestMetricsCreatedEmpty(t *testing.T) { now := time.Now() - h := newHarvest(now, dfltHarvestCfgr) + h := newHarvest(now, testHarvestCfgr) stats := systemStats{} stats.MergeIntoHarvest(h) From 2fdbfbf182ec1955165b8615cb6e920fb19eaf65 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 24 May 2022 15:32:09 -0400 Subject: [PATCH 04/47] Zerolog logs in context support Modifies the agent to support passing log data from the new zerolog plugin into the harvest pool and sending that data to our servers. 
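
For reference, a minimal usage sketch of the zerolog hook this patch introduces (see hook.go below). It assumes an already-created *newrelic.Application and zerolog's standard Logger.Hook registration; the app name and config options shown are illustrative only, and error handling is kept to a bare minimum.

    package main

    import (
        "os"

        "github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrzerolog"
        "github.com/newrelic/go-agent/v3/newrelic"
        "github.com/rs/zerolog"
    )

    func main() {
        // Create the agent application with log forwarding enabled.
        app, err := newrelic.NewApplication(
            newrelic.ConfigAppName("zerolog example"),
            newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")),
            newrelic.ConfigAppLogForwardingEnabled(true),
        )
        if err != nil {
            panic(err)
        }

        // Register the hook: every zerolog line is written to stdout as usual
        // and also handed to the agent's log event harvest.
        logger := zerolog.New(os.Stdout).Hook(nrzerolog.Hook{App: app})
        logger.Info().Msg("hello from zerolog")
    }

Outside a transaction the hook records the log with empty span and trace IDs; supplying a transaction-bearing context through the hook's Context field attaches them.
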
--- v3/examples/short-lived-process/main.go | 13 +- v3/examples/stress-tests/main.go | 123 ------------------ .../logcontext-v2/nrzerolog/go.mod | 8 ++ .../logcontext-v2/nrzerolog/hook.go | 40 ++++++ v3/newrelic/application.go | 15 +++ v3/newrelic/expect_implementation.go | 36 ++++- v3/newrelic/harvest_test.go | 97 +++++++------- v3/newrelic/internal_app.go | 23 ++++ v3/newrelic/log_event.go | 87 ++++++++----- v3/newrelic/log_event_test.go | 123 +++++------------- v3/newrelic/log_events_test.go | 68 +--------- v3/newrelic/log_writer.go | 42 ------ v3/newrelic/log_writer_test.go | 25 ---- 13 files changed, 262 insertions(+), 438 deletions(-) delete mode 100644 v3/examples/stress-tests/main.go create mode 100644 v3/integrations/logcontext-v2/nrzerolog/go.mod create mode 100644 v3/integrations/logcontext-v2/nrzerolog/hook.go delete mode 100644 v3/newrelic/log_writer.go delete mode 100644 v3/newrelic/log_writer_test.go diff --git a/v3/examples/short-lived-process/main.go b/v3/examples/short-lived-process/main.go index b234c79c5..99c30f494 100644 --- a/v3/examples/short-lived-process/main.go +++ b/v3/examples/short-lived-process/main.go @@ -4,7 +4,6 @@ package main import ( - "context" "fmt" "os" "time" @@ -14,12 +13,12 @@ import ( func main() { app, err := newrelic.NewApplication( - newrelic.ConfigAppName("Example Short Lived Process"), + newrelic.ConfigAppName("zerolog test"), newrelic.ConfigAppLogForwardingEnabled(true), newrelic.ConfigDistributedTracerEnabled(true), newrelic.ConfigAppLogMetricsEnabled(true), newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), - newrelic.ConfigDebugLogger(os.Stdout), + newrelic.ConfigInfoLogger(os.Stdout), ) if nil != err { fmt.Println(err) @@ -31,8 +30,6 @@ func main() { fmt.Println(err) } - app.RecordLogEvent(context.Background(), "App Started", "INFO", time.Now().UnixMilli()) - // Do the tasks at hand. Perhaps record them using transactions and/or // custom events. tasks := []string{"white", "black", "red", "blue", "green", "yellow"} @@ -44,12 +41,6 @@ func main() { "color": task, }) } - - app.RecordLogEvent(context.Background(), "A warning log occured!", "WARN", time.Now().UnixMilli()) - app.RecordLogEvent(context.Background(), "App Executed Succesfully", "INFO", time.Now().UnixMilli()) - - time.Sleep(60 * time.Second) - // Shut down the application to flush data to New Relic. app.Shutdown(10 * time.Second) } diff --git a/v3/examples/stress-tests/main.go b/v3/examples/stress-tests/main.go deleted file mode 100644 index d8c662d96..000000000 --- a/v3/examples/stress-tests/main.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2020 New Relic Corporation. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package main - -import ( - "context" - "fmt" - "os" - "time" - - "github.com/newrelic/go-agent/v3/newrelic" -) - -var ( - applicationLoggingEvents = "Application Logging Events" - customEvents = "Custom Events" -) - -func main() { - app, err := newrelic.NewApplication( - newrelic.ConfigAppName("ApplicationLogging Stress Test Golang"), - newrelic.ConfigAppLogForwardingEnabled(true), - newrelic.ConfigDistributedTracerEnabled(true), - newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), - newrelic.ConfigInfoLogger(os.Stdout), - ) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - // Wait for the application to connect. 
- if err := app.WaitForConnection(5 * time.Second); nil != err { - fmt.Println(err) - } - - tests := []Benchmark{ - NewLogBenchmark(10, 6), - NewLogBenchmark(100, 6), - NewLogBenchmark(1000, 6), - - NewCustomEventBenchmark(10, 6), - NewCustomEventBenchmark(100, 6), - NewCustomEventBenchmark(1000, 6), - } - - for _, test := range tests { - test.Benchmark(app) - } - - var metrics string - for _, test := range tests { - metrics += test.Sprint() - } - - // Wait for the application to connect. - if err := app.WaitForConnection(5 * time.Second); nil != err { - fmt.Println(err) - } - - // Shut down the application to flush data to New Relic. - app.Shutdown(10 * time.Second) - - fmt.Println(metrics) -} - -type Benchmark struct { - eventType string - numEvents int - sets int - runTimes []int64 -} - -func NewLogBenchmark(numEvents, numRuns int) Benchmark { - return Benchmark{ - applicationLoggingEvents, - numEvents, - numRuns, - make([]int64, numRuns), - } -} - -func NewCustomEventBenchmark(numEvents, numRuns int) Benchmark { - return Benchmark{ - customEvents, - numEvents, - numRuns, - make([]int64, numRuns), - } -} - -func (bench *Benchmark) Sprint() string { - sum := int64(0) - output := fmt.Sprintf("Time taken to record %d %s:\n", bench.numEvents, bench.eventType) - for _, time := range bench.runTimes { - output += fmt.Sprintf("\t\tMicroseconds: %d\n", time) - sum += time - } - - average := sum / int64(len(bench.runTimes)) - output += fmt.Sprintf("\t\tAverage Microseconds: %d\n", average) - return output -} - -func (bench *Benchmark) Benchmark(app *newrelic.Application) { - for set := 0; set < bench.sets; set++ { - start := time.Now() - for i := 0; i < bench.numEvents; i++ { - switch bench.eventType { - case applicationLoggingEvents: - message := "Message " + fmt.Sprint(i) - app.RecordLogEvent(context.Background(), message, "INFO", time.Now().UnixMilli()) - case customEvents: - message := "Message " + fmt.Sprint(i) - app.RecordCustomEvent("TEST EVENT", map[string]interface{}{ - "Message": message, - }) - } - } - bench.runTimes[set] = time.Since(start).Microseconds() - } -} diff --git a/v3/integrations/logcontext-v2/nrzerolog/go.mod b/v3/integrations/logcontext-v2/nrzerolog/go.mod new file mode 100644 index 000000000..99453551b --- /dev/null +++ b/v3/integrations/logcontext-v2/nrzerolog/go.mod @@ -0,0 +1,8 @@ +module github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrzerolog + +go 1.15 + +require ( + github.com/newrelic/go-agent/v3 v3.16.0 + github.com/rs/zerolog v1.26.1 +) diff --git a/v3/integrations/logcontext-v2/nrzerolog/hook.go b/v3/integrations/logcontext-v2/nrzerolog/hook.go new file mode 100644 index 000000000..8c1b4678a --- /dev/null +++ b/v3/integrations/logcontext-v2/nrzerolog/hook.go @@ -0,0 +1,40 @@ +package nrzerolog + +import ( + "context" + "time" + + "github.com/newrelic/go-agent/v3/newrelic" + "github.com/rs/zerolog" +) + +type Hook struct { + App *newrelic.Application + Context context.Context +} + +func (h Hook) Run(e *zerolog.Event, level zerolog.Level, msg string) { + logLevel := "" + if level == zerolog.NoLevel { + logLevel = newrelic.LogSeverityUnknown + } else { + logLevel = level.String() + } + + var spanID, traceID string + if h.Context != nil { + txn := newrelic.FromContext(h.Context) + traceMetadata := txn.GetTraceMetadata() + spanID = traceMetadata.SpanID + traceID = traceMetadata.TraceID + } + + data := newrelic.LogData{ + Timestamp: time.Now().UnixMilli(), + Severity: logLevel, + Message: msg, + SpanID: spanID, + TraceID: traceID, + } + 
h.App.RecordLog(&data) +} diff --git a/v3/newrelic/application.go b/v3/newrelic/application.go index 7a8db268f..3cf6f5f39 100644 --- a/v3/newrelic/application.go +++ b/v3/newrelic/application.go @@ -74,6 +74,21 @@ func (app *Application) RecordCustomMetric(name string, value float64) { } } +func (app *Application) RecordLog(logEvent *LogData) { + if nil == app { + return + } + if nil == app.app { + return + } + err := app.app.RecordLog(logEvent) + if err != nil { + app.app.Error("unable to record log", map[string]interface{}{ + "reason": err.Error(), + }) + } +} + // WaitForConnection blocks until the application is connected, is // incapable of being connected, or the timeout has been reached. This // method is useful for short-lived processes since the application will diff --git a/v3/newrelic/expect_implementation.go b/v3/newrelic/expect_implementation.go index 76319791b..435a66dff 100644 --- a/v3/newrelic/expect_implementation.go +++ b/v3/newrelic/expect_implementation.go @@ -201,9 +201,39 @@ func expectCustomEvents(v internal.Validator, cs *customEvents, expect []interna expectEvents(v, cs.analyticsEvents, expect, nil) } -func expectLogEvents(v internal.Validator, logEvents *logEvents, expect []internal.WantLog) { - //TODO(egarcia): implement this - return +func expectLogEvents(v internal.Validator, events *logEvents, expect []internal.WantLog) { + if len(events.logs) != len(expect) { + v.Error("number of events does not match", len(events.logs), len(expect)) + return + } + + for i, e := range expect { + event := events.logs[i] + expectLogEvent(v, event, e) + } +} + +func expectLogEvent(v internal.Validator, event logEvent, want internal.WantLog) { + if event.message != want.Message { + v.Error(fmt.Sprintf("unexpected log message: want %s, got %s", event.message, want.Message)) + return + } + if event.severity != want.Severity { + v.Error(fmt.Sprintf("unexpected log severity: want %s, got %s", event.severity, want.Severity)) + return + } + if event.traceID != want.TraceID { + v.Error(fmt.Sprintf("unexpected log trace id: want %s, got %s", event.traceID, want.TraceID)) + return + } + if event.spanID != want.SpanID { + v.Error(fmt.Sprintf("unexpected log span id: want %s, got %s", event.spanID, want.SpanID)) + return + } + if event.timestamp != want.Timestamp { + v.Error(fmt.Sprintf("unexpected log timestamp: want %d, got %d", event.timestamp, want.Timestamp)) + return + } } func expectEvent(v internal.Validator, e json.Marshaler, expect internal.WantEvent) { diff --git a/v3/newrelic/harvest_test.go b/v3/newrelic/harvest_test.go index bb6f82c52..1d811bd3b 100644 --- a/v3/newrelic/harvest_test.go +++ b/v3/newrelic/harvest_test.go @@ -12,20 +12,23 @@ import ( ) var ( - testHarvestCfgr = harvestConfig{ - ReportPeriods: map[harvestTypes]time.Duration{harvestTypesAll: fixedHarvestPeriod}, - MaxTxnEvents: internal.MaxTxnEvents, - MaxSpanEvents: maxSpanEvents, - MaxCustomEvents: internal.MaxCustomEvents, - MaxErrorEvents: internal.MaxErrorEvents, - LoggingConfig: configLogHarvest{ - true, - true, - internal.MaxLogEvents, - }, - } + // This is for testing only + testHarvestCfgr = generateTestHarvestConfig() ) +func generateTestHarvestConfig() harvestConfig { + cfg := dfltHarvestCfgr + + // Enable logging features for testing (not enabled by default) + loggingCfg := configLogHarvest{ + true, + true, + internal.MaxLogEvents, + } + cfg.LoggingConfig = loggingCfg + return cfg +} + func TestHarvestTimerAllFixed(t *testing.T) { now := time.Now() harvest := newHarvest(now, testHarvestCfgr) @@ -290,16 
+293,14 @@ func TestHarvestLogEventsReady(t *testing.T) { }, LoggingConfig: loggingConfigEnabled(3), }) - timestamp := timeToIntMillis(now) - severity := "INFO" - message := "User 'xyz' logged in" - spanID := "123456789ADF" - traceID := "ADF09876565" - - log := writeLog(severity, message, spanID, traceID, timestamp) - logEvent, err := CreateLogEvent(log) - if err != nil { - t.Error(err) + + logEvent := logEvent{ + 0.5, + int64(time.Now().UnixMilli()), + "INFO", + "User 'xyz' logged in", + "123456789ADF", + "ADF09876565", } h.LogEvents.Add(&logEvent) @@ -323,17 +324,17 @@ func TestHarvestLogEventsReady(t *testing.T) { } sampleLogEvent := internal.WantLog{ - Severity: severity, - Message: message, - SpanID: spanID, - TraceID: traceID, - Timestamp: timestamp, + Severity: logEvent.severity, + Message: logEvent.message, + SpanID: logEvent.spanID, + TraceID: logEvent.traceID, + Timestamp: logEvent.timestamp, } expectLogEvents(t, ready.LogEvents, []internal.WantLog{sampleLogEvent}) expectMetrics(t, h.Metrics, []internal.WantMetric{ {Name: logsSeen, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, - {Name: logsSeen + "/" + severity, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: logsSeen + "/" + logEvent.severity, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: logsDropped, Scope: "", Forced: true, Data: []float64{0, 0, 0, 0, 0, 0}}, }) } @@ -552,18 +553,16 @@ func TestMergeFailedHarvest(t *testing.T) { Duration: 1 * time.Second, TotalTime: 2 * time.Second, }, 0) - // timestamp := timeToIntMillis(now) - logLevel := "INFO" - message := "User 'xyz' logged in" - spanID := "123456789ADF" - traceID := "ADF09876565" - logTimestamp := int64(123456) - - log := writeLog(logLevel, message, spanID, traceID, logTimestamp) - logEvent, err := CreateLogEvent(log) - if err != nil { - t.Error(err) + + logEvent := logEvent{ + 0.5, + int64(time.Now().UnixMilli()), + "INFO", + "User 'xyz' logged in", + "123456789ADF", + "ADF09876565", } + h.LogEvents.Add(&logEvent) customEventParams := map[string]interface{}{"zip": 1} ce, err := createCustomEvent("myEvent", customEventParams, time.Now()) @@ -629,11 +628,11 @@ func TestMergeFailedHarvest(t *testing.T) { }}) expectLogEvents(t, h.LogEvents, []internal.WantLog{ { - Timestamp: logTimestamp, - Severity: logLevel, - Message: message, - SpanID: spanID, - TraceID: traceID, + Severity: logEvent.severity, + Message: logEvent.message, + SpanID: logEvent.spanID, + TraceID: logEvent.traceID, + Timestamp: logEvent.timestamp, }, }) expectErrorEvents(t, h.ErrorEvents, []internal.WantEvent{{ @@ -710,11 +709,11 @@ func TestMergeFailedHarvest(t *testing.T) { }}) expectLogEvents(t, nextHarvest.LogEvents, []internal.WantLog{ { - Timestamp: logTimestamp, - Severity: logLevel, - Message: message, - SpanID: spanID, - TraceID: traceID, + Severity: logEvent.severity, + Message: logEvent.message, + SpanID: logEvent.spanID, + TraceID: logEvent.traceID, + Timestamp: logEvent.timestamp, }, }) expectErrorEvents(t, nextHarvest.ErrorEvents, []internal.WantEvent{{ diff --git a/v3/newrelic/internal_app.go b/v3/newrelic/internal_app.go index 4456ea70e..cd888d474 100644 --- a/v3/newrelic/internal_app.go +++ b/v3/newrelic/internal_app.go @@ -581,6 +581,29 @@ func (app *app) RecordCustomMetric(name string, value float64) error { return nil } +var ( + errAppLoggingDisabled = errors.New("log data can not be recorded when application logging is disabled") +) + +// RecordLog implements newrelic.Application's RecordCustomMetric. 
+func (app *app) RecordLog(log *LogData) error {
+	if app.config.Config.HighSecurity {
+		return errHighSecurityEnabled
+	}
+	if !app.config.ApplicationLogging.Enabled {
+		return errAppLoggingDisabled
+	}
+
+	event, err := log.ToLogEvent()
+	if err != nil {
+		return err
+	}
+
+	run, _ := app.getState()
+	app.Consume(run.Reply.RunID, event)
+	return nil
+}
+
 var (
 	_ internal.ServerlessWriter = &app{}
 )
diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go
index 1d5e00cf3..420f0f86d 100644
--- a/v3/newrelic/log_event.go
+++ b/v3/newrelic/log_event.go
@@ -5,7 +5,6 @@ package newrelic
 
 import (
 	"bytes"
-	"encoding/json"
 	"errors"
 	"fmt"
 )
@@ -16,67 +15,85 @@ const (
 	LogTimestampFieldName = "timestamp"
 	LogSpanIDFieldName    = "span.id"
 	LogTraceIDFieldName   = "trace.id"
+	LogSeverityUnknown    = "UNKNOWN"
 
-	maxLogBytes = 32768
+	MaxLogLength = 32768
 )
 
+// for internal use only
+type logEvent struct {
+	priority  priority
+	timestamp int64
+	severity  string
+	message   string
+	spanID    string
+	traceID   string
+}
+
+// For customer use
+type LogData struct {
+	Timestamp int64
+	Severity  string
+	Message   string
+	SpanID    string
+	TraceID   string
+}
+
 // writeJSON prepares JSON in the format expected by the collector.
+func (e *logEvent) WriteJSON(buf *bytes.Buffer) {
+	w := jsonFieldsWriter{buf: buf}
+	buf.WriteByte('{')
+	w.stringField(LogSeverityFieldName, e.severity)
+	w.stringField(LogMessageFieldName, e.message)
+
+	if len(e.spanID) > 0 {
+		w.stringField(LogSpanIDFieldName, e.spanID)
+	}
+	if len(e.traceID) > 0 {
+		w.stringField(LogTraceIDFieldName, e.traceID)
+	}
+
+	w.needsComma = false
+	buf.WriteByte(',')
+	w.intField(LogTimestampFieldName, e.timestamp)
+	buf.WriteByte('}')
 }
 
 // MarshalJSON is used for testing.
func (e *logEvent) MarshalJSON() ([]byte, error) { buf := bytes.NewBuffer(make([]byte, 0, 256)) - e.WriteJSON(buf) - return buf.Bytes(), nil } -type logJson struct { - Timestamp float64 `json:"timestamp"` - Severity string `json:"level"` - Message string `json:"message"` - SpanID string `json:"span.id"` - TraceID string `json:"trace.id"` -} - var ( // regex allows a single word, or number - severityUnknown = "UNKNOWN" - errEmptyLog = errors.New("log event can not be empty") - errLogTooLarge = fmt.Errorf("log can not exceed %d bytes", maxLogBytes) + severityUnknown = "UNKNOWN" + errEmptySeverity = errors.New("severity can not be empty") + errNilLogData = errors.New("log data can not be nil") + errLogMessageTooLarge = fmt.Errorf("log message can not exceed %d bytes", MaxLogLength) ) -func CreateLogEvent(log []byte) (logEvent, error) { - if len(log) > maxLogBytes { - return logEvent{}, errLogTooLarge +func (data *LogData) ToLogEvent() (*logEvent, error) { + if data == nil { + return nil, errNilLogData } - if len(log) == 0 { - return logEvent{}, errEmptyLog + if data.Severity == "" { + return nil, errEmptySeverity } - - l := &logJson{} - err := json.Unmarshal(log, l) - if err != nil { - return logEvent{}, err + if len(data.Message) > MaxLogLength { + return nil, errLogMessageTooLarge } - logEvent := logEvent{ - log: string(log), - severity: l.Severity, - traceID: l.TraceID, + event := logEvent{ + message: data.Message, + severity: data.Severity, + spanID: data.SpanID, + traceID: data.TraceID, + timestamp: data.Timestamp, } - return logEvent, nil + return &event, nil } func (e *logEvent) MergeIntoHarvest(h *harvest) { diff --git a/v3/newrelic/log_event_test.go b/v3/newrelic/log_event_test.go index 5eb997c03..16a94c081 100644 --- a/v3/newrelic/log_event_test.go +++ b/v3/newrelic/log_event_test.go @@ -2,107 +2,54 @@ package newrelic import ( "testing" - "time" ) -func TestCreateLogEvent(t *testing.T) { - tests := []struct { - Timestamp int64 - Severity string - Message string - SpanID string - TraceID string - Attributes map[string]interface{} - }{ - { - Timestamp: 123456, - Severity: "debug", - Message: "test", - SpanID: "123Ifker1", - TraceID: "23000L343", - }, - { - Timestamp: 123456, - Severity: "debug", - Message: "test", - }, - { - Timestamp: 123456, - Severity: "debug", - Message: "test", - }, - { - Timestamp: 123456, - Severity: "debug", - Message: "test", - SpanID: "123Ifker1", - TraceID: "23000L343", - Attributes: map[string]interface{}{ - "one": "attributeOne", - "two": "attributeTwo", - }, - }, +func TestWriteJSON(t *testing.T) { + event := logEvent{ + severity: "INFO", + message: "test message", + timestamp: 123456, } - - for _, test := range tests { - var l []byte - if len(test.Attributes) > 0 { - l = writeLogWithAttributes(test.Severity, test.Message, test.SpanID, test.TraceID, int64(test.Timestamp), test.Attributes) - } else { - l = writeLog(test.Severity, test.Message, test.SpanID, test.TraceID, int64(test.Timestamp)) - } - - logEvent, err := CreateLogEvent(l) - if err != nil { - t.Error(err) - } - - if logEvent.traceID != test.TraceID { - t.Errorf("invalid traceID: expect \"%s\", got \"%s\"", test.TraceID, logEvent.traceID) - } - if logEvent.severity != test.Severity { - t.Errorf("invalid severity: expect \"%s\", got \"%s\"", test.Severity, logEvent.severity) - } + actual, err := event.MarshalJSON() + if err != nil { + t.Error(err) } -} -func TestLogTooLarge(t *testing.T) { - l := make([]byte, maxLogBytes+1) - _, err := CreateLogEvent(l) - if err == nil { - t.Error("Failed to 
catch log too large error") - } - if err != errLogTooLarge { - t.Error(err) + expect := `{"level":"INFO","message":"test message","timestamp":123456}` + actualString := string(actual) + if expect != actualString { + t.Errorf("Log json did not build correctly: expecting %s, got %s", expect, actualString) } } -func TestLogTooSmall(t *testing.T) { - l := []byte{} - _, err := CreateLogEvent(l) - if err == nil { - t.Error("Failed to catch log too large error") +func TestWriteJSONWithTrace(t *testing.T) { + event := logEvent{ + severity: "INFO", + message: "test message", + timestamp: 123456, + traceID: "123Ad234", + spanID: "adf3441", } - if err != errEmptyLog { + actual, err := event.MarshalJSON() + if err != nil { t.Error(err) } -} -func BenchmarkCreateLogEvent(b *testing.B) { - b.ReportAllocs() - json := writeLog("debug", "test message", "", "", time.Now().UnixMilli()) - _, err := CreateLogEvent(json) - if err != nil { - b.Error(err) + expect := `{"level":"INFO","message":"test message","span.id":"adf3441","trace.id":"123Ad234","timestamp":123456}` + actualString := string(actual) + if expect != actualString { + t.Errorf("Log json did not build correctly: expecting %s, got %s", expect, actualString) } } -func BenchmarkCreateLogEvent100(b *testing.B) { - json := writeLog("debug", "test message", "", "", time.Now().UnixMilli()) - for i := 0; i < 100; i++ { - _, err := CreateLogEvent(json) - if err != nil { - b.Error(err) - } - } +func BenchmarkToLogEvent(b *testing.B) { + b.ReportAllocs() + data := LogData{ + Severity: "INFO", + Message: "test message", + Timestamp: 123456, + TraceID: "123Ad234", + SpanID: "adf3441", + } + data.ToLogEvent() } diff --git a/v3/newrelic/log_events_test.go b/v3/newrelic/log_events_test.go index befd78921..f48dc4f16 100644 --- a/v3/newrelic/log_events_test.go +++ b/v3/newrelic/log_events_test.go @@ -4,10 +4,8 @@ package newrelic import ( - "bytes" "fmt" "testing" - "time" "github.com/newrelic/go-agent/v3/internal" ) @@ -36,61 +34,12 @@ func loggingConfigEnabled(limit int) configLogHarvest { } } -func writeLogWithAttributes(severity, message, spanID, traceID string, timestamp int64, attributes map[string]interface{}) []byte { - buf := &bytes.Buffer{} - w := jsonFieldsWriter{buf: buf} - buf.WriteByte('{') - w.stringField(LogSeverityFieldName, severity) - w.stringField(LogMessageFieldName, message) - - if len(spanID) > 0 { - w.stringField(LogSpanIDFieldName, spanID) - } - if len(traceID) > 0 { - w.stringField(LogTraceIDFieldName, traceID) - } - - w.needsComma = false - buf.WriteByte(',') - w.intField(LogTimestampFieldName, timestamp) - if len(attributes) > 0 { - for key, val := range attributes { - writeAttributeValueJSON(&w, key, val) - } - } - - buf.WriteByte('}') - - return w.buf.Bytes() -} - -func writeLog(severity, message, spanID, traceID string, timestamp int64) []byte { - buf := &bytes.Buffer{} - w := jsonFieldsWriter{buf: buf} - buf.WriteByte('{') - w.stringField(LogSeverityFieldName, severity) - w.stringField(LogMessageFieldName, message) - - if len(spanID) > 0 { - w.stringField(LogSpanIDFieldName, spanID) - } - if len(traceID) > 0 { - w.stringField(LogTraceIDFieldName, traceID) - } - - w.needsComma = false - buf.WriteByte(',') - w.intField(LogTimestampFieldName, timestamp) - buf.WriteByte('}') - - return w.buf.Bytes() -} - func sampleLogEvent(priority priority, severity, message string) *logEvent { return &logEvent{ - priority: priority, - severity: severity, - log: string(writeLog(severity, message, "", "", 123456)), + priority: priority, + severity: 
severity, + message: message, + timestamp: 123456, } } @@ -433,13 +382,8 @@ func TestLogEventCollectionDisabled(t *testing.T) { } } -func BenchmarkCollectLogEvent(b *testing.B) { - json := writeLog("debug", "test message", "", "", time.Now().UnixMilli()) - logEvent, err := CreateLogEvent(json) - if err != nil { - b.Error(err) - } - logEventBenchmarkHelper(b, &logEvent) +func BenchmarkAddLogEvent(b *testing.B) { + } func logEventBenchmarkHelper(b *testing.B, event *logEvent) { diff --git a/v3/newrelic/log_writer.go b/v3/newrelic/log_writer.go deleted file mode 100644 index 6eb131847..000000000 --- a/v3/newrelic/log_writer.go +++ /dev/null @@ -1,42 +0,0 @@ -package newrelic - -import ( - "errors" - "io" -) - -type logWriter struct { - app *Application - out io.Writer -} - -func NewLogWriter(app *Application, out io.Writer) (logWriter, error) { - if app == nil || app.app == nil { - return logWriter{}, errors.New("app must not be nil") - } - - return logWriter{app, out}, nil -} - -func (writer logWriter) Write(p []byte) (n int, err error) { - internalApp := writer.app.app - if internalApp.config.ApplicationLogging.Enabled && !internalApp.config.Config.HighSecurity { - logEvent, err := CreateLogEvent(p) - if err != nil { - return 0, err - } - run, _ := internalApp.getState() - - // Run reply is unable to exlpicitly disable logging features, so we do not check it. - // If the user wants to disable logging on the server side, they can only set the - // log event limit to zero, which will set the harvest limit for log events to zero. - - internalApp.Consume(run.Reply.RunID, &logEvent) - } - - if writer.out != nil { - return writer.out.Write(p) - } else { - return len(p), nil - } -} diff --git a/v3/newrelic/log_writer_test.go b/v3/newrelic/log_writer_test.go deleted file mode 100644 index 51aca4431..000000000 --- a/v3/newrelic/log_writer_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package newrelic - -import ( - "testing" - "time" -) - -func BenchmarkWrite(b *testing.B) { - app, err := NewApplication( - ConfigAppLogForwardingEnabled(true), - ) - if err != nil { - b.Error(err) - } - - json := writeLog("debug", "test message", "", "", time.Now().UnixMilli()) - writer, err := NewLogWriter(app, nil) - if err != nil { - b.Error(err) - } - - b.ReportAllocs() - b.ResetTimer() - writer.Write(json) -} From cc5e7ccd8d6c37770cfcd54a42cc3cc8ca8c8e94 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 24 May 2022 16:41:58 -0400 Subject: [PATCH 05/47] RecordLog documentation --- v3/newrelic/application.go | 11 +++++++++++ v3/newrelic/internal_app.go | 2 +- v3/newrelic/log_event.go | 7 ++++++- 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/v3/newrelic/application.go b/v3/newrelic/application.go index 3cf6f5f39..32006f596 100644 --- a/v3/newrelic/application.go +++ b/v3/newrelic/application.go @@ -74,6 +74,17 @@ func (app *Application) RecordCustomMetric(name string, value float64) { } } +// RecordLog records the data from a single log line. +// This consumes a LogData object pointer with the following expectations: +// +// Timestamp: Required; An int64 unix millisecond timestamp +// Severity: Required; A string log severity or level taken from logging framework. +// If unknown, must be set to "UNKNOWN". +// Message: Optional; A string containing the message body of a log. +// SpanID: Optional; A string containing the UUID of a span. +// TraceID: Optional; A string containing the UUID of a transaction trace. Log events +// inherit their priority from transaction traces when possible. 
Failing to +// include this when necessary, may result in important logs being dropped. func (app *Application) RecordLog(logEvent *LogData) { if nil == app { return diff --git a/v3/newrelic/internal_app.go b/v3/newrelic/internal_app.go index cd888d474..6a9f90b02 100644 --- a/v3/newrelic/internal_app.go +++ b/v3/newrelic/internal_app.go @@ -585,7 +585,7 @@ var ( errAppLoggingDisabled = errors.New("log data can not be recorded when application logging is disabled") ) -// RecordLog implements newrelic.Application's RecordCustomMetric. +// RecordLog implements newrelic.Application's RecordLog. func (app *app) RecordLog(log *LogData) error { if app.config.Config.HighSecurity { return errHighSecurityEnabled diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go index 420f0f86d..8057474c1 100644 --- a/v3/newrelic/log_event.go +++ b/v3/newrelic/log_event.go @@ -68,7 +68,9 @@ func (e *logEvent) MarshalJSON() ([]byte, error) { var ( // regex allows a single word, or number - severityUnknown = "UNKNOWN" + severityUnknown = "UNKNOWN" + + errEmptyTimestamp = errors.New("timestamp can not be empty") errEmptySeverity = errors.New("severity can not be empty") errNilLogData = errors.New("log data can not be nil") errLogMessageTooLarge = fmt.Errorf("log message can not exceed %d bytes", MaxLogLength) @@ -84,6 +86,9 @@ func (data *LogData) ToLogEvent() (*logEvent, error) { if len(data.Message) > MaxLogLength { return nil, errLogMessageTooLarge } + if data.Timestamp == 0 { + return nil, errEmptyTimestamp + } event := logEvent{ message: data.Message, From 6b7c071e85d81670868b31f718b34cee72b64e33 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Thu, 26 May 2022 10:19:40 -0400 Subject: [PATCH 06/47] Supportability Metrics --- v3/newrelic/app_run.go | 20 +++++++++---- v3/newrelic/config.go | 49 +++++++++++++++++--------------- v3/newrelic/config_options.go | 30 ++++++++++++++++++++ v3/newrelic/config_test.go | 4 ++- v3/newrelic/harvest.go | 18 ++++++++++-- v3/newrelic/harvest_test.go | 45 ++++++++++++++++++++++-------- v3/newrelic/internal_app.go | 5 +--- v3/newrelic/log_events.go | 32 ++++++++------------- v3/newrelic/log_events_test.go | 12 ++++---- v3/newrelic/metric_names.go | 51 ++++++++++++++++++++++++++++------ 10 files changed, 185 insertions(+), 81 deletions(-) diff --git a/v3/newrelic/app_run.go b/v3/newrelic/app_run.go index a8e3f175e..28973e7b8 100644 --- a/v3/newrelic/app_run.go +++ b/v3/newrelic/app_run.go @@ -122,11 +122,7 @@ func newAppRun(config config, reply *internal.ConnectReply) *appRun { MaxCustomEvents: run.MaxCustomEvents(), MaxErrorEvents: run.MaxErrorEvents(), MaxSpanEvents: run.MaxSpanEvents(), - LoggingConfig: configLogHarvest{ - config.ApplicationLogging.Forwarding.Enabled, - config.ApplicationLogging.Metrics.Enabled, - run.MaxLogEvents(), - }, + LoggingConfig: run.LoggingConfig(), } return run @@ -207,6 +203,20 @@ func (run *appRun) MaxErrorEvents() int { return run.limit(internal.MaxErrorEvents, run.ptrErrorEvents) } +func (run *appRun) LoggingConfig() (config loggingConfig) { + logging := run.Config.ApplicationLogging + + config.loggingEnabled = logging.Enabled + config.collectEvents = logging.Enabled && logging.Forwarding.Enabled && !run.Config.HighSecurity + config.maxLogEvents = run.MaxLogEvents() + config.collectMetrics = logging.Enabled && logging.Metrics.Enabled + + //TODO + config.localEnrichment = false + + return config +} + // MaxSpanEvents returns the reservoir limit for collected span events, // which will be the default or the user's configured size (if 
any), but // may be capped to the maximum allowed by the collector. diff --git a/v3/newrelic/config.go b/v3/newrelic/config.go index d5d369e0a..c0801b8bb 100644 --- a/v3/newrelic/config.go +++ b/v3/newrelic/config.go @@ -300,28 +300,8 @@ type Config struct { } } - // ApplicationLogging contains settings which control the capture and sending - // of log event data - ApplicationLogging struct { - // If this is disabled, all sub-features are disabled; - // if it is enabled, the individual sub-feature configurations take effect. - // MAY accomplish this by not installing instrumentation, or by early-return/no-op as necessary for an agent. - Enabled bool - // Forwarding controls log forwarding to New Relic One - Forwarding struct { - // Toggles whether the agent gathers log records for sending to New Relic. - Enabled bool - // Number of log records to send per minute to New Relic. - // Controls the overall memory consumption when using log forwarding. - // SHOULD be sent as part of the harvest_limits on Connect. - MaxSamplesStored int - } - Metrics struct { - // Toggles whether the agent gathers the the user facing Logging/lines and Logging/lines/{SEVERITY} - // Logging Metrics used in the Logs chart on the APM Summary page. - Enabled bool - } - } + // Config Settings for Logs in Context features + ApplicationLogging ApplicationLogging // Attributes controls which attributes are enabled and disabled globally. // This setting affects all attribute destinations: Transaction Events, @@ -372,6 +352,31 @@ type Config struct { Error error } +// ApplicationLogging contains settings which control the capture and sending +// of log event data +type ApplicationLogging struct { + // If this is disabled, all sub-features are disabled; + // if it is enabled, the individual sub-feature configurations take effect. + // MAY accomplish this by not installing instrumentation, or by early-return/no-op as necessary for an agent. + Enabled bool + // Name of instrumented frameworks enabled + Frameworks []string + // Forwarding controls log forwarding to New Relic One + Forwarding struct { + // Toggles whether the agent gathers log records for sending to New Relic. + Enabled bool + // Number of log records to send per minute to New Relic. + // Controls the overall memory consumption when using log forwarding. + // SHOULD be sent as part of the harvest_limits on Connect. + MaxSamplesStored int + } + Metrics struct { + // Toggles whether the agent gathers the the user facing Logging/lines and Logging/lines/{SEVERITY} + // Logging Metrics used in the Logs chart on the APM Summary page. + Enabled bool + } +} + // AttributeDestinationConfig controls the attributes sent to each destination. 
// For more information, see: // https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-data/agent-attributes diff --git a/v3/newrelic/config_options.go b/v3/newrelic/config_options.go index 147d05fed..0470da053 100644 --- a/v3/newrelic/config_options.go +++ b/v3/newrelic/config_options.go @@ -59,6 +59,9 @@ func ConfigAppLogForwardingEnabled(enabled bool) ConfigOption { } } +// ConfigAppLogMetricsEnabled enables or disables the collection of metrics +// data for logs seen by an instrumented logging framework +// default: true func ConfigAppLogMetricsEnabled(enabled bool) ConfigOption { return func(cfg *Config) { if enabled == true { @@ -70,6 +73,8 @@ func ConfigAppLogMetricsEnabled(enabled bool) ConfigOption { } } +// ConfigAppLogEnabled enables or disables all application logging features +// and data collection func ConfigAppLogEnabled(enabled bool) ConfigOption { return func(cfg *Config) { if enabled == true { @@ -80,6 +85,31 @@ func ConfigAppLogEnabled(enabled bool) ConfigOption { } } +const ( + ZerologFrameworkName = "Zerolog" +) + +// ConfigZerologPluginEnabled enables all supported features +// for the zerolog logs in context plugin. This will not alter +// the max samples stored for logs. +// +// Log Enrichment is currently not supported, and will not be +// enabled. +func ConfigZerologPluginEnabled(enabled bool) ConfigOption { + return func(cfg *Config) { + if enabled == true { + cfg.ApplicationLogging.Enabled = true + cfg.ApplicationLogging.Forwarding.Enabled = true + cfg.ApplicationLogging.Metrics.Enabled = true + if cfg.ApplicationLogging.Frameworks == nil { + cfg.ApplicationLogging.Frameworks = []string{ZerologFrameworkName} + } else { + cfg.ApplicationLogging.Frameworks = append(cfg.ApplicationLogging.Frameworks, ZerologFrameworkName) + } + } + } +} + // ConfigAppLogForwardingMaxSamplesStored allows users to set the maximium number of // log events the agent is allowed to collect and store in a given harvest cycle. 
func ConfigAppLogForwardingMaxSamplesStored(maxSamplesStored int) ConfigOption { diff --git a/v3/newrelic/config_test.go b/v3/newrelic/config_test.go index 7ad8e77d4..64577bbe3 100644 --- a/v3/newrelic/config_test.go +++ b/v3/newrelic/config_test.go @@ -130,11 +130,12 @@ func TestCopyConfigReferenceFieldsPresent(t *testing.T) { "settings":{ "AppName":"my appname", "ApplicationLogging": { - "Enabled": true, + "Enabled":true, "Forwarding": { "Enabled": false, "MaxSamplesStored": 10000 }, + "Frameworks": null, "Metrics": { "Enabled": true } @@ -319,6 +320,7 @@ func TestCopyConfigReferenceFieldsAbsent(t *testing.T) { "Enabled": false, "MaxSamplesStored": 10000 }, + "Frameworks": null, "Metrics": { "Enabled": true } diff --git a/v3/newrelic/harvest.go b/v3/newrelic/harvest.go index 9760ef38d..6302c2694 100644 --- a/v3/newrelic/harvest.go +++ b/v3/newrelic/harvest.go @@ -190,7 +190,7 @@ func (h *harvest) Payloads(splitLargeTxnEvents bool) (ps []payloadCreator) { type harvestConfig struct { ReportPeriods map[harvestTypes]time.Duration CommonAttributes commonAttributes - LoggingConfig configLogHarvest + LoggingConfig loggingConfig MaxSpanEvents int MaxCustomEvents int MaxErrorEvents int @@ -228,8 +228,17 @@ func createTraceObserverMetrics(to traceObserver, metrics *metricTable) { } } +func createAppLoggingSupportabilityMetrics(lc *loggingConfig, frameworks []string, metrics *metricTable) { + lc.connectMetrics(metrics) + for _, framework := range frameworks { + loggingFrameworkMetric(metrics, framework) + } +} + // CreateFinalMetrics creates extra metrics at harvest time. -func (h *harvest) CreateFinalMetrics(reply *internal.ConnectReply, hc harvestConfig, to traceObserver) { +func (h *harvest) CreateFinalMetrics(run *appRun, to traceObserver) { + reply := run.Reply + hc := run.harvestConfig if nil == h { return } @@ -253,6 +262,7 @@ func (h *harvest) CreateFinalMetrics(reply *internal.ConnectReply, hc harvestCon createTraceObserverMetrics(to, h.Metrics) createTrackUsageMetrics(h.Metrics) + createAppLoggingSupportabilityMetrics(&hc.LoggingConfig, run.Config.ApplicationLogging.Frameworks, h.Metrics) h.Metrics = h.Metrics.ApplyRules(reply.MetricRules) } @@ -357,9 +367,11 @@ var ( MaxSpanEvents: defaultMaxSpanEvents, MaxCustomEvents: internal.MaxCustomEvents, MaxErrorEvents: internal.MaxErrorEvents, - LoggingConfig: configLogHarvest{ + LoggingConfig: loggingConfig{ + true, false, true, + false, internal.MaxLogEvents, }, } diff --git a/v3/newrelic/harvest_test.go b/v3/newrelic/harvest_test.go index 1d811bd3b..8ce8f2584 100644 --- a/v3/newrelic/harvest_test.go +++ b/v3/newrelic/harvest_test.go @@ -20,11 +20,7 @@ func generateTestHarvestConfig() harvestConfig { cfg := dfltHarvestCfgr // Enable logging features for testing (not enabled by default) - loggingCfg := configLogHarvest{ - true, - true, - internal.MaxLogEvents, - } + loggingCfg := loggingConfigEnabled(internal.MaxLogEvents) cfg.LoggingConfig = loggingCfg return cfg } @@ -87,9 +83,16 @@ func TestCreateFinalMetrics(t *testing.T) { // If the harvest or metrics is nil then CreateFinalMetrics should // not panic. 
var nilHarvest *harvest - nilHarvest.CreateFinalMetrics(nil, testHarvestCfgr, nil) + + config := config{Config: defaultConfig()} + config.ApplicationLogging.Frameworks = append(config.ApplicationLogging.Frameworks, ZerologFrameworkName) + + run := newAppRun(config, internal.ConnectReplyDefaults()) + run.harvestConfig = testHarvestCfgr + + nilHarvest.CreateFinalMetrics(run, nil) emptyHarvest := &harvest{} - emptyHarvest.CreateFinalMetrics(nil, testHarvestCfgr, nil) + emptyHarvest.CreateFinalMetrics(run, nil) replyJSON := []byte(`{"return_value":{ "metric_name_rules":[{ @@ -124,7 +127,9 @@ func TestCreateFinalMetrics(t *testing.T) { } h := newHarvest(now, cfgr) h.Metrics.addCount("rename_me", 1.0, unforced) - h.CreateFinalMetrics(reply, cfgr, nil) + run = newAppRun(config, reply) + run.harvestConfig = cfgr + h.CreateFinalMetrics(run, nil) expectMetrics(t, h.Metrics, []internal.WantMetric{ {Name: instanceReporting, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "been_renamed", Scope: "", Forced: false, Data: []float64{1.0, 0, 0, 0, 0, 0}}, @@ -137,6 +142,11 @@ func TestCreateFinalMetrics(t *testing.T) { {Name: "Supportability/Go/Version/" + Version, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Go/Runtime/Version/" + goVersionSimple, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Go/gRPC/Version/" + grpcVersion, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/Golang/Zerolog", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/Forwarding/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/Metrics/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/LocalDecorating/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, }) // Test again without any metric rules or event_harvest_config. 
@@ -147,9 +157,11 @@ func TestCreateFinalMetrics(t *testing.T) { if err != nil { t.Fatal(err) } + run = newAppRun(config, reply) + run.harvestConfig = testHarvestCfgr h = newHarvest(now, testHarvestCfgr) h.Metrics.addCount("rename_me", 1.0, unforced) - h.CreateFinalMetrics(reply, testHarvestCfgr, nil) + h.CreateFinalMetrics(run, nil) expectMetrics(t, h.Metrics, []internal.WantMetric{ {Name: instanceReporting, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "rename_me", Scope: "", Forced: false, Data: []float64{1.0, 0, 0, 0, 0, 0}}, @@ -162,6 +174,11 @@ func TestCreateFinalMetrics(t *testing.T) { {Name: "Supportability/Go/Version/" + Version, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Go/Runtime/Version/" + goVersionSimple, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Go/gRPC/Version/" + grpcVersion, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/Golang/Zerolog", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/Forwarding/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/Metrics/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/LocalDecorating/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, }) } @@ -176,15 +193,17 @@ func TestCreateFinalMetricsTraceObserver(t *testing.T) { t.Fatal(err) } + run := newAppRun(config{Config: defaultConfig()}, reply) + run.harvestConfig = testHarvestCfgr + to, _ := newTraceObserver( internal.AgentRunID("runid"), nil, observerConfig{ log: logger.ShimLogger{}, }, ) - h := newHarvest(now, testHarvestCfgr) - h.CreateFinalMetrics(reply, testHarvestCfgr, to) + h.CreateFinalMetrics(run, to) expectMetrics(t, h.Metrics, []internal.WantMetric{ {Name: instanceReporting, Scope: "", Forced: true, Data: nil}, {Name: "Supportability/EventHarvest/ReportPeriod", Scope: "", Forced: true, Data: nil}, @@ -193,6 +212,10 @@ func TestCreateFinalMetricsTraceObserver(t *testing.T) { {Name: "Supportability/EventHarvest/ErrorEventData/HarvestLimit", Scope: "", Forced: true, Data: nil}, {Name: "Supportability/EventHarvest/SpanEventData/HarvestLimit", Scope: "", Forced: true, Data: nil}, {Name: "Supportability/EventHarvest/LogEventData/HarvestLimit", Scope: "", Forced: true, Data: nil}, + {Name: "Supportability/Logging/Golang", Scope: "", Forced: true, Data: nil}, + {Name: "Supportability/Logging/Forwarding/Golang", Scope: "", Forced: true, Data: nil}, + {Name: "Supportability/Logging/Metrics/Golang", Scope: "", Forced: true, Data: nil}, + {Name: "Supportability/Logging/LocalDecorating/Golang", Scope: "", Forced: true, Data: nil}, {Name: "Supportability/Go/Version/" + Version, Scope: "", Forced: true, Data: nil}, {Name: "Supportability/Go/Runtime/Version/" + goVersionSimple, Scope: "", Forced: true, Data: nil}, {Name: "Supportability/Go/gRPC/Version/" + grpcVersion, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, diff --git a/v3/newrelic/internal_app.go b/v3/newrelic/internal_app.go index 6a9f90b02..ee8361385 100644 --- a/v3/newrelic/internal_app.go +++ b/v3/newrelic/internal_app.go @@ -66,7 +66,7 @@ type app struct { } func (app *app) doHarvest(h *harvest, harvestStart time.Time, run *appRun) { - h.CreateFinalMetrics(run.Reply, run.harvestConfig, 
app.getObserver()) + h.CreateFinalMetrics(run, app.getObserver()) payloads := h.Payloads(app.config.DistributedTracer.Enabled) for _, p := range payloads { @@ -587,9 +587,6 @@ var ( // RecordLog implements newrelic.Application's RecordLog. func (app *app) RecordLog(log *LogData) error { - if app.config.Config.HighSecurity { - return errHighSecurityEnabled - } if !app.config.ApplicationLogging.Enabled { return errAppLoggingDisabled } diff --git a/v3/newrelic/log_events.go b/v3/newrelic/log_events.go index 6a5942a38..e3ceafda8 100644 --- a/v3/newrelic/log_events.go +++ b/v3/newrelic/log_events.go @@ -11,12 +11,6 @@ import ( "github.com/newrelic/go-agent/v3/internal/jsonx" ) -type configLogHarvest struct { - collectEvents bool - collectMetrics bool - maxLogEvents int -} - type commonAttributes struct { entityGUID string entityName string @@ -30,7 +24,7 @@ type logEvents struct { failedHarvests int severityCount map[string]int commonAttributes - config configLogHarvest + config loggingConfig logs logEventHeap } @@ -41,21 +35,16 @@ func (events *logEvents) NumSeen() float64 { return float64(events.numSeen) } func (events *logEvents) NumSaved() float64 { return float64(len(events.logs)) } func (events *logEvents) RecordLoggingMetrics(metrics *metricTable, forced metricForce) { - // Allows us to disable the reporting of metrics for logs - if !events.config.collectMetrics { - return - } - // avoid nil pointers during tests - if metrics == nil { - return + if events.config.collectMetrics && metrics != nil { + metrics.addCount(logsSeen, events.NumSeen(), forced) + for k, v := range events.severityCount { + severitySeen := logsSeen + "/" + k + metrics.addCount(severitySeen, float64(v), forced) + } } - metrics.addCount(logsSeen, events.NumSeen(), forced) - metrics.addCount(logsDropped, events.NumSeen()-events.NumSaved(), forced) - - for k, v := range events.severityCount { - severitySeen := logsSeen + "/" + k - metrics.addCount(severitySeen, float64(v), forced) + if events.config.collectEvents { + metrics.addCount(logsDropped, events.NumSeen()-events.NumSaved(), forced) } } @@ -70,7 +59,7 @@ func (h logEventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } func (h logEventHeap) Push(x interface{}) {} func (h logEventHeap) Pop() interface{} { return nil } -func newLogEvents(ca commonAttributes, loggingConfig configLogHarvest) *logEvents { +func newLogEvents(ca commonAttributes, loggingConfig loggingConfig) *logEvents { return &logEvents{ commonAttributes: ca, config: loggingConfig, @@ -84,6 +73,7 @@ func (events *logEvents) capacity() int { } func (events *logEvents) Add(e *logEvent) { + // always collect this but do not report logging metrics when disabled events.numSeen++ events.severityCount[e.severity] += 1 diff --git a/v3/newrelic/log_events_test.go b/v3/newrelic/log_events_test.go index f48dc4f16..9f37362de 100644 --- a/v3/newrelic/log_events_test.go +++ b/v3/newrelic/log_events_test.go @@ -26,11 +26,13 @@ var ( unknownLevel = "UNKNOWN" ) -func loggingConfigEnabled(limit int) configLogHarvest { - return configLogHarvest{ - collectEvents: true, - collectMetrics: true, - maxLogEvents: limit, +func loggingConfigEnabled(limit int) loggingConfig { + return loggingConfig{ + loggingEnabled: true, + localEnrichment: true, + collectEvents: true, + collectMetrics: true, + maxLogEvents: limit, } } diff --git a/v3/newrelic/metric_names.go b/v3/newrelic/metric_names.go index 9530cf276..7bb053e23 100644 --- a/v3/newrelic/metric_names.go +++ b/v3/newrelic/metric_names.go @@ -3,6 +3,8 @@ package newrelic +import 
"fmt" + const ( apdexRollup = "Apdex" apdexPrefix = "Apdex/" @@ -64,14 +66,51 @@ const ( supportLogEventLimit = "Supportability/EventHarvest/LogEventData/HarvestLimit" // Logging Metrics https://source.datanerd.us/agents/agent-specs/pull/570/files + // User Facing logsSeen = "Logging/lines" logsDropped = "Logging/Forwarding/Dropped" - supportLoggingMetrics = "Supportability/Logging/Metrics/Golang/" - supportLogForwarding = "Supportability/Logging/Forwarding/Golang/" - supportLocalLogDecorating = "Supportability/Logging/Decorating/Golang/" + // Supportability (at connect) + supportLogging = "Supportability/Logging/Golang" + supportLoggingMetrics = "Supportability/Logging/Metrics/Golang" + supportLogForwarding = "Supportability/Logging/Forwarding/Golang" + supportLogDecorating = "Supportability/Logging/LocalDecorating/Golang" + + // Supportability (once per harvest) + logEventsSeen = "Supportability/Logging/Forwarding/Seen" + logEventsSent = "Supportability/Logging/Forwarding/Sent" ) +func supportMetric(metrics *metricTable, b bool, metricName string) { + if b { + metrics.addSingleCount(metricName, forced) + } +} + +// logSupport contains final configuration settings for +// logging features for log data generation and supportability +// metrics generation. +type loggingConfig struct { + loggingEnabled bool // application logging features are enabled + collectEvents bool // collection of log event data is enabled + collectMetrics bool // collection of log metric data is enabled + localEnrichment bool // local log enrichment is enabled + maxLogEvents int // maximum number of log events allowed to be collected +} + +// Logging metrics that are generated at connect response +func (cfg loggingConfig) connectMetrics(ms *metricTable) { + supportMetric(ms, cfg.loggingEnabled, supportLogging) + supportMetric(ms, cfg.collectEvents, supportLogForwarding) + supportMetric(ms, cfg.collectMetrics, supportLoggingMetrics) + supportMetric(ms, cfg.localEnrichment, supportLogDecorating) +} + +func loggingFrameworkMetric(ms *metricTable, framework string) { + name := fmt.Sprintf("%s/%s", supportLogging, framework) + supportMetric(ms, true, name) +} + // distributedTracingSupport is used to track distributed tracing activity for // supportability. 
type distributedTracingSupport struct { @@ -102,12 +141,6 @@ func (dts distributedTracingSupport) isEmpty() bool { return (distributedTracingSupport{}) == dts } -func supportMetric(metrics *metricTable, b bool, metricName string) { - if b { - metrics.addSingleCount(metricName, forced) - } -} - func (dts distributedTracingSupport) createMetrics(ms *metricTable) { // Distributed Tracing Supportability Metrics supportMetric(ms, dts.AcceptPayloadSuccess, "Supportability/DistributedTrace/AcceptPayload/Success") From 22adddd645557ae37f705d4e0d08404dfff33803 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Thu, 26 May 2022 11:20:11 -0400 Subject: [PATCH 07/47] button up zerolog plugin --- .../logcontext-v2/nrzerolog/Readme.md | 64 +++++++++++++++++++ .../logcontext-v2/nrzerolog/example/main.go | 53 +++++++++++++++ .../logcontext-v2/nrzerolog/hook.go | 5 +- 3 files changed, 120 insertions(+), 2 deletions(-) create mode 100644 v3/integrations/logcontext-v2/nrzerolog/Readme.md create mode 100644 v3/integrations/logcontext-v2/nrzerolog/example/main.go diff --git a/v3/integrations/logcontext-v2/nrzerolog/Readme.md b/v3/integrations/logcontext-v2/nrzerolog/Readme.md new file mode 100644 index 000000000..628d5b263 --- /dev/null +++ b/v3/integrations/logcontext-v2/nrzerolog/Readme.md @@ -0,0 +1,64 @@ +# Zerolog In Context + +This plugin for zerolog implements the logs in context tooling for the go agent. This hook +function can be added to any zerolog logger, and will automatically collect the log data +from zerolog, and send it to New Relic through the go agent. The following Logging features +are supported by this plugin in the current release: + +| Logging Feature | Supported | +| ------- | --------- | +| Forwarding | :heavy_check_mark: | +| Metrics | :heavy_check_mark: | +| Enrichment | :x: | + +## Installation + +The nrzerolog plugin, and the go-agent need to be integrated into your code +in order to use this tool. The following example will shows how to install +and set up your code to send logs to new relic from zerolog. + +```go + +import ( + "github.com/rs/zerolog" + "github.com/newrelic/go-agent/v3/newrelic" + "github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrzerolog" +) + +func main() { + baseLogger := zerolog.New(os.Stdout) + + app, err := newrelic.NewApplication( + newrelic.ConfigFromEnvironment(), + newrelic.ConfigAppName("NRZerolog Example"), + newrelic.ConfigInfoLogger(os.Stdout), + newrelic.ConfigDistributedTracerEnabled(true), + ) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + nrHook := nrzerolog.NewRelicHook{ + App: app, + } + + nrLogger := baseLogger.Hook(nrHook) + nrLogger.Info().Msg("Hello World") +} +``` + +## Usage + +When zerolog hooks a logger object, a copy of that logger is made and the +hook is appended to it. Zerolog will *Never* check if you duplicate information +in your logger, so it is very important to treat each logger as an immutable step +in how you generate your logs. If you apply a hook function to a logger that is +already hooked, it will capture all logs generated from that logger twice. +To avoid that issue, we recommend that you create a base logger object with the +formatting settings you prefer, then new hooked loggers from that base logger. + +The plugin captures the log level, and the message from zerolog. It will generate a +timestamp at the moment the hook function is called in zerolog. 
In most cases, this +timestamp will be the same as the time posted in zerolog, however in some corner +cases, a very small amount of offset is possible. diff --git a/v3/integrations/logcontext-v2/nrzerolog/example/main.go b/v3/integrations/logcontext-v2/nrzerolog/example/main.go new file mode 100644 index 000000000..cc80e07aa --- /dev/null +++ b/v3/integrations/logcontext-v2/nrzerolog/example/main.go @@ -0,0 +1,53 @@ +package main + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrzerolog" + "github.com/newrelic/go-agent/v3/newrelic" + "github.com/rs/zerolog" +) + +func main() { + baseLogger := zerolog.New(os.Stdout) + + app, err := newrelic.NewApplication( + newrelic.ConfigFromEnvironment(), + newrelic.ConfigAppName("NRZerolog Example"), + newrelic.ConfigInfoLogger(os.Stdout), + newrelic.ConfigDistributedTracerEnabled(true), + ) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + app.WaitForConnection(5 * time.Second) + + nrHook := nrzerolog.NewRelicHook{ + App: app, + } + + nrLogger := baseLogger.Hook(nrHook) + nrLogger.Info().Msg("Hello World") + + // With transaction context + txn := app.StartTransaction("My Transaction") + ctx := newrelic.NewContext(context.Background(), txn) + + nrTxnHook := nrzerolog.NewRelicHook{ + App: app, + Context: ctx, + } + + txnLogger := baseLogger.Hook(nrTxnHook) + txnLogger.Debug().Msg("This is a transaction log") + + txn.End() + + nrLogger.Info().Msg("Goodbye") + app.Shutdown(10 * time.Second) +} diff --git a/v3/integrations/logcontext-v2/nrzerolog/hook.go b/v3/integrations/logcontext-v2/nrzerolog/hook.go index 8c1b4678a..d0de49de9 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/hook.go +++ b/v3/integrations/logcontext-v2/nrzerolog/hook.go @@ -8,12 +8,12 @@ import ( "github.com/rs/zerolog" ) -type Hook struct { +type NewRelicHook struct { App *newrelic.Application Context context.Context } -func (h Hook) Run(e *zerolog.Event, level zerolog.Level, msg string) { +func (h NewRelicHook) Run(e *zerolog.Event, level zerolog.Level, msg string) { logLevel := "" if level == zerolog.NoLevel { logLevel = newrelic.LogSeverityUnknown @@ -36,5 +36,6 @@ func (h Hook) Run(e *zerolog.Event, level zerolog.Level, msg string) { SpanID: spanID, TraceID: traceID, } + h.App.RecordLog(&data) } From 514119bedc73ae1e9a4868291ab893df2208f5c6 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Thu, 26 May 2022 12:06:35 -0400 Subject: [PATCH 08/47] correct config settings in readme and example --- v3/integrations/logcontext-v2/nrzerolog/Readme.md | 15 +++++++++++++-- .../logcontext-v2/nrzerolog/example/main.go | 1 + 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/v3/integrations/logcontext-v2/nrzerolog/Readme.md b/v3/integrations/logcontext-v2/nrzerolog/Readme.md index 628d5b263..272b91611 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/Readme.md +++ b/v3/integrations/logcontext-v2/nrzerolog/Readme.md @@ -14,8 +14,16 @@ are supported by this plugin in the current release: ## Installation The nrzerolog plugin, and the go-agent need to be integrated into your code -in order to use this tool. The following example will shows how to install -and set up your code to send logs to new relic from zerolog. +in order to use this tool. Make sure to set `newrelic.ConfigZerologPluginEnabled(true)` +in your config settings for the application. This will enable log forwarding and metrics +in the go agent, as well as let the agent know that the zerolog pluging is in use. 
+If you want to disable metrics, set `newrelic.ConfigAppLogMetricsEnabled(false),`.
+If you want to disable log forwarding, set `newrelic.ConfigAppLogForwardingEnabled(false),`.
+Note that the agent sets the default number of logs per harvest cycle to 10000, but that
+number may be reduced by the server. You can manually set this number by setting
+`newrelic.ConfigAppLogForwardingMaxSamplesStored(123),`.
+
+The following example shows how to install and set up your code to send logs to New Relic from zerolog.
 
 ```go
 
@@ -32,6 +40,7 @@ func main() {
 		newrelic.ConfigFromEnvironment(),
 		newrelic.ConfigAppName("NRZerolog Example"),
 		newrelic.ConfigInfoLogger(os.Stdout),
+		newrelic.ConfigZerologPluginEnabled(true),
 		newrelic.ConfigDistributedTracerEnabled(true),
 	)
 	if nil != err {
@@ -62,3 +71,5 @@ The plugin captures the log level, and the message from zerolog. It will generat
 timestamp at the moment the hook function is called in zerolog. In most cases, this
 timestamp will be the same as the time posted in zerolog, however in some corner
 cases, a very small amount of offset is possible.
+
+
diff --git a/v3/integrations/logcontext-v2/nrzerolog/example/main.go b/v3/integrations/logcontext-v2/nrzerolog/example/main.go
index cc80e07aa..0681d153e 100644
--- a/v3/integrations/logcontext-v2/nrzerolog/example/main.go
+++ b/v3/integrations/logcontext-v2/nrzerolog/example/main.go
@@ -18,6 +18,7 @@ func main() {
 		newrelic.ConfigFromEnvironment(),
 		newrelic.ConfigAppName("NRZerolog Example"),
 		newrelic.ConfigInfoLogger(os.Stdout),
+		newrelic.ConfigZerologPluginEnabled(true),
 		newrelic.ConfigDistributedTracerEnabled(true),
 	)
 	if nil != err {

From d71870a5e224eb796c74c80acb95384816481a02 Mon Sep 17 00:00:00 2001
From: Emilio Garcia
Date: Thu, 26 May 2022 12:07:47 -0400
Subject: [PATCH 09/47] remove changes to short-lived-process-example

---
 v3/examples/short-lived-process/main.go | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/v3/examples/short-lived-process/main.go b/v3/examples/short-lived-process/main.go
index 99c30f494..61687488d 100644
--- a/v3/examples/short-lived-process/main.go
+++ b/v3/examples/short-lived-process/main.go
@@ -13,12 +13,9 @@ import (
 
 func main() {
 	app, err := newrelic.NewApplication(
-		newrelic.ConfigAppName("zerolog test"),
-		newrelic.ConfigAppLogForwardingEnabled(true),
-		newrelic.ConfigDistributedTracerEnabled(true),
-		newrelic.ConfigAppLogMetricsEnabled(true),
+		newrelic.ConfigAppName("Short Lived App"),
 		newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")),
-		newrelic.ConfigInfoLogger(os.Stdout),
+		newrelic.ConfigDebugLogger(os.Stdout),
 	)
 	if nil != err {
 		fmt.Println(err)
@@ -41,6 +38,7 @@ func main() {
 			"color": task,
 		})
 	}
+
 	// Shut down the application to flush data to New Relic.
 	app.Shutdown(10 * time.Second)
 }

From 96b4b584cd4ebd8fc5400a4cd672e0024f51c4ec Mon Sep 17 00:00:00 2001
From: Emilio Garcia
Date: Tue, 24 May 2022 16:56:31 -0400
Subject: [PATCH 10/47] stress-tests

Creates a suite of tests to measure and benchmark the performance of
features of the Go agent at scale.
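The intended use of the suite is sketched below, assuming a connected
*newrelic.Application named app; NRZerolog, Benchmark, and Sprint are the
helpers defined in v3/scale-tests/main.go, and the event counts are arbitrary
examples.

	// Time 6 sets of 1000 zerolog-plugin log events each, then print the
	// per-set and average timings.
	bench := NRZerolog(1000, 6)
	bench.Benchmark(app)
	fmt.Println(bench.Sprint())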
--- v3/scale-tests/main.go | 176 +++++++++++++++++++++++++++++++ v3/scale-tests/normalize.go | 76 +++++++++++++ v3/scale-tests/normalize_test.go | 86 +++++++++++++++ 3 files changed, 338 insertions(+) create mode 100644 v3/scale-tests/main.go create mode 100644 v3/scale-tests/normalize.go create mode 100644 v3/scale-tests/normalize_test.go diff --git a/v3/scale-tests/main.go b/v3/scale-tests/main.go new file mode 100644 index 000000000..dbf9d86e8 --- /dev/null +++ b/v3/scale-tests/main.go @@ -0,0 +1,176 @@ +// Copyright 2020 New Relic Corporation. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "fmt" + "os" + "time" + + "github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrzerolog" + "github.com/newrelic/go-agent/v3/newrelic" + "github.com/rs/zerolog" +) + +var ( + TestNRZL = "Zerolog Plugin" + TestZerolog = "Zerolog" + TestCustomEvents = "Custom Events" +) + +// Zerolog Test +func Zerolog(numEvents, numRuns int) Benchmark { + return Benchmark{ + TestZerolog, + numEvents, + numRuns, + make([]int64, numRuns), + } +} + +func (bench *Benchmark) timeZerologSet() int64 { + // Init logger + logger := zerolog.New(nil) + + // Time Consumption + start := time.Now() + for i := 0; i < bench.numEvents; i++ { + logger.Info().Msg("Message " + fmt.Sprint(i)) + } + return time.Since(start).Microseconds() +} + +// NR Zerolog Plugin Test +func NRZerolog(numEvents, numRuns int) Benchmark { + return Benchmark{ + TestNRZL, + numEvents, + numRuns, + make([]int64, numRuns), + } +} + +func (bench *Benchmark) timeZerologPluginSet(app *newrelic.Application) int64 { + // Init Logger + + nrHook := nrzerolog.Hook{ + App: app, + } + + logger := zerolog.New(nil).Hook(nrHook) + + // Time Consumption + start := time.Now() + for i := 0; i < bench.numEvents; i++ { + logger.Info().Msg("Message " + fmt.Sprint(i)) + } + return time.Since(start).Microseconds() +} + +// Custom Events Test +func CustomEvent(numEvents, numRuns int) Benchmark { + return Benchmark{ + TestCustomEvents, + numEvents, + numRuns, + make([]int64, numRuns), + } +} + +func (bench *Benchmark) timeCustomEventSet(app *newrelic.Application) int64 { + // Time Consumption + start := time.Now() + for i := 0; i < bench.numEvents; i++ { + message := "Message " + fmt.Sprint(i) + app.RecordCustomEvent("TEST EVENT", map[string]interface{}{ + "Message": message, + }) + } + return time.Since(start).Microseconds() +} + +// Benchmark Framework +type Benchmark struct { + eventType string + numEvents int + sets int + runTimes []int64 +} + +func (bench *Benchmark) Sprint() string { + output := fmt.Sprintf("Time taken to record %d %s:\n", bench.numEvents, bench.eventType) + for _, time := range bench.runTimes { + output += fmt.Sprintf("\t\tMicroseconds: %d\n", time) + } + + validTimes, sum := normalize(bench.runTimes) + average := float64(sum) / float64(len(validTimes)) + output += fmt.Sprintf("\t\tAverage Microseconds: %.3f\n", average) + return output +} + +func (bench *Benchmark) Benchmark(app *newrelic.Application) { + for set := 0; set < bench.sets; set++ { + switch bench.eventType { + case TestZerolog: + bench.runTimes[set] = bench.timeZerologSet() + case TestNRZL: + bench.runTimes[set] = bench.timeZerologPluginSet(app) + case TestCustomEvents: + bench.runTimes[set] = bench.timeCustomEventSet(app) + } + } +} + +func main() { + app, err := newrelic.NewApplication( + newrelic.ConfigAppName("ApplicationLogging Stress Test Golang"), + newrelic.ConfigZerologPluginEnabled(true), + newrelic.ConfigDistributedTracerEnabled(true), 
+ newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), + newrelic.ConfigInfoLogger(os.Stdout), + ) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + tests := []Benchmark{ + Zerolog(10, 10), + Zerolog(100, 10), + Zerolog(1000, 10), + Zerolog(10000, 10), + + NRZerolog(10, 10), + NRZerolog(100, 10), + NRZerolog(1000, 10), + NRZerolog(1000, 10), + + CustomEvent(10, 10), + CustomEvent(100, 10), + CustomEvent(1000, 10), + CustomEvent(10000, 10), + } + + // Wait for the application to connect. + if err := app.WaitForConnection(5 * time.Second); nil != err { + fmt.Println(err) + } + + for _, test := range tests { + test.Benchmark(app) + } + + // Make sure the metrics get sent + time.Sleep(60 * time.Second) + app.Shutdown(10 * time.Second) + + // Compile metrics data as pretty printed strings + var metrics string + for _, test := range tests { + metrics += test.Sprint() + } + + fmt.Println(metrics) +} diff --git a/v3/scale-tests/normalize.go b/v3/scale-tests/normalize.go new file mode 100644 index 000000000..250da53cc --- /dev/null +++ b/v3/scale-tests/normalize.go @@ -0,0 +1,76 @@ +// Copyright 2020 New Relic Corporation. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 +// + +package main + +import ( + "sort" +) + +// Normalizes the data by removing outliers based on z score +func normalize(times []int64) ([]int64, int64) { + // not enough entries to do this calculation correctly + if len(times) < 3 { + var sum int64 + for _, time := range times { + sum += time + } + return times, sum + } + + var sum int64 + validTimes := make([]int64, 0, len(times)) + + q1, q3 := interquartileRanges(times) + iqr := q3 - q1 + upperFence := int64(q3 + (1.5 * iqr)) + lowerFence := int64(q1 - (1.5 * iqr)) + + for _, time := range times { + if time >= lowerFence && time <= upperFence { + validTimes = append(validTimes, time) + sum += time + } + } + + return validTimes, sum +} + +func interquartileRanges(times []int64) (float64, float64) { + sorted := make([]int, len(times)) + for i, val := range times { + sorted[i] = int(val) + } + + sort.Ints(sorted) + + var r1, r2 []int + + if len(sorted)%2 == 1 { + r1 = sorted[:(len(sorted) / 2)] + r2 = sorted[(len(sorted)/2)+1:] + } else { + r1 = sorted[:(len(sorted))/2] + r2 = sorted[(len(sorted) / 2):] + } + + q1 := median(r1) + q3 := median(r2) + + return float64(q1), float64(q3) +} + +func median(n []int) float64 { + if len(n) == 0 { + return 0 + } + if len(n) == 1 { + return float64(n[0]) + } + if len(n)%2 == 1 { + return float64(n[len(n)/2]) + } else { + return float64((n[len(n)/2-1] + n[len(n)/2])) / 2 + } +} diff --git a/v3/scale-tests/normalize_test.go b/v3/scale-tests/normalize_test.go new file mode 100644 index 000000000..0bd915e24 --- /dev/null +++ b/v3/scale-tests/normalize_test.go @@ -0,0 +1,86 @@ +// Copyright 2020 New Relic Corporation. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 +// + +package main + +import ( + "testing" +) + +func TestInterquartileRangesEven(t *testing.T) { + vals := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + q1, q3 := interquartileRanges(vals) + if q1 != 3 { + t.Errorf("Expected Q1 to equal 3, got %v", q1) + } + if q3 != 8 { + t.Errorf("Expected Q3 to equal 8, got %v", q3) + } +} + +func TestInterquartileRangesOdd(t *testing.T) { + vals := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9} + q1, q3 := interquartileRanges(vals) + if q1 != 2.5 { + t.Errorf("Expected Q1 to equal 2.5, got %v", q1) + } + if q3 != 7.5 { + t.Errorf("Expected Q3 to equal 7.5, got %v", q3) + } +} + +func TestNormalizeEven(t *testing.T) { + vals := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 100} + expect := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9} + validTimes, sum := normalize(vals) + + if !AssertInt64SliceEquals(validTimes, expect) { + t.Errorf("Array was not normalized: %v should be %v", vals, expect) + } + + if sum != 45 { + t.Errorf("Sum should be 45, got %v", sum) + } +} + +func TestNormalizeOdd(t *testing.T) { + vals := []int64{2, 3, 4, 5, 6, 7, 8, 9, 100} + expect := []int64{2, 3, 4, 5, 6, 7, 8, 9} + validTimes, sum := normalize(vals) + + if !AssertInt64SliceEquals(validTimes, expect) { + t.Errorf("Array was not normalized: %v should be %v", vals, expect) + } + + if sum != 44 { + t.Errorf("Sum should be 44, got %v", sum) + } +} + +func TestNormalizeNoop(t *testing.T) { + vals := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + expect := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + validTimes, sum := normalize(vals) + + if !AssertInt64SliceEquals(validTimes, expect) { + t.Errorf("Array was not normalized: %v should be %v", vals, expect) + } + + if sum != 55 { + t.Errorf("Sum should be 55, got %v", sum) + } +} + +func AssertInt64SliceEquals(a, b []int64) bool { + if len(a) != len(b) { + return false + } + + for i, v := range a { + if v != b[i] { + return false + } + } + return true +} From f0b6b53a2123c5ddf180816c4e856bef989e8d6f Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Thu, 26 May 2022 14:47:44 -0400 Subject: [PATCH 11/47] dont use time.Now().UnixMillisecond() due to old versions of go not supporting it --- v3/newrelic/harvest_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/v3/newrelic/harvest_test.go b/v3/newrelic/harvest_test.go index 8ce8f2584..7fdb78304 100644 --- a/v3/newrelic/harvest_test.go +++ b/v3/newrelic/harvest_test.go @@ -319,7 +319,7 @@ func TestHarvestLogEventsReady(t *testing.T) { logEvent := logEvent{ 0.5, - int64(time.Now().UnixMilli()), + 123456, "INFO", "User 'xyz' logged in", "123456789ADF", @@ -579,7 +579,7 @@ func TestMergeFailedHarvest(t *testing.T) { logEvent := logEvent{ 0.5, - int64(time.Now().UnixMilli()), + 123456, "INFO", "User 'xyz' logged in", "123456789ADF", From 69e13d24d049819f6e5bce0c92b6c08b18e82bc3 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Fri, 27 May 2022 10:59:34 -0400 Subject: [PATCH 12/47] clean up the implementation of record logs to make it more user friendly --- .../logcontext-v2/nrzerolog/hook.go | 11 +------ v3/newrelic/application.go | 15 ++++----- v3/newrelic/config_options.go | 10 +++--- v3/newrelic/harvest_test.go | 2 +- v3/newrelic/log_event.go | 32 +++++++++++++------ v3/newrelic/log_event_test.go | 12 ------- 6 files changed, 35 insertions(+), 47 deletions(-) diff --git a/v3/integrations/logcontext-v2/nrzerolog/hook.go b/v3/integrations/logcontext-v2/nrzerolog/hook.go index d0de49de9..0f58bf65b 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/hook.go +++ 
b/v3/integrations/logcontext-v2/nrzerolog/hook.go
@@ -21,20 +21,11 @@ func (h NewRelicHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
 		logLevel = level.String()
 	}
 
-	var spanID, traceID string
-	if h.Context != nil {
-		txn := newrelic.FromContext(h.Context)
-		traceMetadata := txn.GetTraceMetadata()
-		spanID = traceMetadata.SpanID
-		traceID = traceMetadata.TraceID
-	}
-
 	data := newrelic.LogData{
 		Timestamp: time.Now().UnixMilli(),
 		Severity:  logLevel,
 		Message:   msg,
-		SpanID:    spanID,
-		TraceID:   traceID,
+		Context:   h.Context,
 	}
 
 	h.App.RecordLog(&data)
diff --git a/v3/newrelic/application.go b/v3/newrelic/application.go
index 32006f596..89b05f2f5 100644
--- a/v3/newrelic/application.go
+++ b/v3/newrelic/application.go
@@ -75,16 +75,13 @@ func (app *Application) RecordCustomMetric(name string, value float64) {
 }
 
 // RecordLog records the data from a single log line.
-// This consumes a LogData object pointer with the following expectations:
+// This consumes a LogData object that should be configured
+// with data taken from a logging framework.
 //
-// Timestamp: Required; An int64 unix millisecond timestamp
-// Severity: Required; A string log severity or level taken from logging framework.
-// If unknown, must be set to "UNKNOWN".
-// Message: Optional; A string containing the message body of a log.
-// SpanID: Optional; A string containing the UUID of a span.
-// TraceID: Optional; A string containing the UUID of a transaction trace. Log events
-// inherit their priority from transaction traces when possible. Failing to
-// include this when necessary, may result in important logs being dropped.
+// Certain parts of this feature can be turned off based on your
+// config settings. RecordLog is capable of recording log events,
+// as well as log metrics depending on how your application is
+// configured.
func (app *Application) RecordLog(logEvent *LogData) { if nil == app { return diff --git a/v3/newrelic/config_options.go b/v3/newrelic/config_options.go index 0470da053..98f2a3b7e 100644 --- a/v3/newrelic/config_options.go +++ b/v3/newrelic/config_options.go @@ -44,9 +44,9 @@ func ConfigDistributedTracerReservoirLimit(limit int) ConfigOption { return func(cfg *Config) { cfg.DistributedTracer.ReservoirLimit = limit } } -// ConfigAppLogForwadringEnabled enables or disables the collection +// ConfigAppLogForwardingEnabled enables or disables the collection // of logs from a users application by the agent -// Defaults: enabled=false, maxSamplesStored=10,000 +// Defaults: enabled=false func ConfigAppLogForwardingEnabled(enabled bool) ConfigOption { return func(cfg *Config) { if enabled == true { @@ -86,7 +86,7 @@ func ConfigAppLogEnabled(enabled bool) ConfigOption { } const ( - ZerologFrameworkName = "Zerolog" + zerologFrameworkName = "Zerolog" ) // ConfigZerologPluginEnabled enables all supported features @@ -102,9 +102,9 @@ func ConfigZerologPluginEnabled(enabled bool) ConfigOption { cfg.ApplicationLogging.Forwarding.Enabled = true cfg.ApplicationLogging.Metrics.Enabled = true if cfg.ApplicationLogging.Frameworks == nil { - cfg.ApplicationLogging.Frameworks = []string{ZerologFrameworkName} + cfg.ApplicationLogging.Frameworks = []string{zerologFrameworkName} } else { - cfg.ApplicationLogging.Frameworks = append(cfg.ApplicationLogging.Frameworks, ZerologFrameworkName) + cfg.ApplicationLogging.Frameworks = append(cfg.ApplicationLogging.Frameworks, zerologFrameworkName) } } } diff --git a/v3/newrelic/harvest_test.go b/v3/newrelic/harvest_test.go index 7fdb78304..728c7640d 100644 --- a/v3/newrelic/harvest_test.go +++ b/v3/newrelic/harvest_test.go @@ -85,7 +85,7 @@ func TestCreateFinalMetrics(t *testing.T) { var nilHarvest *harvest config := config{Config: defaultConfig()} - config.ApplicationLogging.Frameworks = append(config.ApplicationLogging.Frameworks, ZerologFrameworkName) + config.ApplicationLogging.Frameworks = append(config.ApplicationLogging.Frameworks, zerologFrameworkName) run := newAppRun(config, internal.ConnectReplyDefaults()) run.harvestConfig = testHarvestCfgr diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go index 8057474c1..8862ef8c6 100644 --- a/v3/newrelic/log_event.go +++ b/v3/newrelic/log_event.go @@ -5,8 +5,10 @@ package newrelic import ( "bytes" + "context" "errors" "fmt" + "strings" ) const ( @@ -30,13 +32,12 @@ type logEvent struct { traceID string } -// For customer use +// LogData contains data fields that are needed to generate log events. type LogData struct { - Timestamp int64 - Severity string - Message string - SpanID string - TraceID string + Timestamp int64 // Required: Unix Millisecond Timestamp + Severity string // Optional: Severity of log being consumed + Message string // Optional: Message of log being consumed; Maximum size: 32768 Bytes. + Context context.Context // Optional: context containing a New Relic Transaction } // writeJSON prepares JSON in the format expected by the collector. 
@@ -71,7 +72,6 @@ var ( severityUnknown = "UNKNOWN" errEmptyTimestamp = errors.New("timestamp can not be empty") - errEmptySeverity = errors.New("severity can not be empty") errNilLogData = errors.New("log data can not be nil") errLogMessageTooLarge = fmt.Errorf("log message can not exceed %d bytes", MaxLogLength) ) @@ -81,7 +81,7 @@ func (data *LogData) ToLogEvent() (*logEvent, error) { return nil, errNilLogData } if data.Severity == "" { - return nil, errEmptySeverity + data.Severity = LogSeverityUnknown } if len(data.Message) > MaxLogLength { return nil, errLogMessageTooLarge @@ -90,11 +90,23 @@ func (data *LogData) ToLogEvent() (*logEvent, error) { return nil, errEmptyTimestamp } + data.Message = strings.TrimSpace(data.Message) + data.Severity = strings.TrimSpace(data.Severity) + + var spanID, traceID string + + if data.Context != nil { + txn := FromContext(data.Context) + traceMetadata := txn.GetTraceMetadata() + spanID = traceMetadata.SpanID + traceID = traceMetadata.TraceID + } + event := logEvent{ message: data.Message, severity: data.Severity, - spanID: data.SpanID, - traceID: data.TraceID, + spanID: spanID, + traceID: traceID, timestamp: data.Timestamp, } diff --git a/v3/newrelic/log_event_test.go b/v3/newrelic/log_event_test.go index 16a94c081..2d9e3a870 100644 --- a/v3/newrelic/log_event_test.go +++ b/v3/newrelic/log_event_test.go @@ -41,15 +41,3 @@ func TestWriteJSONWithTrace(t *testing.T) { t.Errorf("Log json did not build correctly: expecting %s, got %s", expect, actualString) } } - -func BenchmarkToLogEvent(b *testing.B) { - b.ReportAllocs() - data := LogData{ - Severity: "INFO", - Message: "test message", - Timestamp: 123456, - TraceID: "123Ad234", - SpanID: "adf3441", - } - data.ToLogEvent() -} From c87eba329427e9eb2a9fb57ab1ff46f043e7f0e7 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Fri, 27 May 2022 13:48:05 -0400 Subject: [PATCH 13/47] clean up vet issues --- v3/newrelic/internal_app.go | 2 +- v3/newrelic/log_event.go | 19 ++++++++++--------- v3/newrelic/log_events.go | 2 +- v3/newrelic/log_events_test.go | 10 +++++++++- v3/scale-tests/go.mod | 18 ++++++++++++++++++ v3/scale-tests/main.go | 4 ++-- v3/scale-tests/normalize.go | 2 +- v3/scale-tests/normalize_test.go | 2 +- 8 files changed, 43 insertions(+), 16 deletions(-) create mode 100644 v3/scale-tests/go.mod diff --git a/v3/newrelic/internal_app.go b/v3/newrelic/internal_app.go index ee8361385..fc79e35e6 100644 --- a/v3/newrelic/internal_app.go +++ b/v3/newrelic/internal_app.go @@ -591,7 +591,7 @@ func (app *app) RecordLog(log *LogData) error { return errAppLoggingDisabled } - event, err := log.ToLogEvent() + event, err := log.toLogEvent() if err != nil { return err } diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go index 8862ef8c6..1c403d5d7 100644 --- a/v3/newrelic/log_event.go +++ b/v3/newrelic/log_event.go @@ -12,14 +12,15 @@ import ( ) const ( - LogSeverityFieldName = "level" - LogMessageFieldName = "message" - LogTimestampFieldName = "timestamp" - LogSpanIDFieldName = "span.id" - LogTraceIDFieldName = "trace.id" - LogSeverityUnknown = "UNKNOWN" - - MaxLogLength = 32768 + LogSeverityFieldName = "level" // The name of the log level field in New Relic logging JSON + LogMessageFieldName = "message" // The name of the log message field in New Relic logging JSON + LogTimestampFieldName = "timestamp" // The name of the timestamp field in New Relic logging JSON + LogSpanIDFieldName = "span.id" // The name of the span ID field in the New Relic logging JSON + LogTraceIDFieldName = "trace.id" // The 
name of the trace ID field in the New Relic logging JSON + + LogSeverityUnknown = "UNKNOWN" // If the log level/severity is not known, it must be set to this value + + MaxLogLength = 32768 // The maximum number of bytes a new relic log message is allowed to have ) // for internal user only @@ -76,7 +77,7 @@ var ( errLogMessageTooLarge = fmt.Errorf("log message can not exceed %d bytes", MaxLogLength) ) -func (data *LogData) ToLogEvent() (*logEvent, error) { +func (data *LogData) toLogEvent() (*logEvent, error) { if data == nil { return nil, errNilLogData } diff --git a/v3/newrelic/log_events.go b/v3/newrelic/log_events.go index e3ceafda8..91b688254 100644 --- a/v3/newrelic/log_events.go +++ b/v3/newrelic/log_events.go @@ -75,7 +75,7 @@ func (events *logEvents) capacity() int { func (events *logEvents) Add(e *logEvent) { // always collect this but do not report logging metrics when disabled events.numSeen++ - events.severityCount[e.severity] += 1 + events.severityCount[e.severity]++ // Do not collect log events when the harvest capacity is intentionally set to 0 // or the collection of events is explicitly disabled diff --git a/v3/newrelic/log_events_test.go b/v3/newrelic/log_events_test.go index 9f37362de..7ce3a8afa 100644 --- a/v3/newrelic/log_events_test.go +++ b/v3/newrelic/log_events_test.go @@ -385,7 +385,15 @@ func TestLogEventCollectionDisabled(t *testing.T) { } func BenchmarkAddLogEvent(b *testing.B) { - + event := logEvent{ + priority: 0.6, + timestamp: 123456, + severity: "INFO", + message: "test message", + spanID: "Ad300dra7re89", + traceID: "2234iIhfLlejrJ0", + } + logEventBenchmarkHelper(b, &event) } func logEventBenchmarkHelper(b *testing.B, event *logEvent) { diff --git a/v3/scale-tests/go.mod b/v3/scale-tests/go.mod new file mode 100644 index 000000000..efa2f4687 --- /dev/null +++ b/v3/scale-tests/go.mod @@ -0,0 +1,18 @@ +module github.com/newrelic/go-agent/v3/scale-tests + +go 1.18 + +require ( + github.com/newrelic/go-agent/v3 v3.16.1 + github.com/rs/zerolog v1.26.1 +) + +require ( + github.com/golang/protobuf v1.4.3 // indirect + golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d // indirect + golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e // indirect + golang.org/x/text v0.3.6 // indirect + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect + google.golang.org/grpc v1.39.0 // indirect + google.golang.org/protobuf v1.25.0 // indirect +) diff --git a/v3/scale-tests/main.go b/v3/scale-tests/main.go index dbf9d86e8..23f33da8e 100644 --- a/v3/scale-tests/main.go +++ b/v3/scale-tests/main.go @@ -1,14 +1,14 @@ // Copyright 2020 New Relic Corporation. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0 -package main +package scaleTests import ( "fmt" "os" "time" - "github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrzerolog" + nrzerolog "github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrzerolog" "github.com/newrelic/go-agent/v3/newrelic" "github.com/rs/zerolog" ) diff --git a/v3/scale-tests/normalize.go b/v3/scale-tests/normalize.go index 250da53cc..541b93c54 100644 --- a/v3/scale-tests/normalize.go +++ b/v3/scale-tests/normalize.go @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -package main +package scaleTests import ( "sort" diff --git a/v3/scale-tests/normalize_test.go b/v3/scale-tests/normalize_test.go index 0bd915e24..ae401328a 100644 --- a/v3/scale-tests/normalize_test.go +++ b/v3/scale-tests/normalize_test.go @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -package main +package scaleTests import ( "testing" From eaef8036239e45d1508abab2758386dae22533ca Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Fri, 27 May 2022 14:13:09 -0400 Subject: [PATCH 14/47] priority inheretence --- v3/newrelic/harvest.go | 16 ----------- v3/newrelic/log_event.go | 11 +++----- v3/newrelic/log_event_test.go | 51 +++++++++++++++++++++++++++++++++++ 3 files changed, 54 insertions(+), 24 deletions(-) diff --git a/v3/newrelic/harvest.go b/v3/newrelic/harvest.go index 6302c2694..db0521698 100644 --- a/v3/newrelic/harvest.go +++ b/v3/newrelic/harvest.go @@ -57,24 +57,8 @@ func (timer *harvestTimer) ready(now time.Time) (ready harvestTypes) { return } -type knownPriorities map[string]priority - -func (kp knownPriorities) get(uuid string) (priority, bool) { - priority, ok := kp[uuid] - return priority, ok -} - -func (kp knownPriorities) add(uuid string, p priority) { - kp[uuid] = p -} - -func (kp knownPriorities) drop(uuid string) { - delete(kp, uuid) -} - // harvest contains collected data. 
type harvest struct { - knownPriorities timer *harvestTimer Metrics *metricTable diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go index 1c403d5d7..a69c92157 100644 --- a/v3/newrelic/log_event.go +++ b/v3/newrelic/log_event.go @@ -95,9 +95,11 @@ func (data *LogData) toLogEvent() (*logEvent, error) { data.Severity = strings.TrimSpace(data.Severity) var spanID, traceID string + var priority priority if data.Context != nil { txn := FromContext(data.Context) + priority = txn.thread.BetterCAT.Priority traceMetadata := txn.GetTraceMetadata() spanID = traceMetadata.SpanID traceID = traceMetadata.TraceID @@ -109,19 +111,12 @@ func (data *LogData) toLogEvent() (*logEvent, error) { spanID: spanID, traceID: traceID, timestamp: data.Timestamp, + priority: priority, } return &event, nil } func (e *logEvent) MergeIntoHarvest(h *harvest) { - // Inherit priority from traces or spans if possible - if e.traceID != "" { - priority, known := h.knownPriorities.get(e.traceID) - if known { - e.priority = priority - } - } - h.LogEvents.Add(e) } diff --git a/v3/newrelic/log_event_test.go b/v3/newrelic/log_event_test.go index 2d9e3a870..67eae62ef 100644 --- a/v3/newrelic/log_event_test.go +++ b/v3/newrelic/log_event_test.go @@ -1,7 +1,9 @@ package newrelic import ( + "fmt" "testing" + "time" ) func TestWriteJSON(t *testing.T) { @@ -41,3 +43,52 @@ func TestWriteJSONWithTrace(t *testing.T) { t.Errorf("Log json did not build correctly: expecting %s, got %s", expect, actualString) } } + +func BenchmarkToLogEvent(b *testing.B) { + data := LogData{ + Timestamp: 123456, + Severity: "INFO", + Message: "test message", + } + + data.toLogEvent() +} + +func recordLogBenchmarkHelper(b *testing.B, data *LogData, h *harvest) { + event, _ := data.toLogEvent() + event.MergeIntoHarvest(h) +} + +func BenchmarkRecordLog(b *testing.B) { + harvest := newHarvest(time.Now(), testHarvestCfgr) + data := LogData{ + Timestamp: 123456, + Severity: "INFO", + Message: "test message", + } + + b.ReportAllocs() + b.ResetTimer() + + recordLogBenchmarkHelper(b, &data, harvest) +} + +func BenchmarkRecordLog100(b *testing.B) { + harvest := newHarvest(time.Now(), testHarvestCfgr) + + logs := make([]*LogData, 100) + for i := 0; i < 100; i++ { + logs[i] = &LogData{ + Timestamp: 123456, + Severity: "INFO", + Message: "test message " + fmt.Sprint(i), + } + } + + b.ReportAllocs() + b.ResetTimer() + + for _, log := range logs { + recordLogBenchmarkHelper(b, log, harvest) + } +} From d4806f8ad8f3456db189f439420fe03fbc4b5ad9 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Fri, 27 May 2022 14:28:18 -0400 Subject: [PATCH 15/47] fixing scale tests --- v3/scale-tests/go.mod | 18 ------------------ v3/scale-tests/main.go | 2 +- v3/scale-tests/normalize.go | 2 +- v3/scale-tests/normalize_test.go | 2 +- 4 files changed, 3 insertions(+), 21 deletions(-) delete mode 100644 v3/scale-tests/go.mod diff --git a/v3/scale-tests/go.mod b/v3/scale-tests/go.mod deleted file mode 100644 index efa2f4687..000000000 --- a/v3/scale-tests/go.mod +++ /dev/null @@ -1,18 +0,0 @@ -module github.com/newrelic/go-agent/v3/scale-tests - -go 1.18 - -require ( - github.com/newrelic/go-agent/v3 v3.16.1 - github.com/rs/zerolog v1.26.1 -) - -require ( - github.com/golang/protobuf v1.4.3 // indirect - golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d // indirect - golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e // indirect - golang.org/x/text v0.3.6 // indirect - google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect - google.golang.org/grpc v1.39.0 // 
indirect - google.golang.org/protobuf v1.25.0 // indirect -) diff --git a/v3/scale-tests/main.go b/v3/scale-tests/main.go index 23f33da8e..a7bea1e3e 100644 --- a/v3/scale-tests/main.go +++ b/v3/scale-tests/main.go @@ -1,7 +1,7 @@ // Copyright 2020 New Relic Corporation. All rights reserved. // SPDX-License-Identifier: Apache-2.0 -package scaleTests +package main import ( "fmt" diff --git a/v3/scale-tests/normalize.go b/v3/scale-tests/normalize.go index 541b93c54..250da53cc 100644 --- a/v3/scale-tests/normalize.go +++ b/v3/scale-tests/normalize.go @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -package scaleTests +package main import ( "sort" diff --git a/v3/scale-tests/normalize_test.go b/v3/scale-tests/normalize_test.go index ae401328a..0bd915e24 100644 --- a/v3/scale-tests/normalize_test.go +++ b/v3/scale-tests/normalize_test.go @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -package scaleTests +package main import ( "testing" From 320057e6fa35d00e3eea778c562967ef958703af Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Fri, 27 May 2022 14:37:13 -0400 Subject: [PATCH 16/47] make the linter happy for logging constants --- v3/newrelic/log_event.go | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go index a69c92157..5ccdb1ab4 100644 --- a/v3/newrelic/log_event.go +++ b/v3/newrelic/log_event.go @@ -12,15 +12,26 @@ import ( ) const ( - LogSeverityFieldName = "level" // The name of the log level field in New Relic logging JSON - LogMessageFieldName = "message" // The name of the log message field in New Relic logging JSON - LogTimestampFieldName = "timestamp" // The name of the timestamp field in New Relic logging JSON - LogSpanIDFieldName = "span.id" // The name of the span ID field in the New Relic logging JSON - LogTraceIDFieldName = "trace.id" // The name of the trace ID field in the New Relic logging JSON + // LogSeverityFieldName is the name of the log level field in New Relic logging JSON + LogSeverityFieldName = "level" - LogSeverityUnknown = "UNKNOWN" // If the log level/severity is not known, it must be set to this value + // LogMessageFieldName is the name of the log message field in New Relic logging JSON + LogMessageFieldName = "message" - MaxLogLength = 32768 // The maximum number of bytes a new relic log message is allowed to have + // LogTimestampFieldName is the name of the timestamp field in New Relic logging JSON + LogTimestampFieldName = "timestamp" + + // LogSpanIDFieldName is the name of the span ID field in the New Relic logging JSON + LogSpanIDFieldName = "span.id" + + // LogTraceIDFieldName is the name of the trace ID field in the New Relic logging JSON + LogTraceIDFieldName = "trace.id" + + // LogSeverityUnknown is the value the log severity should be set to if no log severity is known + LogSeverityUnknown = "UNKNOWN" + + // MaxLogLength is the maximum number of bytes the log message is allowed to be + MaxLogLength = 32768 ) // for internal user only From bcf0c6e2d64f15b5039df4f1e33cb5f6656e76af Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Sat, 28 May 2022 17:20:59 -0400 Subject: [PATCH 17/47] some clean up --- v3/examples/server/main.go | 23 +++++++++++++++++++ v3/go.mod | 4 ++-- .../logcontext-v2/nrzerolog/Readme.md | 4 +++- 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/v3/examples/server/main.go b/v3/examples/server/main.go index af1496e34..594e996fb 100644 --- a/v3/examples/server/main.go +++ b/v3/examples/server/main.go @@ 
-4,6 +4,7 @@ package main import ( + "context" "errors" "fmt" "io" @@ -251,6 +252,8 @@ func main() { newrelic.ConfigAppName("Example App"), newrelic.ConfigFromEnvironment(), newrelic.ConfigDebugLogger(os.Stdout), + newrelic.ConfigDistributedTracerEnabled(true), + newrelic.ConfigAppLogForwardingEnabled(true), ) if err != nil { fmt.Println(err) @@ -274,6 +277,26 @@ func main() { http.HandleFunc(newrelic.WrapHandleFunc(app, "/browser", browser)) http.HandleFunc(newrelic.WrapHandleFunc(app, "/async", async)) http.HandleFunc(newrelic.WrapHandleFunc(app, "/message", message)) + http.HandleFunc("/log", func(w http.ResponseWriter, req *http.Request) { + // Transactions started without an http.Request are classified as + // background transactions. + txn := app.StartTransaction("Log") + defer txn.End() + + ctx := newrelic.NewContext(context.Background(), txn) + + data := &newrelic.LogData{ + Timestamp: time.Now().UnixMilli(), + Message: "Log Message", + Severity: "info", + Context: ctx, + } + + app.RecordLog(data) + + io.WriteString(w, "Log") + time.Sleep(150 * time.Millisecond) + }) http.HandleFunc("/background", func(w http.ResponseWriter, req *http.Request) { // Transactions started without an http.Request are classified as diff --git a/v3/go.mod b/v3/go.mod index 92bc2eef5..2ada3801e 100644 --- a/v3/go.mod +++ b/v3/go.mod @@ -3,6 +3,6 @@ module github.com/newrelic/go-agent/v3 go 1.7 require ( - github.com/golang/protobuf v1.4.3 - google.golang.org/grpc v1.39.0 + github.com/golang/protobuf v1.3.3 + google.golang.org/grpc v1.27.0 ) diff --git a/v3/integrations/logcontext-v2/nrzerolog/Readme.md b/v3/integrations/logcontext-v2/nrzerolog/Readme.md index 272b91611..af99446f1 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/Readme.md +++ b/v3/integrations/logcontext-v2/nrzerolog/Readme.md @@ -34,7 +34,8 @@ import ( ) func main() { - baseLogger := zerolog.New(os.Stdout) + // Initialize a zerolog logger + baseLogger := zerolog.New(os.Stdout) app, err := newrelic.NewApplication( newrelic.ConfigFromEnvironment(), @@ -52,6 +53,7 @@ func main() { App: app, } + // Wrap logger with New Relic Hook nrLogger := baseLogger.Hook(nrHook) nrLogger.Info().Msg("Hello World") } From 317b6a0318c54e66e044b68612b7fe6213954a75 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 31 May 2022 11:09:48 -0400 Subject: [PATCH 18/47] remove scale tests --- v3/scale-tests/main.go | 176 ------------------------------- v3/scale-tests/normalize.go | 76 ------------- v3/scale-tests/normalize_test.go | 86 --------------- 3 files changed, 338 deletions(-) delete mode 100644 v3/scale-tests/main.go delete mode 100644 v3/scale-tests/normalize.go delete mode 100644 v3/scale-tests/normalize_test.go diff --git a/v3/scale-tests/main.go b/v3/scale-tests/main.go deleted file mode 100644 index a7bea1e3e..000000000 --- a/v3/scale-tests/main.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2020 New Relic Corporation. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package main - -import ( - "fmt" - "os" - "time" - - nrzerolog "github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrzerolog" - "github.com/newrelic/go-agent/v3/newrelic" - "github.com/rs/zerolog" -) - -var ( - TestNRZL = "Zerolog Plugin" - TestZerolog = "Zerolog" - TestCustomEvents = "Custom Events" -) - -// Zerolog Test -func Zerolog(numEvents, numRuns int) Benchmark { - return Benchmark{ - TestZerolog, - numEvents, - numRuns, - make([]int64, numRuns), - } -} - -func (bench *Benchmark) timeZerologSet() int64 { - // Init logger - logger := zerolog.New(nil) - - // Time Consumption - start := time.Now() - for i := 0; i < bench.numEvents; i++ { - logger.Info().Msg("Message " + fmt.Sprint(i)) - } - return time.Since(start).Microseconds() -} - -// NR Zerolog Plugin Test -func NRZerolog(numEvents, numRuns int) Benchmark { - return Benchmark{ - TestNRZL, - numEvents, - numRuns, - make([]int64, numRuns), - } -} - -func (bench *Benchmark) timeZerologPluginSet(app *newrelic.Application) int64 { - // Init Logger - - nrHook := nrzerolog.Hook{ - App: app, - } - - logger := zerolog.New(nil).Hook(nrHook) - - // Time Consumption - start := time.Now() - for i := 0; i < bench.numEvents; i++ { - logger.Info().Msg("Message " + fmt.Sprint(i)) - } - return time.Since(start).Microseconds() -} - -// Custom Events Test -func CustomEvent(numEvents, numRuns int) Benchmark { - return Benchmark{ - TestCustomEvents, - numEvents, - numRuns, - make([]int64, numRuns), - } -} - -func (bench *Benchmark) timeCustomEventSet(app *newrelic.Application) int64 { - // Time Consumption - start := time.Now() - for i := 0; i < bench.numEvents; i++ { - message := "Message " + fmt.Sprint(i) - app.RecordCustomEvent("TEST EVENT", map[string]interface{}{ - "Message": message, - }) - } - return time.Since(start).Microseconds() -} - -// Benchmark Framework -type Benchmark struct { - eventType string - numEvents int - sets int - runTimes []int64 -} - -func (bench *Benchmark) Sprint() string { - output := fmt.Sprintf("Time taken to record %d %s:\n", bench.numEvents, bench.eventType) - for _, time := range bench.runTimes { - output += fmt.Sprintf("\t\tMicroseconds: %d\n", time) - } - - validTimes, sum := normalize(bench.runTimes) - average := float64(sum) / float64(len(validTimes)) - output += fmt.Sprintf("\t\tAverage Microseconds: %.3f\n", average) - return output -} - -func (bench *Benchmark) Benchmark(app *newrelic.Application) { - for set := 0; set < bench.sets; set++ { - switch bench.eventType { - case TestZerolog: - bench.runTimes[set] = bench.timeZerologSet() - case TestNRZL: - bench.runTimes[set] = bench.timeZerologPluginSet(app) - case TestCustomEvents: - bench.runTimes[set] = bench.timeCustomEventSet(app) - } - } -} - -func main() { - app, err := newrelic.NewApplication( - newrelic.ConfigAppName("ApplicationLogging Stress Test Golang"), - newrelic.ConfigZerologPluginEnabled(true), - newrelic.ConfigDistributedTracerEnabled(true), - newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")), - newrelic.ConfigInfoLogger(os.Stdout), - ) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - tests := []Benchmark{ - Zerolog(10, 10), - Zerolog(100, 10), - Zerolog(1000, 10), - Zerolog(10000, 10), - - NRZerolog(10, 10), - NRZerolog(100, 10), - NRZerolog(1000, 10), - NRZerolog(1000, 10), - - CustomEvent(10, 10), - CustomEvent(100, 10), - CustomEvent(1000, 10), - CustomEvent(10000, 10), - } - - // Wait for the application to connect. 
- if err := app.WaitForConnection(5 * time.Second); nil != err { - fmt.Println(err) - } - - for _, test := range tests { - test.Benchmark(app) - } - - // Make sure the metrics get sent - time.Sleep(60 * time.Second) - app.Shutdown(10 * time.Second) - - // Compile metrics data as pretty printed strings - var metrics string - for _, test := range tests { - metrics += test.Sprint() - } - - fmt.Println(metrics) -} diff --git a/v3/scale-tests/normalize.go b/v3/scale-tests/normalize.go deleted file mode 100644 index 250da53cc..000000000 --- a/v3/scale-tests/normalize.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2020 New Relic Corporation. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 -// - -package main - -import ( - "sort" -) - -// Normalizes the data by removing outliers based on z score -func normalize(times []int64) ([]int64, int64) { - // not enough entries to do this calculation correctly - if len(times) < 3 { - var sum int64 - for _, time := range times { - sum += time - } - return times, sum - } - - var sum int64 - validTimes := make([]int64, 0, len(times)) - - q1, q3 := interquartileRanges(times) - iqr := q3 - q1 - upperFence := int64(q3 + (1.5 * iqr)) - lowerFence := int64(q1 - (1.5 * iqr)) - - for _, time := range times { - if time >= lowerFence && time <= upperFence { - validTimes = append(validTimes, time) - sum += time - } - } - - return validTimes, sum -} - -func interquartileRanges(times []int64) (float64, float64) { - sorted := make([]int, len(times)) - for i, val := range times { - sorted[i] = int(val) - } - - sort.Ints(sorted) - - var r1, r2 []int - - if len(sorted)%2 == 1 { - r1 = sorted[:(len(sorted) / 2)] - r2 = sorted[(len(sorted)/2)+1:] - } else { - r1 = sorted[:(len(sorted))/2] - r2 = sorted[(len(sorted) / 2):] - } - - q1 := median(r1) - q3 := median(r2) - - return float64(q1), float64(q3) -} - -func median(n []int) float64 { - if len(n) == 0 { - return 0 - } - if len(n) == 1 { - return float64(n[0]) - } - if len(n)%2 == 1 { - return float64(n[len(n)/2]) - } else { - return float64((n[len(n)/2-1] + n[len(n)/2])) / 2 - } -} diff --git a/v3/scale-tests/normalize_test.go b/v3/scale-tests/normalize_test.go deleted file mode 100644 index 0bd915e24..000000000 --- a/v3/scale-tests/normalize_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2020 New Relic Corporation. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0
-//
-
-package main
-
-import (
-	"testing"
-)
-
-func TestInterquartileRangesEven(t *testing.T) {
-	vals := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
-	q1, q3 := interquartileRanges(vals)
-	if q1 != 3 {
-		t.Errorf("Expected Q1 to equal 3, got %v", q1)
-	}
-	if q3 != 8 {
-		t.Errorf("Expected Q3 to equal 8, got %v", q3)
-	}
-}
-
-func TestInterquartileRangesOdd(t *testing.T) {
-	vals := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9}
-	q1, q3 := interquartileRanges(vals)
-	if q1 != 2.5 {
-		t.Errorf("Expected Q1 to equal 2.5, got %v", q1)
-	}
-	if q3 != 7.5 {
-		t.Errorf("Expected Q3 to equal 7.5, got %v", q3)
-	}
-}
-
-func TestNormalizeEven(t *testing.T) {
-	vals := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 100}
-	expect := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9}
-	validTimes, sum := normalize(vals)
-
-	if !AssertInt64SliceEquals(validTimes, expect) {
-		t.Errorf("Array was not normalized: %v should be %v", vals, expect)
-	}
-
-	if sum != 45 {
-		t.Errorf("Sum should be 45, got %v", sum)
-	}
-}
-
-func TestNormalizeOdd(t *testing.T) {
-	vals := []int64{2, 3, 4, 5, 6, 7, 8, 9, 100}
-	expect := []int64{2, 3, 4, 5, 6, 7, 8, 9}
-	validTimes, sum := normalize(vals)
-
-	if !AssertInt64SliceEquals(validTimes, expect) {
-		t.Errorf("Array was not normalized: %v should be %v", vals, expect)
-	}
-
-	if sum != 44 {
-		t.Errorf("Sum should be 44, got %v", sum)
-	}
-}
-
-func TestNormalizeNoop(t *testing.T) {
-	vals := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
-	expect := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
-	validTimes, sum := normalize(vals)
-
-	if !AssertInt64SliceEquals(validTimes, expect) {
-		t.Errorf("Array was not normalized: %v should be %v", vals, expect)
-	}
-
-	if sum != 55 {
-		t.Errorf("Sum should be 55, got %v", sum)
-	}
-}
-
-func AssertInt64SliceEquals(a, b []int64) bool {
-	if len(a) != len(b) {
-		return false
-	}
-
-	for i, v := range a {
-		if v != b[i] {
-			return false
-		}
-	}
-	return true
-}

From 6ffb77a1334a3c2dc1721f6814d423ef2d86acbc Mon Sep 17 00:00:00 2001
From: Emilio Garcia
Date: Tue, 31 May 2022 13:31:52 -0400
Subject: [PATCH 19/47] Async Log Event Support

It is possible that customers are writing logs asynchronously in their
applications. If New Relic is capturing those logs, we need to ensure that
the state of the application is not compromised, and that the customer's
expectations for which logs are being captured are consistent with what the
application is actually doing.
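A rough sketch, not part of this patch, of the scenario the commit message describes: several goroutines writing through a single hooked logger at the same time, all feeding one agent. It assumes the `NewRelicHook` type introduced earlier in this series and the zerolog plugin config option; the application name is illustrative.

```go
package main

import (
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrzerolog"
	"github.com/newrelic/go-agent/v3/newrelic"
	"github.com/rs/zerolog"
)

func main() {
	app, err := newrelic.NewApplication(
		newrelic.ConfigAppName("Async Logging Example"),
		newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")),
		newrelic.ConfigZerologPluginEnabled(true),
	)
	if nil != err {
		fmt.Println(err)
		os.Exit(1)
	}
	app.WaitForConnection(5 * time.Second)

	// All goroutines share one hooked logger, so log events reach the agent
	// concurrently; this is the usage pattern the commit is meant to keep safe.
	logger := zerolog.New(os.Stdout).Hook(nrzerolog.NewRelicHook{App: app})

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			logger.Info().Msgf("worker %d finished", n)
		}(i)
	}
	wg.Wait()

	app.Shutdown(10 * time.Second)
}
```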
--- v3/newrelic/log_events.go | 7 + v3/newrelic/log_events_test.go | 236 +++++++++++++++++++++++++++++++-- 2 files changed, 231 insertions(+), 12 deletions(-) diff --git a/v3/newrelic/log_events.go b/v3/newrelic/log_events.go index 91b688254..81c03859b 100644 --- a/v3/newrelic/log_events.go +++ b/v3/newrelic/log_events.go @@ -6,6 +6,7 @@ package newrelic import ( "bytes" "container/heap" + "sync" "time" "github.com/newrelic/go-agent/v3/internal/jsonx" @@ -23,6 +24,7 @@ type logEvents struct { numSeen int failedHarvests int severityCount map[string]int + rwMutex sync.RWMutex commonAttributes config loggingConfig logs logEventHeap @@ -34,7 +36,10 @@ func (events *logEvents) NumSeen() float64 { return float64(events.numSeen) } // NumSaved returns the number of events that will be harvested for this cycle func (events *logEvents) NumSaved() float64 { return float64(len(events.logs)) } +// Adds logging metrics to a harvest metric table if appropriate func (events *logEvents) RecordLoggingMetrics(metrics *metricTable, forced metricForce) { + events.rwMutex.RLock() + defer events.rwMutex.RUnlock() if events.config.collectMetrics && metrics != nil { metrics.addCount(logsSeen, events.NumSeen(), forced) for k, v := range events.severityCount { @@ -73,6 +78,8 @@ func (events *logEvents) capacity() int { } func (events *logEvents) Add(e *logEvent) { + events.rwMutex.Lock() + defer events.rwMutex.Unlock() // always collect this but do not report logging metrics when disabled events.numSeen++ events.severityCount[e.severity]++ diff --git a/v3/newrelic/log_events_test.go b/v3/newrelic/log_events_test.go index 7ce3a8afa..9879f14ed 100644 --- a/v3/newrelic/log_events_test.go +++ b/v3/newrelic/log_events_test.go @@ -5,6 +5,7 @@ package newrelic import ( "fmt" + "sync" "testing" "github.com/newrelic/go-agent/v3/internal" @@ -384,31 +385,242 @@ func TestLogEventCollectionDisabled(t *testing.T) { } } -func BenchmarkAddLogEvent(b *testing.B) { - event := logEvent{ - priority: 0.6, +func TestAsyncAddLogEvent(t *testing.T) { + numThreads := 8 + capacity := numThreads - 1 + + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(capacity)) + group := new(sync.WaitGroup) + group.Add(numThreads) + + // Add a bunch of log events aynchronously + for n := 0; n < numThreads/2; n++ { + p := priority(float32(n) / 10.0) + event := &logEvent{ + priority: p, + timestamp: 123456, + severity: "INFO", + message: fmt.Sprintf("info message %.2f", p), + } + go func(event *logEvent) { + events.Add(event) + group.Done() + }(event) + } + + for n := 0; n < numThreads/2; n++ { + p := priority(float32(n+numThreads/2) / 10.0) + event := &logEvent{ + priority: p, + timestamp: 123456, + severity: "WARN", + message: fmt.Sprintf("warn message %.2f", p), + } + go func(event *logEvent) { + events.Add(event) + group.Done() + }(event) + } + + group.Wait() + + expectMap := map[string]int{ + "INFO": numThreads / 2, + "WARN": numThreads / 2, + } + + metricErrors := events.assertMetrics(8, capacity, expectMap) + if metricErrors != nil { + t.Error(metricErrors) + } + + // Test Heap Data + // Assumes that heap implementation is correct when executed synchronously + expectEvents := newLogEvents(testCommonAttributes, loggingConfigEnabled(capacity)) + for n := 0; n < numThreads/2; n++ { + p := priority(float32(n) / 10.0) + event := &logEvent{ + priority: p, + timestamp: 123456, + severity: "INFO", + message: fmt.Sprintf("info message %.2f", p), + } + expectEvents.Add(event) + } + + for n := 0; n < numThreads/2; n++ { + p := 
priority(float32(n+numThreads/2) / 10.0) + event := &logEvent{ + priority: p, + timestamp: 123456, + severity: "WARN", + message: fmt.Sprintf("warn message %.2f", p), + } + expectEvents.Add(event) + } + + heapError := events.assertHeapContains(expectEvents) + if heapError != nil { + t.Error(heapError) + } +} + +// verifies that each log events heap contains the same elements +// heaps must be composed of unique messages +func (events *logEvents) assertHeapContains(expect *logEvents) error { + expectLogs := make(map[string]bool, len(expect.logs)) + + for _, event := range expect.logs { + expectLogs[event.message] = false + } + + for _, event := range events.logs { + expectLogs[event.message] = true + } + + missing := []string{} + for msg, contains := range expectLogs { + if !contains { + missing = append(missing, msg) + } + } + + if len(missing) != 0 { + return fmt.Errorf("expected logs were missing from the event heap: %v", missing) + } + + return nil +} + +func (events *logEvents) assertMetrics(expectSeen, expectSaved int, expectSeverity map[string]int) error { + err := assertInt(expectSeen, int(events.NumSeen())) + if err != nil { + return fmt.Errorf("incorrect number of events seen: %v", err) + } + + err = assertInt(expectSaved, int(events.NumSaved())) + if err != nil { + return fmt.Errorf("incorrect number of events saved: %v", err) + } + + if len(expectSeverity) != len(events.severityCount) { + return fmt.Errorf("incorrect number of severities seen: expect %d, actual %d", len(expectSeverity), len(events.severityCount)) + } + + for k, v := range expectSeverity { + val, ok := events.severityCount[k] + if !ok { + return fmt.Errorf("expected severity %s is missing from actual severity count", k) + } + + err := assertInt(v, val) + if err != nil { + return fmt.Errorf("incorrect severity count for %s: expect %d, actual %d", k, v, val) + } + } + + return nil +} + +func assertInt(expect int, actual int) error { + if expect != actual { + return fmt.Errorf("expected %d, actual %d", expect, actual) + } + return nil +} + +func BenchmarkAddMaximumLogEvent(b *testing.B) { + eventList := make([]*logEvent, internal.MaxLogEvents) + for n := 0; n < internal.MaxTxnEvents; n++ { + eventList[n] = &logEvent{ + priority: newPriority(), + timestamp: 123456, + severity: "INFO", + message: "test message", + spanID: "Ad300dra7re89", + traceID: "2234iIhfLlejrJ0", + } + } + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < internal.MaxTxnEvents; n++ { + events.Add(eventList[n]) + } +} + +func BenchmarkWriteMaximumLogEventJSON(b *testing.B) { + eventList := make([]*logEvent, internal.MaxLogEvents) + for n := 0; n < internal.MaxTxnEvents; n++ { + eventList[n] = &logEvent{ + priority: newPriority(), + timestamp: 123456, + severity: "INFO", + message: "test message", + spanID: "Ad300dra7re89", + traceID: "2234iIhfLlejrJ0", + } + } + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) + + for n := 0; n < internal.MaxTxnEvents; n++ { + events.Add(eventList[n]) + } + + b.ReportAllocs() + b.ResetTimer() + + js, err := events.CollectorJSON(agentRunID) + if nil != err { + b.Fatal(err, js) + } +} + +func BenchmarkAddAndWriteLogEvent(b *testing.B) { + b.ReportAllocs() + + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) + event := &logEvent{ + priority: newPriority(), timestamp: 123456, severity: "INFO", message: "test message", spanID: 
"Ad300dra7re89", traceID: "2234iIhfLlejrJ0", } - logEventBenchmarkHelper(b, &event) + + events.Add(event) + js, err := events.CollectorJSON(agentRunID) + if nil != err { + b.Fatal(err, js) + } } -func logEventBenchmarkHelper(b *testing.B, event *logEvent) { +func BenchmarkAddAndWriteMaximumLogEvents(b *testing.B) { + + eventList := make([]*logEvent, internal.MaxLogEvents) events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) for n := 0; n < internal.MaxTxnEvents; n++ { - events.Add(event) + eventList[n] = &logEvent{ + priority: newPriority(), + timestamp: 123456, + severity: "INFO", + message: "test message", + spanID: "Ad300dra7re89", + traceID: "2234iIhfLlejrJ0", + } } - b.ReportAllocs() b.ResetTimer() - for n := 0; n < b.N; n++ { - js, err := events.CollectorJSON(agentRunID) - if nil != err { - b.Fatal(err, js) - } + for n := 0; n < internal.MaxTxnEvents; n++ { + events.Add(eventList[n]) + } + + js, err := events.CollectorJSON(agentRunID) + if nil != err { + b.Fatal(err, js) } } From 138d1bd715de9e225056abe01317582179259b57 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 31 May 2022 14:32:56 -0400 Subject: [PATCH 20/47] support timestamps on older go versions --- v3/examples/server/main.go | 6 +++++- v3/integrations/logcontext-v2/nrzerolog/hook.go | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/v3/examples/server/main.go b/v3/examples/server/main.go index 594e996fb..7f0da9794 100644 --- a/v3/examples/server/main.go +++ b/v3/examples/server/main.go @@ -285,8 +285,12 @@ func main() { ctx := newrelic.NewContext(context.Background(), txn) + // Versions of go prior to 1.17 do not have a built in function for Unix Milli time. + // For go versions 1.17+ use time.Now().UnixMilli() to generate timestamps + timestamp := time.Now().UnixNano() / int64(time.Millisecond) + data := &newrelic.LogData{ - Timestamp: time.Now().UnixMilli(), + Timestamp: timestamp, Message: "Log Message", Severity: "info", Context: ctx, diff --git a/v3/integrations/logcontext-v2/nrzerolog/hook.go b/v3/integrations/logcontext-v2/nrzerolog/hook.go index 0f58bf65b..4836b55b7 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/hook.go +++ b/v3/integrations/logcontext-v2/nrzerolog/hook.go @@ -21,8 +21,12 @@ func (h NewRelicHook) Run(e *zerolog.Event, level zerolog.Level, msg string) { logLevel = level.String() } + // Versions of go prior to 1.17 do not have a built in function for Unix Milli time. 
+ // For go versions 1.17+ use time.Now().UnixMilli() to generate timestamps + timestamp := time.Now().UnixNano() / int64(time.Millisecond) + data := newrelic.LogData{ - Timestamp: time.Now().UnixMilli(), + Timestamp: timestamp, Severity: logLevel, Message: msg, Context: h.Context, From 1d326f498d2d5d5fec7b414b018cb09c5648bf70 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 31 May 2022 17:58:25 -0400 Subject: [PATCH 21/47] Safe Reads --- v3/examples/server/main.go | 2 +- .../logcontext-v2/nrzerolog/hook.go | 2 +- v3/newrelic/application.go | 4 +- v3/newrelic/harvest.go | 2 +- v3/newrelic/log_events.go | 31 +++++-- v3/newrelic/log_events_test.go | 92 ++++--------------- 6 files changed, 49 insertions(+), 84 deletions(-) diff --git a/v3/examples/server/main.go b/v3/examples/server/main.go index 7f0da9794..16b43b2ea 100644 --- a/v3/examples/server/main.go +++ b/v3/examples/server/main.go @@ -289,7 +289,7 @@ func main() { // For go versions 1.17+ use time.Now().UnixMilli() to generate timestamps timestamp := time.Now().UnixNano() / int64(time.Millisecond) - data := &newrelic.LogData{ + data := newrelic.LogData{ Timestamp: timestamp, Message: "Log Message", Severity: "info", diff --git a/v3/integrations/logcontext-v2/nrzerolog/hook.go b/v3/integrations/logcontext-v2/nrzerolog/hook.go index 4836b55b7..43dcf9b21 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/hook.go +++ b/v3/integrations/logcontext-v2/nrzerolog/hook.go @@ -32,5 +32,5 @@ func (h NewRelicHook) Run(e *zerolog.Event, level zerolog.Level, msg string) { Context: h.Context, } - h.App.RecordLog(&data) + h.App.RecordLog(data) } diff --git a/v3/newrelic/application.go b/v3/newrelic/application.go index 89b05f2f5..119fffa79 100644 --- a/v3/newrelic/application.go +++ b/v3/newrelic/application.go @@ -82,14 +82,14 @@ func (app *Application) RecordCustomMetric(name string, value float64) { // config settings. Record log is capable of recording log events, // as well as log metrics depending on how your application is // configured. 
-func (app *Application) RecordLog(logEvent *LogData) { +func (app *Application) RecordLog(logEvent LogData) { if nil == app { return } if nil == app.app { return } - err := app.app.RecordLog(logEvent) + err := app.app.RecordLog(&logEvent) if err != nil { app.app.Error("unable to record log", map[string]interface{}{ "reason": err.Error(), diff --git a/v3/newrelic/harvest.go b/v3/newrelic/harvest.go index db0521698..7b3aa787e 100644 --- a/v3/newrelic/harvest.go +++ b/v3/newrelic/harvest.go @@ -95,7 +95,7 @@ func (h *harvest) Ready(now time.Time) *harvest { h.CustomEvents = newCustomEvents(h.CustomEvents.capacity()) } if 0 != types&harvestLogEvents { - h.LogEvents.RecordLoggingMetrics(h.Metrics, forced) + h.LogEvents.RecordLoggingMetrics(h.Metrics) ready.LogEvents = h.LogEvents h.LogEvents = newLogEvents(h.LogEvents.commonAttributes, h.LogEvents.config) } diff --git a/v3/newrelic/log_events.go b/v3/newrelic/log_events.go index 81c03859b..7aab43c6b 100644 --- a/v3/newrelic/log_events.go +++ b/v3/newrelic/log_events.go @@ -31,17 +31,30 @@ type logEvents struct { } // NumSeen returns the number of events seen -func (events *logEvents) NumSeen() float64 { return float64(events.numSeen) } +func (events *logEvents) NumSeen() int { + events.rwMutex.RLock() + defer events.rwMutex.RUnlock() + return events.numSeen +} // NumSaved returns the number of events that will be harvested for this cycle -func (events *logEvents) NumSaved() float64 { return float64(len(events.logs)) } +func (events *logEvents) NumSaved() int { + events.rwMutex.RLock() + defer events.rwMutex.RUnlock() + return len(events.logs) +} // Adds logging metrics to a harvest metric table if appropriate -func (events *logEvents) RecordLoggingMetrics(metrics *metricTable, forced metricForce) { +func (events *logEvents) RecordLoggingMetrics(metrics *metricTable) { events.rwMutex.RLock() defer events.rwMutex.RUnlock() + + // This is done to avoid accessing locks 3 times instead of once + seen := float64(events.numSeen) + saved := float64(len(events.logs)) + if events.config.collectMetrics && metrics != nil { - metrics.addCount(logsSeen, events.NumSeen(), forced) + metrics.addCount(logsSeen, seen, forced) for k, v := range events.severityCount { severitySeen := logsSeen + "/" + k metrics.addCount(severitySeen, float64(v), forced) @@ -49,7 +62,7 @@ func (events *logEvents) RecordLoggingMetrics(metrics *metricTable, forced metri } if events.config.collectEvents { - metrics.addCount(logsDropped, events.NumSeen()-events.NumSaved(), forced) + metrics.addCount(logsDropped, seen-saved, forced) } } @@ -120,16 +133,20 @@ func (events *logEvents) mergeFailed(other *logEvents) { events.Merge(other) } +// Merge two logEvents together func (events *logEvents) Merge(other *logEvents) { - allSeen := events.numSeen + other.numSeen - + allSeen := events.NumSeen() + other.NumSeen() for _, e := range other.logs { events.Add(&e) } + events.numSeen = allSeen } func (events *logEvents) CollectorJSON(agentRunID string) ([]byte, error) { + events.rwMutex.RLock() + defer events.rwMutex.RUnlock() + if 0 == len(events.logs) { return nil, nil } diff --git a/v3/newrelic/log_events_test.go b/v3/newrelic/log_events_test.go index 9879f14ed..b07e9a88b 100644 --- a/v3/newrelic/log_events_test.go +++ b/v3/newrelic/log_events_test.go @@ -145,6 +145,7 @@ func TestMergeFullLogEvents(t *testing.T) { e1.Add(sampleLogEvent(0.1, infoLevel, "a")) e1.Add(sampleLogEvent(0.15, infoLevel, "b")) e1.Add(sampleLogEvent(0.25, infoLevel, "c")) + e2.Add(sampleLogEvent(0.06, infoLevel, "d")) 
e2.Add(sampleLogEvent(0.12, infoLevel, "e")) e2.Add(sampleLogEvent(0.18, infoLevel, "f")) @@ -164,10 +165,10 @@ func TestMergeFullLogEvents(t *testing.T) { if string(json) != expect { t.Error(string(json)) } - if 7 != e1.numSeen { + if e1.numSeen != 7 { t.Error(e1.numSeen) } - if 2 != e1.NumSaved() { + if e1.NumSaved() != 2 { t.Error(e1.NumSaved()) } } @@ -529,58 +530,26 @@ func assertInt(expect int, actual int) error { return nil } -func BenchmarkAddMaximumLogEvent(b *testing.B) { - eventList := make([]*logEvent, internal.MaxLogEvents) - for n := 0; n < internal.MaxTxnEvents; n++ { - eventList[n] = &logEvent{ - priority: newPriority(), - timestamp: 123456, - severity: "INFO", - message: "test message", - spanID: "Ad300dra7re89", - traceID: "2234iIhfLlejrJ0", - } - } +func BenchmarkLogEventsAdd(b *testing.B) { events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) - - b.ReportAllocs() - b.ResetTimer() - - for n := 0; n < internal.MaxTxnEvents; n++ { - events.Add(eventList[n]) - } -} - -func BenchmarkWriteMaximumLogEventJSON(b *testing.B) { - eventList := make([]*logEvent, internal.MaxLogEvents) - for n := 0; n < internal.MaxTxnEvents; n++ { - eventList[n] = &logEvent{ - priority: newPriority(), - timestamp: 123456, - severity: "INFO", - message: "test message", - spanID: "Ad300dra7re89", - traceID: "2234iIhfLlejrJ0", - } - } - events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) - - for n := 0; n < internal.MaxTxnEvents; n++ { - events.Add(eventList[n]) + event := &logEvent{ + priority: newPriority(), + timestamp: 123456, + severity: "INFO", + message: "test message", + spanID: "Ad300dra7re89", + traceID: "2234iIhfLlejrJ0", } b.ReportAllocs() b.ResetTimer() - js, err := events.CollectorJSON(agentRunID) - if nil != err { - b.Fatal(err, js) + for i := 0; i < b.N; i++ { + events.Add(event) } } -func BenchmarkAddAndWriteLogEvent(b *testing.B) { - b.ReportAllocs() - +func BenchmarkLogEventsCollectorJSON(b *testing.B) { events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) event := &logEvent{ priority: newPriority(), @@ -592,35 +561,14 @@ func BenchmarkAddAndWriteLogEvent(b *testing.B) { } events.Add(event) - js, err := events.CollectorJSON(agentRunID) - if nil != err { - b.Fatal(err, js) - } -} - -func BenchmarkAddAndWriteMaximumLogEvents(b *testing.B) { - - eventList := make([]*logEvent, internal.MaxLogEvents) - events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) - for n := 0; n < internal.MaxTxnEvents; n++ { - eventList[n] = &logEvent{ - priority: newPriority(), - timestamp: 123456, - severity: "INFO", - message: "test message", - spanID: "Ad300dra7re89", - traceID: "2234iIhfLlejrJ0", - } - } + b.ReportAllocs() b.ResetTimer() - for n := 0; n < internal.MaxTxnEvents; n++ { - events.Add(eventList[n]) - } - - js, err := events.CollectorJSON(agentRunID) - if nil != err { - b.Fatal(err, js) + for i := 0; i < b.N; i++ { + js, err := events.CollectorJSON(agentRunID) + if nil != err { + b.Fatal(err, js) + } } } From 7f28d6046c9368aadd1c0cfc5de962f401c2008b Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 31 May 2022 18:26:37 -0400 Subject: [PATCH 22/47] turns out that the synchronization is handled already --- v3/newrelic/log_events.go | 28 ++----- v3/newrelic/log_events_test.go | 145 --------------------------------- 2 files changed, 7 insertions(+), 166 deletions(-) diff --git a/v3/newrelic/log_events.go b/v3/newrelic/log_events.go index 
7aab43c6b..7da43317e 100644 --- a/v3/newrelic/log_events.go +++ b/v3/newrelic/log_events.go @@ -6,7 +6,6 @@ package newrelic import ( "bytes" "container/heap" - "sync" "time" "github.com/newrelic/go-agent/v3/internal/jsonx" @@ -24,34 +23,26 @@ type logEvents struct { numSeen int failedHarvests int severityCount map[string]int - rwMutex sync.RWMutex commonAttributes config loggingConfig logs logEventHeap } // NumSeen returns the number of events seen -func (events *logEvents) NumSeen() int { - events.rwMutex.RLock() - defer events.rwMutex.RUnlock() - return events.numSeen +func (events *logEvents) NumSeen() float64 { + return float64(events.numSeen) } // NumSaved returns the number of events that will be harvested for this cycle -func (events *logEvents) NumSaved() int { - events.rwMutex.RLock() - defer events.rwMutex.RUnlock() - return len(events.logs) +func (events *logEvents) NumSaved() float64 { + return float64(len(events.logs)) } // Adds logging metrics to a harvest metric table if appropriate func (events *logEvents) RecordLoggingMetrics(metrics *metricTable) { - events.rwMutex.RLock() - defer events.rwMutex.RUnlock() - // This is done to avoid accessing locks 3 times instead of once - seen := float64(events.numSeen) - saved := float64(len(events.logs)) + seen := events.NumSeen() + saved := events.NumSaved() if events.config.collectMetrics && metrics != nil { metrics.addCount(logsSeen, seen, forced) @@ -91,8 +82,6 @@ func (events *logEvents) capacity() int { } func (events *logEvents) Add(e *logEvent) { - events.rwMutex.Lock() - defer events.rwMutex.Unlock() // always collect this but do not report logging metrics when disabled events.numSeen++ events.severityCount[e.severity]++ @@ -140,13 +129,10 @@ func (events *logEvents) Merge(other *logEvents) { events.Add(&e) } - events.numSeen = allSeen + events.numSeen = int(allSeen) } func (events *logEvents) CollectorJSON(agentRunID string) ([]byte, error) { - events.rwMutex.RLock() - defer events.rwMutex.RUnlock() - if 0 == len(events.logs) { return nil, nil } diff --git a/v3/newrelic/log_events_test.go b/v3/newrelic/log_events_test.go index b07e9a88b..d52e72133 100644 --- a/v3/newrelic/log_events_test.go +++ b/v3/newrelic/log_events_test.go @@ -5,7 +5,6 @@ package newrelic import ( "fmt" - "sync" "testing" "github.com/newrelic/go-agent/v3/internal" @@ -386,150 +385,6 @@ func TestLogEventCollectionDisabled(t *testing.T) { } } -func TestAsyncAddLogEvent(t *testing.T) { - numThreads := 8 - capacity := numThreads - 1 - - events := newLogEvents(testCommonAttributes, loggingConfigEnabled(capacity)) - group := new(sync.WaitGroup) - group.Add(numThreads) - - // Add a bunch of log events aynchronously - for n := 0; n < numThreads/2; n++ { - p := priority(float32(n) / 10.0) - event := &logEvent{ - priority: p, - timestamp: 123456, - severity: "INFO", - message: fmt.Sprintf("info message %.2f", p), - } - go func(event *logEvent) { - events.Add(event) - group.Done() - }(event) - } - - for n := 0; n < numThreads/2; n++ { - p := priority(float32(n+numThreads/2) / 10.0) - event := &logEvent{ - priority: p, - timestamp: 123456, - severity: "WARN", - message: fmt.Sprintf("warn message %.2f", p), - } - go func(event *logEvent) { - events.Add(event) - group.Done() - }(event) - } - - group.Wait() - - expectMap := map[string]int{ - "INFO": numThreads / 2, - "WARN": numThreads / 2, - } - - metricErrors := events.assertMetrics(8, capacity, expectMap) - if metricErrors != nil { - t.Error(metricErrors) - } - - // Test Heap Data - // Assumes that heap 
implementation is correct when executed synchronously - expectEvents := newLogEvents(testCommonAttributes, loggingConfigEnabled(capacity)) - for n := 0; n < numThreads/2; n++ { - p := priority(float32(n) / 10.0) - event := &logEvent{ - priority: p, - timestamp: 123456, - severity: "INFO", - message: fmt.Sprintf("info message %.2f", p), - } - expectEvents.Add(event) - } - - for n := 0; n < numThreads/2; n++ { - p := priority(float32(n+numThreads/2) / 10.0) - event := &logEvent{ - priority: p, - timestamp: 123456, - severity: "WARN", - message: fmt.Sprintf("warn message %.2f", p), - } - expectEvents.Add(event) - } - - heapError := events.assertHeapContains(expectEvents) - if heapError != nil { - t.Error(heapError) - } -} - -// verifies that each log events heap contains the same elements -// heaps must be composed of unique messages -func (events *logEvents) assertHeapContains(expect *logEvents) error { - expectLogs := make(map[string]bool, len(expect.logs)) - - for _, event := range expect.logs { - expectLogs[event.message] = false - } - - for _, event := range events.logs { - expectLogs[event.message] = true - } - - missing := []string{} - for msg, contains := range expectLogs { - if !contains { - missing = append(missing, msg) - } - } - - if len(missing) != 0 { - return fmt.Errorf("expected logs were missing from the event heap: %v", missing) - } - - return nil -} - -func (events *logEvents) assertMetrics(expectSeen, expectSaved int, expectSeverity map[string]int) error { - err := assertInt(expectSeen, int(events.NumSeen())) - if err != nil { - return fmt.Errorf("incorrect number of events seen: %v", err) - } - - err = assertInt(expectSaved, int(events.NumSaved())) - if err != nil { - return fmt.Errorf("incorrect number of events saved: %v", err) - } - - if len(expectSeverity) != len(events.severityCount) { - return fmt.Errorf("incorrect number of severities seen: expect %d, actual %d", len(expectSeverity), len(events.severityCount)) - } - - for k, v := range expectSeverity { - val, ok := events.severityCount[k] - if !ok { - return fmt.Errorf("expected severity %s is missing from actual severity count", k) - } - - err := assertInt(v, val) - if err != nil { - return fmt.Errorf("incorrect severity count for %s: expect %d, actual %d", k, v, val) - } - } - - return nil -} - -func assertInt(expect int, actual int) error { - if expect != actual { - return fmt.Errorf("expected %d, actual %d", expect, actual) - } - return nil -} - func BenchmarkLogEventsAdd(b *testing.B) { events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) event := &logEvent{ From 36074613ee1544f2478b3c3e8833dc80395a0766 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Mon, 13 Jun 2022 18:35:53 -0400 Subject: [PATCH 23/47] avoid alloc when creating logEvent --- v3/newrelic/internal_app.go | 2 +- v3/newrelic/log_event.go | 13 ++-- v3/newrelic/log_event_test.go | 115 +++++++++++++++++++++++++++------- 3 files changed, 100 insertions(+), 30 deletions(-) diff --git a/v3/newrelic/internal_app.go b/v3/newrelic/internal_app.go index fc79e35e6..27d492082 100644 --- a/v3/newrelic/internal_app.go +++ b/v3/newrelic/internal_app.go @@ -597,7 +597,7 @@ func (app *app) RecordLog(log *LogData) error { } run, _ := app.getState() - app.Consume(run.Reply.RunID, event) + app.Consume(run.Reply.RunID, &event) return nil } diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go index 5ccdb1ab4..ad9cf9987 100644 --- a/v3/newrelic/log_event.go +++ b/v3/newrelic/log_event.go @@ -80,26 +80,23 @@ func (e 
*logEvent) MarshalJSON() ([]byte, error) { } var ( - // regex allows a single word, or number - severityUnknown = "UNKNOWN" - errEmptyTimestamp = errors.New("timestamp can not be empty") errNilLogData = errors.New("log data can not be nil") errLogMessageTooLarge = fmt.Errorf("log message can not exceed %d bytes", MaxLogLength) ) -func (data *LogData) toLogEvent() (*logEvent, error) { +func (data *LogData) toLogEvent() (logEvent, error) { if data == nil { - return nil, errNilLogData + return logEvent{}, errNilLogData } if data.Severity == "" { data.Severity = LogSeverityUnknown } if len(data.Message) > MaxLogLength { - return nil, errLogMessageTooLarge + return logEvent{}, errLogMessageTooLarge } if data.Timestamp == 0 { - return nil, errEmptyTimestamp + return logEvent{}, errEmptyTimestamp } data.Message = strings.TrimSpace(data.Message) @@ -125,7 +122,7 @@ func (data *LogData) toLogEvent() (*logEvent, error) { priority: priority, } - return &event, nil + return event, nil } func (e *logEvent) MergeIntoHarvest(h *harvest) { diff --git a/v3/newrelic/log_event_test.go b/v3/newrelic/log_event_test.go index 67eae62ef..e7a418888 100644 --- a/v3/newrelic/log_event_test.go +++ b/v3/newrelic/log_event_test.go @@ -2,6 +2,7 @@ package newrelic import ( "fmt" + "math/rand" "testing" "time" ) @@ -24,6 +25,91 @@ func TestWriteJSON(t *testing.T) { } } +func TestToLogEvent(t *testing.T) { + type testcase struct { + name string + data LogData + expectEvent logEvent + expectErr error + } + + testcases := []testcase{ + { + name: "valid case no context", + data: LogData{ + Timestamp: 123456, + Severity: "info", + Message: "test 123", + }, + expectEvent: logEvent{ + timestamp: 123456, + severity: "info", + message: "test 123", + }, + }, + { + name: "valid case empty severity", + data: LogData{ + Timestamp: 123456, + Message: "test 123", + }, + expectEvent: logEvent{ + timestamp: 123456, + severity: "UNKNOWN", + message: "test 123", + }, + }, + { + name: "message too large", + data: LogData{ + Timestamp: 123456, + Severity: "info", + Message: randomString(32769), + }, + expectErr: errLogMessageTooLarge, + }, + { + name: "empty timestamp", + data: LogData{ + Severity: "info", + Message: "test 123", + }, + expectErr: errEmptyTimestamp, + }, + } + + for _, testcase := range testcases { + actualEvent, err := testcase.data.toLogEvent() + + if testcase.expectErr != err { + t.Error(fmt.Errorf("%s: expected error %v, got %v", testcase.name, testcase.expectErr, err)) + } + + if testcase.expectErr == nil { + expect := testcase.expectEvent + if expect.message != actualEvent.message { + t.Error(fmt.Errorf("%s: expected message %s, got %s", testcase.name, expect.message, actualEvent.message)) + } + if expect.severity != actualEvent.severity { + t.Error(fmt.Errorf("%s: expected severity %s, got %s", testcase.name, expect.severity, actualEvent.severity)) + } + if expect.timestamp != actualEvent.timestamp { + t.Error(fmt.Errorf("%s: expected timestamp %d, got %d", testcase.name, expect.timestamp, actualEvent.timestamp)) + } + } + } +} + +var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randomString(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) +} + func TestWriteJSONWithTrace(t *testing.T) { event := logEvent{ severity: "INFO", @@ -51,7 +137,12 @@ func BenchmarkToLogEvent(b *testing.B) { Message: "test message", } - data.toLogEvent() + b.ReportAllocs() + + for n := 0; n < b.N; n++ { + data.toLogEvent() + } + } 
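
Editor's note: the benchmark above is what the value-return change is aimed at. Returning logEvent by value instead of *logEvent keeps the struct on the caller's stack, so toLogEvent no longer forces a heap allocation for every log record. A minimal standalone sketch of the same pattern, using a hypothetical record type rather than the agent's own structs:

package main

import "fmt"

// record stands in for logEvent; it is illustrative only.
type record struct {
	timestamp int64
	severity  string
	message   string
}

// buildPtr returns a pointer, so the record escapes to the heap
// (one allocation per call).
func buildPtr(msg string) *record {
	r := record{timestamp: 123456, severity: "INFO", message: msg}
	return &r
}

// buildVal returns the record by value; error cases return the zero
// value, and the struct can stay on the caller's stack.
func buildVal(msg string) (record, error) {
	if msg == "" {
		return record{}, fmt.Errorf("message can not be empty")
	}
	return record{timestamp: 123456, severity: "INFO", message: msg}, nil
}

func main() {
	v, _ := buildVal("by value")
	p := buildPtr("by pointer")
	fmt.Println(v.message, p.message)
}

A benchmark with b.ReportAllocs() against both forms is the quickest way to confirm the difference; building with -gcflags=-m also reports which values escape.
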
func recordLogBenchmarkHelper(b *testing.B, data *LogData, h *harvest) { @@ -70,25 +161,7 @@ func BenchmarkRecordLog(b *testing.B) { b.ReportAllocs() b.ResetTimer() - recordLogBenchmarkHelper(b, &data, harvest) -} - -func BenchmarkRecordLog100(b *testing.B) { - harvest := newHarvest(time.Now(), testHarvestCfgr) - - logs := make([]*LogData, 100) - for i := 0; i < 100; i++ { - logs[i] = &LogData{ - Timestamp: 123456, - Severity: "INFO", - Message: "test message " + fmt.Sprint(i), - } - } - - b.ReportAllocs() - b.ResetTimer() - - for _, log := range logs { - recordLogBenchmarkHelper(b, log, harvest) + for n := 0; n < b.N; n++ { + recordLogBenchmarkHelper(b, &data, harvest) } } From d33379f113e701318b2b94835ac26818deb09cf2 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 14 Jun 2022 15:14:07 -0400 Subject: [PATCH 24/47] improve efficiency of getting harvest data by avoiding memAlloc --- v3/newrelic/analytics_events.go | 12 +- v3/newrelic/analytics_events_test.go | 88 +++++++++++---- v3/newrelic/custom_events.go | 18 ++- v3/newrelic/error_events.go | 13 ++- v3/newrelic/errors_from_internal.go | 14 ++- v3/newrelic/errors_test.go | 9 +- v3/newrelic/expect_implementation.go | 6 +- v3/newrelic/harvest.go | 5 +- v3/newrelic/harvest_test.go | 53 +++++---- v3/newrelic/internal_app.go | 7 +- v3/newrelic/log_event.go | 2 +- v3/newrelic/log_events.go | 33 +++--- v3/newrelic/log_events_test.go | 160 ++++++++++++++++++++------- v3/newrelic/metrics.go | 24 ++-- v3/newrelic/metrics_test.go | 20 ++-- v3/newrelic/serverless.go | 7 +- v3/newrelic/slow_queries.go | 13 ++- v3/newrelic/slow_queries_test.go | 39 +++++-- v3/newrelic/span_events.go | 13 ++- v3/newrelic/txn_events.go | 13 ++- v3/newrelic/txn_events_test.go | 23 ++-- v3/newrelic/txn_trace.go | 14 ++- v3/newrelic/txn_trace_test.go | 19 +++- 23 files changed, 431 insertions(+), 174 deletions(-) diff --git a/v3/newrelic/analytics_events.go b/v3/newrelic/analytics_events.go index 6c1fdb63b..aa8131ec5 100644 --- a/v3/newrelic/analytics_events.go +++ b/v3/newrelic/analytics_events.go @@ -91,14 +91,11 @@ func (events *analyticsEvents) Merge(other *analyticsEvents) { events.numSeen = allSeen } -func (events *analyticsEvents) CollectorJSON(agentRunID string) ([]byte, error) { - if 0 == len(events.events) { - return nil, nil +func (events *analyticsEvents) CollectorJSON(buf *bytes.Buffer, agentRunID string) error { + if buf == nil || events.NumSaved() == 0 { + return nil } - estimate := 256 * len(events.events) - buf := bytes.NewBuffer(make([]byte, 0, estimate)) - buf.WriteByte('[') jsonx.AppendString(buf, agentRunID) buf.WriteByte(',') @@ -120,8 +117,7 @@ func (events *analyticsEvents) CollectorJSON(agentRunID string) ([]byte, error) buf.WriteByte(']') buf.WriteByte(']') - return buf.Bytes(), nil - + return nil } // split splits the events into two. NOTE! 
The two event pools are not valid diff --git a/v3/newrelic/analytics_events_test.go b/v3/newrelic/analytics_events_test.go index 98c7d1407..67aa09468 100644 --- a/v3/newrelic/analytics_events_test.go +++ b/v3/newrelic/analytics_events_test.go @@ -35,11 +35,14 @@ func TestBasic(t *testing.T) { events.addEvent(sampleAnalyticsEvent(0.5)) events.addEvent(sampleAnalyticsEvent(0.5)) - json, err := events.CollectorJSON(agentRunID) + buf := &bytes.Buffer{} + err := events.CollectorJSON(buf, agentRunID) if nil != err { t.Fatal(err) } + json := buf.Bytes() + expected := `["12345",{"reservoir_size":10,"events_seen":3},[0.5,0.5,0.5]]` if string(json) != expected { @@ -55,17 +58,19 @@ func TestBasic(t *testing.T) { func TestEmpty(t *testing.T) { events := newAnalyticsEvents(10) - json, err := events.CollectorJSON(agentRunID) + var buf *bytes.Buffer + err := events.CollectorJSON(buf, agentRunID) if nil != err { t.Fatal(err) } - if nil != json { - t.Error(string(json)) + + if buf != nil { + t.Error(string(buf.Bytes())) } - if 0 != events.numSeen { + if events.numSeen != 0 { t.Error(events.numSeen) } - if 0 != events.NumSaved() { + if events.NumSaved() != 0 { t.Error(events.NumSaved()) } } @@ -79,10 +84,13 @@ func TestSampling(t *testing.T) { events.addEvent(sampleAnalyticsEvent(0.8)) events.addEvent(sampleAnalyticsEvent(0.3)) - json, err := events.CollectorJSON(agentRunID) + buf := &bytes.Buffer{} + err := events.CollectorJSON(buf, agentRunID) if nil != err { t.Fatal(err) } + + json := buf.Bytes() if string(json) != `["12345",{"reservoir_size":3,"events_seen":6},[0.8,0.999999,0.9]]` { t.Error(string(json)) } @@ -98,10 +106,14 @@ func TestMergeEmpty(t *testing.T) { e1 := newAnalyticsEvents(10) e2 := newAnalyticsEvents(10) e1.Merge(e2) - json, err := e1.CollectorJSON(agentRunID) + buf := &bytes.Buffer{} + err := e1.CollectorJSON(buf, agentRunID) if nil != err { t.Fatal(err) } + + json := buf.Bytes() + if nil != json { t.Error(string(json)) } @@ -127,10 +139,13 @@ func TestMergeFull(t *testing.T) { e2.addEvent(sampleAnalyticsEvent(0.24)) e1.Merge(e2) - json, err := e1.CollectorJSON(agentRunID) + buf := &bytes.Buffer{} + err := e1.CollectorJSON(buf, agentRunID) if nil != err { t.Fatal(err) } + + json := buf.Bytes() if string(json) != `["12345",{"reservoir_size":2,"events_seen":7},[0.24,0.25]]` { t.Error(string(json)) } @@ -157,10 +172,13 @@ func TestAnalyticsEventMergeFailedSuccess(t *testing.T) { e1.mergeFailed(e2) - json, err := e1.CollectorJSON(agentRunID) + buf := &bytes.Buffer{} + err := e1.CollectorJSON(buf, agentRunID) if nil != err { t.Fatal(err) } + + json := buf.Bytes() if string(json) != `["12345",{"reservoir_size":2,"events_seen":7},[0.24,0.25]]` { t.Error(string(json)) } @@ -192,10 +210,13 @@ func TestAnalyticsEventMergeFailedLimitReached(t *testing.T) { e1.mergeFailed(e2) - json, err := e1.CollectorJSON(agentRunID) + buf := &bytes.Buffer{} + err := e1.CollectorJSON(buf, agentRunID) if nil != err { t.Fatal(err) } + + json := buf.Bytes() if string(json) != `["12345",{"reservoir_size":2,"events_seen":3},[0.15,0.25]]` { t.Error(string(json)) } @@ -221,9 +242,10 @@ func analyticsEventBenchmarkHelper(b *testing.B, w jsonWriter) { b.ResetTimer() for n := 0; n < b.N; n++ { - js, err := events.CollectorJSON(agentRunID) + buf := &bytes.Buffer{} + err := events.CollectorJSON(buf, agentRunID) if nil != err { - b.Fatal(err, js) + b.Fatal(err) } } } @@ -279,8 +301,14 @@ func TestSplitFull(t *testing.T) { t.Error(events.capacity()) } e1, e2 := events.split() - j1, err1 := e1.CollectorJSON(agentRunID) - j2, 
err2 := e2.CollectorJSON(agentRunID) + buf := &bytes.Buffer{} + err1 := e1.CollectorJSON(buf, agentRunID) + j1 := buf.Bytes() + + buf = &bytes.Buffer{} + err2 := e2.CollectorJSON(buf, agentRunID) + j2 := buf.Bytes() + if err1 != nil || err2 != nil { t.Fatal(err1, err2) } @@ -298,8 +326,14 @@ func TestSplitNotFullOdd(t *testing.T) { events.addEvent(sampleAnalyticsEvent(priority(float32(i) / 10.0))) } e1, e2 := events.split() - j1, err1 := e1.CollectorJSON(agentRunID) - j2, err2 := e2.CollectorJSON(agentRunID) + buf := &bytes.Buffer{} + err1 := e1.CollectorJSON(buf, agentRunID) + j1 := buf.Bytes() + + buf = &bytes.Buffer{} + err2 := e2.CollectorJSON(buf, agentRunID) + j2 := buf.Bytes() + if err1 != nil || err2 != nil { t.Fatal(err1, err2) } @@ -317,8 +351,14 @@ func TestSplitNotFullEven(t *testing.T) { events.addEvent(sampleAnalyticsEvent(priority(float32(i) / 10.0))) } e1, e2 := events.split() - j1, err1 := e1.CollectorJSON(agentRunID) - j2, err2 := e2.CollectorJSON(agentRunID) + buf := &bytes.Buffer{} + err1 := e1.CollectorJSON(buf, agentRunID) + j1 := buf.Bytes() + + buf = &bytes.Buffer{} + err2 := e2.CollectorJSON(buf, agentRunID) + j2 := buf.Bytes() + if err1 != nil || err2 != nil { t.Fatal(err1, err2) } @@ -341,7 +381,15 @@ func TestAnalyticsEventsZeroCapacity(t *testing.T) { if 1 != events.NumSeen() || 0 != events.NumSaved() || 0 != events.capacity() { t.Error(events.NumSeen(), events.NumSaved(), events.capacity()) } - js, err := events.CollectorJSON("agentRunID") + + data := &bytes.Buffer{} + err := events.CollectorJSON(data, agentRunID) + + var js []byte + if data != nil { + js = data.Bytes() + } + if err != nil || js != nil { t.Error(err, string(js)) } diff --git a/v3/newrelic/custom_events.go b/v3/newrelic/custom_events.go index 95809fade..2f09d0b7b 100644 --- a/v3/newrelic/custom_events.go +++ b/v3/newrelic/custom_events.go @@ -3,7 +3,10 @@ package newrelic -import "time" +import ( + "bytes" + "time" +) type customEvents struct { *analyticsEvents @@ -27,8 +30,17 @@ func (cs *customEvents) MergeIntoHarvest(h *harvest) { h.CustomEvents.mergeFailed(cs.analyticsEvents) } -func (cs *customEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { - return cs.CollectorJSON(agentRunID) +func (cs *customEvents) DataBuffer() *bytes.Buffer { + if len(cs.events) == 0 { + return nil + } + + estimate := 256 * len(cs.events) + return bytes.NewBuffer(make([]byte, 0, estimate)) +} + +func (cs *customEvents) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { + return cs.CollectorJSON(buf, agentRunID) } func (cs *customEvents) EndpointMethod() string { diff --git a/v3/newrelic/error_events.go b/v3/newrelic/error_events.go index 90419747c..cd82db9ad 100644 --- a/v3/newrelic/error_events.go +++ b/v3/newrelic/error_events.go @@ -61,8 +61,17 @@ func (events *errorEvents) MergeIntoHarvest(h *harvest) { h.ErrorEvents.mergeFailed(events.analyticsEvents) } -func (events *errorEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { - return events.CollectorJSON(agentRunID) +func (events *errorEvents) DataBuffer() *bytes.Buffer { + if 0 == len(events.events) { + return nil + } + + estimate := 256 * len(events.events) + return bytes.NewBuffer(make([]byte, 0, estimate)) +} + +func (events *errorEvents) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { + return events.CollectorJSON(buf, agentRunID) } func (events *errorEvents) EndpointMethod() string { diff --git a/v3/newrelic/errors_from_internal.go 
b/v3/newrelic/errors_from_internal.go index 7813d102f..1eb8b3340 100644 --- a/v3/newrelic/errors_from_internal.go +++ b/v3/newrelic/errors_from_internal.go @@ -151,12 +151,18 @@ func mergeTxnErrors(errors *harvestErrors, errs txnErrors, txnEvent txnEvent) { } } -func (errors harvestErrors) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { +func (errors harvestErrors) DataBuffer() *bytes.Buffer { if 0 == len(errors) { - return nil, nil + return nil } estimate := 1024 * len(errors) - buf := bytes.NewBuffer(make([]byte, 0, estimate)) + return bytes.NewBuffer(make([]byte, 0, estimate)) +} + +func (errors harvestErrors) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { + if buf == nil { + return nil + } buf.WriteByte('[') jsonx.AppendString(buf, agentRunID) buf.WriteByte(',') @@ -169,7 +175,7 @@ func (errors harvestErrors) Data(agentRunID string, harvestStart time.Time) ([]b } buf.WriteByte(']') buf.WriteByte(']') - return buf.Bytes(), nil + return nil } func (errors harvestErrors) MergeIntoHarvest(h *harvest) {} diff --git a/v3/newrelic/errors_test.go b/v3/newrelic/errors_test.go index 9ab80bb8e..d40ac5da0 100644 --- a/v3/newrelic/errors_test.go +++ b/v3/newrelic/errors_test.go @@ -239,7 +239,9 @@ func TestErrorsLifecycle(t *testing.T) { }, TotalTime: 2 * time.Second, }) - js, err := he.Data("agentRunID", time.Now()) + buf := he.DataBuffer() + err := he.WriteData(buf, "agentRunID", time.Now()) + js := buf.Bytes() if nil != err { t.Error(err) } @@ -350,7 +352,10 @@ func BenchmarkErrorsJSON(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - js, err := he.Data("agentRundID", when) + buf := he.DataBuffer() + err := he.WriteData(buf, "agentRundID", when) + js := buf.Bytes() + if nil != err || nil == js { b.Fatal(err, js) } diff --git a/v3/newrelic/expect_implementation.go b/v3/newrelic/expect_implementation.go index 435a66dff..33fbcbc5f 100644 --- a/v3/newrelic/expect_implementation.go +++ b/v3/newrelic/expect_implementation.go @@ -493,12 +493,14 @@ func expectTxnTraces(v internal.Validator, traces *harvestTraces, want []interna if len(want) == 0 { return } - js, err := traces.Data("agentRunID", time.Now()) - if nil != err { + data := traces.DataBuffer() + err := traces.WriteData(data, "agentRunID", time.Now()) + if err != nil { v.Error("error creasing harvest traces data", err) return } + js := data.Bytes() var unmarshalled []interface{} err = json.Unmarshal(js, &unmarshalled) if nil != err { diff --git a/v3/newrelic/harvest.go b/v3/newrelic/harvest.go index 7b3aa787e..996c3bee2 100644 --- a/v3/newrelic/harvest.go +++ b/v3/newrelic/harvest.go @@ -4,6 +4,7 @@ package newrelic import ( + "bytes" "time" "github.com/newrelic/go-agent/v3/internal" @@ -257,10 +258,12 @@ type payloadCreator interface { // intermittent collector issue) the payload may be merged into the next // time period's harvest. harvestable + + DataBuffer() *bytes.Buffer // Data prepares JSON in the format expected by the collector endpoint. // This method should return (nil, nil) if the payload is empty and no // rpm request is necessary. - Data(agentRunID string, harvestStart time.Time) ([]byte, error) + WriteData(buffer *bytes.Buffer, agentRunID string, harvestStart time.Time) error // EndpointMethod is used for the "method" query parameter when posting // the data. 
EndpointMethod() string diff --git a/v3/newrelic/harvest_test.go b/v3/newrelic/harvest_test.go index 728c7640d..4acaf037a 100644 --- a/v3/newrelic/harvest_test.go +++ b/v3/newrelic/harvest_test.go @@ -231,9 +231,10 @@ func TestEmptyPayloads(t *testing.T) { t.Error(len(payloads)) } for _, p := range payloads { - d, err := p.Data("agentRunID", time.Now()) - if d != nil || err != nil { - t.Error(d, err) + data := p.DataBuffer() + err := p.WriteData(data, "agentRunID", time.Now()) + if data != nil || err != nil { + t.Error(data.Bytes(), err) } } } @@ -289,9 +290,10 @@ func TestHarvestCustomEventsReady(t *testing.T) { if m := p.EndpointMethod(); m != "custom_event_data" { t.Error(m) } - data, err := p.Data("agentRunID", now) - if nil != err || nil == data { - t.Error(err, data) + data := p.DataBuffer() + err := p.WriteData(data, "agentRunID", now) + if err != nil || data == nil { + t.Error(err, data.Bytes()) } if h.CustomEvents.capacity() != 3 || h.CustomEvents.NumSaved() != 0 { t.Fatal("custom events not correctly reset") @@ -338,9 +340,10 @@ func TestHarvestLogEventsReady(t *testing.T) { if m := p.EndpointMethod(); m != "log_event_data" { t.Error(m) } - data, err := p.Data("agentRunID", now) - if nil != err || nil == data { - t.Error(err, data) + data := p.DataBuffer() + err := p.WriteData(data, "agentRunID", now) + if err != nil || data == nil { + t.Error(err, data.Bytes()) } if h.LogEvents.capacity() != 3 || h.LogEvents.NumSaved() != 0 { t.Fatal("log events not correctly reset") @@ -387,9 +390,10 @@ func TestHarvestTxnEventsReady(t *testing.T) { if m := p.EndpointMethod(); m != "analytic_event_data" { t.Error(m) } - data, err := p.Data("agentRunID", now) - if nil != err || nil == data { - t.Error(err, data) + data := p.DataBuffer() + err := p.WriteData(data, "agentRunID", now) + if err != nil || data == nil { + t.Error(err, data.Bytes()) } if h.TxnEvents.capacity() != 3 || h.TxnEvents.NumSaved() != 0 { t.Fatal("txn events not correctly reset") @@ -429,9 +433,10 @@ func TestHarvestErrorEventsReady(t *testing.T) { if m := p.EndpointMethod(); m != "error_event_data" { t.Error(m) } - data, err := p.Data("agentRunID", now) - if nil != err || nil == data { - t.Error(err, data) + data := p.DataBuffer() + err := p.WriteData(data, "agentRunID", now) + if err != nil || data == nil { + t.Error(err, data.Bytes()) } if h.ErrorEvents.capacity() != 3 || h.ErrorEvents.NumSaved() != 0 { t.Fatal("error events not correctly reset") @@ -469,9 +474,10 @@ func TestHarvestSpanEventsReady(t *testing.T) { if m := p.EndpointMethod(); m != "span_event_data" { t.Error(m) } - data, err := p.Data("agentRunID", now) - if nil != err || nil == data { - t.Error(err, data) + data := p.DataBuffer() + err := p.WriteData(data, "agentRunID", now) + if err != nil || data == nil { + t.Error(err, data.Bytes()) } if h.SpanEvents.capacity() != 3 || h.SpanEvents.NumSaved() != 0 { t.Fatal("span events not correctly reset") @@ -1048,11 +1054,18 @@ func TestConfigurableHarvestZeroHarvestLimits(t *testing.T) { // safe. payloads := h.Ready(now.Add(2 * time.Minute)).Payloads(false) for _, p := range payloads { - js, err := p.Data("agentRunID", now.Add(2*time.Minute)) - if nil != err { + data := p.DataBuffer() + err := p.WriteData(data, "agentRunID", now.Add(2*time.Minute)) + if err != nil { t.Error(err) continue } + + var js []byte + if data != nil { + js = data.Bytes() + } + // Only metric data should be present. 
if (p.EndpointMethod() == "metric_data") != (string(js) != "") { diff --git a/v3/newrelic/internal_app.go b/v3/newrelic/internal_app.go index 27d492082..39fcb1f73 100644 --- a/v3/newrelic/internal_app.go +++ b/v3/newrelic/internal_app.go @@ -71,7 +71,8 @@ func (app *app) doHarvest(h *harvest, harvestStart time.Time, run *appRun) { payloads := h.Payloads(app.config.DistributedTracer.Enabled) for _, p := range payloads { cmd := p.EndpointMethod() - data, err := p.Data(run.Reply.RunID.String(), harvestStart) + dataBuffer := p.DataBuffer() + err := p.WriteData(dataBuffer, run.Reply.RunID.String(), harvestStart) if nil != err { app.Warn("unable to create harvest data", map[string]interface{}{ @@ -80,7 +81,7 @@ func (app *app) doHarvest(h *harvest, harvestStart time.Time, run *appRun) { }) continue } - if nil == data { + if dataBuffer == nil { continue } @@ -88,7 +89,7 @@ func (app *app) doHarvest(h *harvest, harvestStart time.Time, run *appRun) { Collector: run.Reply.Collector, RunID: run.Reply.RunID.String(), Name: cmd, - Data: data, + Data: dataBuffer.Bytes(), RequestHeadersMap: run.Reply.RequestHeadersMap, MaxPayloadSize: run.Reply.MaxPayloadSizeInBytes, } diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go index ad9cf9987..b0a7c6fc6 100644 --- a/v3/newrelic/log_event.go +++ b/v3/newrelic/log_event.go @@ -72,7 +72,7 @@ func (e *logEvent) WriteJSON(buf *bytes.Buffer) { buf.WriteByte('}') } -// MarshalJSON is used for testing. +// MarshalJSON is used for testing only func (e *logEvent) MarshalJSON() ([]byte, error) { buf := bytes.NewBuffer(make([]byte, 0, 256)) e.WriteJSON(buf) diff --git a/v3/newrelic/log_events.go b/v3/newrelic/log_events.go index 7da43317e..04f8d1d2c 100644 --- a/v3/newrelic/log_events.go +++ b/v3/newrelic/log_events.go @@ -100,7 +100,7 @@ func (events *logEvents) Add(e *logEvent) { // Delay heap initialization so that we can have // deterministic ordering for integration tests (the max // is not being reached). - heap.Init(events.logs) + heap.Init(events.logs) // Malloc required } return } @@ -132,16 +132,9 @@ func (events *logEvents) Merge(other *logEvents) { events.numSeen = int(allSeen) } -func (events *logEvents) CollectorJSON(agentRunID string) ([]byte, error) { - if 0 == len(events.logs) { - return nil, nil - } - - estimate := 256 * len(events.logs) - buf := bytes.NewBuffer(make([]byte, 0, estimate)) - - if events.numSeen == 0 { - return nil, nil +func (events *logEvents) CollectorJSON(buf *bytes.Buffer, agentRunID string) error { + if buf == nil { + return nil } buf.WriteByte('[') @@ -177,8 +170,7 @@ func (events *logEvents) CollectorJSON(agentRunID string) ([]byte, error) { buf.WriteByte(']') buf.WriteByte('}') buf.WriteByte(']') - return buf.Bytes(), nil - + return nil } // split splits the events into two. NOTE! 
The two event pools are not valid @@ -224,8 +216,19 @@ func (events *logEvents) MergeIntoHarvest(h *harvest) { h.LogEvents.mergeFailed(events) } -func (events *logEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { - return events.CollectorJSON(agentRunID) +// DataBuffer returns a bytes Buffer with an estimated size based on the contents of the logEvents object +func (events *logEvents) DataBuffer() *bytes.Buffer { + if len(events.logs) == 0 || events.numSeen == 0 { + return nil + } + + estimate := 256 * len(events.logs) + return bytes.NewBuffer(make([]byte, 0, estimate)) +} + +// WriteData writes JSON data to a DataBuffer during a harvest cycle +func (events *logEvents) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { + return events.CollectorJSON(buf, agentRunID) } func (events *logEvents) EndpointMethod() string { diff --git a/v3/newrelic/log_events_test.go b/v3/newrelic/log_events_test.go index d52e72133..d922c808d 100644 --- a/v3/newrelic/log_events_test.go +++ b/v3/newrelic/log_events_test.go @@ -6,6 +6,7 @@ package newrelic import ( "fmt" "testing" + "time" "github.com/newrelic/go-agent/v3/internal" ) @@ -22,8 +23,7 @@ var ( commonJSON = `[{"common":{"attributes":{"entity.guid":"testGUID","entity.name":"testEntityName","hostname":"testHostname"}},"logs":[` - infoLevel = "INFO" - unknownLevel = "UNKNOWN" + infoLevel = "INFO" ) func loggingConfigEnabled(limit int) loggingConfig { @@ -50,11 +50,14 @@ func TestBasicLogEvents(t *testing.T) { events.Add(sampleLogEvent(0.5, infoLevel, "message1")) events.Add(sampleLogEvent(0.5, infoLevel, "message2")) - json, err := events.CollectorJSON(agentRunID) + buf := events.DataBuffer() + err := events.CollectorJSON(buf, agentRunID) if nil != err { t.Fatal(err) } + json := buf.Bytes() + expected := commonJSON + `{"level":"INFO","message":"message1","timestamp":123456},` + `{"level":"INFO","message":"message2","timestamp":123456}]}]` @@ -62,22 +65,23 @@ func TestBasicLogEvents(t *testing.T) { if string(json) != expected { t.Error(string(json), expected) } - if 2 != events.numSeen { + if events.numSeen != 2 { t.Error(events.numSeen) } - if 2 != events.NumSaved() { + if events.NumSaved() != 2 { t.Error(events.NumSaved()) } } func TestEmptyLogEvents(t *testing.T) { events := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) - json, err := events.CollectorJSON(agentRunID) + buf := events.DataBuffer() + err := events.CollectorJSON(buf, agentRunID) if nil != err { t.Fatal(err) } - if nil != json { - t.Error(string(json)) + if nil != buf { + t.Error(string(buf.Bytes())) } if 0 != events.numSeen { t.Error(events.numSeen) @@ -98,7 +102,9 @@ func TestSamplingLogEvents(t *testing.T) { events.Add(sampleLogEvent(0.8, infoLevel, "e")) events.Add(sampleLogEvent(0.3, infoLevel, "f")) - json, err := events.CollectorJSON(agentRunID) + buf := events.DataBuffer() + err := events.CollectorJSON(buf, agentRunID) + json := buf.Bytes() if nil != err { t.Fatal(err) } @@ -122,17 +128,20 @@ func TestMergeEmptyLogEvents(t *testing.T) { e1 := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) e2 := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) e1.Merge(e2) - json, err := e1.CollectorJSON(agentRunID) - if nil != err { + + buf := e1.DataBuffer() + err := e1.CollectorJSON(buf, agentRunID) + + if err != nil { t.Fatal(err) } - if nil != json { - t.Error(string(json)) + if buf != nil { + t.Error(string(buf.Bytes())) } - if 0 != e1.numSeen { + if e1.numSeen != 0 { t.Error(e1.numSeen) } - if 0 != 
e1.NumSaved() { + if e1.NumSaved() != 0 { t.Error(e1.NumSaved()) } } @@ -151,7 +160,11 @@ func TestMergeFullLogEvents(t *testing.T) { e2.Add(sampleLogEvent(0.24, infoLevel, "g")) e1.Merge(e2) - json, err := e1.CollectorJSON(agentRunID) + + buf := e1.DataBuffer() + err := e1.CollectorJSON(buf, agentRunID) + json := buf.Bytes() + if nil != err { t.Fatal(err) } @@ -187,7 +200,10 @@ func TestLogEventMergeFailedSuccess(t *testing.T) { e1.mergeFailed(e2) - json, err := e1.CollectorJSON(agentRunID) + buf := e1.DataBuffer() + err := e1.CollectorJSON(buf, agentRunID) + json := buf.Bytes() + if nil != err { t.Fatal(err) } @@ -227,7 +243,10 @@ func TestLogEventMergeFailedLimitReached(t *testing.T) { e1.mergeFailed(e2) - json, err := e1.CollectorJSON(agentRunID) + buf := e1.DataBuffer() + err := e1.CollectorJSON(buf, agentRunID) + json := buf.Bytes() + if nil != err { t.Fatal(err) } @@ -256,12 +275,19 @@ func TestLogEventsSplitFull(t *testing.T) { events.Add(sampleLogEvent(priority, "INFO", fmt.Sprint(priority))) } // Test that the capacity cannot exceed the max. - if 10 != events.capacity() { + if events.capacity() != 10 { t.Error(events.capacity()) } e1, e2 := events.split() - j1, err1 := e1.CollectorJSON(agentRunID) - j2, err2 := e2.CollectorJSON(agentRunID) + + buf := e1.DataBuffer() + err1 := e1.CollectorJSON(buf, agentRunID) + j1 := buf.Bytes() + + buf = e2.DataBuffer() + err2 := e2.CollectorJSON(buf, agentRunID) + j2 := buf.Bytes() + if err1 != nil || err2 != nil { t.Fatal(err1, err2) } @@ -295,8 +321,14 @@ func TestLogEventsSplitNotFullOdd(t *testing.T) { events.Add(sampleLogEvent(priority, "INFO", fmt.Sprint(priority))) } e1, e2 := events.split() - j1, err1 := e1.CollectorJSON(agentRunID) - j2, err2 := e2.CollectorJSON(agentRunID) + buf := e1.DataBuffer() + err1 := e1.CollectorJSON(buf, agentRunID) + j1 := buf.Bytes() + + buf = e2.DataBuffer() + err2 := e2.CollectorJSON(buf, agentRunID) + j2 := buf.Bytes() + if err1 != nil || err2 != nil { t.Fatal(err1, err2) } @@ -325,8 +357,14 @@ func TestLogEventsSplitNotFullEven(t *testing.T) { events.Add(sampleLogEvent(priority, "INFO", fmt.Sprint(priority))) } e1, e2 := events.split() - j1, err1 := e1.CollectorJSON(agentRunID) - j2, err2 := e2.CollectorJSON(agentRunID) + buf := e1.DataBuffer() + err1 := e1.CollectorJSON(buf, agentRunID) + j1 := buf.Bytes() + + buf = e2.DataBuffer() + err2 := e2.CollectorJSON(buf, agentRunID) + j2 := buf.Bytes() + if err1 != nil || err2 != nil { t.Fatal(err1, err2) } @@ -360,9 +398,10 @@ func TestLogEventsZeroCapacity(t *testing.T) { if 1 != events.NumSeen() || 0 != events.NumSaved() || 0 != events.capacity() { t.Error(events.NumSeen(), events.NumSaved(), events.capacity()) } - js, err := events.CollectorJSON("agentRunID") - if err != nil || js != nil { - t.Error(err, string(js)) + buf := events.DataBuffer() + err := events.CollectorJSON(buf, agentRunID) + if err != nil || buf != nil { + t.Error(err, string(buf.Bytes())) } } @@ -379,9 +418,42 @@ func TestLogEventCollectionDisabled(t *testing.T) { if 1 != events.NumSeen() || 1 != len(events.severityCount) || 0 != events.NumSaved() || 5 != events.capacity() { t.Error(events.NumSeen(), len(events.severityCount), events.NumSaved(), events.capacity()) } - js, err := events.CollectorJSON("agentRunID") - if err != nil || js != nil { - t.Error(err, string(js)) + + buf := events.DataBuffer() + err := events.CollectorJSON(buf, agentRunID) + if err != nil || buf != nil { + t.Error(err, string(buf.Bytes())) + } +} + +func BenchmarkRecordLoggingMetrics(b *testing.B) { + now := 
time.Now() + fixedHarvestTypes := harvestMetricsTraces & harvestTxnEvents & harvestSpanEvents & harvestLogEvents + h := newHarvest(now, harvestConfig{ + ReportPeriods: map[harvestTypes]time.Duration{ + fixedHarvestTypes: fixedHarvestPeriod, + harvestLogEvents: time.Second * 5, + }, + LoggingConfig: loggingConfigEnabled(3), + }) + + for i := 0; i < internal.MaxLogEvents; i++ { + logEvent := logEvent{ + newPriority(), + 123456, + "INFO", + fmt.Sprintf("User 'xyz' logged in %d", i), + "123456789ADF", + "ADF09876565", + } + + h.LogEvents.Add(&logEvent) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.ReportAllocs() + h.LogEvents.RecordLoggingMetrics(h.Metrics) } } @@ -404,26 +476,30 @@ func BenchmarkLogEventsAdd(b *testing.B) { } } +// Benchmark the cost of harvesting a full log event collector func BenchmarkLogEventsCollectorJSON(b *testing.B) { events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) - event := &logEvent{ - priority: newPriority(), - timestamp: 123456, - severity: "INFO", - message: "test message", - spanID: "Ad300dra7re89", - traceID: "2234iIhfLlejrJ0", - } - events.Add(event) + for i := 0; i < internal.MaxLogEvents; i++ { + event := &logEvent{ + priority: newPriority(), + timestamp: 123456, + severity: "INFO", + message: "test message", + spanID: "Ad300dra7re89", + traceID: "2234iIhfLlejrJ0", + } + events.Add(event) + } b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - js, err := events.CollectorJSON(agentRunID) + buf := events.DataBuffer() + err := events.CollectorJSON(buf, agentRunID) if nil != err { - b.Fatal(err, js) + b.Fatal(err) } } } diff --git a/v3/newrelic/metrics.go b/v3/newrelic/metrics.go index 273a5bead..cc05c3115 100644 --- a/v3/newrelic/metrics.go +++ b/v3/newrelic/metrics.go @@ -176,13 +176,10 @@ func (mt *metricTable) addApdex(name, scope string, apdexThreshold time.Duration mt.add(name, scope, data, force) } -func (mt *metricTable) CollectorJSON(agentRunID string, now time.Time) ([]byte, error) { - if 0 == len(mt.metrics) { - return nil, nil +func (mt *metricTable) CollectorJSON(buf *bytes.Buffer, agentRunID string, now time.Time) error { + if buf == nil { + return nil } - estimatedBytesPerMetric := 128 - estimatedLen := len(mt.metrics) * estimatedBytesPerMetric - buf := bytes.NewBuffer(make([]byte, 0, estimatedLen)) buf.WriteByte('[') jsonx.AppendString(buf, agentRunID) @@ -224,11 +221,20 @@ func (mt *metricTable) CollectorJSON(agentRunID string, now time.Time) ([]byte, buf.WriteByte(']') buf.WriteByte(']') - return buf.Bytes(), nil + return nil } -func (mt *metricTable) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { - return mt.CollectorJSON(agentRunID, harvestStart) +func (mt *metricTable) DataBuffer() *bytes.Buffer { + if len(mt.metrics) == 0 { + return nil + } + + estimatedBytesPerMetric := 128 + estimatedLen := len(mt.metrics) * estimatedBytesPerMetric + return bytes.NewBuffer(make([]byte, 0, estimatedLen)) +} +func (mt *metricTable) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { + return mt.CollectorJSON(buf, agentRunID, harvestStart) } func (mt *metricTable) MergeIntoHarvest(h *harvest) { h.Metrics.mergeFailed(mt) diff --git a/v3/newrelic/metrics_test.go b/v3/newrelic/metrics_test.go index 148622b94..c33e63713 100644 --- a/v3/newrelic/metrics_test.go +++ b/v3/newrelic/metrics_test.go @@ -19,12 +19,13 @@ var ( func TestEmptyMetrics(t *testing.T) { mt := newMetricTable(20, start) - js, err := mt.CollectorJSON(`12345`, end) + buf := mt.DataBuffer() 
+ err := mt.CollectorJSON(buf, `12345`, end) if nil != err { t.Fatal(err) } - if nil != js { - t.Error(string(js)) + if nil != buf { + t.Error(string(buf.Bytes())) } } @@ -62,8 +63,10 @@ func TestMetrics(t *testing.T) { {Name: "count 1", Scope: "", Forced: false, Data: []float64{1, 0, 0, 0, 0, 0}}, }) - js, err := mt.Data("12345", end) - if nil != err { + buf := mt.DataBuffer() + err := mt.CollectorJSON(buf, `12345`, end) + js := buf.Bytes() + if err != nil { t.Error(err) } // The JSON metric order is not deterministic, so we merely test that it @@ -277,10 +280,13 @@ func BenchmarkMetricTableCollectorJSON(b *testing.B) { } } - data, err := mt.CollectorJSON("12345", time.Now()) + buf := mt.DataBuffer() + err := mt.CollectorJSON(buf, "12345", time.Now()) if nil != err { b.Fatal(err) } + + data := buf.Bytes() if err := isValidJSON(data); nil != err { b.Fatal(err, string(data)) } @@ -291,7 +297,7 @@ func BenchmarkMetricTableCollectorJSON(b *testing.B) { id := "12345" now := time.Now() for i := 0; i < b.N; i++ { - mt.CollectorJSON(id, now) + mt.CollectorJSON(buf, id, now) } } diff --git a/v3/newrelic/serverless.go b/v3/newrelic/serverless.go index b2e45c368..614900f82 100644 --- a/v3/newrelic/serverless.go +++ b/v3/newrelic/serverless.go @@ -80,7 +80,8 @@ func (sh *serverlessHarvest) Write(arn string, writer io.Writer) { for _, p := range payloads { agentRunID := "" cmd := p.EndpointMethod() - data, err := p.Data(agentRunID, time.Now()) + data := p.DataBuffer() + err := p.WriteData(data, agentRunID, time.Now()) if err != nil { sh.logger.Error("error creating payload json", map[string]interface{}{ "command": cmd, @@ -88,7 +89,7 @@ func (sh *serverlessHarvest) Write(arn string, writer io.Writer) { }) continue } - if nil == data { + if data == nil { continue } // NOTE! 
This code relies on the fact that each payload is @@ -104,7 +105,7 @@ func (sh *serverlessHarvest) Write(arn string, writer io.Writer) { "command": cmd, }) } - d := json.RawMessage(data) + d := json.RawMessage(data.Bytes()) harvestPayloads[cmd] = &d } diff --git a/v3/newrelic/slow_queries.go b/v3/newrelic/slow_queries.go index 7557d3b8a..97ecec721 100644 --- a/v3/newrelic/slow_queries.go +++ b/v3/newrelic/slow_queries.go @@ -246,14 +246,19 @@ func (slows *slowQueries) WriteJSON(buf *bytes.Buffer) { buf.WriteByte(']') } -func (slows *slowQueries) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { +func (slows *slowQueries) DataBuffer() *bytes.Buffer { if 0 == len(slows.priorityQueue) { - return nil, nil + return nil } estimate := 1024 * len(slows.priorityQueue) - buf := bytes.NewBuffer(make([]byte, 0, estimate)) + return bytes.NewBuffer(make([]byte, 0, estimate)) +} +func (slows *slowQueries) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { + if buf == nil { + return nil + } slows.WriteJSON(buf) - return buf.Bytes(), nil + return nil } func (slows *slowQueries) MergeIntoHarvest(newHarvest *harvest) { diff --git a/v3/newrelic/slow_queries_test.go b/v3/newrelic/slow_queries_test.go index 3d831f427..2b592091e 100644 --- a/v3/newrelic/slow_queries_test.go +++ b/v3/newrelic/slow_queries_test.go @@ -13,9 +13,10 @@ import ( func TestEmptySlowQueriesData(t *testing.T) { slows := newSlowQueries(maxHarvestSlowSQLs) - js, err := slows.Data("agentRunID", time.Now()) - if nil != js || nil != err { - t.Error(string(js), err) + buf := slows.DataBuffer() + err := slows.WriteData(buf, "agentRunID", time.Now()) + if buf != nil { + t.Error(string(buf.Bytes()), err) } } @@ -53,7 +54,13 @@ func TestSlowQueriesBasic(t *testing.T) { }) harvestSlows := newSlowQueries(maxHarvestSlowSQLs) harvestSlows.Merge(txnSlows, txnEvent) - js, err := harvestSlows.Data("agentRunID", time.Now()) + buf := harvestSlows.DataBuffer() + err = harvestSlows.WriteData(buf, "agentRunID", time.Now()) + if err != nil { + t.Error(err) + } + + js := buf.Bytes() expect := compactJSONString(`[[ [ "WebTransaction/Go/hello", @@ -118,7 +125,13 @@ func TestSlowQueriesExcludeURI(t *testing.T) { }) harvestSlows := newSlowQueries(maxHarvestSlowSQLs) harvestSlows.Merge(txnSlows, txnEvent) - js, err := harvestSlows.Data("agentRunID", time.Now()) + buf := harvestSlows.DataBuffer() + err = harvestSlows.WriteData(buf, "agentRunID", time.Now()) + if err != nil { + t.Error(err) + } + + js := buf.Bytes() expect := compactJSONString(`[[ [ "WebTransaction/Go/hello", @@ -182,7 +195,13 @@ func TestSlowQueriesAggregation(t *testing.T) { for _, idx := range perm { sq.observeInstance(slows[idx]) } - js, err := sq.Data("agentRunID", time.Now()) + buf := sq.DataBuffer() + err := sq.WriteData(buf, "agentRunID", time.Now()) + if err != nil { + t.Error(err) + } + + js := buf.Bytes() expect := compactJSONString(`[[ ["Txn/241","",2296612630,"41","Datastore/41",1,241000,241000,241000,{}], ["Txn/242","",2279835011,"42","Datastore/42",2,384000,142000,242000,{}], @@ -252,7 +271,13 @@ func TestSlowQueriesBetterCAT(t *testing.T) { }) harvestSlows := newSlowQueries(maxHarvestSlowSQLs) harvestSlows.Merge(txnSlows, txnEvent) - js, err := harvestSlows.Data("agentRunID", time.Now()) + buf := harvestSlows.DataBuffer() + err = harvestSlows.WriteData(buf, "agentRunID", time.Now()) + if err != nil { + t.Error(err) + } + + js := buf.Bytes() expect := compactJSONString(`[[ [ "WebTransaction/Go/hello", diff --git a/v3/newrelic/span_events.go 
b/v3/newrelic/span_events.go index 978e620bb..b04315e90 100644 --- a/v3/newrelic/span_events.go +++ b/v3/newrelic/span_events.go @@ -136,8 +136,17 @@ func (events *spanEvents) MergeIntoHarvest(h *harvest) { h.SpanEvents.mergeFailed(events.analyticsEvents) } -func (events *spanEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { - return events.CollectorJSON(agentRunID) +func (events *spanEvents) DataBuffer() *bytes.Buffer { + if 0 == len(events.events) { + return nil + } + + estimate := 256 * len(events.events) + return bytes.NewBuffer(make([]byte, 0, estimate)) +} + +func (events *spanEvents) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { + return events.CollectorJSON(buf, agentRunID) } func (events *spanEvents) EndpointMethod() string { diff --git a/v3/newrelic/txn_events.go b/v3/newrelic/txn_events.go index 61bd9aff4..a395a5025 100644 --- a/v3/newrelic/txn_events.go +++ b/v3/newrelic/txn_events.go @@ -170,8 +170,17 @@ func (events *txnEvents) MergeIntoHarvest(h *harvest) { h.TxnEvents.mergeFailed(events.analyticsEvents) } -func (events *txnEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { - return events.CollectorJSON(agentRunID) +func (events *txnEvents) DataBuffer() *bytes.Buffer { + if len(events.events) == 0 { + return nil + } + + estimate := 256 * len(events.events) + return bytes.NewBuffer(make([]byte, 0, estimate)) +} + +func (events *txnEvents) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { + return events.CollectorJSON(buf, agentRunID) } func (events *txnEvents) EndpointMethod() string { diff --git a/v3/newrelic/txn_events_test.go b/v3/newrelic/txn_events_test.go index 6157e0c6d..4350be139 100644 --- a/v3/newrelic/txn_events_test.go +++ b/v3/newrelic/txn_events_test.go @@ -243,8 +243,10 @@ func TestTxnEventsPayloadsEmpty(t *testing.T) { if len(ps) != 1 { t.Error(ps) } - if data, err := ps[0].Data("agentRunID", time.Now()); data != nil || err != nil { - t.Error(data, err) + data := events.DataBuffer() + err := ps[0].WriteData(data, "agentRunID", time.Now()) + if data != nil || err != nil { + t.Error(data.Bytes(), err) } } @@ -257,7 +259,9 @@ func TestTxnEventsPayloadsUnderLimit(t *testing.T) { if len(ps) != 1 { t.Error(ps) } - if data, err := ps[0].Data("agentRunID", time.Now()); data == nil || err != nil { + data := events.DataBuffer() + err := ps[0].WriteData(data, "agentRunID", time.Now()) + if data == nil || err != nil { t.Error(data, err) } } @@ -271,11 +275,16 @@ func TestTxnEventsPayloadsOverLimit(t *testing.T) { if len(ps) != 2 { t.Error(ps) } - if data, err := ps[0].Data("agentRunID", time.Now()); data == nil || err != nil { - t.Error(data, err) + data := events.DataBuffer() + err := ps[0].WriteData(data, "agentRunID", time.Now()) + if data == nil || err != nil { + t.Error(data.Bytes(), err) } - if data, err := ps[1].Data("agentRunID", time.Now()); data == nil || err != nil { - t.Error(data, err) + + data = events.DataBuffer() + err = ps[1].WriteData(data, "agentRunID", time.Now()) + if data == nil || err != nil { + t.Error(data.Bytes(), err) } } diff --git a/v3/newrelic/txn_trace.go b/v3/newrelic/txn_trace.go index 6050ba69e..a3aa7facd 100644 --- a/v3/newrelic/txn_trace.go +++ b/v3/newrelic/txn_trace.go @@ -392,9 +392,9 @@ func (traces *harvestTraces) Witness(trace harvestTrace) { } } -func (traces *harvestTraces) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { +func (traces *harvestTraces) DataBuffer() *bytes.Buffer { if traces.Len() == 0 { 
- return nil, nil + return nil } // This estimate is used to guess the size of the buffer. No worries if @@ -408,7 +408,13 @@ func (traces *harvestTraces) Data(agentRunID string, harvestStart time.Time) ([] estimate += 100 * t.Trace.nodes.Len() } - buf := bytes.NewBuffer(make([]byte, 0, estimate)) + return bytes.NewBuffer(make([]byte, 0, estimate)) +} + +func (traces *harvestTraces) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { + if buf == nil { + return nil + } buf.WriteByte('[') jsonx.AppendString(buf, agentRunID) buf.WriteByte(',') @@ -435,7 +441,7 @@ func (traces *harvestTraces) Data(agentRunID string, harvestStart time.Time) ([] buf.WriteByte(']') buf.WriteByte(']') - return buf.Bytes(), nil + return nil } func (traces *harvestTraces) slice() []*harvestTrace { diff --git a/v3/newrelic/txn_trace_test.go b/v3/newrelic/txn_trace_test.go index e49aa6035..80102a81b 100644 --- a/v3/newrelic/txn_trace_test.go +++ b/v3/newrelic/txn_trace_test.go @@ -813,9 +813,10 @@ func TestTxnTraceSegmentThreshold(t *testing.T) { func TestEmptyHarvestTraces(t *testing.T) { start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) ht := newHarvestTraces() - js, err := ht.Data("12345", start) - if nil != err || nil != js { - t.Error(string(js), err) + data := ht.DataBuffer() + err := ht.WriteData(data, "12345", start) + if err != nil || data != nil { + t.Error(string(data.Bytes()), err) } } @@ -1169,7 +1170,9 @@ func TestTraceJSON(t *testing.T) { ] ]` - js, err := ht.Data("12345", start) + data := ht.DataBuffer() + err := ht.WriteData(data, "12345", start) + js := data.Bytes() if nil != err { t.Fatal(err) } @@ -1223,7 +1226,9 @@ func TestTraceCatGUID(t *testing.T) { ] ]` - js, err := ht.Data("12345", start) + data := ht.DataBuffer() + err := ht.WriteData(data, "12345", start) + js := data.Bytes() if nil != err { t.Fatal(err) } @@ -1284,7 +1289,9 @@ func TestTraceDistributedTracingGUID(t *testing.T) { ] ]` - js, err := ht.Data("12345", start) + data := ht.DataBuffer() + err := ht.WriteData(data, "12345", start) + js := data.Bytes() if nil != err { t.Fatal(err) } From cbc801fd65d21651602753614e68d28829a3227d Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Wed, 15 Jun 2022 10:48:22 -0400 Subject: [PATCH 25/47] Revert "improve efficiency of getting harvest data by avoiding memAlloc" This reverts commit ea253d7dee0c9ec68ad0063c56add2bad8f863f6. This was a red herring. 
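
Editor's note: for context on what the revert restores, payloadCreator goes back to the single Data call that both builds and serializes a payload, instead of the DataBuffer/WriteData pair introduced in the previous patch. A rough sketch of that calling pattern as the harvest loop uses it; the noopEvents type and send helper below are invented for illustration and are not part of the agent:

package main

import (
	"fmt"
	"time"
)

// payload mirrors the reverted interface shape: one call returning the
// serialized bytes, or (nil, nil) when there is nothing to send.
type payload interface {
	Data(agentRunID string, harvestStart time.Time) ([]byte, error)
	EndpointMethod() string
}

// noopEvents is a stand-in payload with nothing to report.
type noopEvents struct{}

func (noopEvents) Data(string, time.Time) ([]byte, error) { return nil, nil }
func (noopEvents) EndpointMethod() string                 { return "custom_event_data" }

// send shows the caller's side: skip the endpoint on error or empty data.
func send(p payload, runID string) {
	data, err := p.Data(runID, time.Now())
	if err != nil {
		fmt.Println("unable to create harvest data:", err)
		return
	}
	if data == nil {
		fmt.Println("nothing to send to", p.EndpointMethod())
		return
	}
	fmt.Printf("POST %s: %d bytes\n", p.EndpointMethod(), len(data))
}

func main() {
	send(noopEvents{}, "12345")
}

The buffer-threading variant traded this simplicity for a pre-sized bytes.Buffer per payload; per the commit message above, the measured benefit turned out to be a red herring.
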
--- v3/newrelic/analytics_events.go | 12 +- v3/newrelic/analytics_events_test.go | 88 ++++----------- v3/newrelic/custom_events.go | 18 +-- v3/newrelic/error_events.go | 13 +-- v3/newrelic/errors_from_internal.go | 14 +-- v3/newrelic/errors_test.go | 9 +- v3/newrelic/expect_implementation.go | 6 +- v3/newrelic/harvest.go | 5 +- v3/newrelic/harvest_test.go | 53 ++++----- v3/newrelic/internal_app.go | 7 +- v3/newrelic/log_event.go | 2 +- v3/newrelic/log_events.go | 33 +++--- v3/newrelic/log_events_test.go | 160 +++++++-------------------- v3/newrelic/metrics.go | 24 ++-- v3/newrelic/metrics_test.go | 20 ++-- v3/newrelic/serverless.go | 7 +- v3/newrelic/slow_queries.go | 13 +-- v3/newrelic/slow_queries_test.go | 39 ++----- v3/newrelic/span_events.go | 13 +-- v3/newrelic/txn_events.go | 13 +-- v3/newrelic/txn_events_test.go | 23 ++-- v3/newrelic/txn_trace.go | 14 +-- v3/newrelic/txn_trace_test.go | 19 +--- 23 files changed, 174 insertions(+), 431 deletions(-) diff --git a/v3/newrelic/analytics_events.go b/v3/newrelic/analytics_events.go index aa8131ec5..6c1fdb63b 100644 --- a/v3/newrelic/analytics_events.go +++ b/v3/newrelic/analytics_events.go @@ -91,11 +91,14 @@ func (events *analyticsEvents) Merge(other *analyticsEvents) { events.numSeen = allSeen } -func (events *analyticsEvents) CollectorJSON(buf *bytes.Buffer, agentRunID string) error { - if buf == nil || events.NumSaved() == 0 { - return nil +func (events *analyticsEvents) CollectorJSON(agentRunID string) ([]byte, error) { + if 0 == len(events.events) { + return nil, nil } + estimate := 256 * len(events.events) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + buf.WriteByte('[') jsonx.AppendString(buf, agentRunID) buf.WriteByte(',') @@ -117,7 +120,8 @@ func (events *analyticsEvents) CollectorJSON(buf *bytes.Buffer, agentRunID strin buf.WriteByte(']') buf.WriteByte(']') - return nil + return buf.Bytes(), nil + } // split splits the events into two. NOTE! 
The two event pools are not valid diff --git a/v3/newrelic/analytics_events_test.go b/v3/newrelic/analytics_events_test.go index 67aa09468..98c7d1407 100644 --- a/v3/newrelic/analytics_events_test.go +++ b/v3/newrelic/analytics_events_test.go @@ -35,14 +35,11 @@ func TestBasic(t *testing.T) { events.addEvent(sampleAnalyticsEvent(0.5)) events.addEvent(sampleAnalyticsEvent(0.5)) - buf := &bytes.Buffer{} - err := events.CollectorJSON(buf, agentRunID) + json, err := events.CollectorJSON(agentRunID) if nil != err { t.Fatal(err) } - json := buf.Bytes() - expected := `["12345",{"reservoir_size":10,"events_seen":3},[0.5,0.5,0.5]]` if string(json) != expected { @@ -58,19 +55,17 @@ func TestBasic(t *testing.T) { func TestEmpty(t *testing.T) { events := newAnalyticsEvents(10) - var buf *bytes.Buffer - err := events.CollectorJSON(buf, agentRunID) + json, err := events.CollectorJSON(agentRunID) if nil != err { t.Fatal(err) } - - if buf != nil { - t.Error(string(buf.Bytes())) + if nil != json { + t.Error(string(json)) } - if events.numSeen != 0 { + if 0 != events.numSeen { t.Error(events.numSeen) } - if events.NumSaved() != 0 { + if 0 != events.NumSaved() { t.Error(events.NumSaved()) } } @@ -84,13 +79,10 @@ func TestSampling(t *testing.T) { events.addEvent(sampleAnalyticsEvent(0.8)) events.addEvent(sampleAnalyticsEvent(0.3)) - buf := &bytes.Buffer{} - err := events.CollectorJSON(buf, agentRunID) + json, err := events.CollectorJSON(agentRunID) if nil != err { t.Fatal(err) } - - json := buf.Bytes() if string(json) != `["12345",{"reservoir_size":3,"events_seen":6},[0.8,0.999999,0.9]]` { t.Error(string(json)) } @@ -106,14 +98,10 @@ func TestMergeEmpty(t *testing.T) { e1 := newAnalyticsEvents(10) e2 := newAnalyticsEvents(10) e1.Merge(e2) - buf := &bytes.Buffer{} - err := e1.CollectorJSON(buf, agentRunID) + json, err := e1.CollectorJSON(agentRunID) if nil != err { t.Fatal(err) } - - json := buf.Bytes() - if nil != json { t.Error(string(json)) } @@ -139,13 +127,10 @@ func TestMergeFull(t *testing.T) { e2.addEvent(sampleAnalyticsEvent(0.24)) e1.Merge(e2) - buf := &bytes.Buffer{} - err := e1.CollectorJSON(buf, agentRunID) + json, err := e1.CollectorJSON(agentRunID) if nil != err { t.Fatal(err) } - - json := buf.Bytes() if string(json) != `["12345",{"reservoir_size":2,"events_seen":7},[0.24,0.25]]` { t.Error(string(json)) } @@ -172,13 +157,10 @@ func TestAnalyticsEventMergeFailedSuccess(t *testing.T) { e1.mergeFailed(e2) - buf := &bytes.Buffer{} - err := e1.CollectorJSON(buf, agentRunID) + json, err := e1.CollectorJSON(agentRunID) if nil != err { t.Fatal(err) } - - json := buf.Bytes() if string(json) != `["12345",{"reservoir_size":2,"events_seen":7},[0.24,0.25]]` { t.Error(string(json)) } @@ -210,13 +192,10 @@ func TestAnalyticsEventMergeFailedLimitReached(t *testing.T) { e1.mergeFailed(e2) - buf := &bytes.Buffer{} - err := e1.CollectorJSON(buf, agentRunID) + json, err := e1.CollectorJSON(agentRunID) if nil != err { t.Fatal(err) } - - json := buf.Bytes() if string(json) != `["12345",{"reservoir_size":2,"events_seen":3},[0.15,0.25]]` { t.Error(string(json)) } @@ -242,10 +221,9 @@ func analyticsEventBenchmarkHelper(b *testing.B, w jsonWriter) { b.ResetTimer() for n := 0; n < b.N; n++ { - buf := &bytes.Buffer{} - err := events.CollectorJSON(buf, agentRunID) + js, err := events.CollectorJSON(agentRunID) if nil != err { - b.Fatal(err) + b.Fatal(err, js) } } } @@ -301,14 +279,8 @@ func TestSplitFull(t *testing.T) { t.Error(events.capacity()) } e1, e2 := events.split() - buf := &bytes.Buffer{} - err1 := 
e1.CollectorJSON(buf, agentRunID) - j1 := buf.Bytes() - - buf = &bytes.Buffer{} - err2 := e2.CollectorJSON(buf, agentRunID) - j2 := buf.Bytes() - + j1, err1 := e1.CollectorJSON(agentRunID) + j2, err2 := e2.CollectorJSON(agentRunID) if err1 != nil || err2 != nil { t.Fatal(err1, err2) } @@ -326,14 +298,8 @@ func TestSplitNotFullOdd(t *testing.T) { events.addEvent(sampleAnalyticsEvent(priority(float32(i) / 10.0))) } e1, e2 := events.split() - buf := &bytes.Buffer{} - err1 := e1.CollectorJSON(buf, agentRunID) - j1 := buf.Bytes() - - buf = &bytes.Buffer{} - err2 := e2.CollectorJSON(buf, agentRunID) - j2 := buf.Bytes() - + j1, err1 := e1.CollectorJSON(agentRunID) + j2, err2 := e2.CollectorJSON(agentRunID) if err1 != nil || err2 != nil { t.Fatal(err1, err2) } @@ -351,14 +317,8 @@ func TestSplitNotFullEven(t *testing.T) { events.addEvent(sampleAnalyticsEvent(priority(float32(i) / 10.0))) } e1, e2 := events.split() - buf := &bytes.Buffer{} - err1 := e1.CollectorJSON(buf, agentRunID) - j1 := buf.Bytes() - - buf = &bytes.Buffer{} - err2 := e2.CollectorJSON(buf, agentRunID) - j2 := buf.Bytes() - + j1, err1 := e1.CollectorJSON(agentRunID) + j2, err2 := e2.CollectorJSON(agentRunID) if err1 != nil || err2 != nil { t.Fatal(err1, err2) } @@ -381,15 +341,7 @@ func TestAnalyticsEventsZeroCapacity(t *testing.T) { if 1 != events.NumSeen() || 0 != events.NumSaved() || 0 != events.capacity() { t.Error(events.NumSeen(), events.NumSaved(), events.capacity()) } - - data := &bytes.Buffer{} - err := events.CollectorJSON(data, agentRunID) - - var js []byte - if data != nil { - js = data.Bytes() - } - + js, err := events.CollectorJSON("agentRunID") if err != nil || js != nil { t.Error(err, string(js)) } diff --git a/v3/newrelic/custom_events.go b/v3/newrelic/custom_events.go index 2f09d0b7b..95809fade 100644 --- a/v3/newrelic/custom_events.go +++ b/v3/newrelic/custom_events.go @@ -3,10 +3,7 @@ package newrelic -import ( - "bytes" - "time" -) +import "time" type customEvents struct { *analyticsEvents @@ -30,17 +27,8 @@ func (cs *customEvents) MergeIntoHarvest(h *harvest) { h.CustomEvents.mergeFailed(cs.analyticsEvents) } -func (cs *customEvents) DataBuffer() *bytes.Buffer { - if len(cs.events) == 0 { - return nil - } - - estimate := 256 * len(cs.events) - return bytes.NewBuffer(make([]byte, 0, estimate)) -} - -func (cs *customEvents) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { - return cs.CollectorJSON(buf, agentRunID) +func (cs *customEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return cs.CollectorJSON(agentRunID) } func (cs *customEvents) EndpointMethod() string { diff --git a/v3/newrelic/error_events.go b/v3/newrelic/error_events.go index cd82db9ad..90419747c 100644 --- a/v3/newrelic/error_events.go +++ b/v3/newrelic/error_events.go @@ -61,17 +61,8 @@ func (events *errorEvents) MergeIntoHarvest(h *harvest) { h.ErrorEvents.mergeFailed(events.analyticsEvents) } -func (events *errorEvents) DataBuffer() *bytes.Buffer { - if 0 == len(events.events) { - return nil - } - - estimate := 256 * len(events.events) - return bytes.NewBuffer(make([]byte, 0, estimate)) -} - -func (events *errorEvents) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { - return events.CollectorJSON(buf, agentRunID) +func (events *errorEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return events.CollectorJSON(agentRunID) } func (events *errorEvents) EndpointMethod() string { diff --git a/v3/newrelic/errors_from_internal.go 
b/v3/newrelic/errors_from_internal.go index 1eb8b3340..7813d102f 100644 --- a/v3/newrelic/errors_from_internal.go +++ b/v3/newrelic/errors_from_internal.go @@ -151,18 +151,12 @@ func mergeTxnErrors(errors *harvestErrors, errs txnErrors, txnEvent txnEvent) { } } -func (errors harvestErrors) DataBuffer() *bytes.Buffer { +func (errors harvestErrors) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { if 0 == len(errors) { - return nil + return nil, nil } estimate := 1024 * len(errors) - return bytes.NewBuffer(make([]byte, 0, estimate)) -} - -func (errors harvestErrors) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { - if buf == nil { - return nil - } + buf := bytes.NewBuffer(make([]byte, 0, estimate)) buf.WriteByte('[') jsonx.AppendString(buf, agentRunID) buf.WriteByte(',') @@ -175,7 +169,7 @@ func (errors harvestErrors) WriteData(buf *bytes.Buffer, agentRunID string, harv } buf.WriteByte(']') buf.WriteByte(']') - return nil + return buf.Bytes(), nil } func (errors harvestErrors) MergeIntoHarvest(h *harvest) {} diff --git a/v3/newrelic/errors_test.go b/v3/newrelic/errors_test.go index d40ac5da0..9ab80bb8e 100644 --- a/v3/newrelic/errors_test.go +++ b/v3/newrelic/errors_test.go @@ -239,9 +239,7 @@ func TestErrorsLifecycle(t *testing.T) { }, TotalTime: 2 * time.Second, }) - buf := he.DataBuffer() - err := he.WriteData(buf, "agentRunID", time.Now()) - js := buf.Bytes() + js, err := he.Data("agentRunID", time.Now()) if nil != err { t.Error(err) } @@ -352,10 +350,7 @@ func BenchmarkErrorsJSON(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - buf := he.DataBuffer() - err := he.WriteData(buf, "agentRundID", when) - js := buf.Bytes() - + js, err := he.Data("agentRundID", when) if nil != err || nil == js { b.Fatal(err, js) } diff --git a/v3/newrelic/expect_implementation.go b/v3/newrelic/expect_implementation.go index 33fbcbc5f..435a66dff 100644 --- a/v3/newrelic/expect_implementation.go +++ b/v3/newrelic/expect_implementation.go @@ -493,14 +493,12 @@ func expectTxnTraces(v internal.Validator, traces *harvestTraces, want []interna if len(want) == 0 { return } - data := traces.DataBuffer() - err := traces.WriteData(data, "agentRunID", time.Now()) - if err != nil { + js, err := traces.Data("agentRunID", time.Now()) + if nil != err { v.Error("error creasing harvest traces data", err) return } - js := data.Bytes() var unmarshalled []interface{} err = json.Unmarshal(js, &unmarshalled) if nil != err { diff --git a/v3/newrelic/harvest.go b/v3/newrelic/harvest.go index 996c3bee2..7b3aa787e 100644 --- a/v3/newrelic/harvest.go +++ b/v3/newrelic/harvest.go @@ -4,7 +4,6 @@ package newrelic import ( - "bytes" "time" "github.com/newrelic/go-agent/v3/internal" @@ -258,12 +257,10 @@ type payloadCreator interface { // intermittent collector issue) the payload may be merged into the next // time period's harvest. harvestable - - DataBuffer() *bytes.Buffer // Data prepares JSON in the format expected by the collector endpoint. // This method should return (nil, nil) if the payload is empty and no // rpm request is necessary. - WriteData(buffer *bytes.Buffer, agentRunID string, harvestStart time.Time) error + Data(agentRunID string, harvestStart time.Time) ([]byte, error) // EndpointMethod is used for the "method" query parameter when posting // the data. 
EndpointMethod() string diff --git a/v3/newrelic/harvest_test.go b/v3/newrelic/harvest_test.go index 4acaf037a..728c7640d 100644 --- a/v3/newrelic/harvest_test.go +++ b/v3/newrelic/harvest_test.go @@ -231,10 +231,9 @@ func TestEmptyPayloads(t *testing.T) { t.Error(len(payloads)) } for _, p := range payloads { - data := p.DataBuffer() - err := p.WriteData(data, "agentRunID", time.Now()) - if data != nil || err != nil { - t.Error(data.Bytes(), err) + d, err := p.Data("agentRunID", time.Now()) + if d != nil || err != nil { + t.Error(d, err) } } } @@ -290,10 +289,9 @@ func TestHarvestCustomEventsReady(t *testing.T) { if m := p.EndpointMethod(); m != "custom_event_data" { t.Error(m) } - data := p.DataBuffer() - err := p.WriteData(data, "agentRunID", now) - if err != nil || data == nil { - t.Error(err, data.Bytes()) + data, err := p.Data("agentRunID", now) + if nil != err || nil == data { + t.Error(err, data) } if h.CustomEvents.capacity() != 3 || h.CustomEvents.NumSaved() != 0 { t.Fatal("custom events not correctly reset") @@ -340,10 +338,9 @@ func TestHarvestLogEventsReady(t *testing.T) { if m := p.EndpointMethod(); m != "log_event_data" { t.Error(m) } - data := p.DataBuffer() - err := p.WriteData(data, "agentRunID", now) - if err != nil || data == nil { - t.Error(err, data.Bytes()) + data, err := p.Data("agentRunID", now) + if nil != err || nil == data { + t.Error(err, data) } if h.LogEvents.capacity() != 3 || h.LogEvents.NumSaved() != 0 { t.Fatal("log events not correctly reset") @@ -390,10 +387,9 @@ func TestHarvestTxnEventsReady(t *testing.T) { if m := p.EndpointMethod(); m != "analytic_event_data" { t.Error(m) } - data := p.DataBuffer() - err := p.WriteData(data, "agentRunID", now) - if err != nil || data == nil { - t.Error(err, data.Bytes()) + data, err := p.Data("agentRunID", now) + if nil != err || nil == data { + t.Error(err, data) } if h.TxnEvents.capacity() != 3 || h.TxnEvents.NumSaved() != 0 { t.Fatal("txn events not correctly reset") @@ -433,10 +429,9 @@ func TestHarvestErrorEventsReady(t *testing.T) { if m := p.EndpointMethod(); m != "error_event_data" { t.Error(m) } - data := p.DataBuffer() - err := p.WriteData(data, "agentRunID", now) - if err != nil || data == nil { - t.Error(err, data.Bytes()) + data, err := p.Data("agentRunID", now) + if nil != err || nil == data { + t.Error(err, data) } if h.ErrorEvents.capacity() != 3 || h.ErrorEvents.NumSaved() != 0 { t.Fatal("error events not correctly reset") @@ -474,10 +469,9 @@ func TestHarvestSpanEventsReady(t *testing.T) { if m := p.EndpointMethod(); m != "span_event_data" { t.Error(m) } - data := p.DataBuffer() - err := p.WriteData(data, "agentRunID", now) - if err != nil || data == nil { - t.Error(err, data.Bytes()) + data, err := p.Data("agentRunID", now) + if nil != err || nil == data { + t.Error(err, data) } if h.SpanEvents.capacity() != 3 || h.SpanEvents.NumSaved() != 0 { t.Fatal("span events not correctly reset") @@ -1054,18 +1048,11 @@ func TestConfigurableHarvestZeroHarvestLimits(t *testing.T) { // safe. payloads := h.Ready(now.Add(2 * time.Minute)).Payloads(false) for _, p := range payloads { - data := p.DataBuffer() - err := p.WriteData(data, "agentRunID", now.Add(2*time.Minute)) - if err != nil { + js, err := p.Data("agentRunID", now.Add(2*time.Minute)) + if nil != err { t.Error(err) continue } - - var js []byte - if data != nil { - js = data.Bytes() - } - // Only metric data should be present. 
if (p.EndpointMethod() == "metric_data") != (string(js) != "") { diff --git a/v3/newrelic/internal_app.go b/v3/newrelic/internal_app.go index 39fcb1f73..27d492082 100644 --- a/v3/newrelic/internal_app.go +++ b/v3/newrelic/internal_app.go @@ -71,8 +71,7 @@ func (app *app) doHarvest(h *harvest, harvestStart time.Time, run *appRun) { payloads := h.Payloads(app.config.DistributedTracer.Enabled) for _, p := range payloads { cmd := p.EndpointMethod() - dataBuffer := p.DataBuffer() - err := p.WriteData(dataBuffer, run.Reply.RunID.String(), harvestStart) + data, err := p.Data(run.Reply.RunID.String(), harvestStart) if nil != err { app.Warn("unable to create harvest data", map[string]interface{}{ @@ -81,7 +80,7 @@ func (app *app) doHarvest(h *harvest, harvestStart time.Time, run *appRun) { }) continue } - if dataBuffer == nil { + if nil == data { continue } @@ -89,7 +88,7 @@ func (app *app) doHarvest(h *harvest, harvestStart time.Time, run *appRun) { Collector: run.Reply.Collector, RunID: run.Reply.RunID.String(), Name: cmd, - Data: dataBuffer.Bytes(), + Data: data, RequestHeadersMap: run.Reply.RequestHeadersMap, MaxPayloadSize: run.Reply.MaxPayloadSizeInBytes, } diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go index b0a7c6fc6..ad9cf9987 100644 --- a/v3/newrelic/log_event.go +++ b/v3/newrelic/log_event.go @@ -72,7 +72,7 @@ func (e *logEvent) WriteJSON(buf *bytes.Buffer) { buf.WriteByte('}') } -// MarshalJSON is used for testing only +// MarshalJSON is used for testing. func (e *logEvent) MarshalJSON() ([]byte, error) { buf := bytes.NewBuffer(make([]byte, 0, 256)) e.WriteJSON(buf) diff --git a/v3/newrelic/log_events.go b/v3/newrelic/log_events.go index 04f8d1d2c..7da43317e 100644 --- a/v3/newrelic/log_events.go +++ b/v3/newrelic/log_events.go @@ -100,7 +100,7 @@ func (events *logEvents) Add(e *logEvent) { // Delay heap initialization so that we can have // deterministic ordering for integration tests (the max // is not being reached). - heap.Init(events.logs) // Malloc required + heap.Init(events.logs) } return } @@ -132,9 +132,16 @@ func (events *logEvents) Merge(other *logEvents) { events.numSeen = int(allSeen) } -func (events *logEvents) CollectorJSON(buf *bytes.Buffer, agentRunID string) error { - if buf == nil { - return nil +func (events *logEvents) CollectorJSON(agentRunID string) ([]byte, error) { + if 0 == len(events.logs) { + return nil, nil + } + + estimate := 256 * len(events.logs) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + + if events.numSeen == 0 { + return nil, nil } buf.WriteByte('[') @@ -170,7 +177,8 @@ func (events *logEvents) CollectorJSON(buf *bytes.Buffer, agentRunID string) err buf.WriteByte(']') buf.WriteByte('}') buf.WriteByte(']') - return nil + return buf.Bytes(), nil + } // split splits the events into two. NOTE! 
The two event pools are not valid @@ -216,19 +224,8 @@ func (events *logEvents) MergeIntoHarvest(h *harvest) { h.LogEvents.mergeFailed(events) } -// DataBuffer returns a bytes Buffer with an estimated size based on the contents of the logEvents object -func (events *logEvents) DataBuffer() *bytes.Buffer { - if len(events.logs) == 0 || events.numSeen == 0 { - return nil - } - - estimate := 256 * len(events.logs) - return bytes.NewBuffer(make([]byte, 0, estimate)) -} - -// WriteData writes JSON data to a DataBuffer during a harvest cycle -func (events *logEvents) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { - return events.CollectorJSON(buf, agentRunID) +func (events *logEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return events.CollectorJSON(agentRunID) } func (events *logEvents) EndpointMethod() string { diff --git a/v3/newrelic/log_events_test.go b/v3/newrelic/log_events_test.go index d922c808d..d52e72133 100644 --- a/v3/newrelic/log_events_test.go +++ b/v3/newrelic/log_events_test.go @@ -6,7 +6,6 @@ package newrelic import ( "fmt" "testing" - "time" "github.com/newrelic/go-agent/v3/internal" ) @@ -23,7 +22,8 @@ var ( commonJSON = `[{"common":{"attributes":{"entity.guid":"testGUID","entity.name":"testEntityName","hostname":"testHostname"}},"logs":[` - infoLevel = "INFO" + infoLevel = "INFO" + unknownLevel = "UNKNOWN" ) func loggingConfigEnabled(limit int) loggingConfig { @@ -50,14 +50,11 @@ func TestBasicLogEvents(t *testing.T) { events.Add(sampleLogEvent(0.5, infoLevel, "message1")) events.Add(sampleLogEvent(0.5, infoLevel, "message2")) - buf := events.DataBuffer() - err := events.CollectorJSON(buf, agentRunID) + json, err := events.CollectorJSON(agentRunID) if nil != err { t.Fatal(err) } - json := buf.Bytes() - expected := commonJSON + `{"level":"INFO","message":"message1","timestamp":123456},` + `{"level":"INFO","message":"message2","timestamp":123456}]}]` @@ -65,23 +62,22 @@ func TestBasicLogEvents(t *testing.T) { if string(json) != expected { t.Error(string(json), expected) } - if events.numSeen != 2 { + if 2 != events.numSeen { t.Error(events.numSeen) } - if events.NumSaved() != 2 { + if 2 != events.NumSaved() { t.Error(events.NumSaved()) } } func TestEmptyLogEvents(t *testing.T) { events := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) - buf := events.DataBuffer() - err := events.CollectorJSON(buf, agentRunID) + json, err := events.CollectorJSON(agentRunID) if nil != err { t.Fatal(err) } - if nil != buf { - t.Error(string(buf.Bytes())) + if nil != json { + t.Error(string(json)) } if 0 != events.numSeen { t.Error(events.numSeen) @@ -102,9 +98,7 @@ func TestSamplingLogEvents(t *testing.T) { events.Add(sampleLogEvent(0.8, infoLevel, "e")) events.Add(sampleLogEvent(0.3, infoLevel, "f")) - buf := events.DataBuffer() - err := events.CollectorJSON(buf, agentRunID) - json := buf.Bytes() + json, err := events.CollectorJSON(agentRunID) if nil != err { t.Fatal(err) } @@ -128,20 +122,17 @@ func TestMergeEmptyLogEvents(t *testing.T) { e1 := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) e2 := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) e1.Merge(e2) - - buf := e1.DataBuffer() - err := e1.CollectorJSON(buf, agentRunID) - - if err != nil { + json, err := e1.CollectorJSON(agentRunID) + if nil != err { t.Fatal(err) } - if buf != nil { - t.Error(string(buf.Bytes())) + if nil != json { + t.Error(string(json)) } - if e1.numSeen != 0 { + if 0 != e1.numSeen { t.Error(e1.numSeen) } - if 
e1.NumSaved() != 0 { + if 0 != e1.NumSaved() { t.Error(e1.NumSaved()) } } @@ -160,11 +151,7 @@ func TestMergeFullLogEvents(t *testing.T) { e2.Add(sampleLogEvent(0.24, infoLevel, "g")) e1.Merge(e2) - - buf := e1.DataBuffer() - err := e1.CollectorJSON(buf, agentRunID) - json := buf.Bytes() - + json, err := e1.CollectorJSON(agentRunID) if nil != err { t.Fatal(err) } @@ -200,10 +187,7 @@ func TestLogEventMergeFailedSuccess(t *testing.T) { e1.mergeFailed(e2) - buf := e1.DataBuffer() - err := e1.CollectorJSON(buf, agentRunID) - json := buf.Bytes() - + json, err := e1.CollectorJSON(agentRunID) if nil != err { t.Fatal(err) } @@ -243,10 +227,7 @@ func TestLogEventMergeFailedLimitReached(t *testing.T) { e1.mergeFailed(e2) - buf := e1.DataBuffer() - err := e1.CollectorJSON(buf, agentRunID) - json := buf.Bytes() - + json, err := e1.CollectorJSON(agentRunID) if nil != err { t.Fatal(err) } @@ -275,19 +256,12 @@ func TestLogEventsSplitFull(t *testing.T) { events.Add(sampleLogEvent(priority, "INFO", fmt.Sprint(priority))) } // Test that the capacity cannot exceed the max. - if events.capacity() != 10 { + if 10 != events.capacity() { t.Error(events.capacity()) } e1, e2 := events.split() - - buf := e1.DataBuffer() - err1 := e1.CollectorJSON(buf, agentRunID) - j1 := buf.Bytes() - - buf = e2.DataBuffer() - err2 := e2.CollectorJSON(buf, agentRunID) - j2 := buf.Bytes() - + j1, err1 := e1.CollectorJSON(agentRunID) + j2, err2 := e2.CollectorJSON(agentRunID) if err1 != nil || err2 != nil { t.Fatal(err1, err2) } @@ -321,14 +295,8 @@ func TestLogEventsSplitNotFullOdd(t *testing.T) { events.Add(sampleLogEvent(priority, "INFO", fmt.Sprint(priority))) } e1, e2 := events.split() - buf := e1.DataBuffer() - err1 := e1.CollectorJSON(buf, agentRunID) - j1 := buf.Bytes() - - buf = e2.DataBuffer() - err2 := e2.CollectorJSON(buf, agentRunID) - j2 := buf.Bytes() - + j1, err1 := e1.CollectorJSON(agentRunID) + j2, err2 := e2.CollectorJSON(agentRunID) if err1 != nil || err2 != nil { t.Fatal(err1, err2) } @@ -357,14 +325,8 @@ func TestLogEventsSplitNotFullEven(t *testing.T) { events.Add(sampleLogEvent(priority, "INFO", fmt.Sprint(priority))) } e1, e2 := events.split() - buf := e1.DataBuffer() - err1 := e1.CollectorJSON(buf, agentRunID) - j1 := buf.Bytes() - - buf = e2.DataBuffer() - err2 := e2.CollectorJSON(buf, agentRunID) - j2 := buf.Bytes() - + j1, err1 := e1.CollectorJSON(agentRunID) + j2, err2 := e2.CollectorJSON(agentRunID) if err1 != nil || err2 != nil { t.Fatal(err1, err2) } @@ -398,10 +360,9 @@ func TestLogEventsZeroCapacity(t *testing.T) { if 1 != events.NumSeen() || 0 != events.NumSaved() || 0 != events.capacity() { t.Error(events.NumSeen(), events.NumSaved(), events.capacity()) } - buf := events.DataBuffer() - err := events.CollectorJSON(buf, agentRunID) - if err != nil || buf != nil { - t.Error(err, string(buf.Bytes())) + js, err := events.CollectorJSON("agentRunID") + if err != nil || js != nil { + t.Error(err, string(js)) } } @@ -418,42 +379,9 @@ func TestLogEventCollectionDisabled(t *testing.T) { if 1 != events.NumSeen() || 1 != len(events.severityCount) || 0 != events.NumSaved() || 5 != events.capacity() { t.Error(events.NumSeen(), len(events.severityCount), events.NumSaved(), events.capacity()) } - - buf := events.DataBuffer() - err := events.CollectorJSON(buf, agentRunID) - if err != nil || buf != nil { - t.Error(err, string(buf.Bytes())) - } -} - -func BenchmarkRecordLoggingMetrics(b *testing.B) { - now := time.Now() - fixedHarvestTypes := harvestMetricsTraces & harvestTxnEvents & harvestSpanEvents & 
harvestLogEvents - h := newHarvest(now, harvestConfig{ - ReportPeriods: map[harvestTypes]time.Duration{ - fixedHarvestTypes: fixedHarvestPeriod, - harvestLogEvents: time.Second * 5, - }, - LoggingConfig: loggingConfigEnabled(3), - }) - - for i := 0; i < internal.MaxLogEvents; i++ { - logEvent := logEvent{ - newPriority(), - 123456, - "INFO", - fmt.Sprintf("User 'xyz' logged in %d", i), - "123456789ADF", - "ADF09876565", - } - - h.LogEvents.Add(&logEvent) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - b.ReportAllocs() - h.LogEvents.RecordLoggingMetrics(h.Metrics) + js, err := events.CollectorJSON("agentRunID") + if err != nil || js != nil { + t.Error(err, string(js)) } } @@ -476,30 +404,26 @@ func BenchmarkLogEventsAdd(b *testing.B) { } } -// Benchmark the cost of harvesting a full log event collector func BenchmarkLogEventsCollectorJSON(b *testing.B) { events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) - - for i := 0; i < internal.MaxLogEvents; i++ { - event := &logEvent{ - priority: newPriority(), - timestamp: 123456, - severity: "INFO", - message: "test message", - spanID: "Ad300dra7re89", - traceID: "2234iIhfLlejrJ0", - } - events.Add(event) + event := &logEvent{ + priority: newPriority(), + timestamp: 123456, + severity: "INFO", + message: "test message", + spanID: "Ad300dra7re89", + traceID: "2234iIhfLlejrJ0", } + events.Add(event) + b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - buf := events.DataBuffer() - err := events.CollectorJSON(buf, agentRunID) + js, err := events.CollectorJSON(agentRunID) if nil != err { - b.Fatal(err) + b.Fatal(err, js) } } } diff --git a/v3/newrelic/metrics.go b/v3/newrelic/metrics.go index cc05c3115..273a5bead 100644 --- a/v3/newrelic/metrics.go +++ b/v3/newrelic/metrics.go @@ -176,10 +176,13 @@ func (mt *metricTable) addApdex(name, scope string, apdexThreshold time.Duration mt.add(name, scope, data, force) } -func (mt *metricTable) CollectorJSON(buf *bytes.Buffer, agentRunID string, now time.Time) error { - if buf == nil { - return nil +func (mt *metricTable) CollectorJSON(agentRunID string, now time.Time) ([]byte, error) { + if 0 == len(mt.metrics) { + return nil, nil } + estimatedBytesPerMetric := 128 + estimatedLen := len(mt.metrics) * estimatedBytesPerMetric + buf := bytes.NewBuffer(make([]byte, 0, estimatedLen)) buf.WriteByte('[') jsonx.AppendString(buf, agentRunID) @@ -221,20 +224,11 @@ func (mt *metricTable) CollectorJSON(buf *bytes.Buffer, agentRunID string, now t buf.WriteByte(']') buf.WriteByte(']') - return nil + return buf.Bytes(), nil } -func (mt *metricTable) DataBuffer() *bytes.Buffer { - if len(mt.metrics) == 0 { - return nil - } - - estimatedBytesPerMetric := 128 - estimatedLen := len(mt.metrics) * estimatedBytesPerMetric - return bytes.NewBuffer(make([]byte, 0, estimatedLen)) -} -func (mt *metricTable) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { - return mt.CollectorJSON(buf, agentRunID, harvestStart) +func (mt *metricTable) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return mt.CollectorJSON(agentRunID, harvestStart) } func (mt *metricTable) MergeIntoHarvest(h *harvest) { h.Metrics.mergeFailed(mt) diff --git a/v3/newrelic/metrics_test.go b/v3/newrelic/metrics_test.go index c33e63713..148622b94 100644 --- a/v3/newrelic/metrics_test.go +++ b/v3/newrelic/metrics_test.go @@ -19,13 +19,12 @@ var ( func TestEmptyMetrics(t *testing.T) { mt := newMetricTable(20, start) - buf := mt.DataBuffer() - err := mt.CollectorJSON(buf, 
`12345`, end) + js, err := mt.CollectorJSON(`12345`, end) if nil != err { t.Fatal(err) } - if nil != buf { - t.Error(string(buf.Bytes())) + if nil != js { + t.Error(string(js)) } } @@ -63,10 +62,8 @@ func TestMetrics(t *testing.T) { {Name: "count 1", Scope: "", Forced: false, Data: []float64{1, 0, 0, 0, 0, 0}}, }) - buf := mt.DataBuffer() - err := mt.CollectorJSON(buf, `12345`, end) - js := buf.Bytes() - if err != nil { + js, err := mt.Data("12345", end) + if nil != err { t.Error(err) } // The JSON metric order is not deterministic, so we merely test that it @@ -280,13 +277,10 @@ func BenchmarkMetricTableCollectorJSON(b *testing.B) { } } - buf := mt.DataBuffer() - err := mt.CollectorJSON(buf, "12345", time.Now()) + data, err := mt.CollectorJSON("12345", time.Now()) if nil != err { b.Fatal(err) } - - data := buf.Bytes() if err := isValidJSON(data); nil != err { b.Fatal(err, string(data)) } @@ -297,7 +291,7 @@ func BenchmarkMetricTableCollectorJSON(b *testing.B) { id := "12345" now := time.Now() for i := 0; i < b.N; i++ { - mt.CollectorJSON(buf, id, now) + mt.CollectorJSON(id, now) } } diff --git a/v3/newrelic/serverless.go b/v3/newrelic/serverless.go index 614900f82..b2e45c368 100644 --- a/v3/newrelic/serverless.go +++ b/v3/newrelic/serverless.go @@ -80,8 +80,7 @@ func (sh *serverlessHarvest) Write(arn string, writer io.Writer) { for _, p := range payloads { agentRunID := "" cmd := p.EndpointMethod() - data := p.DataBuffer() - err := p.WriteData(data, agentRunID, time.Now()) + data, err := p.Data(agentRunID, time.Now()) if err != nil { sh.logger.Error("error creating payload json", map[string]interface{}{ "command": cmd, @@ -89,7 +88,7 @@ func (sh *serverlessHarvest) Write(arn string, writer io.Writer) { }) continue } - if data == nil { + if nil == data { continue } // NOTE! 
This code relies on the fact that each payload is @@ -105,7 +104,7 @@ func (sh *serverlessHarvest) Write(arn string, writer io.Writer) { "command": cmd, }) } - d := json.RawMessage(data.Bytes()) + d := json.RawMessage(data) harvestPayloads[cmd] = &d } diff --git a/v3/newrelic/slow_queries.go b/v3/newrelic/slow_queries.go index 97ecec721..7557d3b8a 100644 --- a/v3/newrelic/slow_queries.go +++ b/v3/newrelic/slow_queries.go @@ -246,19 +246,14 @@ func (slows *slowQueries) WriteJSON(buf *bytes.Buffer) { buf.WriteByte(']') } -func (slows *slowQueries) DataBuffer() *bytes.Buffer { +func (slows *slowQueries) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { if 0 == len(slows.priorityQueue) { - return nil + return nil, nil } estimate := 1024 * len(slows.priorityQueue) - return bytes.NewBuffer(make([]byte, 0, estimate)) -} -func (slows *slowQueries) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { - if buf == nil { - return nil - } + buf := bytes.NewBuffer(make([]byte, 0, estimate)) slows.WriteJSON(buf) - return nil + return buf.Bytes(), nil } func (slows *slowQueries) MergeIntoHarvest(newHarvest *harvest) { diff --git a/v3/newrelic/slow_queries_test.go b/v3/newrelic/slow_queries_test.go index 2b592091e..3d831f427 100644 --- a/v3/newrelic/slow_queries_test.go +++ b/v3/newrelic/slow_queries_test.go @@ -13,10 +13,9 @@ import ( func TestEmptySlowQueriesData(t *testing.T) { slows := newSlowQueries(maxHarvestSlowSQLs) - buf := slows.DataBuffer() - err := slows.WriteData(buf, "agentRunID", time.Now()) - if buf != nil { - t.Error(string(buf.Bytes()), err) + js, err := slows.Data("agentRunID", time.Now()) + if nil != js || nil != err { + t.Error(string(js), err) } } @@ -54,13 +53,7 @@ func TestSlowQueriesBasic(t *testing.T) { }) harvestSlows := newSlowQueries(maxHarvestSlowSQLs) harvestSlows.Merge(txnSlows, txnEvent) - buf := harvestSlows.DataBuffer() - err = harvestSlows.WriteData(buf, "agentRunID", time.Now()) - if err != nil { - t.Error(err) - } - - js := buf.Bytes() + js, err := harvestSlows.Data("agentRunID", time.Now()) expect := compactJSONString(`[[ [ "WebTransaction/Go/hello", @@ -125,13 +118,7 @@ func TestSlowQueriesExcludeURI(t *testing.T) { }) harvestSlows := newSlowQueries(maxHarvestSlowSQLs) harvestSlows.Merge(txnSlows, txnEvent) - buf := harvestSlows.DataBuffer() - err = harvestSlows.WriteData(buf, "agentRunID", time.Now()) - if err != nil { - t.Error(err) - } - - js := buf.Bytes() + js, err := harvestSlows.Data("agentRunID", time.Now()) expect := compactJSONString(`[[ [ "WebTransaction/Go/hello", @@ -195,13 +182,7 @@ func TestSlowQueriesAggregation(t *testing.T) { for _, idx := range perm { sq.observeInstance(slows[idx]) } - buf := sq.DataBuffer() - err := sq.WriteData(buf, "agentRunID", time.Now()) - if err != nil { - t.Error(err) - } - - js := buf.Bytes() + js, err := sq.Data("agentRunID", time.Now()) expect := compactJSONString(`[[ ["Txn/241","",2296612630,"41","Datastore/41",1,241000,241000,241000,{}], ["Txn/242","",2279835011,"42","Datastore/42",2,384000,142000,242000,{}], @@ -271,13 +252,7 @@ func TestSlowQueriesBetterCAT(t *testing.T) { }) harvestSlows := newSlowQueries(maxHarvestSlowSQLs) harvestSlows.Merge(txnSlows, txnEvent) - buf := harvestSlows.DataBuffer() - err = harvestSlows.WriteData(buf, "agentRunID", time.Now()) - if err != nil { - t.Error(err) - } - - js := buf.Bytes() + js, err := harvestSlows.Data("agentRunID", time.Now()) expect := compactJSONString(`[[ [ "WebTransaction/Go/hello", diff --git a/v3/newrelic/span_events.go 
b/v3/newrelic/span_events.go index b04315e90..978e620bb 100644 --- a/v3/newrelic/span_events.go +++ b/v3/newrelic/span_events.go @@ -136,17 +136,8 @@ func (events *spanEvents) MergeIntoHarvest(h *harvest) { h.SpanEvents.mergeFailed(events.analyticsEvents) } -func (events *spanEvents) DataBuffer() *bytes.Buffer { - if 0 == len(events.events) { - return nil - } - - estimate := 256 * len(events.events) - return bytes.NewBuffer(make([]byte, 0, estimate)) -} - -func (events *spanEvents) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { - return events.CollectorJSON(buf, agentRunID) +func (events *spanEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return events.CollectorJSON(agentRunID) } func (events *spanEvents) EndpointMethod() string { diff --git a/v3/newrelic/txn_events.go b/v3/newrelic/txn_events.go index a395a5025..61bd9aff4 100644 --- a/v3/newrelic/txn_events.go +++ b/v3/newrelic/txn_events.go @@ -170,17 +170,8 @@ func (events *txnEvents) MergeIntoHarvest(h *harvest) { h.TxnEvents.mergeFailed(events.analyticsEvents) } -func (events *txnEvents) DataBuffer() *bytes.Buffer { - if len(events.events) == 0 { - return nil - } - - estimate := 256 * len(events.events) - return bytes.NewBuffer(make([]byte, 0, estimate)) -} - -func (events *txnEvents) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { - return events.CollectorJSON(buf, agentRunID) +func (events *txnEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return events.CollectorJSON(agentRunID) } func (events *txnEvents) EndpointMethod() string { diff --git a/v3/newrelic/txn_events_test.go b/v3/newrelic/txn_events_test.go index 4350be139..6157e0c6d 100644 --- a/v3/newrelic/txn_events_test.go +++ b/v3/newrelic/txn_events_test.go @@ -243,10 +243,8 @@ func TestTxnEventsPayloadsEmpty(t *testing.T) { if len(ps) != 1 { t.Error(ps) } - data := events.DataBuffer() - err := ps[0].WriteData(data, "agentRunID", time.Now()) - if data != nil || err != nil { - t.Error(data.Bytes(), err) + if data, err := ps[0].Data("agentRunID", time.Now()); data != nil || err != nil { + t.Error(data, err) } } @@ -259,9 +257,7 @@ func TestTxnEventsPayloadsUnderLimit(t *testing.T) { if len(ps) != 1 { t.Error(ps) } - data := events.DataBuffer() - err := ps[0].WriteData(data, "agentRunID", time.Now()) - if data == nil || err != nil { + if data, err := ps[0].Data("agentRunID", time.Now()); data == nil || err != nil { t.Error(data, err) } } @@ -275,16 +271,11 @@ func TestTxnEventsPayloadsOverLimit(t *testing.T) { if len(ps) != 2 { t.Error(ps) } - data := events.DataBuffer() - err := ps[0].WriteData(data, "agentRunID", time.Now()) - if data == nil || err != nil { - t.Error(data.Bytes(), err) + if data, err := ps[0].Data("agentRunID", time.Now()); data == nil || err != nil { + t.Error(data, err) } - - data = events.DataBuffer() - err = ps[1].WriteData(data, "agentRunID", time.Now()) - if data == nil || err != nil { - t.Error(data.Bytes(), err) + if data, err := ps[1].Data("agentRunID", time.Now()); data == nil || err != nil { + t.Error(data, err) } } diff --git a/v3/newrelic/txn_trace.go b/v3/newrelic/txn_trace.go index a3aa7facd..6050ba69e 100644 --- a/v3/newrelic/txn_trace.go +++ b/v3/newrelic/txn_trace.go @@ -392,9 +392,9 @@ func (traces *harvestTraces) Witness(trace harvestTrace) { } } -func (traces *harvestTraces) DataBuffer() *bytes.Buffer { +func (traces *harvestTraces) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { if traces.Len() == 0 { 
- return nil + return nil, nil } // This estimate is used to guess the size of the buffer. No worries if @@ -408,13 +408,7 @@ func (traces *harvestTraces) DataBuffer() *bytes.Buffer { estimate += 100 * t.Trace.nodes.Len() } - return bytes.NewBuffer(make([]byte, 0, estimate)) -} - -func (traces *harvestTraces) WriteData(buf *bytes.Buffer, agentRunID string, harvestStart time.Time) error { - if buf == nil { - return nil - } + buf := bytes.NewBuffer(make([]byte, 0, estimate)) buf.WriteByte('[') jsonx.AppendString(buf, agentRunID) buf.WriteByte(',') @@ -441,7 +435,7 @@ func (traces *harvestTraces) WriteData(buf *bytes.Buffer, agentRunID string, har buf.WriteByte(']') buf.WriteByte(']') - return nil + return buf.Bytes(), nil } func (traces *harvestTraces) slice() []*harvestTrace { diff --git a/v3/newrelic/txn_trace_test.go b/v3/newrelic/txn_trace_test.go index 80102a81b..e49aa6035 100644 --- a/v3/newrelic/txn_trace_test.go +++ b/v3/newrelic/txn_trace_test.go @@ -813,10 +813,9 @@ func TestTxnTraceSegmentThreshold(t *testing.T) { func TestEmptyHarvestTraces(t *testing.T) { start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) ht := newHarvestTraces() - data := ht.DataBuffer() - err := ht.WriteData(data, "12345", start) - if err != nil || data != nil { - t.Error(string(data.Bytes()), err) + js, err := ht.Data("12345", start) + if nil != err || nil != js { + t.Error(string(js), err) } } @@ -1170,9 +1169,7 @@ func TestTraceJSON(t *testing.T) { ] ]` - data := ht.DataBuffer() - err := ht.WriteData(data, "12345", start) - js := data.Bytes() + js, err := ht.Data("12345", start) if nil != err { t.Fatal(err) } @@ -1226,9 +1223,7 @@ func TestTraceCatGUID(t *testing.T) { ] ]` - data := ht.DataBuffer() - err := ht.WriteData(data, "12345", start) - js := data.Bytes() + js, err := ht.Data("12345", start) if nil != err { t.Fatal(err) } @@ -1289,9 +1284,7 @@ func TestTraceDistributedTracingGUID(t *testing.T) { ] ]` - data := ht.DataBuffer() - err := ht.WriteData(data, "12345", start) - js := data.Bytes() + js, err := ht.Data("12345", start) if nil != err { t.Fatal(err) } From 77fc73b22d16c7cab40eefddeb539e98d2e9a965 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Wed, 15 Jun 2022 11:32:23 -0400 Subject: [PATCH 26/47] better benchmarks for log events --- v3/newrelic/log_events.go | 2 +- v3/newrelic/log_events_test.go | 60 +++++++++++++++++++++++++++++++++- 2 files changed, 60 insertions(+), 2 deletions(-) diff --git a/v3/newrelic/log_events.go b/v3/newrelic/log_events.go index 7da43317e..bceaf60c1 100644 --- a/v3/newrelic/log_events.go +++ b/v3/newrelic/log_events.go @@ -137,7 +137,7 @@ func (events *logEvents) CollectorJSON(agentRunID string) ([]byte, error) { return nil, nil } - estimate := 256 * len(events.logs) + estimate := 500 * len(events.logs) buf := bytes.NewBuffer(make([]byte, 0, estimate)) if events.numSeen == 0 { diff --git a/v3/newrelic/log_events_test.go b/v3/newrelic/log_events_test.go index d52e72133..7e97eca9f 100644 --- a/v3/newrelic/log_events_test.go +++ b/v3/newrelic/log_events_test.go @@ -6,6 +6,7 @@ package newrelic import ( "fmt" "testing" + "time" "github.com/newrelic/go-agent/v3/internal" ) @@ -405,12 +406,38 @@ func BenchmarkLogEventsAdd(b *testing.B) { } func BenchmarkLogEventsCollectorJSON(b *testing.B) { + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) + for i := 0; i < internal.MaxLogEvents; i++ { + event := &logEvent{ + priority: newPriority(), + timestamp: 123456, + severity: "INFO", + message: "This is a log message that 
represents an estimate for how long the average log message is. The average log payload is 700 bytese.", + spanID: "Ad300dra7re89", + traceID: "2234iIhfLlejrJ0", + } + + events.Add(event) + } + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + js, err := events.CollectorJSON(agentRunID) + if nil != err { + b.Fatal(err, js) + } + } +} + +func BenchmarkLogEventCollectorJSON_OneEvent(b *testing.B) { events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) event := &logEvent{ priority: newPriority(), timestamp: 123456, severity: "INFO", - message: "test message", + message: "This is a log message that represents an estimate for how long the average log message is. The average log payload is 700 bytese.", spanID: "Ad300dra7re89", traceID: "2234iIhfLlejrJ0", } @@ -427,3 +454,34 @@ func BenchmarkLogEventsCollectorJSON(b *testing.B) { } } } + +func BenchmarkRecordLoggingMetrics(b *testing.B) { + now := time.Now() + fixedHarvestTypes := harvestMetricsTraces & harvestTxnEvents & harvestSpanEvents & harvestLogEvents + h := newHarvest(now, harvestConfig{ + ReportPeriods: map[harvestTypes]time.Duration{ + fixedHarvestTypes: fixedHarvestPeriod, + harvestLogEvents: time.Second * 5, + }, + LoggingConfig: loggingConfigEnabled(3), + }) + + for i := 0; i < internal.MaxLogEvents; i++ { + logEvent := logEvent{ + newPriority(), + 123456, + "INFO", + fmt.Sprintf("User 'xyz' logged in %d", i), + "123456789ADF", + "ADF09876565", + } + + h.LogEvents.Add(&logEvent) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.ReportAllocs() + h.LogEvents.RecordLoggingMetrics(h.Metrics) + } +} From e6083eddedf8032cdfd28ba264b6ac173ccc7559 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Wed, 15 Jun 2022 15:26:14 -0400 Subject: [PATCH 27/47] more granular benchmarks for pinpointing json buffer writing runtime cost issues --- v3/internal/jsonx/encode_test.go | 24 ++++++++++++++++++++++++ v3/newrelic/json_object_writer_test.go | 26 ++++++++++++++++++++++++++ v3/newrelic/log_event.go | 10 ++++++++-- v3/newrelic/log_event_test.go | 23 +++++++++++++++++++++++ v3/newrelic/log_events.go | 5 ++--- v3/newrelic/log_events_test.go | 2 +- 6 files changed, 84 insertions(+), 6 deletions(-) create mode 100644 v3/newrelic/json_object_writer_test.go diff --git a/v3/internal/jsonx/encode_test.go b/v3/internal/jsonx/encode_test.go index fed3ab7f7..f5b7c949b 100644 --- a/v3/internal/jsonx/encode_test.go +++ b/v3/internal/jsonx/encode_test.go @@ -180,3 +180,27 @@ func TestAppendString(t *testing.T) { } } } + +func BenchmarkAppendString(b *testing.B) { + buf := &bytes.Buffer{} + + for i := 0; i < b.N; i++ { + AppendString(buf, "s") + } +} + +func BenchmarkAppendString10(b *testing.B) { + buf := &bytes.Buffer{} + + for i := 0; i < b.N; i++ { + AppendString(buf, "qwertyuiop") + } +} + +func BenchmarkWriteString10(b *testing.B) { + buf := &bytes.Buffer{} + + for i := 0; i < b.N; i++ { + buf.WriteString("qwertyuiop") + } +} diff --git a/v3/newrelic/json_object_writer_test.go b/v3/newrelic/json_object_writer_test.go new file mode 100644 index 000000000..fc2c9723c --- /dev/null +++ b/v3/newrelic/json_object_writer_test.go @@ -0,0 +1,26 @@ +package newrelic + +import ( + "bytes" + "testing" +) + +func BenchmarkStringFieldShort(b *testing.B) { + writer := jsonFieldsWriter{ + buf: bytes.NewBuffer(make([]byte, 300)), + } + + for i := 0; i < b.N; i++ { + writer.stringField("testkey", "this is a short string") + } +} + +func BenchmarkStringFieldLong(b *testing.B) { + writer := jsonFieldsWriter{ + buf: 
bytes.NewBuffer(make([]byte, 300)), + } + + for i := 0; i < b.N; i++ { + writer.stringField("testkey", "this is a long string that will capture the runtime performance impact that writing more bytes has on this function") + } +} diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go index ad9cf9987..b8e7aa715 100644 --- a/v3/newrelic/log_event.go +++ b/v3/newrelic/log_event.go @@ -11,6 +11,7 @@ import ( "strings" ) +// Exported Constants for log decorators const ( // LogSeverityFieldName is the name of the log level field in New Relic logging JSON LogSeverityFieldName = "level" @@ -34,7 +35,12 @@ const ( MaxLogLength = 32768 ) -// for internal user only +// internal variable names and constants +const ( + // number of bytes expected to be needed for the average log message + averageLogSizeEstimate = 400 +) + type logEvent struct { priority priority timestamp int64 @@ -74,7 +80,7 @@ func (e *logEvent) WriteJSON(buf *bytes.Buffer) { // MarshalJSON is used for testing. func (e *logEvent) MarshalJSON() ([]byte, error) { - buf := bytes.NewBuffer(make([]byte, 0, 256)) + buf := bytes.NewBuffer(make([]byte, 0, averageLogSizeEstimate)) e.WriteJSON(buf) return buf.Bytes(), nil } diff --git a/v3/newrelic/log_event_test.go b/v3/newrelic/log_event_test.go index e7a418888..b3ba98a00 100644 --- a/v3/newrelic/log_event_test.go +++ b/v3/newrelic/log_event_test.go @@ -1,6 +1,7 @@ package newrelic import ( + "bytes" "fmt" "math/rand" "testing" @@ -165,3 +166,25 @@ func BenchmarkRecordLog(b *testing.B) { recordLogBenchmarkHelper(b, &data, harvest) } } + +func BenchmarkWriteJSON(b *testing.B) { + data := LogData{ + Timestamp: 123456, + Severity: "INFO", + Message: "This is a log message that represents an estimate for how long the average log message is. The average log payload is 700 bytese.", + } + + event, err := data.toLogEvent() + if err != nil { + b.Fail() + } + + buf := bytes.NewBuffer(make([]byte, 0, averageLogSizeEstimate)) + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + event.WriteJSON(buf) + } +} diff --git a/v3/newrelic/log_events.go b/v3/newrelic/log_events.go index bceaf60c1..66643a31a 100644 --- a/v3/newrelic/log_events.go +++ b/v3/newrelic/log_events.go @@ -133,11 +133,11 @@ func (events *logEvents) Merge(other *logEvents) { } func (events *logEvents) CollectorJSON(agentRunID string) ([]byte, error) { - if 0 == len(events.logs) { + if len(events.logs) == 0 { return nil, nil } - estimate := 500 * len(events.logs) + estimate := averageLogSizeEstimate * len(events.logs) buf := bytes.NewBuffer(make([]byte, 0, estimate)) if events.numSeen == 0 { @@ -178,7 +178,6 @@ func (events *logEvents) CollectorJSON(agentRunID string) ([]byte, error) { buf.WriteByte('}') buf.WriteByte(']') return buf.Bytes(), nil - } // split splits the events into two. NOTE! The two event pools are not valid diff --git a/v3/newrelic/log_events_test.go b/v3/newrelic/log_events_test.go index 7e97eca9f..e2841fe69 100644 --- a/v3/newrelic/log_events_test.go +++ b/v3/newrelic/log_events_test.go @@ -437,7 +437,7 @@ func BenchmarkLogEventCollectorJSON_OneEvent(b *testing.B) { priority: newPriority(), timestamp: 123456, severity: "INFO", - message: "This is a log message that represents an estimate for how long the average log message is. The average log payload is 700 bytese.", + message: "This is a log message that represents an estimate for how long the average log message is. 
The average log payload is 700 bytes.", spanID: "Ad300dra7re89", traceID: "2234iIhfLlejrJ0", }
From 5594e30b47b095b6830c6bb848592bbf95693a13 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Wed, 15 Jun 2022 16:42:35 -0400 Subject: [PATCH 28/47] logging readme improved --- .../logcontext-v2/nrzerolog/Readme.md | 46 ++++++++++++++----- 1 file changed, 34 insertions(+), 12 deletions(-)
diff --git a/v3/integrations/logcontext-v2/nrzerolog/Readme.md b/v3/integrations/logcontext-v2/nrzerolog/Readme.md index af99446f1..0b2769f8a 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/Readme.md +++ b/v3/integrations/logcontext-v2/nrzerolog/Readme.md @@ -49,6 +49,7 @@ func main() { os.Exit(1) } + // Send logs to New Relic outside of a transaction nrHook := nrzerolog.NewRelicHook{ App: app, } @@ -56,22 +57,43 @@ func main() { // Wrap logger with New Relic Hook nrLogger := baseLogger.Hook(nrHook) nrLogger.Info().Msg("Hello World") + + // Send logs to New Relic inside of a transaction + txn := app.StartTransaction("My Transaction") + ctx := newrelic.NewContext(context.Background(), txn) + + nrTxnHook := nrzerolog.NewRelicHook{ + App: app, + Context: ctx, + } + + txnLogger := baseLogger.Hook(nrTxnHook) + txnLogger.Debug().Msg("This is a transaction log") + + txn.End() } ```
## Usage -When zerolog hooks a logger object, a copy of that logger is made and the -hook is appended to it. Zerolog will *Never* check if you duplicate information -in your logger, so it is very important to treat each logger as an immutable step -in how you generate your logs. If you apply a hook function to a logger that is -already hooked, it will capture all logs generated from that logger twice. -To avoid that issue, we recommend that you create a base logger object with the -formatting settings you prefer, then new hooked loggers from that base logger. - -The plugin captures the log level, and the message from zerolog. It will generate a -timestamp at the moment the hook function is called in zerolog. In most cases, this -timestamp will be the same as the time posted in zerolog, however in some corner -cases, a very small amount of offset is possible. +Please enable the agent to ingest your logs by calling newrelic.ConfigZerologPluginEnabled(true) +when setting up your application. This will enable log forwarding and log metrics in the +go agent automatically. + +This integration for the zerolog logging framework uses a built-in feature +of zerolog called hook functions. Zerolog loggers can be modified +to have hook functions run on them before each write is executed. When a +logger is hooked, meaning a hook function was added to that logger with the Hook() +function, a copy of that logger is created with those changes. Note that zerolog +will *never* attempt to verify that the hook functions were not duplicated, or +that fields are not repeated in any way. As a result, we recommend that you create +a base logger that is configured the way you prefer to use zerolog, then +create hooked loggers from that base logger to send log data to New Relic. + +The plugin captures the log level and the message from zerolog. It will also collect +distributed tracing data from your transaction context. At the moment the hook function is +called in zerolog, a timestamp will be generated for your log. In most cases, this +timestamp will be the same as the time posted in the zerolog log message; however, +there could be a slight offset depending on the performance of your system.
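For reference, a minimal sketch of the base-logger-then-hooked-logger pattern described in the README change above, assembled from the APIs this patch series already uses (the app name, transaction name, and log messages are illustrative placeholders, not part of the original change):

```go
package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrzerolog"
	"github.com/newrelic/go-agent/v3/newrelic"
	"github.com/rs/zerolog"
)

func main() {
	app, err := newrelic.NewApplication(
		newrelic.ConfigAppName("Hook Pattern Example"), // illustrative app name
		newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")),
		newrelic.ConfigZerologPluginEnabled(true),
	)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	// Configure formatting once on a single base logger; never hook it directly.
	baseLogger := zerolog.New(os.Stdout).With().Timestamp().Logger()

	// Background (non-transaction) logs: hook a copy of the base logger.
	bgLogger := baseLogger.Hook(nrzerolog.NewRelicHook{App: app})
	bgLogger.Info().Msg("background work started")

	// Transaction logs: derive another hooked copy that carries the transaction context.
	txn := app.StartTransaction("example transaction")
	ctx := newrelic.NewContext(context.Background(), txn)
	txnLogger := baseLogger.Hook(nrzerolog.NewRelicHook{App: app, Context: ctx})
	txnLogger.Info().Msg("inside the transaction")
	txn.End()

	// Flush data before the process exits.
	app.Shutdown(10 * time.Second)
}
```

Because Hook() returns a copy, the base logger itself never forwards anything, so deriving background and per-transaction loggers from it cannot double-report a message.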
From 6cf172d3865b1f592e4ddb790500aac7f17c79a0 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Wed, 15 Jun 2022 23:59:19 -0400 Subject: [PATCH 29/47] return go.mod to upstream --- v3/go.mod | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/v3/go.mod b/v3/go.mod index 2ada3801e..92bc2eef5 100644 --- a/v3/go.mod +++ b/v3/go.mod @@ -3,6 +3,6 @@ module github.com/newrelic/go-agent/v3 go 1.7 require ( - github.com/golang/protobuf v1.3.3 - google.golang.org/grpc v1.27.0 + github.com/golang/protobuf v1.4.3 + google.golang.org/grpc v1.39.0 ) From 2e6700e93818bef216c81635ffc4a0d0d801242f Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Thu, 16 Jun 2022 00:02:07 -0400 Subject: [PATCH 30/47] remove comparison to write string --- v3/internal/jsonx/encode_test.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/v3/internal/jsonx/encode_test.go b/v3/internal/jsonx/encode_test.go index f5b7c949b..cc0f5934c 100644 --- a/v3/internal/jsonx/encode_test.go +++ b/v3/internal/jsonx/encode_test.go @@ -196,11 +196,3 @@ func BenchmarkAppendString10(b *testing.B) { AppendString(buf, "qwertyuiop") } } - -func BenchmarkWriteString10(b *testing.B) { - buf := &bytes.Buffer{} - - for i := 0; i < b.N; i++ { - buf.WriteString("qwertyuiop") - } -} From 0901501b52eca098e9310953f48788d81108d5fc Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Fri, 17 Jun 2022 14:28:25 -0400 Subject: [PATCH 31/47] group struct fields by type to avoid overallocation of contiguous memory --- v3/newrelic/tracing.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/v3/newrelic/tracing.go b/v3/newrelic/tracing.go index ee8db4983..0c8014322 100644 --- a/v3/newrelic/tracing.go +++ b/v3/newrelic/tracing.go @@ -21,32 +21,32 @@ import ( // https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Events-PORTED.md // https://newrelic.atlassian.net/wiki/display/eng/Agent+Support+for+Synthetics%3A+Forced+Transaction+Traces+and+Analytic+Events type txnEvent struct { + HasError bool FinalName string + Attrs *attributes + CrossProcess txnCrossProcess + BetterCAT betterCAT Start time.Time Duration time.Duration TotalTime time.Duration Queuing time.Duration Zone apdexZone - Attrs *attributes externalCallCount uint64 externalDuration time.Duration datastoreCallCount uint64 datastoreDuration time.Duration - CrossProcess txnCrossProcess - BetterCAT betterCAT - HasError bool } // betterCAT stores the transaction's priority and all fields related // to a DistributedTracer's Cross-Application Trace. 
type betterCAT struct { Enabled bool - Priority priority Sampled bool - Inbound *payload + Priority priority TxnID string TraceID string TransportType string + Inbound *payload } // SetTraceAndTxnIDs takes a single 32 character ID and uses it to From f3b5b5b2427c912c49a3e7374ffc9bd4511a845e Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Fri, 17 Jun 2022 18:30:08 -0400 Subject: [PATCH 32/47] piggyback log events on transactions to pool their priorities --- v3/examples/server/main.go | 32 +++++++++++++++++--------------- v3/newrelic/internal_app.go | 18 ++++++++++++++++-- v3/newrelic/internal_txn.go | 20 ++++++++++++++++++++ v3/newrelic/log_event.go | 14 -------------- v3/newrelic/tracing.go | 1 + 5 files changed, 54 insertions(+), 31 deletions(-) diff --git a/v3/examples/server/main.go b/v3/examples/server/main.go index 16b43b2ea..3d1e65bc8 100644 --- a/v3/examples/server/main.go +++ b/v3/examples/server/main.go @@ -252,7 +252,6 @@ func main() { newrelic.ConfigAppName("Example App"), newrelic.ConfigFromEnvironment(), newrelic.ConfigDebugLogger(os.Stdout), - newrelic.ConfigDistributedTracerEnabled(true), newrelic.ConfigAppLogForwardingEnabled(true), ) if err != nil { @@ -278,28 +277,31 @@ func main() { http.HandleFunc(newrelic.WrapHandleFunc(app, "/async", async)) http.HandleFunc(newrelic.WrapHandleFunc(app, "/message", message)) http.HandleFunc("/log", func(w http.ResponseWriter, req *http.Request) { - // Transactions started without an http.Request are classified as - // background transactions. - txn := app.StartTransaction("Log") - defer txn.End() - - ctx := newrelic.NewContext(context.Background(), txn) - // Versions of go prior to 1.17 do not have a built in function for Unix Milli time. // For go versions 1.17+ use time.Now().UnixMilli() to generate timestamps timestamp := time.Now().UnixNano() / int64(time.Millisecond) - - data := newrelic.LogData{ + app.RecordLog(newrelic.LogData{ Timestamp: timestamp, Message: "Log Message", Severity: "info", - Context: ctx, - } + }) - app.RecordLog(data) + io.WriteString(w, "A log message was recorded") + }) - io.WriteString(w, "Log") - time.Sleep(150 * time.Millisecond) + http.HandleFunc("/transaction_log", func(w http.ResponseWriter, req *http.Request) { + txn := app.StartTransaction("Log Transaction") + defer txn.End() + ctx := newrelic.NewContext(context.Background(), txn) + + app.RecordLog(newrelic.LogData{ + Timestamp: time.Now().UnixNano() / int64(time.Millisecond), + Message: "Transaction Log Message", + Severity: "info", + Context: ctx, + }) + + io.WriteString(w, "A log message was recorded as part of a transaction") }) http.HandleFunc("/background", func(w http.ResponseWriter, req *http.Request) { diff --git a/v3/newrelic/internal_app.go b/v3/newrelic/internal_app.go index 27d492082..2a21025eb 100644 --- a/v3/newrelic/internal_app.go +++ b/v3/newrelic/internal_app.go @@ -596,8 +596,22 @@ func (app *app) RecordLog(log *LogData) error { return err } - run, _ := app.getState() - app.Consume(run.Reply.RunID, &event) + var txn *Transaction + if log.Context != nil { + txn = FromContext(log.Context) + } + + // Whenever a log is part of a transaction, store it on the transaction + // to ensure it gets correctly prioritized. 
+ if txn != nil { + metadata := txn.GetTraceMetadata() + event.spanID = metadata.SpanID + event.traceID = metadata.TraceID + txn.thread.StoreLog(&event) + } else { + run, _ := app.getState() + app.Consume(run.Reply.RunID, &event) + } return nil }
diff --git a/v3/newrelic/internal_txn.go b/v3/newrelic/internal_txn.go index 942056aee..cfc6d2101 100644 --- a/v3/newrelic/internal_txn.go +++ b/v3/newrelic/internal_txn.go @@ -226,6 +226,19 @@ func (thd *thread) SetWebResponse(w http.ResponseWriter) http.ResponseWriter { }) } +func (thd *thread) StoreLog(log *logEvent) { + txn := thd.txn + txn.Lock() + defer txn.Unlock() + + // Copy log data into the slice so that the stack frame can return without needing to allocate heap memory + if txn.logs == nil { + txn.logs = []logEvent{*log} + } else { + txn.logs = append(txn.logs, *log) + } +} + func (txn *txn) freezeName() { if txn.ignore || ("" != txn.FinalName) { return @@ -262,6 +275,13 @@ func (txn *txn) MergeIntoHarvest(h *harvest) { createTxnMetrics(&txn.txnData, h.Metrics) mergeBreakdownMetrics(&txn.txnData, h.Metrics) + // Dump log events into harvest + // Note: this will create a surge of log events that could affect sampling. + for _, logEvent := range txn.logs { + logEvent.priority = priority + h.LogEvents.Add(&logEvent) + } + if txn.Config.TransactionEvents.Enabled { // Allocate a new TxnEvent to prevent a reference to the large transaction. alloc := new(txnEvent)
diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go index b8e7aa715..c5d8893a2 100644 --- a/v3/newrelic/log_event.go +++ b/v3/newrelic/log_event.go @@ -108,24 +108,10 @@ func (data *LogData) toLogEvent() (logEvent, error) { data.Message = strings.TrimSpace(data.Message) data.Severity = strings.TrimSpace(data.Severity) - var spanID, traceID string - var priority priority - - if data.Context != nil { - txn := FromContext(data.Context) - priority = txn.thread.BetterCAT.Priority - traceMetadata := txn.GetTraceMetadata() - spanID = traceMetadata.SpanID - traceID = traceMetadata.TraceID - } - event := logEvent{ message: data.Message, severity: data.Severity, - spanID: spanID, - traceID: traceID, timestamp: data.Timestamp, - priority: priority, } return event, nil
diff --git a/v3/newrelic/tracing.go b/v3/newrelic/tracing.go index 0c8014322..c2d6882cc 100644 --- a/v3/newrelic/tracing.go +++ b/v3/newrelic/tracing.go @@ -79,6 +79,7 @@ type txnData struct { rootSpanID string rootSpanErrData *errorData SpanEvents []*spanEvent + logs []logEvent customSegments map[string]*metricData datastoreSegments map[datastoreMetricKey]*metricData
From 561ab2d52a30364a7611e48f327bd94574197018 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 21 Jun 2022 10:13:13 -0400 Subject: [PATCH 33/47] style update for zerolog in context --- v3/integrations/logcontext-v2/nrzerolog/Readme.md | 3 +-- v3/integrations/logcontext-v2/nrzerolog/example/main.go | 3 +-- v3/newrelic/config_options.go | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-)
diff --git a/v3/integrations/logcontext-v2/nrzerolog/Readme.md b/v3/integrations/logcontext-v2/nrzerolog/Readme.md index 0b2769f8a..05dbf897b 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/Readme.md +++ b/v3/integrations/logcontext-v2/nrzerolog/Readme.md @@ -42,9 +42,8 @@ func main() { newrelic.ConfigAppName("NRZerolog Example"), newrelic.ConfigInfoLogger(os.Stdout), newrelic.ConfigZerologPluginEnabled(true), - newrelic.ConfigDistributedTracerEnabled(true), ) - if nil != err { + if err != nil { fmt.Println(err) os.Exit(1) } diff --git
a/v3/integrations/logcontext-v2/nrzerolog/example/main.go b/v3/integrations/logcontext-v2/nrzerolog/example/main.go index 0681d153e..f79c7f232 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/example/main.go +++ b/v3/integrations/logcontext-v2/nrzerolog/example/main.go @@ -19,9 +19,8 @@ func main() { newrelic.ConfigAppName("NRZerolog Example"), newrelic.ConfigInfoLogger(os.Stdout), newrelic.ConfigZerologPluginEnabled(true), - newrelic.ConfigDistributedTracerEnabled(true), ) - if nil != err { + if err != nil { fmt.Println(err) os.Exit(1) } diff --git a/v3/newrelic/config_options.go b/v3/newrelic/config_options.go index 98f2a3b7e..6adacef91 100644 --- a/v3/newrelic/config_options.go +++ b/v3/newrelic/config_options.go @@ -97,7 +97,7 @@ const ( // enabled. func ConfigZerologPluginEnabled(enabled bool) ConfigOption { return func(cfg *Config) { - if enabled == true { + if enabled { cfg.ApplicationLogging.Enabled = true cfg.ApplicationLogging.Forwarding.Enabled = true cfg.ApplicationLogging.Metrics.Enabled = true From 900b6f4066f96994b63adda9cdf43ba9f84cd0f7 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 21 Jun 2022 10:14:25 -0400 Subject: [PATCH 34/47] if statements checking booleans simplified to just check value --- v3/newrelic/config_options.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/v3/newrelic/config_options.go b/v3/newrelic/config_options.go index 6adacef91..c728ae5c5 100644 --- a/v3/newrelic/config_options.go +++ b/v3/newrelic/config_options.go @@ -49,7 +49,7 @@ func ConfigDistributedTracerReservoirLimit(limit int) ConfigOption { // Defaults: enabled=false func ConfigAppLogForwardingEnabled(enabled bool) ConfigOption { return func(cfg *Config) { - if enabled == true { + if enabled { cfg.ApplicationLogging.Enabled = true cfg.ApplicationLogging.Forwarding.Enabled = true } else { @@ -64,7 +64,7 @@ func ConfigAppLogForwardingEnabled(enabled bool) ConfigOption { // default: true func ConfigAppLogMetricsEnabled(enabled bool) ConfigOption { return func(cfg *Config) { - if enabled == true { + if enabled { cfg.ApplicationLogging.Enabled = true cfg.ApplicationLogging.Metrics.Enabled = true } else { @@ -77,7 +77,7 @@ func ConfigAppLogMetricsEnabled(enabled bool) ConfigOption { // and data collection func ConfigAppLogEnabled(enabled bool) ConfigOption { return func(cfg *Config) { - if enabled == true { + if enabled { cfg.ApplicationLogging.Enabled = true } else { cfg.ApplicationLogging.Enabled = false From 9660625841dd01efba81f73d6490c44879888668 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 21 Jun 2022 17:11:15 -0400 Subject: [PATCH 35/47] safeguard memory for transaction log events --- v3/newrelic/internal_txn.go | 6 ++-- v3/newrelic/log_events.go | 60 ++++++++++++++++++++-------------- v3/newrelic/log_events_test.go | 7 ++-- v3/newrelic/tracing.go | 2 +- 4 files changed, 41 insertions(+), 34 deletions(-) diff --git a/v3/newrelic/internal_txn.go b/v3/newrelic/internal_txn.go index cfc6d2101..967e6edae 100644 --- a/v3/newrelic/internal_txn.go +++ b/v3/newrelic/internal_txn.go @@ -231,12 +231,10 @@ func (thd *thread) StoreLog(log *logEvent) { txn.Lock() defer txn.Unlock() - // Copy log data into the slice so that the stack frame can return without needing to allocate heap memory if txn.logs == nil { - txn.logs = []logEvent{*log} - } else { - txn.logs = append(txn.logs, *log) + txn.logs = NewLogHeap(internal.MaxLogEvents) } + txn.logs.Add(log) } func (txn *txn) freezeName() { diff --git a/v3/newrelic/log_events.go b/v3/newrelic/log_events.go 
index 66643a31a..8663c754d 100644 --- a/v3/newrelic/log_events.go +++ b/v3/newrelic/log_events.go @@ -17,8 +17,6 @@ type commonAttributes struct { hostname string } -type logEventHeap []logEvent - type logEvents struct { numSeen int failedHarvests int @@ -57,23 +55,51 @@ func (events *logEvents) RecordLoggingMetrics(metrics *metricTable) { } } +type logEventHeap []logEvent + // TODO: when go 1.18 becomes the minimum supported version, re-write to make a generic heap implementation // for all event heaps, to de-duplicate this code //func (events *logEvents) -func (h logEventHeap) Len() int { return len(h) } -func (h logEventHeap) Less(i, j int) bool { return h[i].priority.isLowerPriority(h[j].priority) } -func (h logEventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +func NewLogHeap(capacity int) logEventHeap { return make(logEventHeap, 0, capacity) } +func (h logEventHeap) Len() int { return len(h) } +func (h logEventHeap) Less(i, j int) bool { return h[i].priority.isLowerPriority(h[j].priority) } +func (h logEventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +// To avoid using interface reflection, this function is used in place of Push() to add log events to the heap +// Please replace all of this when the minimum supported version of go is 1.18 so that we can use generics +func (h *logEventHeap) Add(event *logEvent) { + // when fewer events are in the heap than the capacity, do not heap sort + if len(*h) < cap(*h) { + // copy log event onto event heap + *h = append(*h, *event) + if len(*h) == cap(*h) { + // Delay heap initialization so that we can have + // deterministic ordering for integration tests (the max + // is not being reached). + heap.Init(*h) + } + return + } + + if event.priority.isLowerPriority((*h)[0].priority) { + return + } + + (*h)[0] = *event + heap.Fix(h, 0) +} // Push and Pop are unused: only heap.Init and heap.Fix are used. -func (h logEventHeap) Push(x interface{}) {} func (h logEventHeap) Pop() interface{} { return nil } +func (h logEventHeap) Push(x interface{}) {} func newLogEvents(ca commonAttributes, loggingConfig loggingConfig) *logEvents { return &logEvents{ commonAttributes: ca, config: loggingConfig, severityCount: map[string]int{}, - logs: make(logEventHeap, 0, loggingConfig.maxLogEvents), + logs: NewLogHeap(loggingConfig.maxLogEvents), } } @@ -93,24 +119,8 @@ func (events *logEvents) Add(e *logEvent) { return } - if len(events.logs) < cap(events.logs) { - // copy log event onto event heap - events.logs = append(events.logs, *e) - if len(events.logs) == cap(events.logs) { - // Delay heap initialization so that we can have - // deterministic ordering for integration tests (the max - // is not being reached). 
- heap.Init(events.logs) - } - return - } - - if e.priority.isLowerPriority((events.logs)[0].priority) { - return - } - - events.logs[0] = *e - heap.Fix(events.logs, 0) + // Add logs to event heap + events.logs.Add(e) } func (events *logEvents) mergeFailed(other *logEvents) { diff --git a/v3/newrelic/log_events_test.go b/v3/newrelic/log_events_test.go index e2841fe69..2a796d06a 100644 --- a/v3/newrelic/log_events_test.go +++ b/v3/newrelic/log_events_test.go @@ -23,8 +23,7 @@ var ( commonJSON = `[{"common":{"attributes":{"entity.guid":"testGUID","entity.name":"testEntityName","hostname":"testHostname"}},"logs":[` - infoLevel = "INFO" - unknownLevel = "UNKNOWN" + infoLevel = "INFO" ) func loggingConfigEnabled(limit int) loggingConfig { @@ -63,10 +62,10 @@ func TestBasicLogEvents(t *testing.T) { if string(json) != expected { t.Error(string(json), expected) } - if 2 != events.numSeen { + if events.numSeen != 2 { t.Error(events.numSeen) } - if 2 != events.NumSaved() { + if events.NumSaved() != 2 { t.Error(events.NumSaved()) } } diff --git a/v3/newrelic/tracing.go b/v3/newrelic/tracing.go index c2d6882cc..c17f2a2ad 100644 --- a/v3/newrelic/tracing.go +++ b/v3/newrelic/tracing.go @@ -79,7 +79,7 @@ type txnData struct { rootSpanID string rootSpanErrData *errorData SpanEvents []*spanEvent - logs []logEvent + logs logEventHeap customSegments map[string]*metricData datastoreSegments map[datastoreMetricKey]*metricData From ace72256003434438619550b4580d42ad02d7532 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 21 Jun 2022 17:44:16 -0400 Subject: [PATCH 36/47] made go vet happy :) --- v3/newrelic/internal_txn.go | 2 +- v3/newrelic/log_event.go | 1 + v3/newrelic/log_events.go | 10 ++++------ 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/v3/newrelic/internal_txn.go b/v3/newrelic/internal_txn.go index 967e6edae..ca77ca9a6 100644 --- a/v3/newrelic/internal_txn.go +++ b/v3/newrelic/internal_txn.go @@ -232,7 +232,7 @@ func (thd *thread) StoreLog(log *logEvent) { defer txn.Unlock() if txn.logs == nil { - txn.logs = NewLogHeap(internal.MaxLogEvents) + txn.logs = make(logEventHeap, 0, internal.MaxLogEvents) } txn.logs.Add(log) } diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go index c5d8893a2..8870269ad 100644 --- a/v3/newrelic/log_event.go +++ b/v3/newrelic/log_event.go @@ -109,6 +109,7 @@ func (data *LogData) toLogEvent() (logEvent, error) { data.Severity = strings.TrimSpace(data.Severity) event := logEvent{ + priority: newPriority(), message: data.Message, severity: data.Severity, timestamp: data.Timestamp, diff --git a/v3/newrelic/log_events.go b/v3/newrelic/log_events.go index 8663c754d..fccc857da 100644 --- a/v3/newrelic/log_events.go +++ b/v3/newrelic/log_events.go @@ -60,11 +60,9 @@ type logEventHeap []logEvent // TODO: when go 1.18 becomes the minimum supported version, re-write to make a generic heap implementation // for all event heaps, to de-duplicate this code //func (events *logEvents) - -func NewLogHeap(capacity int) logEventHeap { return make(logEventHeap, 0, capacity) } -func (h logEventHeap) Len() int { return len(h) } -func (h logEventHeap) Less(i, j int) bool { return h[i].priority.isLowerPriority(h[j].priority) } -func (h logEventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h logEventHeap) Len() int { return len(h) } +func (h logEventHeap) Less(i, j int) bool { return h[i].priority.isLowerPriority(h[j].priority) } +func (h logEventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } // To avoid using interface reflection, this function is used 
in place of Push() to add log events to the heap // Please replace all of this when the minimum supported version of go is 1.18 so that we can use generics @@ -99,7 +97,7 @@ func newLogEvents(ca commonAttributes, loggingConfig loggingConfig) *logEvents { commonAttributes: ca, config: loggingConfig, severityCount: map[string]int{}, - logs: NewLogHeap(loggingConfig.maxLogEvents), + logs: make(logEventHeap, 0, loggingConfig.maxLogEvents), } } From 458f9d01e9217699c6450e98de4171654e885179 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 21 Jun 2022 18:00:37 -0400 Subject: [PATCH 37/47] autogenerate timestamps instead of failing --- v3/examples/server/main.go | 15 +++++---------- v3/newrelic/log_event.go | 4 ++-- v3/newrelic/log_event_test.go | 8 -------- 3 files changed, 7 insertions(+), 20 deletions(-) diff --git a/v3/examples/server/main.go b/v3/examples/server/main.go index 3d1e65bc8..2c13c46c9 100644 --- a/v3/examples/server/main.go +++ b/v3/examples/server/main.go @@ -277,13 +277,9 @@ func main() { http.HandleFunc(newrelic.WrapHandleFunc(app, "/async", async)) http.HandleFunc(newrelic.WrapHandleFunc(app, "/message", message)) http.HandleFunc("/log", func(w http.ResponseWriter, req *http.Request) { - // Versions of go prior to 1.17 do not have a built in function for Unix Milli time. - // For go versions 1.17+ use time.Now().UnixMilli() to generate timestamps - timestamp := time.Now().UnixNano() / int64(time.Millisecond) app.RecordLog(newrelic.LogData{ - Timestamp: timestamp, - Message: "Log Message", - Severity: "info", + Message: "Log Message", + Severity: "info", }) io.WriteString(w, "A log message was recorded") @@ -295,10 +291,9 @@ func main() { ctx := newrelic.NewContext(context.Background(), txn) app.RecordLog(newrelic.LogData{ - Timestamp: time.Now().UnixNano() / int64(time.Millisecond), - Message: "Transaction Log Message", - Severity: "info", - Context: ctx, + Message: "Transaction Log Message", + Severity: "info", + Context: ctx, }) io.WriteString(w, "A log message was recorded as part of a transaction") diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go index 8870269ad..cd23d8d8a 100644 --- a/v3/newrelic/log_event.go +++ b/v3/newrelic/log_event.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "strings" + "time" ) // Exported Constants for log decorators @@ -86,7 +87,6 @@ func (e *logEvent) MarshalJSON() ([]byte, error) { } var ( - errEmptyTimestamp = errors.New("timestamp can not be empty") errNilLogData = errors.New("log data can not be nil") errLogMessageTooLarge = fmt.Errorf("log message can not exceed %d bytes", MaxLogLength) ) @@ -102,7 +102,7 @@ func (data *LogData) toLogEvent() (logEvent, error) { return logEvent{}, errLogMessageTooLarge } if data.Timestamp == 0 { - return logEvent{}, errEmptyTimestamp + data.Timestamp = int64(timeToUnixMilliseconds(time.Now())) } data.Message = strings.TrimSpace(data.Message) diff --git a/v3/newrelic/log_event_test.go b/v3/newrelic/log_event_test.go index b3ba98a00..3d47a33da 100644 --- a/v3/newrelic/log_event_test.go +++ b/v3/newrelic/log_event_test.go @@ -69,14 +69,6 @@ func TestToLogEvent(t *testing.T) { }, expectErr: errLogMessageTooLarge, }, - { - name: "empty timestamp", - data: LogData{ - Severity: "info", - Message: "test 123", - }, - expectErr: errEmptyTimestamp, - }, } for _, testcase := range testcases { From 6f02ca7bcd5074044a5a9c48ff7ecb8b3267dc41 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 21 Jun 2022 18:20:47 -0400 Subject: [PATCH 38/47] cleanup and testing for autotimestamps --- 
.../logcontext-v2/nrzerolog/hook.go | 13 ++------ v3/newrelic/log_event.go | 2 +- v3/newrelic/log_event_test.go | 30 ++++++++++++++----- 3 files changed, 27 insertions(+), 18 deletions(-) diff --git a/v3/integrations/logcontext-v2/nrzerolog/hook.go b/v3/integrations/logcontext-v2/nrzerolog/hook.go index 43dcf9b21..a4e833b7e 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/hook.go +++ b/v3/integrations/logcontext-v2/nrzerolog/hook.go @@ -2,7 +2,6 @@ package nrzerolog import ( "context" - "time" "github.com/newrelic/go-agent/v3/newrelic" "github.com/rs/zerolog" @@ -20,16 +19,10 @@ func (h NewRelicHook) Run(e *zerolog.Event, level zerolog.Level, msg string) { } else { logLevel = level.String() } - - // Versions of go prior to 1.17 do not have a built in function for Unix Milli time. - // For go versions 1.17+ use time.Now().UnixMilli() to generate timestamps - timestamp := time.Now().UnixNano() / int64(time.Millisecond) - data := newrelic.LogData{ - Timestamp: timestamp, - Severity: logLevel, - Message: msg, - Context: h.Context, + Severity: logLevel, + Message: msg, + Context: h.Context, } h.App.RecordLog(data) diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go index cd23d8d8a..24a2f3ccb 100644 --- a/v3/newrelic/log_event.go +++ b/v3/newrelic/log_event.go @@ -53,7 +53,7 @@ type logEvent struct { // LogData contains data fields that are needed to generate log events. type LogData struct { - Timestamp int64 // Required: Unix Millisecond Timestamp + Timestamp int64 // Optional: Unix Millisecond Timestamp; A timestamp will be generated if unset Severity string // Optional: Severity of log being consumed Message string // Optional: Message of log being consumed; Maximum size: 32768 Bytes. Context context.Context // Optional: context containing a New Relic Transaction diff --git a/v3/newrelic/log_event_test.go b/v3/newrelic/log_event_test.go index 3d47a33da..d555c5cec 100644 --- a/v3/newrelic/log_event_test.go +++ b/v3/newrelic/log_event_test.go @@ -28,15 +28,16 @@ func TestWriteJSON(t *testing.T) { func TestToLogEvent(t *testing.T) { type testcase struct { - name string - data LogData - expectEvent logEvent - expectErr error + name string + data LogData + expectEvent logEvent + expectErr error + skipTimestamp bool } testcases := []testcase{ { - name: "valid case no context", + name: "context nil", data: LogData{ Timestamp: 123456, Severity: "info", @@ -49,7 +50,7 @@ func TestToLogEvent(t *testing.T) { }, }, { - name: "valid case empty severity", + name: "severity empty", data: LogData{ Timestamp: 123456, Message: "test 123", @@ -60,6 +61,18 @@ func TestToLogEvent(t *testing.T) { message: "test 123", }, }, + { + name: "no timestamp", + data: LogData{ + Severity: "info", + Message: "test 123", + }, + expectEvent: logEvent{ + severity: "info", + message: "test 123", + }, + skipTimestamp: true, + }, { name: "message too large", data: LogData{ @@ -86,7 +99,10 @@ func TestToLogEvent(t *testing.T) { if expect.severity != actualEvent.severity { t.Error(fmt.Errorf("%s: expected severity %s, got %s", testcase.name, expect.severity, actualEvent.severity)) } - if expect.timestamp != actualEvent.timestamp { + if actualEvent.timestamp == 0 { + t.Errorf("timestamp was not set on test %s", testcase.name) + } + if expect.timestamp != actualEvent.timestamp && !testcase.skipTimestamp { t.Error(fmt.Errorf("%s: expected timestamp %d, got %d", testcase.name, expect.timestamp, actualEvent.timestamp)) } } From 1f766f4c55149829a19b2b320551387d2570cb38 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: 
Thu, 23 Jun 2022 10:43:43 -0400 Subject: [PATCH 39/47] Fix Spelling mistakes Co-authored-by: Rich Vanderwal --- v3/integrations/logcontext-v2/nrzerolog/Readme.md | 2 +- v3/newrelic/config_options.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/v3/integrations/logcontext-v2/nrzerolog/Readme.md b/v3/integrations/logcontext-v2/nrzerolog/Readme.md index 05dbf897b..14647bddd 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/Readme.md +++ b/v3/integrations/logcontext-v2/nrzerolog/Readme.md @@ -20,7 +20,7 @@ in the go agent, as well as let the agent know that the zerolog pluging is in us If you want to disable metrics, set `newrelic.ConfigAppLogMetricsEnabled(false),`. If you want to disable log forwarding, set `newrelic.ConfigAppLogForwardingEnabled(false),`. Note that the agent sets the default number of logs per harverst cycle to 10000, but that -number may be reuced by the server. You can manually set this number by setting +number may be reduced by the server. You can manually set this number by setting `newrelic.ConfigAppLogForwardingMaxSamplesStored(123),`. The following example will shows how to install and set up your code to send logs to new relic from zerolog. diff --git a/v3/newrelic/config_options.go b/v3/newrelic/config_options.go index c728ae5c5..1115164ac 100644 --- a/v3/newrelic/config_options.go +++ b/v3/newrelic/config_options.go @@ -45,7 +45,7 @@ func ConfigDistributedTracerReservoirLimit(limit int) ConfigOption { } // ConfigAppLogForwardingEnabled enables or disables the collection -// of logs from a users application by the agent +// of logs from a user's application by the agent // Defaults: enabled=false func ConfigAppLogForwardingEnabled(enabled bool) ConfigOption { return func(cfg *Config) { From 814b1d6fd6df338cc68293bad320bc6d4d2d3c19 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Thu, 23 Jun 2022 11:52:57 -0400 Subject: [PATCH 40/47] simplify supportability for zerolog --- .../logcontext-v2/nrzerolog/Readme.md | 13 ++++++------ .../logcontext-v2/nrzerolog/example/main.go | 2 +- .../logcontext-v2/nrzerolog/hook.go | 3 +++ v3/newrelic/config.go | 2 -- v3/newrelic/config_options.go | 21 ------------------- v3/newrelic/config_test.go | 2 -- v3/newrelic/harvest.go | 7 ++----- v3/newrelic/harvest_test.go | 3 --- 8 files changed, 12 insertions(+), 41 deletions(-) diff --git a/v3/integrations/logcontext-v2/nrzerolog/Readme.md b/v3/integrations/logcontext-v2/nrzerolog/Readme.md index 14647bddd..47fcc27e8 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/Readme.md +++ b/v3/integrations/logcontext-v2/nrzerolog/Readme.md @@ -41,7 +41,7 @@ func main() { newrelic.ConfigFromEnvironment(), newrelic.ConfigAppName("NRZerolog Example"), newrelic.ConfigInfoLogger(os.Stdout), - newrelic.ConfigZerologPluginEnabled(true), + newrelic.ConfigAppLogForwarding(true), ) if err != nil { fmt.Println(err) @@ -75,24 +75,23 @@ func main() { ## Usage -Please enable the agent to ingest your logs by calling newrelic.ConfigZerologPluginEnabled(true), -when setting up your application. This will enable log forwarding and log metrics in the -go agent automatically. +Please enable the agent to ingest your logs by calling newrelic.ConfigAppLogForwardingEnabled(true), +when setting up your application. This is not enabled by default. This integration for the zerolog logging frameworks uses a built in feature of the zerolog framework called hook functions. Zerolog loggers can be modified to have hook functions run on them before each time a write is executed. 
When a logger is hooked, meaning a hook function was added to that logger with the Hook() funciton, a copy of that logger is created with those changes. Note that zerolog -will *never* attempt to verify that the hook functions were not duplicated, or +will *never* attempt to verify that any hook functions have not been not duplicated, or that fields are not repeated in any way. As a result, we recommend that you create a base logger that is configured in the way you prefer to use zerolog. Then you create hooked loggers to send log data to New Relic from that base logger. The plugin captures the log level, and the message from zerolog. It will also collect distributed tracing data from your transaction context. At the moment the hook function is -called in zerolog, a timestamp will be generated for your log .In most cases, this -timestamp will be the same as the time posted in zerolog log message, however it is possible that +called in zerolog, a timestamp will be generated for your log. In most cases, this +timestamp will be the same as the time posted in the zerolog log message, however it is possible that there could be a slight offset depending on the the performance of your system. diff --git a/v3/integrations/logcontext-v2/nrzerolog/example/main.go b/v3/integrations/logcontext-v2/nrzerolog/example/main.go index f79c7f232..f3c284229 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/example/main.go +++ b/v3/integrations/logcontext-v2/nrzerolog/example/main.go @@ -18,7 +18,7 @@ func main() { newrelic.ConfigFromEnvironment(), newrelic.ConfigAppName("NRZerolog Example"), newrelic.ConfigInfoLogger(os.Stdout), - newrelic.ConfigZerologPluginEnabled(true), + newrelic.ConfigAppLogForwardingEnabled(true), ) if err != nil { fmt.Println(err) diff --git a/v3/integrations/logcontext-v2/nrzerolog/hook.go b/v3/integrations/logcontext-v2/nrzerolog/hook.go index a4e833b7e..58fa0783c 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/hook.go +++ b/v3/integrations/logcontext-v2/nrzerolog/hook.go @@ -3,10 +3,13 @@ package nrzerolog import ( "context" + "github.com/newrelic/go-agent/v3/internal" "github.com/newrelic/go-agent/v3/newrelic" "github.com/rs/zerolog" ) +func init() { internal.TrackUsage("integration", "logcontext", "zerolog") } + type NewRelicHook struct { App *newrelic.Application Context context.Context diff --git a/v3/newrelic/config.go b/v3/newrelic/config.go index c0801b8bb..38f67853f 100644 --- a/v3/newrelic/config.go +++ b/v3/newrelic/config.go @@ -359,8 +359,6 @@ type ApplicationLogging struct { // if it is enabled, the individual sub-feature configurations take effect. // MAY accomplish this by not installing instrumentation, or by early-return/no-op as necessary for an agent. Enabled bool - // Name of instrumented frameworks enabled - Frameworks []string // Forwarding controls log forwarding to New Relic One Forwarding struct { // Toggles whether the agent gathers log records for sending to New Relic. diff --git a/v3/newrelic/config_options.go b/v3/newrelic/config_options.go index 1115164ac..55ec64823 100644 --- a/v3/newrelic/config_options.go +++ b/v3/newrelic/config_options.go @@ -89,27 +89,6 @@ const ( zerologFrameworkName = "Zerolog" ) -// ConfigZerologPluginEnabled enables all supported features -// for the zerolog logs in context plugin. This will not alter -// the max samples stored for logs. -// -// Log Enrichment is currently not supported, and will not be -// enabled. 
-func ConfigZerologPluginEnabled(enabled bool) ConfigOption { - return func(cfg *Config) { - if enabled { - cfg.ApplicationLogging.Enabled = true - cfg.ApplicationLogging.Forwarding.Enabled = true - cfg.ApplicationLogging.Metrics.Enabled = true - if cfg.ApplicationLogging.Frameworks == nil { - cfg.ApplicationLogging.Frameworks = []string{zerologFrameworkName} - } else { - cfg.ApplicationLogging.Frameworks = append(cfg.ApplicationLogging.Frameworks, zerologFrameworkName) - } - } - } -} - // ConfigAppLogForwardingMaxSamplesStored allows users to set the maximium number of // log events the agent is allowed to collect and store in a given harvest cycle. func ConfigAppLogForwardingMaxSamplesStored(maxSamplesStored int) ConfigOption { diff --git a/v3/newrelic/config_test.go b/v3/newrelic/config_test.go index 64577bbe3..6b159976a 100644 --- a/v3/newrelic/config_test.go +++ b/v3/newrelic/config_test.go @@ -135,7 +135,6 @@ func TestCopyConfigReferenceFieldsPresent(t *testing.T) { "Enabled": false, "MaxSamplesStored": 10000 }, - "Frameworks": null, "Metrics": { "Enabled": true } @@ -320,7 +319,6 @@ func TestCopyConfigReferenceFieldsAbsent(t *testing.T) { "Enabled": false, "MaxSamplesStored": 10000 }, - "Frameworks": null, "Metrics": { "Enabled": true } diff --git a/v3/newrelic/harvest.go b/v3/newrelic/harvest.go index 7b3aa787e..3a7e1cf61 100644 --- a/v3/newrelic/harvest.go +++ b/v3/newrelic/harvest.go @@ -212,11 +212,8 @@ func createTraceObserverMetrics(to traceObserver, metrics *metricTable) { } } -func createAppLoggingSupportabilityMetrics(lc *loggingConfig, frameworks []string, metrics *metricTable) { +func createAppLoggingSupportabilityMetrics(lc *loggingConfig, metrics *metricTable) { lc.connectMetrics(metrics) - for _, framework := range frameworks { - loggingFrameworkMetric(metrics, framework) - } } // CreateFinalMetrics creates extra metrics at harvest time. 
@@ -246,7 +243,7 @@ func (h *harvest) CreateFinalMetrics(run *appRun, to traceObserver) { createTraceObserverMetrics(to, h.Metrics) createTrackUsageMetrics(h.Metrics) - createAppLoggingSupportabilityMetrics(&hc.LoggingConfig, run.Config.ApplicationLogging.Frameworks, h.Metrics) + createAppLoggingSupportabilityMetrics(&hc.LoggingConfig, h.Metrics) h.Metrics = h.Metrics.ApplyRules(reply.MetricRules) } diff --git a/v3/newrelic/harvest_test.go b/v3/newrelic/harvest_test.go index 728c7640d..c1c23eb18 100644 --- a/v3/newrelic/harvest_test.go +++ b/v3/newrelic/harvest_test.go @@ -85,7 +85,6 @@ func TestCreateFinalMetrics(t *testing.T) { var nilHarvest *harvest config := config{Config: defaultConfig()} - config.ApplicationLogging.Frameworks = append(config.ApplicationLogging.Frameworks, zerologFrameworkName) run := newAppRun(config, internal.ConnectReplyDefaults()) run.harvestConfig = testHarvestCfgr @@ -143,7 +142,6 @@ func TestCreateFinalMetrics(t *testing.T) { {Name: "Supportability/Go/Runtime/Version/" + goVersionSimple, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Go/gRPC/Version/" + grpcVersion, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Logging/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, - {Name: "Supportability/Logging/Golang/Zerolog", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Logging/Forwarding/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Logging/Metrics/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Logging/LocalDecorating/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, @@ -175,7 +173,6 @@ func TestCreateFinalMetrics(t *testing.T) { {Name: "Supportability/Go/Runtime/Version/" + goVersionSimple, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Go/gRPC/Version/" + grpcVersion, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Logging/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, - {Name: "Supportability/Logging/Golang/Zerolog", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Logging/Forwarding/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Logging/Metrics/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Logging/LocalDecorating/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, From 33f7217a4a067c04e00fe2856d726d5c7fccf78a Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Thu, 23 Jun 2022 12:29:19 -0400 Subject: [PATCH 41/47] a ConfigZerologEnabled() function call was separated from its heard --- v3/integrations/logcontext-v2/nrzerolog/Readme.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/v3/integrations/logcontext-v2/nrzerolog/Readme.md b/v3/integrations/logcontext-v2/nrzerolog/Readme.md index 47fcc27e8..52c25f907 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/Readme.md +++ b/v3/integrations/logcontext-v2/nrzerolog/Readme.md @@ -14,11 +14,9 @@ are supported by this plugin in the current release: ## Installation The nrzerolog plugin, and the go-agent need to be integrated into your code -in order to use this tool. Make sure to set `newrelic.ConfigZerologPluginEnabled(true)` -in your config settings for the application. 
This will enable log forwarding and metrics -in the go agent, as well as let the agent know that the zerolog pluging is in use. -If you want to disable metrics, set `newrelic.ConfigAppLogMetricsEnabled(false),`. -If you want to disable log forwarding, set `newrelic.ConfigAppLogForwardingEnabled(false),`. +in order to use this tool. Make sure to set `newrelic.ConfigAppLogForwardingEnabled(true)` +in your config settings for the application. This will enable log forwarding +in the go agent. If you want to disable metrics, set `newrelic.ConfigAppLogMetricsEnabled(false),`. Note that the agent sets the default number of logs per harverst cycle to 10000, but that number may be reduced by the server. You can manually set this number by setting `newrelic.ConfigAppLogForwardingMaxSamplesStored(123),`. From d916ec5712b577e17ff5785f175c1c3dcd007564 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 28 Jun 2022 16:00:48 -0400 Subject: [PATCH 42/47] app log trace id was set to span id --- v3/newrelic/internal_app.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v3/newrelic/internal_app.go b/v3/newrelic/internal_app.go index 2a21025eb..9b1312df9 100644 --- a/v3/newrelic/internal_app.go +++ b/v3/newrelic/internal_app.go @@ -606,7 +606,7 @@ func (app *app) RecordLog(log *LogData) error { if txn != nil { metadata := txn.GetTraceMetadata() event.spanID = metadata.SpanID - event.traceID = metadata.SpanID + event.traceID = metadata.TraceID txn.thread.StoreLog(&event) } else { run, _ := app.getState() From 92a4a3746c9e5b58540e72b19dac514b66364fda Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 28 Jun 2022 16:03:13 -0400 Subject: [PATCH 43/47] Additional Testing covering Application Function Calls --- .../integrationsupport/integrationsupport.go | 7 ++ v3/newrelic/expect_implementation.go | 2 +- v3/newrelic/internal_app_test.go | 97 +++++++++++++++++++ 3 files changed, 105 insertions(+), 1 deletion(-) diff --git a/v3/internal/integrationsupport/integrationsupport.go b/v3/internal/integrationsupport/integrationsupport.go index 4ce06267f..54c7e8151 100644 --- a/v3/internal/integrationsupport/integrationsupport.go +++ b/v3/internal/integrationsupport/integrationsupport.go @@ -93,6 +93,13 @@ var DTEnabledCfgFn = func(cfg *newrelic.Config) { cfg.DistributedTracer.Enabled = true } +var AppLogEnabledCfgFn = func(cfg *newrelic.Config) { + cfg.Enabled = false + cfg.ApplicationLogging.Enabled = true + cfg.ApplicationLogging.Forwarding.Enabled = true + cfg.ApplicationLogging.Metrics.Enabled = true +} + // SampleEverythingReplyFn is a reusable ConnectReply function that samples everything var SampleEverythingReplyFn = func(reply *internal.ConnectReply) { reply.SetSampleEverything() diff --git a/v3/newrelic/expect_implementation.go b/v3/newrelic/expect_implementation.go index 435a66dff..06e09c886 100644 --- a/v3/newrelic/expect_implementation.go +++ b/v3/newrelic/expect_implementation.go @@ -203,7 +203,7 @@ func expectCustomEvents(v internal.Validator, cs *customEvents, expect []interna func expectLogEvents(v internal.Validator, events *logEvents, expect []internal.WantLog) { if len(events.logs) != len(expect) { - v.Error("number of events does not match", len(events.logs), len(expect)) + v.Error("actual number of events does not match what is expected", len(events.logs), len(expect)) return } diff --git a/v3/newrelic/internal_app_test.go b/v3/newrelic/internal_app_test.go index 4a8de7387..a083cd7f0 100644 --- a/v3/newrelic/internal_app_test.go +++ b/v3/newrelic/internal_app_test.go @@ 
-4,10 +4,13 @@ package newrelic import ( + "context" "errors" "fmt" "testing" "time" + + "github.com/newrelic/go-agent/v3/internal" ) func TestConnectBackoff(t *testing.T) { @@ -71,3 +74,97 @@ func TestConfigOptionError(t *testing.T) { t.Error("app not nil") } } + +const ( + SampleAppName = "my app" +) + +// ExpectApp combines Application and Expect, for use in validating data in test apps +type ExpectApp struct { + internal.Expect + *Application +} + +// NewTestApp creates an ExpectApp with the given ConnectReply function and Config function +func NewTestApp(replyfn func(*internal.ConnectReply), cfgFn ...ConfigOption) ExpectApp { + cfgFn = append(cfgFn, + func(cfg *Config) { + // Prevent spawning app goroutines in tests. + if !cfg.ServerlessMode.Enabled { + cfg.Enabled = false + } + }, + ConfigAppName(SampleAppName), + ConfigLicense(testLicenseKey), + ) + + app, err := NewApplication(cfgFn...) + if nil != err { + panic(err) + } + + internal.HarvestTesting(app.Private, replyfn) + + return ExpectApp{ + Expect: app.Private.(internal.Expect), + Application: app, + } +} + +var SampleEverythingReplyFn = func(reply *internal.ConnectReply) { + reply.SetSampleEverything() +} + +var ConfigTestAppLogFn = func(cfg *Config) { + cfg.Enabled = false + cfg.ApplicationLogging.Enabled = true + cfg.ApplicationLogging.Forwarding.Enabled = true + cfg.ApplicationLogging.Metrics.Enabled = true +} + +func TestRecordLog(t *testing.T) { + testApp := NewTestApp( + SampleEverythingReplyFn, + ConfigTestAppLogFn, + ) + + time := int64(timeToUnixMilliseconds(time.Now())) + + testApp.Application.RecordLog(LogData{ + Severity: "Debug", + Message: "Test Message", + Timestamp: time, + }) + + txn := testApp.StartTransaction("test transaction") + ctx := NewContext(context.Background(), txn) + + // gather linking metadata values for test verification + metadata := txn.GetTraceMetadata() + spanID := metadata.SpanID + traceID := metadata.TraceID + + testApp.Application.RecordLog(LogData{ + Severity: "Warn", + Message: "Test Message With Transaction", + Timestamp: time, + Context: ctx, + }) + + txn.End() + + testApp.ExpectLogEvents(t, []internal.WantLog{ + { + Severity: "Debug", + Message: "Test Message", + Timestamp: time, + }, + { + Severity: "Warn", + Message: "Test Message With Transaction", + Timestamp: time, + SpanID: spanID, + TraceID: traceID, + }, + }) +} From ad1a86416509d5c639c2759680c4d2b0dc471340 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 28 Jun 2022 16:47:24 -0400 Subject: [PATCH 44/47] fix sample size selection for logging --- v3/internal/connect_reply.go | 8 ++++---- v3/internal/connect_reply_test.go | 2 +- v3/newrelic/config.go | 12 +++++++++++- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/v3/internal/connect_reply.go b/v3/internal/connect_reply.go index 19cd4b721..342c140d5 100644 --- a/v3/internal/connect_reply.go +++ b/v3/internal/connect_reply.go @@ -137,20 +137,20 @@ func (r *ConnectReply) ConfigurablePeriod() time.Duration { func uintPtr(x uint) *uint { return &x } // DefaultEventHarvestConfig provides faster event harvest defaults. 
-func DefaultEventHarvestConfig(maxTxnEvents int) EventHarvestConfig { +func DefaultEventHarvestConfig(maxTxnEvents, maxLogEvents int) EventHarvestConfig { cfg := EventHarvestConfig{} cfg.ReportPeriodMs = DefaultConfigurableEventHarvestMs cfg.Limits.TxnEvents = uintPtr(uint(maxTxnEvents)) cfg.Limits.CustomEvents = uintPtr(uint(MaxCustomEvents)) - cfg.Limits.LogEvents = uintPtr(uint(MaxLogEvents)) + cfg.Limits.LogEvents = uintPtr(uint(maxLogEvents)) cfg.Limits.ErrorEvents = uintPtr(uint(MaxErrorEvents)) return cfg } // DefaultEventHarvestConfigWithDT is an extended version of DefaultEventHarvestConfig, // with the addition that it takes into account distributed tracer span event harvest limits. -func DefaultEventHarvestConfigWithDT(maxTxnEvents int, dtEnabled bool, spanEventLimit int) EventHarvestConfig { - cfg := DefaultEventHarvestConfig(maxTxnEvents) +func DefaultEventHarvestConfigWithDT(maxTxnEvents, maxLogEvents, spanEventLimit int, dtEnabled bool) EventHarvestConfig { + cfg := DefaultEventHarvestConfig(maxTxnEvents, maxLogEvents) if dtEnabled { cfg.Limits.SpanEvents = uintPtr(uint(spanEventLimit)) } diff --git a/v3/internal/connect_reply_test.go b/v3/internal/connect_reply_test.go index a2be63a17..eb9711354 100644 --- a/v3/internal/connect_reply_test.go +++ b/v3/internal/connect_reply_test.go @@ -173,7 +173,7 @@ func TestNegativeHarvestLimits(t *testing.T) { } func TestDefaultEventHarvestConfigJSON(t *testing.T) { - js, err := json.Marshal(DefaultEventHarvestConfig(MaxTxnEvents)) + js, err := json.Marshal(DefaultEventHarvestConfig(MaxTxnEvents, MaxLogEvents)) if err != nil { t.Error(err) } diff --git a/v3/newrelic/config.go b/v3/newrelic/config.go index 38f67853f..1abf253a7 100644 --- a/v3/newrelic/config.go +++ b/v3/newrelic/config.go @@ -545,6 +545,16 @@ func (c Config) maxTxnEvents() int { return configured } +// maxTxnEvents returns the configured maximum number of Transaction Events if it has been configured +// and is less than the default maximum; otherwise it returns the default max. 
+func (c Config) maxLogEvents() int { + configured := c.ApplicationLogging.Forwarding.MaxSamplesStored + if configured < 0 || configured > internal.MaxTxnEvents { + return internal.MaxTxnEvents + } + return configured +} + func copyDestConfig(c AttributeDestinationConfig) AttributeDestinationConfig { cp := c if nil != c.Include { @@ -699,7 +709,7 @@ func configConnectJSONInternal(c Config, pid int, util *utilization.Data, e envi Util: util, SecurityPolicies: securityPolicies, Metadata: metadata, - EventData: internal.DefaultEventHarvestConfigWithDT(c.maxTxnEvents(), c.DistributedTracer.Enabled, c.DistributedTracer.ReservoirLimit), + EventData: internal.DefaultEventHarvestConfigWithDT(c.maxTxnEvents(), c.maxLogEvents(), c.DistributedTracer.ReservoirLimit, c.DistributedTracer.Enabled), }}) } From 91ca115a26ae131d25bd313cc76f26258d29134a Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 28 Jun 2022 16:57:57 -0400 Subject: [PATCH 45/47] cleaner server example --- v3/examples/server/main.go | 51 +++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/v3/examples/server/main.go b/v3/examples/server/main.go index 2c13c46c9..6b4138030 100644 --- a/v3/examples/server/main.go +++ b/v3/examples/server/main.go @@ -4,7 +4,6 @@ package main import ( - "context" "errors" "fmt" "io" @@ -247,6 +246,32 @@ func browser(w http.ResponseWriter, r *http.Request) { io.WriteString(w, "browser header page") } +func logMessage(w http.ResponseWriter, r *http.Request) { + txn := newrelic.FromContext(r.Context()) + app := txn.Application() + + app.RecordLog(newrelic.LogData{ + Message: "Log Message", + Severity: "info", + }) + + io.WriteString(w, "A log message was recorded") +} + +func logTxnMessage(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + txn := newrelic.FromContext(ctx) + app := txn.Application() + + app.RecordLog(newrelic.LogData{ + Message: "Log Message", + Severity: "info", + Context: ctx, + }) + + io.WriteString(w, "A log message was recorded as part of a transaction") +} + func main() { app, err := newrelic.NewApplication( newrelic.ConfigAppName("Example App"), @@ -276,28 +301,8 @@ func main() { http.HandleFunc(newrelic.WrapHandleFunc(app, "/browser", browser)) http.HandleFunc(newrelic.WrapHandleFunc(app, "/async", async)) http.HandleFunc(newrelic.WrapHandleFunc(app, "/message", message)) - http.HandleFunc("/log", func(w http.ResponseWriter, req *http.Request) { - app.RecordLog(newrelic.LogData{ - Message: "Log Message", - Severity: "info", - }) - - io.WriteString(w, "A log message was recorded") - }) - - http.HandleFunc("/transaction_log", func(w http.ResponseWriter, req *http.Request) { - txn := app.StartTransaction("Log Transaction") - defer txn.End() - ctx := newrelic.NewContext(context.Background(), txn) - - app.RecordLog(newrelic.LogData{ - Message: "Transaction Log Message", - Severity: "info", - Context: ctx, - }) - - io.WriteString(w, "A log message was recorded as part of a transaction") - }) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/log", logMessage)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/transaction_log", logTxnMessage)) http.HandleFunc("/background", func(w http.ResponseWriter, req *http.Request) { // Transactions started without an http.Request are classified as From d1385ffee697e02cdac77ce7aa2fe6b84a6638ef Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 28 Jun 2022 17:35:18 -0400 Subject: [PATCH 46/47] make it more user friendly to record transaction logs --- v3/examples/server/main.go | 8 ++---- 
.../logcontext-v2/nrzerolog/hook.go | 13 ++++++++-- v3/newrelic/internal_app.go | 18 ++----------- v3/newrelic/internal_app_test.go | 25 ------------------- v3/newrelic/log_event.go | 8 +++--- v3/newrelic/transaction.go | 23 +++++++++++++++++ 6 files changed, 41 insertions(+), 54 deletions(-) diff --git a/v3/examples/server/main.go b/v3/examples/server/main.go index 6b4138030..cb7d5cfc1 100644 --- a/v3/examples/server/main.go +++ b/v3/examples/server/main.go @@ -259,14 +259,10 @@ func logMessage(w http.ResponseWriter, r *http.Request) { } func logTxnMessage(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - txn := newrelic.FromContext(ctx) - app := txn.Application() - - app.RecordLog(newrelic.LogData{ + txn := newrelic.FromContext(r.Context()) + txn.RecordLog(newrelic.LogData{ Message: "Log Message", Severity: "info", - Context: ctx, }) io.WriteString(w, "A log message was recorded as part of a transaction") diff --git a/v3/integrations/logcontext-v2/nrzerolog/hook.go b/v3/integrations/logcontext-v2/nrzerolog/hook.go index 58fa0783c..5055a1213 100644 --- a/v3/integrations/logcontext-v2/nrzerolog/hook.go +++ b/v3/integrations/logcontext-v2/nrzerolog/hook.go @@ -16,17 +16,26 @@ type NewRelicHook struct { } func (h NewRelicHook) Run(e *zerolog.Event, level zerolog.Level, msg string) { + var txn *newrelic.Transaction + if h.Context != nil { + txn = newrelic.FromContext(h.Context) + } + logLevel := "" if level == zerolog.NoLevel { logLevel = newrelic.LogSeverityUnknown } else { logLevel = level.String() } + data := newrelic.LogData{ Severity: logLevel, Message: msg, - Context: h.Context, } - h.App.RecordLog(data) + if txn != nil { + txn.RecordLog(data) + } else { + h.App.RecordLog(data) + } } diff --git a/v3/newrelic/internal_app.go b/v3/newrelic/internal_app.go index 9b1312df9..27d492082 100644 --- a/v3/newrelic/internal_app.go +++ b/v3/newrelic/internal_app.go @@ -596,22 +596,8 @@ func (app *app) RecordLog(log *LogData) error { return err } - var txn *Transaction - if log.Context != nil { - txn = FromContext(log.Context) - } - - // Whenever a log is part of a transaction, store it on the transaction - // to ensure it gets correctly prioritized. 
- if txn != nil { - metadata := txn.GetTraceMetadata() - event.spanID = metadata.SpanID - event.traceID = metadata.TraceID - txn.thread.StoreLog(&event) - } else { - run, _ := app.getState() - app.Consume(run.Reply.RunID, &event) - } + run, _ := app.getState() + app.Consume(run.Reply.RunID, &event) return nil } diff --git a/v3/newrelic/internal_app_test.go b/v3/newrelic/internal_app_test.go index a083cd7f0..366a7756b 100644 --- a/v3/newrelic/internal_app_test.go +++ b/v3/newrelic/internal_app_test.go @@ -4,7 +4,6 @@ package newrelic import ( - "context" "errors" "fmt" "testing" @@ -136,35 +135,11 @@ func TestRecordLog(t *testing.T) { Timestamp: time, }) - txn := testApp.StartTransaction("test transaction") - ctx := NewContext(context.Background(), txn) - - // gather linking metadata values for test verification - metadata := txn.GetTraceMetadata() - spanID := metadata.SpanID - traceID := metadata.TraceID - - testApp.Application.RecordLog(LogData{ - Severity: "Warn", - Message: "Test Message With Transaction", - Timestamp: time, - Context: ctx, - }) - - txn.End() - testApp.ExpectLogEvents(t, []internal.WantLog{ { Severity: "Debug", Message: "Test Message", Timestamp: time, }, - { - Severity: "Warn", - Message: "Test Message With Transaction", - Timestamp: time, - SpanID: spanID, - TraceID: traceID, - }, }) } diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go index 24a2f3ccb..1f7fabc9b 100644 --- a/v3/newrelic/log_event.go +++ b/v3/newrelic/log_event.go @@ -5,7 +5,6 @@ package newrelic import ( "bytes" - "context" "errors" "fmt" "strings" @@ -53,10 +52,9 @@ type logEvent struct { // LogData contains data fields that are needed to generate log events. type LogData struct { - Timestamp int64 // Optional: Unix Millisecond Timestamp; A timestamp will be generated if unset - Severity string // Optional: Severity of log being consumed - Message string // Optional: Message of log being consumed; Maximum size: 32768 Bytes. - Context context.Context // Optional: context containing a New Relic Transaction + Timestamp int64 // Optional: Unix Millisecond Timestamp; A timestamp will be generated if unset + Severity string // Optional: Severity of log being consumed + Message string // Optional: Message of log being consumed; Maximum size: 32768 Bytes. } // writeJSON prepares JSON in the format expected by the collector. diff --git a/v3/newrelic/transaction.go b/v3/newrelic/transaction.go index 82e738ed1..39a769862 100644 --- a/v3/newrelic/transaction.go +++ b/v3/newrelic/transaction.go @@ -119,6 +119,29 @@ func (txn *Transaction) AddAttribute(key string, value interface{}) { txn.thread.logAPIError(txn.thread.AddAttribute(key, value), "add attribute", nil) } +// RecordLog records the data from a single log line. +// This consumes a LogData object that should be configured +// with data taken from a logging framework. +// +// Certian parts of this feature can be turned off based on your +// config settings. Record log is capable of recording log events, +// as well as log metrics depending on how your application is +// configured. +func (txn *Transaction) RecordLog(log LogData) { + event, err := log.toLogEvent() + if err != nil { + txn.Application().app.Error("unable to record log", map[string]interface{}{ + "reason": err.Error(), + }) + return + } + + metadata := txn.GetTraceMetadata() + event.spanID = metadata.SpanID + event.traceID = metadata.TraceID + txn.thread.StoreLog(&event) +} + // SetWebRequestHTTP marks the transaction as a web transaction. 
If // the request is non-nil, SetWebRequestHTTP will additionally collect // details on request attributes, url, and method. If headers are From f137814b8c72a57eb8279331a3ed96f683702a51 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Tue, 28 Jun 2022 17:39:39 -0400 Subject: [PATCH 47/47] background logs --- v3/examples/server/main.go | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/v3/examples/server/main.go b/v3/examples/server/main.go index cb7d5cfc1..1b33ec93f 100644 --- a/v3/examples/server/main.go +++ b/v3/examples/server/main.go @@ -246,18 +246,6 @@ func browser(w http.ResponseWriter, r *http.Request) { io.WriteString(w, "browser header page") } -func logMessage(w http.ResponseWriter, r *http.Request) { - txn := newrelic.FromContext(r.Context()) - app := txn.Application() - - app.RecordLog(newrelic.LogData{ - Message: "Log Message", - Severity: "info", - }) - - io.WriteString(w, "A log message was recorded") -} - func logTxnMessage(w http.ResponseWriter, r *http.Request) { txn := newrelic.FromContext(r.Context()) txn.RecordLog(newrelic.LogData{ @@ -265,7 +253,7 @@ func logTxnMessage(w http.ResponseWriter, r *http.Request) { Severity: "info", }) - io.WriteString(w, "A log message was recorded as part of a transaction") + io.WriteString(w, "A log message was recorded") } func main() { @@ -297,8 +285,7 @@ func main() { http.HandleFunc(newrelic.WrapHandleFunc(app, "/browser", browser)) http.HandleFunc(newrelic.WrapHandleFunc(app, "/async", async)) http.HandleFunc(newrelic.WrapHandleFunc(app, "/message", message)) - http.HandleFunc(newrelic.WrapHandleFunc(app, "/log", logMessage)) - http.HandleFunc(newrelic.WrapHandleFunc(app, "/transaction_log", logTxnMessage)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/log", logTxnMessage)) http.HandleFunc("/background", func(w http.ResponseWriter, req *http.Request) { // Transactions started without an http.Request are classified as @@ -310,5 +297,17 @@ func main() { time.Sleep(150 * time.Millisecond) }) + http.HandleFunc("/background_log", func(w http.ResponseWriter, req *http.Request) { + // Logs that occur outside of a transaction are classified as + // background logs. + + app.RecordLog(newrelic.LogData{ + Message: "Background Log Message", + Severity: "info", + }) + + io.WriteString(w, "A background log message was recorded") + }) + http.ListenAndServe(":8000", nil) }
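
Taken together, the final patches in this series settle on two entry points for log forwarding: Application.RecordLog for logs recorded outside a transaction, and Transaction.RecordLog for logs recorded inside one, with the Timestamp field now optional. Below is a minimal sketch of how a caller, such as a wrapper library for a logging framework (the intended consumer per the cover letter), might use the API as it stands at the end of this series. The application name, route, and log messages are illustrative only; forwarding must be enabled explicitly because it defaults to off.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"

	"github.com/newrelic/go-agent/v3/newrelic"
)

func main() {
	app, err := newrelic.NewApplication(
		newrelic.ConfigAppName("Log Forwarding Example"), // illustrative name
		newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")),
		// Forwarding is disabled by default; this turns it on. The per-harvest
		// limit can be tuned with ConfigAppLogForwardingMaxSamplesStored.
		newrelic.ConfigAppLogForwardingEnabled(true),
	)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	// A log recorded outside of any transaction is a background log and goes
	// through the application. Timestamp is left unset, so the agent
	// generates one at record time.
	app.RecordLog(newrelic.LogData{
		Severity: "info",
		Message:  "service starting",
	})

	http.HandleFunc(newrelic.WrapHandleFunc(app, "/work", func(w http.ResponseWriter, r *http.Request) {
		// Inside a transaction, record the log on the transaction so the
		// span ID, trace ID, and the transaction's sampling priority are
		// attached to the event.
		txn := newrelic.FromContext(r.Context())
		txn.RecordLog(newrelic.LogData{
			Severity: "info",
			Message:  "handling /work",
		})
		io.WriteString(w, "logged")
	}))

	http.ListenAndServe(":8000", nil)
}
```

Leaving Timestamp unset relies on the auto-generated timestamp introduced in patch 37; a wrapper that already carries a timestamp from its framework should pass it through as Unix milliseconds instead.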