diff --git a/v3/examples/server/main.go b/v3/examples/server/main.go index af1496e34..1b33ec93f 100644 --- a/v3/examples/server/main.go +++ b/v3/examples/server/main.go @@ -246,11 +246,22 @@ func browser(w http.ResponseWriter, r *http.Request) { io.WriteString(w, "browser header page") } +func logTxnMessage(w http.ResponseWriter, r *http.Request) { + txn := newrelic.FromContext(r.Context()) + txn.RecordLog(newrelic.LogData{ + Message: "Log Message", + Severity: "info", + }) + + io.WriteString(w, "A log message was recorded") +} + func main() { app, err := newrelic.NewApplication( newrelic.ConfigAppName("Example App"), newrelic.ConfigFromEnvironment(), newrelic.ConfigDebugLogger(os.Stdout), + newrelic.ConfigAppLogForwardingEnabled(true), ) if err != nil { fmt.Println(err) @@ -274,6 +285,7 @@ func main() { http.HandleFunc(newrelic.WrapHandleFunc(app, "/browser", browser)) http.HandleFunc(newrelic.WrapHandleFunc(app, "/async", async)) http.HandleFunc(newrelic.WrapHandleFunc(app, "/message", message)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/log", logTxnMessage)) http.HandleFunc("/background", func(w http.ResponseWriter, req *http.Request) { // Transactions started without an http.Request are classified as @@ -285,5 +297,17 @@ func main() { time.Sleep(150 * time.Millisecond) }) + http.HandleFunc("/background_log", func(w http.ResponseWriter, req *http.Request) { + // Logs that occur outside of a transaction are classified as + // background logs. 
+ + app.RecordLog(newrelic.LogData{ + Message: "Background Log Message", + Severity: "info", + }) + + io.WriteString(w, "A background log message was recorded") + }) + http.ListenAndServe(":8000", nil) } diff --git a/v3/integrations/logcontext-v2/nrzerolog/Readme.md b/v3/integrations/logcontext-v2/nrzerolog/Readme.md new file mode 100644 index 000000000..52c25f907 --- /dev/null +++ b/v3/integrations/logcontext-v2/nrzerolog/Readme.md @@ -0,0 +1,95 @@ +# Zerolog In Context + +This plugin for zerolog implements the logs in context tooling for the go agent. This hook +function can be added to any zerolog logger, and will automatically collect the log data +from zerolog, and send it to New Relic through the go agent. The following Logging features +are supported by this plugin in the current release: + +| Logging Feature | Supported | +| ------- | --------- | +| Forwarding | :heavy_check_mark: | +| Metrics | :heavy_check_mark: | +| Enrichment | :x: | + +## Installation + +The nrzerolog plugin, and the go-agent need to be integrated into your code +in order to use this tool. Make sure to set `newrelic.ConfigAppLogForwardingEnabled(true)` +in your config settings for the application. This will enable log forwarding +in the go agent. If you want to disable metrics, set `newrelic.ConfigAppLogMetricsEnabled(false),`. +Note that the agent sets the default number of logs per harvest cycle to 10000, but that +number may be reduced by the server. You can manually set this number by setting +`newrelic.ConfigAppLogForwardingMaxSamplesStored(123),`. + +The following example shows how to install and set up your code to send logs to new relic from zerolog. 
+ +```go + +import ( + "github.com/rs/zerolog" + "github.com/newrelic/go-agent/v3/newrelic" + "github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrzerolog" +) + +func main() { + // Initialize a zerolog logger + baseLogger := zerolog.New(os.Stdout) + + app, err := newrelic.NewApplication( + newrelic.ConfigFromEnvironment(), + newrelic.ConfigAppName("NRZerolog Example"), + newrelic.ConfigInfoLogger(os.Stdout), + newrelic.ConfigAppLogForwardingEnabled(true), + ) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + // Send logs to New Relic outside of a transaction + nrHook := nrzerolog.NewRelicHook{ + App: app, + } + + // Wrap logger with New Relic Hook + nrLogger := baseLogger.Hook(nrHook) + nrLogger.Info().Msg("Hello World") + + // Send logs to New Relic inside of a transaction + txn := app.StartTransaction("My Transaction") + ctx := newrelic.NewContext(context.Background(), txn) + + nrTxnHook := nrzerolog.NewRelicHook{ + App: app, + Context: ctx, + } + + txnLogger := baseLogger.Hook(nrTxnHook) + txnLogger.Debug().Msg("This is a transaction log") + + txn.End() +} +``` + +## Usage + +Please enable the agent to ingest your logs by calling newrelic.ConfigAppLogForwardingEnabled(true), +when setting up your application. This is not enabled by default. + +This integration for the zerolog logging frameworks uses a built in feature +of the zerolog framework called hook functions. Zerolog loggers can be modified +to have hook functions run on them before each time a write is executed. When a +logger is hooked, meaning a hook function was added to that logger with the Hook() +function, a copy of that logger is created with those changes. Note that zerolog +will *never* attempt to verify that any hook functions have not been duplicated, or +that fields are not repeated in any way. As a result, we recommend that you create +a base logger that is configured in the way you prefer to use zerolog. 
Then you +create hooked loggers to send log data to New Relic from that base logger. + +The plugin captures the log level, and the message from zerolog. It will also collect +distributed tracing data from your transaction context. At the moment the hook function is +called in zerolog, a timestamp will be generated for your log. In most cases, this +timestamp will be the same as the time posted in the zerolog log message, however it is possible that +there could be a slight offset depending on the the performance of your system. + + diff --git a/v3/integrations/logcontext-v2/nrzerolog/example/main.go b/v3/integrations/logcontext-v2/nrzerolog/example/main.go new file mode 100644 index 000000000..f3c284229 --- /dev/null +++ b/v3/integrations/logcontext-v2/nrzerolog/example/main.go @@ -0,0 +1,53 @@ +package main + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrzerolog" + "github.com/newrelic/go-agent/v3/newrelic" + "github.com/rs/zerolog" +) + +func main() { + baseLogger := zerolog.New(os.Stdout) + + app, err := newrelic.NewApplication( + newrelic.ConfigFromEnvironment(), + newrelic.ConfigAppName("NRZerolog Example"), + newrelic.ConfigInfoLogger(os.Stdout), + newrelic.ConfigAppLogForwardingEnabled(true), + ) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + app.WaitForConnection(5 * time.Second) + + nrHook := nrzerolog.NewRelicHook{ + App: app, + } + + nrLogger := baseLogger.Hook(nrHook) + nrLogger.Info().Msg("Hello World") + + // With transaction context + txn := app.StartTransaction("My Transaction") + ctx := newrelic.NewContext(context.Background(), txn) + + nrTxnHook := nrzerolog.NewRelicHook{ + App: app, + Context: ctx, + } + + txnLogger := baseLogger.Hook(nrTxnHook) + txnLogger.Debug().Msg("This is a transaction log") + + txn.End() + + nrLogger.Info().Msg("Goodbye") + app.Shutdown(10 * time.Second) +} diff --git a/v3/integrations/logcontext-v2/nrzerolog/go.mod 
b/v3/integrations/logcontext-v2/nrzerolog/go.mod new file mode 100644 index 000000000..99453551b --- /dev/null +++ b/v3/integrations/logcontext-v2/nrzerolog/go.mod @@ -0,0 +1,8 @@ +module github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrzerolog + +go 1.15 + +require ( + github.com/newrelic/go-agent/v3 v3.16.0 + github.com/rs/zerolog v1.26.1 +) diff --git a/v3/integrations/logcontext-v2/nrzerolog/hook.go b/v3/integrations/logcontext-v2/nrzerolog/hook.go new file mode 100644 index 000000000..5055a1213 --- /dev/null +++ b/v3/integrations/logcontext-v2/nrzerolog/hook.go @@ -0,0 +1,41 @@ +package nrzerolog + +import ( + "context" + + "github.com/newrelic/go-agent/v3/internal" + "github.com/newrelic/go-agent/v3/newrelic" + "github.com/rs/zerolog" +) + +func init() { internal.TrackUsage("integration", "logcontext", "zerolog") } + +type NewRelicHook struct { + App *newrelic.Application + Context context.Context +} + +func (h NewRelicHook) Run(e *zerolog.Event, level zerolog.Level, msg string) { + var txn *newrelic.Transaction + if h.Context != nil { + txn = newrelic.FromContext(h.Context) + } + + logLevel := "" + if level == zerolog.NoLevel { + logLevel = newrelic.LogSeverityUnknown + } else { + logLevel = level.String() + } + + data := newrelic.LogData{ + Severity: logLevel, + Message: msg, + } + + if txn != nil { + txn.RecordLog(data) + } else { + h.App.RecordLog(data) + } +} diff --git a/v3/internal/connect_reply.go b/v3/internal/connect_reply.go index 84b5d00b6..342c140d5 100644 --- a/v3/internal/connect_reply.go +++ b/v3/internal/connect_reply.go @@ -118,6 +118,7 @@ type EventHarvestConfig struct { Limits struct { TxnEvents *uint `json:"analytic_event_data,omitempty"` CustomEvents *uint `json:"custom_event_data,omitempty"` + LogEvents *uint `json:"log_event_data,omitempty"` ErrorEvents *uint `json:"error_event_data,omitempty"` SpanEvents *uint `json:"span_event_data,omitempty"` } `json:"harvest_limits"` @@ -136,19 +137,20 @@ func (r *ConnectReply) 
ConfigurablePeriod() time.Duration { func uintPtr(x uint) *uint { return &x } // DefaultEventHarvestConfig provides faster event harvest defaults. -func DefaultEventHarvestConfig(maxTxnEvents int) EventHarvestConfig { +func DefaultEventHarvestConfig(maxTxnEvents, maxLogEvents int) EventHarvestConfig { cfg := EventHarvestConfig{} cfg.ReportPeriodMs = DefaultConfigurableEventHarvestMs cfg.Limits.TxnEvents = uintPtr(uint(maxTxnEvents)) cfg.Limits.CustomEvents = uintPtr(uint(MaxCustomEvents)) + cfg.Limits.LogEvents = uintPtr(uint(maxLogEvents)) cfg.Limits.ErrorEvents = uintPtr(uint(MaxErrorEvents)) return cfg } // DefaultEventHarvestConfigWithDT is an extended version of DefaultEventHarvestConfig, // with the addition that it takes into account distributed tracer span event harvest limits. -func DefaultEventHarvestConfigWithDT(maxTxnEvents int, dtEnabled bool, spanEventLimit int) EventHarvestConfig { - cfg := DefaultEventHarvestConfig(maxTxnEvents) +func DefaultEventHarvestConfigWithDT(maxTxnEvents, maxLogEvents, spanEventLimit int, dtEnabled bool) EventHarvestConfig { + cfg := DefaultEventHarvestConfig(maxTxnEvents, maxLogEvents) if dtEnabled { cfg.Limits.SpanEvents = uintPtr(uint(spanEventLimit)) } diff --git a/v3/internal/connect_reply_test.go b/v3/internal/connect_reply_test.go index 64312007a..eb9711354 100644 --- a/v3/internal/connect_reply_test.go +++ b/v3/internal/connect_reply_test.go @@ -173,11 +173,13 @@ func TestNegativeHarvestLimits(t *testing.T) { } func TestDefaultEventHarvestConfigJSON(t *testing.T) { - js, err := json.Marshal(DefaultEventHarvestConfig(MaxTxnEvents)) + js, err := json.Marshal(DefaultEventHarvestConfig(MaxTxnEvents, MaxLogEvents)) if err != nil { t.Error(err) } - if string(js) != `{"report_period_ms":60000,"harvest_limits":{"analytic_event_data":10000,"custom_event_data":10000,"error_event_data":100}}` { - t.Error(string(js)) + + expect := 
`{"report_period_ms":60000,"harvest_limits":{"analytic_event_data":10000,"custom_event_data":10000,"log_event_data":10000,"error_event_data":100}}` + if string(js) != expect { + t.Errorf("DefaultEventHarvestConfig does not match expected valued:\nExpected:\t%s\nActual:\t\t%s", expect, string(js)) } } diff --git a/v3/internal/expect.go b/v3/internal/expect.go index 3dff6e475..638cb224b 100644 --- a/v3/internal/expect.go +++ b/v3/internal/expect.go @@ -26,6 +26,15 @@ type WantError struct { AgentAttributes map[string]interface{} } +// WantLog is a traced log event expectation +type WantLog struct { + Severity string + Message string + SpanID string + TraceID string + Timestamp int64 +} + func uniquePointer() *struct{} { s := struct{}{} return &s @@ -112,6 +121,7 @@ type WantTxn struct { // captured. type Expect interface { ExpectCustomEvents(t Validator, want []WantEvent) + ExpectLogEvents(t Validator, want []WantLog) ExpectErrors(t Validator, want []WantError) ExpectErrorEvents(t Validator, want []WantEvent) diff --git a/v3/internal/integrationsupport/integrationsupport.go b/v3/internal/integrationsupport/integrationsupport.go index 4ce06267f..54c7e8151 100644 --- a/v3/internal/integrationsupport/integrationsupport.go +++ b/v3/internal/integrationsupport/integrationsupport.go @@ -93,6 +93,13 @@ var DTEnabledCfgFn = func(cfg *newrelic.Config) { cfg.DistributedTracer.Enabled = true } +var AppLogEnabledCfgFn = func(cfg *newrelic.Config) { + cfg.Enabled = false + cfg.ApplicationLogging.Enabled = true + cfg.ApplicationLogging.Forwarding.Enabled = true + cfg.ApplicationLogging.Metrics.Enabled = true +} + // SampleEverythingReplyFn is a reusable ConnectReply function that samples everything var SampleEverythingReplyFn = func(reply *internal.ConnectReply) { reply.SetSampleEverything() diff --git a/v3/internal/jsonx/encode_test.go b/v3/internal/jsonx/encode_test.go index fed3ab7f7..cc0f5934c 100644 --- a/v3/internal/jsonx/encode_test.go +++ b/v3/internal/jsonx/encode_test.go 
@@ -180,3 +180,19 @@ func TestAppendString(t *testing.T) { } } } + +func BenchmarkAppendString(b *testing.B) { + buf := &bytes.Buffer{} + + for i := 0; i < b.N; i++ { + AppendString(buf, "s") + } +} + +func BenchmarkAppendString10(b *testing.B) { + buf := &bytes.Buffer{} + + for i := 0; i < b.N; i++ { + AppendString(buf, "qwertyuiop") + } +} diff --git a/v3/internal/limits.go b/v3/internal/limits.go index 7dcb96785..95fcebe6c 100644 --- a/v3/internal/limits.go +++ b/v3/internal/limits.go @@ -17,6 +17,9 @@ const ( // MaxCustomEvents is the maximum number of Transaction Events that can be captured // per 60-second harvest cycle MaxCustomEvents = 10 * 1000 + // MaxLogEvents is the maximum number of Log Events that can be captured per + // 60-second harvest cycle + MaxLogEvents = 10 * 1000 // MaxTxnEvents is the maximum number of Transaction Events that can be captured // per 60-second harvest cycle MaxTxnEvents = 10 * 1000 diff --git a/v3/newrelic/analytics_events.go b/v3/newrelic/analytics_events.go index aa751ceab..6c1fdb63b 100644 --- a/v3/newrelic/analytics_events.go +++ b/v3/newrelic/analytics_events.go @@ -19,8 +19,8 @@ type analyticsEventHeap []analyticsEvent type analyticsEvents struct { numSeen int - events analyticsEventHeap failedHarvests int + events analyticsEventHeap } func (events *analyticsEvents) NumSeen() float64 { return float64(events.numSeen) } diff --git a/v3/newrelic/app_run.go b/v3/newrelic/app_run.go index 3df42ea55..28973e7b8 100644 --- a/v3/newrelic/app_run.go +++ b/v3/newrelic/app_run.go @@ -122,6 +122,7 @@ func newAppRun(config config, reply *internal.ConnectReply) *appRun { MaxCustomEvents: run.MaxCustomEvents(), MaxErrorEvents: run.MaxErrorEvents(), MaxSpanEvents: run.MaxSpanEvents(), + LoggingConfig: run.LoggingConfig(), } return run @@ -187,6 +188,7 @@ func (run *appRun) txnTraceThreshold(apdexThreshold time.Duration) time.Duration func (run *appRun) ptrTxnEvents() *uint { return run.Reply.EventData.Limits.TxnEvents } func (run 
*appRun) ptrCustomEvents() *uint { return run.Reply.EventData.Limits.CustomEvents } +func (run *appRun) ptrLogEvents() *uint { return run.Reply.EventData.Limits.LogEvents } func (run *appRun) ptrErrorEvents() *uint { return run.Reply.EventData.Limits.ErrorEvents } func (run *appRun) ptrSpanEvents() *uint { return run.Reply.EventData.Limits.SpanEvents } @@ -194,10 +196,27 @@ func (run *appRun) MaxTxnEvents() int { return run.limit(run.Config.maxTxnEvents func (run *appRun) MaxCustomEvents() int { return run.limit(internal.MaxCustomEvents, run.ptrCustomEvents) } +func (run *appRun) MaxLogEvents() int { + return run.limit(internal.MaxLogEvents, run.ptrLogEvents) +} func (run *appRun) MaxErrorEvents() int { return run.limit(internal.MaxErrorEvents, run.ptrErrorEvents) } +func (run *appRun) LoggingConfig() (config loggingConfig) { + logging := run.Config.ApplicationLogging + + config.loggingEnabled = logging.Enabled + config.collectEvents = logging.Enabled && logging.Forwarding.Enabled && !run.Config.HighSecurity + config.maxLogEvents = run.MaxLogEvents() + config.collectMetrics = logging.Enabled && logging.Metrics.Enabled + + //TODO + config.localEnrichment = false + + return config +} + // MaxSpanEvents returns the reservoir limit for collected span events, // which will be the default or the user's configured size (if any), but // may be capped to the maximum allowed by the collector. 
@@ -219,6 +238,7 @@ func (run *appRun) ReportPeriods() map[harvestTypes]time.Duration { for tp, fn := range map[harvestTypes]func() *uint{ harvestTxnEvents: run.ptrTxnEvents, harvestCustomEvents: run.ptrCustomEvents, + harvestLogEvents: run.ptrLogEvents, harvestErrorEvents: run.ptrErrorEvents, harvestSpanEvents: run.ptrSpanEvents, } { diff --git a/v3/newrelic/app_run_test.go b/v3/newrelic/app_run_test.go index 56bd33c2f..308128550 100644 --- a/v3/newrelic/app_run_test.go +++ b/v3/newrelic/app_run_test.go @@ -130,6 +130,8 @@ func TestEmptyReplyEventHarvestDefaults(t *testing.T) { maxCustomEvents: internal.MaxCustomEvents, maxErrorEvents: internal.MaxErrorEvents, maxSpanEvents: run.Config.DistributedTracer.ReservoirLimit, + maxLogEvents: internal.MaxLogEvents, + periods: map[harvestTypes]time.Duration{ harvestTypesAll: 60 * time.Second, 0: 60 * time.Second, @@ -144,8 +146,9 @@ func TestEventHarvestFieldsAllPopulated(t *testing.T) { "harvest_limits": { "analytic_event_data": 1, "custom_event_data": 2, - "span_event_data": 3, - "error_event_data": 4 + "log_event_data": 3, + "span_event_data": 4, + "error_event_data": 5 } } }}`), internal.PreconnectReply{}) @@ -156,8 +159,9 @@ func TestEventHarvestFieldsAllPopulated(t *testing.T) { assertHarvestConfig(t, run.harvestConfig, expectHarvestConfig{ maxTxnEvents: 1, maxCustomEvents: 2, - maxErrorEvents: 4, - maxSpanEvents: 3, + maxLogEvents: 3, + maxSpanEvents: 4, + maxErrorEvents: 5, periods: map[harvestTypes]time.Duration{ harvestMetricsTraces: 60 * time.Second, harvestTypesEvents: 5 * time.Second, @@ -178,6 +182,7 @@ func TestZeroReportPeriod(t *testing.T) { assertHarvestConfig(t, run.harvestConfig, expectHarvestConfig{ maxTxnEvents: internal.MaxTxnEvents, maxCustomEvents: internal.MaxCustomEvents, + maxLogEvents: internal.MaxLogEvents, maxErrorEvents: internal.MaxErrorEvents, maxSpanEvents: run.Config.DistributedTracer.ReservoirLimit, periods: map[harvestTypes]time.Duration{ @@ -200,6 +205,7 @@ func 
TestEventHarvestFieldsOnlySpanEvents(t *testing.T) { assertHarvestConfig(t, run.harvestConfig, expectHarvestConfig{ maxTxnEvents: internal.MaxTxnEvents, maxCustomEvents: internal.MaxCustomEvents, + maxLogEvents: internal.MaxLogEvents, maxErrorEvents: internal.MaxErrorEvents, maxSpanEvents: 3, periods: map[harvestTypes]time.Duration{ @@ -224,6 +230,7 @@ func TestEventHarvestFieldsOnlyTxnEvents(t *testing.T) { maxCustomEvents: internal.MaxCustomEvents, maxErrorEvents: internal.MaxErrorEvents, maxSpanEvents: run.Config.DistributedTracer.ReservoirLimit, + maxLogEvents: internal.MaxLogEvents, periods: map[harvestTypes]time.Duration{ harvestTypesAll ^ harvestTxnEvents: 60 * time.Second, harvestTxnEvents: 5 * time.Second, @@ -244,6 +251,7 @@ func TestEventHarvestFieldsOnlyErrorEvents(t *testing.T) { assertHarvestConfig(t, run.harvestConfig, expectHarvestConfig{ maxTxnEvents: internal.MaxTxnEvents, maxCustomEvents: internal.MaxCustomEvents, + maxLogEvents: internal.MaxLogEvents, maxErrorEvents: 3, maxSpanEvents: run.Config.DistributedTracer.ReservoirLimit, periods: map[harvestTypes]time.Duration{ @@ -266,6 +274,7 @@ func TestEventHarvestFieldsOnlyCustomEvents(t *testing.T) { assertHarvestConfig(t, run.harvestConfig, expectHarvestConfig{ maxTxnEvents: internal.MaxTxnEvents, maxCustomEvents: 3, + maxLogEvents: internal.MaxLogEvents, maxErrorEvents: internal.MaxErrorEvents, maxSpanEvents: run.Config.DistributedTracer.ReservoirLimit, periods: map[harvestTypes]time.Duration{ @@ -366,9 +375,13 @@ type expectHarvestConfig struct { maxCustomEvents int maxErrorEvents int maxSpanEvents int + maxLogEvents int periods map[harvestTypes]time.Duration } +func errorExpectNotEqualActual(value string, expect, actual interface{}) error { + return fmt.Errorf("Expected %s value does not match actual; expected: %+v actual: %+v", value, expect, actual) +} func assertHarvestConfig(t testing.TB, hc harvestConfig, expect expectHarvestConfig) { if h, ok := t.(interface { Helper() @@ -376,19 +389,22 
@@ func assertHarvestConfig(t testing.TB, hc harvestConfig, expect expectHarvestCon h.Helper() } if max := hc.MaxTxnEvents; max != expect.maxTxnEvents { - t.Error(max, expect.maxTxnEvents) + t.Error(errorExpectNotEqualActual("maxTxnEvents", max, expect.maxTxnEvents)) } if max := hc.MaxCustomEvents; max != expect.maxCustomEvents { - t.Error(max, expect.maxCustomEvents) + t.Error(errorExpectNotEqualActual("MaxCustomEvents", max, expect.maxCustomEvents)) } if max := hc.MaxSpanEvents; max != expect.maxSpanEvents { - t.Error(max, expect.maxSpanEvents) + t.Error(errorExpectNotEqualActual("MaxSpanEvents", max, expect.maxSpanEvents)) } if max := hc.MaxErrorEvents; max != expect.maxErrorEvents { - t.Error(max, expect.maxErrorEvents) + t.Error(errorExpectNotEqualActual("MaxErrorEvents", max, expect.maxErrorEvents)) + } + if max := hc.LoggingConfig.maxLogEvents; max != expect.maxLogEvents { + t.Error(errorExpectNotEqualActual("MaxLogEvents", max, expect.maxErrorEvents)) } if periods := hc.ReportPeriods; !reflect.DeepEqual(periods, expect.periods) { - t.Error(periods, expect.periods) + t.Error(errorExpectNotEqualActual("ReportPeriods", periods, expect.periods)) } } diff --git a/v3/newrelic/application.go b/v3/newrelic/application.go index 7a8db268f..119fffa79 100644 --- a/v3/newrelic/application.go +++ b/v3/newrelic/application.go @@ -74,6 +74,29 @@ func (app *Application) RecordCustomMetric(name string, value float64) { } } +// RecordLog records the data from a single log line. +// This consumes a LogData object that should be configured +// with data taken from a logging framework. +// +// Certain parts of this feature can be turned off based on your +// config settings. Record log is capable of recording log events, +// as well as log metrics depending on how your application is +// configured. 
+func (app *Application) RecordLog(logEvent LogData) { + if nil == app { + return + } + if nil == app.app { + return + } + err := app.app.RecordLog(&logEvent) + if err != nil { + app.app.Error("unable to record log", map[string]interface{}{ + "reason": err.Error(), + }) + } +} + // WaitForConnection blocks until the application is connected, is // incapable of being connected, or the timeout has been reached. This // method is useful for short-lived processes since the application will diff --git a/v3/newrelic/collector.go b/v3/newrelic/collector.go index 5ed11dcef..ea61e146c 100644 --- a/v3/newrelic/collector.go +++ b/v3/newrelic/collector.go @@ -30,6 +30,7 @@ const ( cmdConnect = "connect" cmdMetrics = "metric_data" cmdCustomEvents = "custom_event_data" + cmdLogEvents = "log_event_data" cmdTxnEvents = "analytic_event_data" cmdErrorEvents = "error_event_data" cmdErrorData = "error_data" diff --git a/v3/newrelic/config.go b/v3/newrelic/config.go index fe0f7c7f7..1abf253a7 100644 --- a/v3/newrelic/config.go +++ b/v3/newrelic/config.go @@ -300,6 +300,9 @@ type Config struct { } } + // Config Settings for Logs in Context features + ApplicationLogging ApplicationLogging + // Attributes controls which attributes are enabled and disabled globally. // This setting affects all attribute destinations: Transaction Events, // Error Events, Transaction Traces and segments, Traced Errors, Span @@ -349,6 +352,29 @@ type Config struct { Error error } +// ApplicationLogging contains settings which control the capture and sending +// of log event data +type ApplicationLogging struct { + // If this is disabled, all sub-features are disabled; + // if it is enabled, the individual sub-feature configurations take effect. + // MAY accomplish this by not installing instrumentation, or by early-return/no-op as necessary for an agent. 
+ Enabled bool + // Forwarding controls log forwarding to New Relic One + Forwarding struct { + // Toggles whether the agent gathers log records for sending to New Relic. + Enabled bool + // Number of log records to send per minute to New Relic. + // Controls the overall memory consumption when using log forwarding. + // SHOULD be sent as part of the harvest_limits on Connect. + MaxSamplesStored int + } + Metrics struct { + // Toggles whether the agent gathers the user facing Logging/lines and Logging/lines/{SEVERITY} + // Logging Metrics used in the Logs chart on the APM Summary page. + Enabled bool + } +} + // AttributeDestinationConfig controls the attributes sent to each destination. // For more information, see: // https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-data/agent-attributes @@ -412,6 +438,12 @@ func defaultConfig() Config { c.TransactionTracer.Attributes.Enabled = true c.TransactionTracer.Segments.Attributes.Enabled = true + // Application Logging Settings + c.ApplicationLogging.Enabled = true + c.ApplicationLogging.Forwarding.Enabled = false + c.ApplicationLogging.Forwarding.MaxSamplesStored = internal.MaxLogEvents + c.ApplicationLogging.Metrics.Enabled = true + c.BrowserMonitoring.Enabled = true // browser monitoring attributes are disabled by default c.BrowserMonitoring.Attributes.Enabled = false @@ -513,6 +545,16 @@ func (c Config) maxTxnEvents() int { return configured } +// maxLogEvents returns the configured maximum number of Log Events if it has been configured +// and is less than the default maximum; otherwise it returns the default max. 
+func (c Config) maxLogEvents() int { + configured := c.ApplicationLogging.Forwarding.MaxSamplesStored + if configured < 0 || configured > internal.MaxTxnEvents { + return internal.MaxTxnEvents + } + return configured +} + func copyDestConfig(c AttributeDestinationConfig) AttributeDestinationConfig { cp := c if nil != c.Include { @@ -667,7 +709,7 @@ func configConnectJSONInternal(c Config, pid int, util *utilization.Data, e envi Util: util, SecurityPolicies: securityPolicies, Metadata: metadata, - EventData: internal.DefaultEventHarvestConfigWithDT(c.maxTxnEvents(), c.DistributedTracer.Enabled, c.DistributedTracer.ReservoirLimit), + EventData: internal.DefaultEventHarvestConfigWithDT(c.maxTxnEvents(), c.maxLogEvents(), c.DistributedTracer.ReservoirLimit, c.DistributedTracer.Enabled), }}) } diff --git a/v3/newrelic/config_options.go b/v3/newrelic/config_options.go index 3ba323924..55ec64823 100644 --- a/v3/newrelic/config_options.go +++ b/v3/newrelic/config_options.go @@ -44,6 +44,59 @@ func ConfigDistributedTracerReservoirLimit(limit int) ConfigOption { return func(cfg *Config) { cfg.DistributedTracer.ReservoirLimit = limit } } +// ConfigAppLogForwardingEnabled enables or disables the collection +// of logs from a user's application by the agent +// Defaults: enabled=false +func ConfigAppLogForwardingEnabled(enabled bool) ConfigOption { + return func(cfg *Config) { + if enabled { + cfg.ApplicationLogging.Enabled = true + cfg.ApplicationLogging.Forwarding.Enabled = true + } else { + cfg.ApplicationLogging.Forwarding.Enabled = false + cfg.ApplicationLogging.Forwarding.MaxSamplesStored = 0 + } + } +} + +// ConfigAppLogMetricsEnabled enables or disables the collection of metrics +// data for logs seen by an instrumented logging framework +// default: true +func ConfigAppLogMetricsEnabled(enabled bool) ConfigOption { + return func(cfg *Config) { + if enabled { + cfg.ApplicationLogging.Enabled = true + cfg.ApplicationLogging.Metrics.Enabled = true + } else { + 
cfg.ApplicationLogging.Metrics.Enabled = false + } + } +} + +// ConfigAppLogEnabled enables or disables all application logging features +// and data collection +func ConfigAppLogEnabled(enabled bool) ConfigOption { + return func(cfg *Config) { + if enabled { + cfg.ApplicationLogging.Enabled = true + } else { + cfg.ApplicationLogging.Enabled = false + } + } +} + +const ( + zerologFrameworkName = "Zerolog" +) + +// ConfigAppLogForwardingMaxSamplesStored allows users to set the maximum number of +// log events the agent is allowed to collect and store in a given harvest cycle. +func ConfigAppLogForwardingMaxSamplesStored(maxSamplesStored int) ConfigOption { + return func(cfg *Config) { + cfg.ApplicationLogging.Forwarding.MaxSamplesStored = maxSamplesStored + } +} + // ConfigLogger populates the Config's Logger. func ConfigLogger(l Logger) ConfigOption { return func(cfg *Config) { cfg.Logger = l } diff --git a/v3/newrelic/config_test.go b/v3/newrelic/config_test.go index b6df1731b..6b159976a 100644 --- a/v3/newrelic/config_test.go +++ b/v3/newrelic/config_test.go @@ -129,6 +129,16 @@ func TestCopyConfigReferenceFieldsPresent(t *testing.T) { "host":"my-hostname", "settings":{ "AppName":"my appname", + "ApplicationLogging": { + "Enabled":true, + "Forwarding": { + "Enabled": false, + "MaxSamplesStored": 10000 + }, + "Metrics": { + "Enabled": true + } + }, "Attributes":{"Enabled":true,"Exclude":["2"],"Include":["1"]}, "BrowserMonitoring":{ "Attributes":{"Enabled":false,"Exclude":["10"],"Include":["9"]}, @@ -250,6 +260,7 @@ func TestCopyConfigReferenceFieldsPresent(t *testing.T) { "harvest_limits": { "analytic_event_data": 10000, "custom_event_data": 10000, + "log_event_data": 10000, "error_event_data": 100, "span_event_data": 2000 } @@ -302,6 +313,16 @@ func TestCopyConfigReferenceFieldsAbsent(t *testing.T) { "host":"my-hostname", "settings":{ "AppName":"my appname", + "ApplicationLogging": { + "Enabled": true, + "Forwarding": { + "Enabled": false, + "MaxSamplesStored": 
10000 + }, + "Metrics": { + "Enabled": true + } + }, "Attributes":{"Enabled":true,"Exclude":null,"Include":null}, "BrowserMonitoring":{ "Attributes":{ @@ -415,6 +436,7 @@ func TestCopyConfigReferenceFieldsAbsent(t *testing.T) { "harvest_limits": { "analytic_event_data": 10000, "custom_event_data": 10000, + "log_event_data": 10000, "error_event_data": 100, "span_event_data": 2000 } diff --git a/v3/newrelic/expect_implementation.go b/v3/newrelic/expect_implementation.go index 5edcc3033..06e09c886 100644 --- a/v3/newrelic/expect_implementation.go +++ b/v3/newrelic/expect_implementation.go @@ -4,6 +4,7 @@ package newrelic import ( + "bytes" "encoding/json" "fmt" "time" @@ -94,9 +95,9 @@ func expectTxnMetrics(t internal.Validator, mt *metricTable, want internal.WantT expectMetrics(t, mt, metrics) } -func expectMetricField(t internal.Validator, id metricID, v1, v2 float64, fieldName string) { - if v1 != v2 { - t.Error("metric fields do not match", id, v1, v2, fieldName) +func expectMetricField(t internal.Validator, id metricID, expect, want float64, fieldName string) { + if expect != want { + t.Error("incorrect value for metric", fieldName, id, "expect:", expect, "want: ", want) } } @@ -113,7 +114,7 @@ func expectMetrics(t internal.Validator, mt *metricTable, expect []internal.Want func expectMetricsInternal(t internal.Validator, mt *metricTable, expect []internal.WantMetric, exactMatch bool) { if exactMatch { if len(mt.metrics) != len(expect) { - t.Error("metric counts do not match expectations", len(mt.metrics), len(expect)) + t.Error("incorrect number of metrics stored, expected:", len(expect), "got:", len(mt.metrics)) } } expectedIds := make(map[metricID]struct{}) @@ -122,7 +123,7 @@ func expectMetricsInternal(t internal.Validator, mt *metricTable, expect []inter expectedIds[id] = struct{}{} m := mt.metrics[id] if nil == m { - t.Error("unable to find metric", id) + t.Error("expected metric not found", id) continue } @@ -159,19 +160,31 @@ func expectAttributes(v 
internal.Validator, exists map[string]interface{}, expec if len(exists) != len(expect) { v.Error("attributes length difference", len(exists), len(expect)) } - for key, val := range expect { - found, ok := exists[key] + for key, expectVal := range expect { + actualVal, ok := exists[key] if !ok { v.Error("expected attribute not found: ", key) continue } - if val == internal.MatchAnything || val == "*" { + if expectVal == internal.MatchAnything || expectVal == "*" { continue } - v1 := fmt.Sprint(found) - v2 := fmt.Sprint(val) - if v1 != v2 { - v.Error("value difference", fmt.Sprintf("key=%s", key), v1, v2) + + actualString := fmt.Sprint(actualVal) + expectString := fmt.Sprint(expectVal) + switch expectVal.(type) { + case float64: + // json.Number type objects need to be converted into float64 strings + // when compared against a float64 or the comparison will fail due to + // the number formatting being different + if number, ok := actualVal.(json.Number); ok { + numString, _ := number.Float64() + actualString = fmt.Sprint(numString) + } + } + + if expectString != actualString { + v.Error(fmt.Sprintf("Values of key \"%s\" do not match; Expect: %s Actual: %s", key, expectString, actualString)) } } for key, val := range exists { @@ -188,18 +201,66 @@ func expectCustomEvents(v internal.Validator, cs *customEvents, expect []interna expectEvents(v, cs.analyticsEvents, expect, nil) } +func expectLogEvents(v internal.Validator, events *logEvents, expect []internal.WantLog) { + if len(events.logs) != len(expect) { + v.Error("actual number of events does not match what is expected", len(events.logs), len(expect)) + return + } + + for i, e := range expect { + event := events.logs[i] + expectLogEvent(v, event, e) + } +} + +func expectLogEvent(v internal.Validator, event logEvent, want internal.WantLog) { + if event.message != want.Message { + v.Error(fmt.Sprintf("unexpected log message: want %s, got %s", event.message, want.Message)) + return + } + if event.severity != 
want.Severity { + v.Error(fmt.Sprintf("unexpected log severity: want %s, got %s", event.severity, want.Severity)) + return + } + if event.traceID != want.TraceID { + v.Error(fmt.Sprintf("unexpected log trace id: want %s, got %s", event.traceID, want.TraceID)) + return + } + if event.spanID != want.SpanID { + v.Error(fmt.Sprintf("unexpected log span id: want %s, got %s", event.spanID, want.SpanID)) + return + } + if event.timestamp != want.Timestamp { + v.Error(fmt.Sprintf("unexpected log timestamp: want %d, got %d", event.timestamp, want.Timestamp)) + return + } +} + func expectEvent(v internal.Validator, e json.Marshaler, expect internal.WantEvent) { js, err := e.MarshalJSON() if nil != err { v.Error("unable to marshal event", err) return } + + // Because we are unmarshaling into a generic struct without types + // JSON numbers will be set to the float64 type by default, causing + // errors when comparing to the expected integer timestamp value. + decoder := json.NewDecoder(bytes.NewReader(js)) + decoder.UseNumber() var event []map[string]interface{} - err = json.Unmarshal(js, &event) + err = decoder.Decode(&event) if nil != err { v.Error("unable to parse event json", err) return } + + // avoid nil pointer errors or index out of bounds errors + if event == nil || len(event) == 0 { + v.Error("Event can not be nil or empty") + return + } + intrinsics := event[0] userAttributes := event[1] agentAttributes := event[2] diff --git a/v3/newrelic/harvest.go b/v3/newrelic/harvest.go index b92865d4f..3a7e1cf61 100644 --- a/v3/newrelic/harvest.go +++ b/v3/newrelic/harvest.go @@ -22,13 +22,14 @@ const ( harvestMetricsTraces harvestTypes = 1 << iota harvestSpanEvents harvestCustomEvents + harvestLogEvents harvestTxnEvents harvestErrorEvents ) const ( // harvestTypesEvents includes all Event types - harvestTypesEvents = harvestSpanEvents | harvestCustomEvents | harvestTxnEvents | harvestErrorEvents + harvestTypesEvents = harvestSpanEvents | harvestCustomEvents | 
harvestTxnEvents | harvestErrorEvents | harvestLogEvents // harvestTypesAll includes all harvest types harvestTypesAll = harvestMetricsTraces | harvestTypesEvents ) @@ -66,6 +67,7 @@ type harvest struct { SlowSQLs *slowQueries SpanEvents *spanEvents CustomEvents *customEvents + LogEvents *logEvents TxnEvents *txnEvents ErrorEvents *errorEvents } @@ -92,6 +94,11 @@ func (h *harvest) Ready(now time.Time) *harvest { ready.CustomEvents = h.CustomEvents h.CustomEvents = newCustomEvents(h.CustomEvents.capacity()) } + if 0 != types&harvestLogEvents { + h.LogEvents.RecordLoggingMetrics(h.Metrics) + ready.LogEvents = h.LogEvents + h.LogEvents = newLogEvents(h.LogEvents.commonAttributes, h.LogEvents.config) + } if 0 != types&harvestTxnEvents { h.Metrics.addCount(txnEventsSeen, h.TxnEvents.NumSeen(), forced) h.Metrics.addCount(txnEventsSent, h.TxnEvents.NumSaved(), forced) @@ -133,6 +140,9 @@ func (h *harvest) Payloads(splitLargeTxnEvents bool) (ps []payloadCreator) { if nil != h.CustomEvents { ps = append(ps, h.CustomEvents) } + if nil != h.LogEvents { + ps = append(ps, h.LogEvents) + } if nil != h.ErrorEvents { ps = append(ps, h.ErrorEvents) } @@ -162,11 +172,13 @@ func (h *harvest) Payloads(splitLargeTxnEvents bool) (ps []payloadCreator) { } type harvestConfig struct { - ReportPeriods map[harvestTypes]time.Duration - MaxSpanEvents int - MaxCustomEvents int - MaxErrorEvents int - MaxTxnEvents int + ReportPeriods map[harvestTypes]time.Duration + CommonAttributes commonAttributes + LoggingConfig loggingConfig + MaxSpanEvents int + MaxCustomEvents int + MaxErrorEvents int + MaxTxnEvents int } // newHarvest returns a new Harvest. 
@@ -179,6 +191,7 @@ func newHarvest(now time.Time, configurer harvestConfig) *harvest { SlowSQLs: newSlowQueries(maxHarvestSlowSQLs), SpanEvents: newSpanEvents(configurer.MaxSpanEvents), CustomEvents: newCustomEvents(configurer.MaxCustomEvents), + LogEvents: newLogEvents(configurer.CommonAttributes, configurer.LoggingConfig), TxnEvents: newTxnEvents(configurer.MaxTxnEvents), ErrorEvents: newErrorEvents(configurer.MaxErrorEvents), } @@ -199,8 +212,14 @@ func createTraceObserverMetrics(to traceObserver, metrics *metricTable) { } } +func createAppLoggingSupportabilityMetrics(lc *loggingConfig, metrics *metricTable) { + lc.connectMetrics(metrics) +} + // CreateFinalMetrics creates extra metrics at harvest time. -func (h *harvest) CreateFinalMetrics(reply *internal.ConnectReply, hc harvestConfig, to traceObserver) { +func (h *harvest) CreateFinalMetrics(run *appRun, to traceObserver) { + reply := run.Reply + hc := run.harvestConfig if nil == h { return } @@ -220,9 +239,11 @@ func (h *harvest) CreateFinalMetrics(reply *internal.ConnectReply, hc harvestCon h.Metrics.addValue(supportCustomEventLimit, "", float64(hc.MaxCustomEvents), forced) h.Metrics.addValue(supportErrorEventLimit, "", float64(hc.MaxErrorEvents), forced) h.Metrics.addValue(supportSpanEventLimit, "", float64(hc.MaxSpanEvents), forced) + h.Metrics.addValue(supportLogEventLimit, "", float64(hc.LoggingConfig.maxLogEvents), forced) createTraceObserverMetrics(to, h.Metrics) createTrackUsageMetrics(h.Metrics) + createAppLoggingSupportabilityMetrics(&hc.LoggingConfig, h.Metrics) h.Metrics = h.Metrics.ApplyRules(reply.MetricRules) } @@ -319,13 +340,20 @@ func createTxnMetrics(args *txnData, metrics *metricTable) { } var ( - // dfltHarvestCfgr is use in internal test cases, and for situations - // where we don't have a ConnectReply, such as for serverless harvests + + // This should only be used by harvests in cases where a connect response is unavailable dfltHarvestCfgr = harvestConfig{ ReportPeriods: 
map[harvestTypes]time.Duration{harvestTypesAll: fixedHarvestPeriod}, MaxTxnEvents: internal.MaxTxnEvents, MaxSpanEvents: defaultMaxSpanEvents, MaxCustomEvents: internal.MaxCustomEvents, MaxErrorEvents: internal.MaxErrorEvents, + LoggingConfig: loggingConfig{ + true, + false, + true, + false, + internal.MaxLogEvents, + }, } ) diff --git a/v3/newrelic/harvest_test.go b/v3/newrelic/harvest_test.go index 76317ed4d..c1c23eb18 100644 --- a/v3/newrelic/harvest_test.go +++ b/v3/newrelic/harvest_test.go @@ -11,9 +11,23 @@ import ( "github.com/newrelic/go-agent/v3/internal/logger" ) +var ( + // This is for testing only + testHarvestCfgr = generateTestHarvestConfig() +) + +func generateTestHarvestConfig() harvestConfig { + cfg := dfltHarvestCfgr + + // Enable logging features for testing (not enabled by default) + loggingCfg := loggingConfigEnabled(internal.MaxLogEvents) + cfg.LoggingConfig = loggingCfg + return cfg +} + func TestHarvestTimerAllFixed(t *testing.T) { now := time.Now() - harvest := newHarvest(now, dfltHarvestCfgr) + harvest := newHarvest(now, testHarvestCfgr) timer := harvest.timer for _, tc := range []struct { Elapsed time.Duration @@ -69,9 +83,15 @@ func TestCreateFinalMetrics(t *testing.T) { // If the harvest or metrics is nil then CreateFinalMetrics should // not panic. 
var nilHarvest *harvest - nilHarvest.CreateFinalMetrics(nil, dfltHarvestCfgr, nil) + + config := config{Config: defaultConfig()} + + run := newAppRun(config, internal.ConnectReplyDefaults()) + run.harvestConfig = testHarvestCfgr + + nilHarvest.CreateFinalMetrics(run, nil) emptyHarvest := &harvest{} - emptyHarvest.CreateFinalMetrics(nil, dfltHarvestCfgr, nil) + emptyHarvest.CreateFinalMetrics(run, nil) replyJSON := []byte(`{"return_value":{ "metric_name_rules":[{ @@ -84,7 +104,8 @@ func TestCreateFinalMetrics(t *testing.T) { "analytic_event_data": 22, "custom_event_data": 33, "error_event_data": 44, - "span_event_data": 55 + "span_event_data": 55, + "log_event_data":66 } } }}`) @@ -101,10 +122,13 @@ func TestCreateFinalMetrics(t *testing.T) { MaxCustomEvents: 33, MaxErrorEvents: 44, MaxSpanEvents: 55, + LoggingConfig: loggingConfigEnabled(66), } h := newHarvest(now, cfgr) h.Metrics.addCount("rename_me", 1.0, unforced) - h.CreateFinalMetrics(reply, cfgr, nil) + run = newAppRun(config, reply) + run.harvestConfig = cfgr + h.CreateFinalMetrics(run, nil) expectMetrics(t, h.Metrics, []internal.WantMetric{ {Name: instanceReporting, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "been_renamed", Scope: "", Forced: false, Data: []float64{1.0, 0, 0, 0, 0, 0}}, @@ -113,9 +137,14 @@ func TestCreateFinalMetrics(t *testing.T) { {Name: "Supportability/EventHarvest/CustomEventData/HarvestLimit", Scope: "", Forced: true, Data: []float64{1, 33, 33, 33, 33, 33 * 33}}, {Name: "Supportability/EventHarvest/ErrorEventData/HarvestLimit", Scope: "", Forced: true, Data: []float64{1, 44, 44, 44, 44, 44 * 44}}, {Name: "Supportability/EventHarvest/SpanEventData/HarvestLimit", Scope: "", Forced: true, Data: []float64{1, 55, 55, 55, 55, 55 * 55}}, + {Name: "Supportability/EventHarvest/LogEventData/HarvestLimit", Scope: "", Forced: true, Data: []float64{1, 66, 66, 66, 66, 66 * 66}}, {Name: "Supportability/Go/Version/" + Version, Scope: "", Forced: true, Data: []float64{1, 0, 0, 
0, 0, 0}}, {Name: "Supportability/Go/Runtime/Version/" + goVersionSimple, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Go/gRPC/Version/" + grpcVersion, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/Forwarding/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/Metrics/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/LocalDecorating/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, }) // Test again without any metric rules or event_harvest_config. @@ -126,9 +155,11 @@ func TestCreateFinalMetrics(t *testing.T) { if err != nil { t.Fatal(err) } - h = newHarvest(now, dfltHarvestCfgr) + run = newAppRun(config, reply) + run.harvestConfig = testHarvestCfgr + h = newHarvest(now, testHarvestCfgr) h.Metrics.addCount("rename_me", 1.0, unforced) - h.CreateFinalMetrics(reply, dfltHarvestCfgr, nil) + h.CreateFinalMetrics(run, nil) expectMetrics(t, h.Metrics, []internal.WantMetric{ {Name: instanceReporting, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "rename_me", Scope: "", Forced: false, Data: []float64{1.0, 0, 0, 0, 0, 0}}, @@ -137,9 +168,14 @@ func TestCreateFinalMetrics(t *testing.T) { {Name: "Supportability/EventHarvest/CustomEventData/HarvestLimit", Scope: "", Forced: true, Data: []float64{1, 10 * 1000, 10 * 1000, 10 * 1000, 10 * 1000, 10 * 1000 * 10 * 1000}}, {Name: "Supportability/EventHarvest/ErrorEventData/HarvestLimit", Scope: "", Forced: true, Data: []float64{1, 100, 100, 100, 100, 100 * 100}}, {Name: "Supportability/EventHarvest/SpanEventData/HarvestLimit", Scope: "", Forced: true, Data: []float64{1, 2000, 2000, 2000, 2000, 2000 * 2000}}, + {Name: "Supportability/EventHarvest/LogEventData/HarvestLimit", Scope: "", Forced: true, Data: 
[]float64{1, 10000, 10000, 10000, 10000, 10000 * 10000}}, {Name: "Supportability/Go/Version/" + Version, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Go/Runtime/Version/" + goVersionSimple, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, {Name: "Supportability/Go/gRPC/Version/" + grpcVersion, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/Forwarding/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/Metrics/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Supportability/Logging/LocalDecorating/Golang", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, }) } @@ -154,15 +190,17 @@ func TestCreateFinalMetricsTraceObserver(t *testing.T) { t.Fatal(err) } + run := newAppRun(config{Config: defaultConfig()}, reply) + run.harvestConfig = testHarvestCfgr + to, _ := newTraceObserver( internal.AgentRunID("runid"), nil, observerConfig{ log: logger.ShimLogger{}, }, ) - - h := newHarvest(now, dfltHarvestCfgr) - h.CreateFinalMetrics(reply, dfltHarvestCfgr, to) + h := newHarvest(now, testHarvestCfgr) + h.CreateFinalMetrics(run, to) expectMetrics(t, h.Metrics, []internal.WantMetric{ {Name: instanceReporting, Scope: "", Forced: true, Data: nil}, {Name: "Supportability/EventHarvest/ReportPeriod", Scope: "", Forced: true, Data: nil}, @@ -170,6 +208,11 @@ func TestCreateFinalMetricsTraceObserver(t *testing.T) { {Name: "Supportability/EventHarvest/CustomEventData/HarvestLimit", Scope: "", Forced: true, Data: nil}, {Name: "Supportability/EventHarvest/ErrorEventData/HarvestLimit", Scope: "", Forced: true, Data: nil}, {Name: "Supportability/EventHarvest/SpanEventData/HarvestLimit", Scope: "", Forced: true, Data: nil}, + {Name: "Supportability/EventHarvest/LogEventData/HarvestLimit", Scope: "", Forced: 
true, Data: nil}, + {Name: "Supportability/Logging/Golang", Scope: "", Forced: true, Data: nil}, + {Name: "Supportability/Logging/Forwarding/Golang", Scope: "", Forced: true, Data: nil}, + {Name: "Supportability/Logging/Metrics/Golang", Scope: "", Forced: true, Data: nil}, + {Name: "Supportability/Logging/LocalDecorating/Golang", Scope: "", Forced: true, Data: nil}, {Name: "Supportability/Go/Version/" + Version, Scope: "", Forced: true, Data: nil}, {Name: "Supportability/Go/Runtime/Version/" + goVersionSimple, Scope: "", Forced: true, Data: nil}, {Name: "Supportability/Go/gRPC/Version/" + grpcVersion, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, @@ -179,9 +222,9 @@ func TestCreateFinalMetricsTraceObserver(t *testing.T) { } func TestEmptyPayloads(t *testing.T) { - h := newHarvest(time.Now(), dfltHarvestCfgr) + h := newHarvest(time.Now(), testHarvestCfgr) payloads := h.Payloads(true) - if len(payloads) != 8 { + if len(payloads) != 9 { t.Error(len(payloads)) } for _, p := range payloads { @@ -209,7 +252,7 @@ func TestPayloadsEmptyHarvest(t *testing.T) { func TestHarvestNothingReady(t *testing.T) { now := time.Now() - h := newHarvest(now, dfltHarvestCfgr) + h := newHarvest(now, testHarvestCfgr) ready := h.Ready(now.Add(10 * time.Second)) if ready != nil { t.Error("harvest should be nil") @@ -260,6 +303,62 @@ func TestHarvestCustomEventsReady(t *testing.T) { }) } +func TestHarvestLogEventsReady(t *testing.T) { + now := time.Now() + fixedHarvestTypes := harvestMetricsTraces & harvestTxnEvents & harvestSpanEvents & harvestLogEvents + h := newHarvest(now, harvestConfig{ + ReportPeriods: map[harvestTypes]time.Duration{ + fixedHarvestTypes: fixedHarvestPeriod, + harvestLogEvents: time.Second * 5, + }, + LoggingConfig: loggingConfigEnabled(3), + }) + + logEvent := logEvent{ + 0.5, + 123456, + "INFO", + "User 'xyz' logged in", + "123456789ADF", + "ADF09876565", + } + + h.LogEvents.Add(&logEvent) + ready := h.Ready(now.Add(10 * time.Second)) + payloads := 
ready.Payloads(true) + if len(payloads) == 0 { + t.Fatal("no payloads generated") + } else if len(payloads) > 1 { + t.Fatalf("too many payloads: %d", len(payloads)) + } + p := payloads[0] + if m := p.EndpointMethod(); m != "log_event_data" { + t.Error(m) + } + data, err := p.Data("agentRunID", now) + if nil != err || nil == data { + t.Error(err, data) + } + if h.LogEvents.capacity() != 3 || h.LogEvents.NumSaved() != 0 { + t.Fatal("log events not correctly reset") + } + + sampleLogEvent := internal.WantLog{ + Severity: logEvent.severity, + Message: logEvent.message, + SpanID: logEvent.spanID, + TraceID: logEvent.traceID, + Timestamp: logEvent.timestamp, + } + + expectLogEvents(t, ready.LogEvents, []internal.WantLog{sampleLogEvent}) + expectMetrics(t, h.Metrics, []internal.WantMetric{ + {Name: logsSeen, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: logsSeen + "/" + logEvent.severity, Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: logsDropped, Scope: "", Forced: true, Data: []float64{0, 0, 0, 0, 0, 0}}, + }) +} + func TestHarvestTxnEventsReady(t *testing.T) { now := time.Now() fixedHarvestTypes := harvestMetricsTraces & harvestCustomEvents & harvestSpanEvents & harvestErrorEvents @@ -404,6 +503,7 @@ func TestHarvestMetricsTracesReady(t *testing.T) { MaxCustomEvents: 1, MaxErrorEvents: 1, MaxSpanEvents: 1, + LoggingConfig: loggingConfigEnabled(1), }) h.Metrics.addCount("zip", 1, forced) @@ -465,7 +565,7 @@ func TestMergeFailedHarvest(t *testing.T) { start1 := time.Now() start2 := start1.Add(1 * time.Minute) - h := newHarvest(start1, dfltHarvestCfgr) + h := newHarvest(start1, testHarvestCfgr) h.Metrics.addCount("zip", 1, forced) h.TxnEvents.AddTxnEvent(&txnEvent{ FinalName: "finalName", @@ -473,6 +573,17 @@ func TestMergeFailedHarvest(t *testing.T) { Duration: 1 * time.Second, TotalTime: 2 * time.Second, }, 0) + + logEvent := logEvent{ + 0.5, + 123456, + "INFO", + "User 'xyz' logged in", + "123456789ADF", + "ADF09876565", + 
} + + h.LogEvents.Add(&logEvent) customEventParams := map[string]interface{}{"zip": 1} ce, err := createCustomEvent("myEvent", customEventParams, time.Now()) if nil != err { @@ -513,6 +624,9 @@ func TestMergeFailedHarvest(t *testing.T) { if 0 != h.CustomEvents.analyticsEvents.failedHarvests { t.Error(h.CustomEvents.analyticsEvents.failedHarvests) } + if 0 != h.LogEvents.failedHarvests { + t.Error(h.LogEvents.failedHarvests) + } if 0 != h.TxnEvents.analyticsEvents.failedHarvests { t.Error(h.TxnEvents.analyticsEvents.failedHarvests) } @@ -532,6 +646,15 @@ func TestMergeFailedHarvest(t *testing.T) { }, UserAttributes: customEventParams, }}) + expectLogEvents(t, h.LogEvents, []internal.WantLog{ + { + Severity: logEvent.severity, + Message: logEvent.message, + SpanID: logEvent.spanID, + TraceID: logEvent.traceID, + Timestamp: logEvent.timestamp, + }, + }) expectErrorEvents(t, h.ErrorEvents, []internal.WantEvent{{ Intrinsics: map[string]interface{}{ "error.class": "klass", @@ -564,7 +687,7 @@ func TestMergeFailedHarvest(t *testing.T) { Klass: "klass", }}) - nextHarvest := newHarvest(start2, dfltHarvestCfgr) + nextHarvest := newHarvest(start2, testHarvestCfgr) if start2 != nextHarvest.Metrics.metricPeriodStart { t.Error(nextHarvest.Metrics.metricPeriodStart) } @@ -582,6 +705,9 @@ func TestMergeFailedHarvest(t *testing.T) { if 1 != nextHarvest.CustomEvents.analyticsEvents.failedHarvests { t.Error(nextHarvest.CustomEvents.analyticsEvents.failedHarvests) } + if 1 != nextHarvest.LogEvents.failedHarvests { + t.Error(nextHarvest.LogEvents.failedHarvests) + } if 1 != nextHarvest.TxnEvents.analyticsEvents.failedHarvests { t.Error(nextHarvest.TxnEvents.analyticsEvents.failedHarvests) } @@ -601,6 +727,15 @@ func TestMergeFailedHarvest(t *testing.T) { }, UserAttributes: customEventParams, }}) + expectLogEvents(t, nextHarvest.LogEvents, []internal.WantLog{ + { + Severity: logEvent.severity, + Message: logEvent.message, + SpanID: logEvent.spanID, + TraceID: logEvent.traceID, + 
Timestamp: logEvent.timestamp, + }, + }) expectErrorEvents(t, nextHarvest.ErrorEvents, []internal.WantEvent{{ Intrinsics: map[string]interface{}{ "error.class": "klass", @@ -722,7 +857,7 @@ func TestCreateTxnMetrics(t *testing.T) { func TestHarvestSplitTxnEvents(t *testing.T) { now := time.Now() - h := newHarvest(now, dfltHarvestCfgr) + h := newHarvest(now, testHarvestCfgr) for i := 0; i < internal.MaxTxnEvents; i++ { h.TxnEvents.AddTxnEvent(&txnEvent{}, priority(float32(i))) } @@ -730,10 +865,10 @@ func TestHarvestSplitTxnEvents(t *testing.T) { payloadsWithSplit := h.Payloads(true) payloadsWithoutSplit := h.Payloads(false) - if len(payloadsWithSplit) != 9 { + if len(payloadsWithSplit) != 10 { t.Error(len(payloadsWithSplit)) } - if len(payloadsWithoutSplit) != 8 { + if len(payloadsWithoutSplit) != 9 { t.Error(len(payloadsWithoutSplit)) } } @@ -818,7 +953,7 @@ func TestCreateTxnMetricsOldCAT(t *testing.T) { func TestNewHarvestSetsDefaultValues(t *testing.T) { now := time.Now() - h := newHarvest(now, dfltHarvestCfgr) + h := newHarvest(now, testHarvestCfgr) if cp := h.TxnEvents.capacity(); cp != internal.MaxTxnEvents { t.Error("wrong txn event capacity", cp) @@ -826,6 +961,9 @@ func TestNewHarvestSetsDefaultValues(t *testing.T) { if cp := h.CustomEvents.capacity(); cp != internal.MaxCustomEvents { t.Error("wrong custom event capacity", cp) } + if cp := h.LogEvents.capacity(); cp != internal.MaxLogEvents { + t.Error("wrong log event capacity", cp) + } if cp := h.ErrorEvents.capacity(); cp != internal.MaxErrorEvents { t.Error("wrong error event capacity", cp) } @@ -845,6 +983,7 @@ func TestNewHarvestUsesConnectReply(t *testing.T) { MaxCustomEvents: 2, MaxErrorEvents: 3, MaxSpanEvents: 4, + LoggingConfig: loggingConfigEnabled(5), }) if cp := h.TxnEvents.capacity(); cp != 1 { @@ -859,6 +998,9 @@ func TestNewHarvestUsesConnectReply(t *testing.T) { if cp := h.SpanEvents.capacity(); cp != 4 { t.Error("wrong span event capacity", cp) } + if cp := h.LogEvents.capacity(); cp != 
5 { + t.Error("wrong log event capacity", cp) + } } func TestConfigurableHarvestZeroHarvestLimits(t *testing.T) { @@ -873,6 +1015,7 @@ func TestConfigurableHarvestZeroHarvestLimits(t *testing.T) { MaxCustomEvents: 0, MaxErrorEvents: 0, MaxSpanEvents: 0, + LoggingConfig: loggingConfigEnabled(0), }) if cp := h.TxnEvents.capacity(); cp != 0 { t.Error("wrong txn event capacity", cp) @@ -880,6 +1023,9 @@ func TestConfigurableHarvestZeroHarvestLimits(t *testing.T) { if cp := h.CustomEvents.capacity(); cp != 0 { t.Error("wrong custom event capacity", cp) } + if cp := h.LogEvents.capacity(); cp != 0 { + t.Error("wrong log event capacity", cp) + } if cp := h.ErrorEvents.capacity(); cp != 0 { t.Error("wrong error event capacity", cp) } @@ -891,6 +1037,7 @@ func TestConfigurableHarvestZeroHarvestLimits(t *testing.T) { // safe. h.TxnEvents.AddTxnEvent(&txnEvent{}, 1.0) h.CustomEvents.Add(&customEvent{}) + h.LogEvents.Add(&logEvent{}) h.ErrorEvents.Add(&errorEvent{}, 1.0) h.SpanEvents.addEventPopulated(&sampleSpanEvent) diff --git a/v3/newrelic/internal_app.go b/v3/newrelic/internal_app.go index 767f1782e..27d492082 100644 --- a/v3/newrelic/internal_app.go +++ b/v3/newrelic/internal_app.go @@ -66,7 +66,7 @@ type app struct { } func (app *app) doHarvest(h *harvest, harvestStart time.Time, run *appRun) { - h.CreateFinalMetrics(run.Reply, run.harvestConfig, app.getObserver()) + h.CreateFinalMetrics(run, app.getObserver()) payloads := h.Payloads(app.config.DistributedTracer.Enabled) for _, p := range payloads { @@ -291,6 +291,13 @@ func (app *app) process() { "server-SpanEvents.Enabled": run.Config.SpanEvents.Enabled, }) } + + run.harvestConfig.CommonAttributes = commonAttributes{ + hostname: app.config.hostname, + entityName: app.config.AppName, + entityGUID: run.Reply.EntityGUID, + } + h = newHarvest(time.Now(), run.harvestConfig) app.setState(run, nil) @@ -574,6 +581,26 @@ func (app *app) RecordCustomMetric(name string, value float64) error { return nil } +var ( + 
errAppLoggingDisabled = errors.New("log data can not be recorded when application logging is disabled") +) + +// RecordLog implements newrelic.Application's RecordLog. +func (app *app) RecordLog(log *LogData) error { + if !app.config.ApplicationLogging.Enabled { + return errAppLoggingDisabled + } + + event, err := log.toLogEvent() + if err != nil { + return err + } + + run, _ := app.getState() + app.Consume(run.Reply.RunID, &event) + return nil +} + var ( _ internal.ServerlessWriter = &app{} ) @@ -605,6 +632,10 @@ func (app *app) ExpectCustomEvents(t internal.Validator, want []internal.WantEve expectCustomEvents(extendValidator(t, "custom events"), app.testHarvest.CustomEvents, want) } +func (app *app) ExpectLogEvents(t internal.Validator, want []internal.WantLog) { + expectLogEvents(extendValidator(t, "log events"), app.testHarvest.LogEvents, want) +} + func (app *app) ExpectErrors(t internal.Validator, want []internal.WantError) { t = extendValidator(t, "traced errors") expectErrors(t, app.testHarvest.ErrorTraces, want) diff --git a/v3/newrelic/internal_app_test.go b/v3/newrelic/internal_app_test.go index 4a8de7387..366a7756b 100644 --- a/v3/newrelic/internal_app_test.go +++ b/v3/newrelic/internal_app_test.go @@ -8,6 +8,8 @@ import ( "fmt" "testing" "time" + + "github.com/newrelic/go-agent/v3/internal" ) func TestConnectBackoff(t *testing.T) { @@ -71,3 +73,73 @@ func TestConfigOptionError(t *testing.T) { t.Error("app not nil") } } + +const ( + SampleAppName = "my app" +) + +// ExpectApp combines Application and Expect, for use in validating data in test apps +type ExpectApp struct { + internal.Expect + *Application +} + +// NewTestApp creates an ExpectApp with the given ConnectReply function and Config function +func NewTestApp(replyfn func(*internal.ConnectReply), cfgFn ...ConfigOption) ExpectApp { + cfgFn = append(cfgFn, + func(cfg *Config) { + // Prevent spawning app goroutines in tests. 
+ if !cfg.ServerlessMode.Enabled { + cfg.Enabled = false + } + }, + ConfigAppName(SampleAppName), + ConfigLicense(testLicenseKey), + ) + + app, err := NewApplication(cfgFn...) + if nil != err { + panic(err) + } + + internal.HarvestTesting(app.Private, replyfn) + + return ExpectApp{ + Expect: app.Private.(internal.Expect), + Application: app, + } +} + +var SampleEverythingReplyFn = func(reply *internal.ConnectReply) { + reply.SetSampleEverything() +} + +var ConfigTestAppLogFn = func(cfg *Config) { + cfg.Enabled = false + cfg.ApplicationLogging.Enabled = true + cfg.ApplicationLogging.Forwarding.Enabled = true + cfg.ApplicationLogging.Metrics.Enabled = true +} + +func TestRecordLog(t *testing.T) { + testApp := NewTestApp( + SampleEverythingReplyFn, + ConfigTestAppLogFn, + ) + + time := int64(timeToUnixMilliseconds(time.Now())) + + testApp.Application.RecordLog(LogData{ + Severity: "Debug", + Message: "Test Message", + Timestamp: time, + }) + + testApp.ExpectLogEvents(t, []internal.WantLog{ + { + Severity: "Debug", + Message: "Test Message", + Timestamp: time, + }, + }) +} diff --git a/v3/newrelic/internal_txn.go b/v3/newrelic/internal_txn.go index 942056aee..ca77ca9a6 100644 --- a/v3/newrelic/internal_txn.go +++ b/v3/newrelic/internal_txn.go @@ -226,6 +226,17 @@ func (thd *thread) SetWebResponse(w http.ResponseWriter) http.ResponseWriter { }) } +func (thd *thread) StoreLog(log *logEvent) { + txn := thd.txn + txn.Lock() + defer txn.Unlock() + + if txn.logs == nil { + txn.logs = make(logEventHeap, 0, internal.MaxLogEvents) + } + txn.logs.Add(log) +} + func (txn *txn) freezeName() { if txn.ignore || ("" != txn.FinalName) { return @@ -262,6 +273,13 @@ func (txn *txn) MergeIntoHarvest(h *harvest) { createTxnMetrics(&txn.txnData, h.Metrics) mergeBreakdownMetrics(&txn.txnData, h.Metrics) + // Dump log events into harvest + // Note: this will create a surge of log events that could affect sampling. 
+ for _, logEvent := range txn.logs { + logEvent.priority = priority + h.LogEvents.Add(&logEvent) + } + if txn.Config.TransactionEvents.Enabled { // Allocate a new TxnEvent to prevent a reference to the large transaction. alloc := new(txnEvent) diff --git a/v3/newrelic/json_object_writer_test.go b/v3/newrelic/json_object_writer_test.go new file mode 100644 index 000000000..fc2c9723c --- /dev/null +++ b/v3/newrelic/json_object_writer_test.go @@ -0,0 +1,26 @@ +package newrelic + +import ( + "bytes" + "testing" +) + +func BenchmarkStringFieldShort(b *testing.B) { + writer := jsonFieldsWriter{ + buf: bytes.NewBuffer(make([]byte, 300)), + } + + for i := 0; i < b.N; i++ { + writer.stringField("testkey", "this is a short string") + } +} + +func BenchmarkStringFieldLong(b *testing.B) { + writer := jsonFieldsWriter{ + buf: bytes.NewBuffer(make([]byte, 300)), + } + + for i := 0; i < b.N; i++ { + writer.stringField("testkey", "this is a long string that will capture the runtime performance impact that writing more bytes has on this function") + } +} diff --git a/v3/newrelic/log_event.go b/v3/newrelic/log_event.go new file mode 100644 index 000000000..1f7fabc9b --- /dev/null +++ b/v3/newrelic/log_event.go @@ -0,0 +1,121 @@ +// Copyright 2020 New Relic Corporation. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package newrelic + +import ( + "bytes" + "errors" + "fmt" + "strings" + "time" +) + +// Exported Constants for log decorators +const ( + // LogSeverityFieldName is the name of the log level field in New Relic logging JSON + LogSeverityFieldName = "level" + + // LogMessageFieldName is the name of the log message field in New Relic logging JSON + LogMessageFieldName = "message" + + // LogTimestampFieldName is the name of the timestamp field in New Relic logging JSON + LogTimestampFieldName = "timestamp" + + // LogSpanIDFieldName is the name of the span ID field in the New Relic logging JSON + LogSpanIDFieldName = "span.id" + + // LogTraceIDFieldName is the name of the trace ID field in the New Relic logging JSON + LogTraceIDFieldName = "trace.id" + + // LogSeverityUnknown is the value the log severity should be set to if no log severity is known + LogSeverityUnknown = "UNKNOWN" + + // MaxLogLength is the maximum number of bytes the log message is allowed to be + MaxLogLength = 32768 +) + +// internal variable names and constants +const ( + // number of bytes expected to be needed for the average log message + averageLogSizeEstimate = 400 +) + +type logEvent struct { + priority priority + timestamp int64 + severity string + message string + spanID string + traceID string +} + +// LogData contains data fields that are needed to generate log events. +type LogData struct { + Timestamp int64 // Optional: Unix Millisecond Timestamp; A timestamp will be generated if unset + Severity string // Optional: Severity of log being consumed + Message string // Optional: Message of log being consumed; Maximum size: 32768 Bytes. +} + +// WriteJSON prepares JSON in the format expected by the collector. 
+func (e *logEvent) WriteJSON(buf *bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('{') + w.stringField(LogSeverityFieldName, e.severity) + w.stringField(LogMessageFieldName, e.message) + + if len(e.spanID) > 0 { + w.stringField(LogSpanIDFieldName, e.spanID) + } + if len(e.traceID) > 0 { + w.stringField(LogTraceIDFieldName, e.traceID) + } + + w.needsComma = false + buf.WriteByte(',') + w.intField(LogTimestampFieldName, e.timestamp) + buf.WriteByte('}') +} + +// MarshalJSON is used for testing. +func (e *logEvent) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, averageLogSizeEstimate)) + e.WriteJSON(buf) + return buf.Bytes(), nil +} + +var ( + errNilLogData = errors.New("log data can not be nil") + errLogMessageTooLarge = fmt.Errorf("log message can not exceed %d bytes", MaxLogLength) +) + +func (data *LogData) toLogEvent() (logEvent, error) { + if data == nil { + return logEvent{}, errNilLogData + } + if data.Severity == "" { + data.Severity = LogSeverityUnknown + } + if len(data.Message) > MaxLogLength { + return logEvent{}, errLogMessageTooLarge + } + if data.Timestamp == 0 { + data.Timestamp = int64(timeToUnixMilliseconds(time.Now())) + } + + data.Message = strings.TrimSpace(data.Message) + data.Severity = strings.TrimSpace(data.Severity) + + event := logEvent{ + priority: newPriority(), + message: data.Message, + severity: data.Severity, + timestamp: data.Timestamp, + } + + return event, nil +} + +func (e *logEvent) MergeIntoHarvest(h *harvest) { + h.LogEvents.Add(e) +} diff --git a/v3/newrelic/log_event_test.go b/v3/newrelic/log_event_test.go new file mode 100644 index 000000000..d555c5cec --- /dev/null +++ b/v3/newrelic/log_event_test.go @@ -0,0 +1,198 @@ +package newrelic + +import ( + "bytes" + "fmt" + "math/rand" + "testing" + "time" +) + +func TestWriteJSON(t *testing.T) { + event := logEvent{ + severity: "INFO", + message: "test message", + timestamp: 123456, + } + actual, err := event.MarshalJSON() + if err != 
nil { + t.Error(err) + } + + expect := `{"level":"INFO","message":"test message","timestamp":123456}` + actualString := string(actual) + if expect != actualString { + t.Errorf("Log json did not build correctly: expecting %s, got %s", expect, actualString) + } +} + +func TestToLogEvent(t *testing.T) { + type testcase struct { + name string + data LogData + expectEvent logEvent + expectErr error + skipTimestamp bool + } + + testcases := []testcase{ + { + name: "context nil", + data: LogData{ + Timestamp: 123456, + Severity: "info", + Message: "test 123", + }, + expectEvent: logEvent{ + timestamp: 123456, + severity: "info", + message: "test 123", + }, + }, + { + name: "severity empty", + data: LogData{ + Timestamp: 123456, + Message: "test 123", + }, + expectEvent: logEvent{ + timestamp: 123456, + severity: "UNKNOWN", + message: "test 123", + }, + }, + { + name: "no timestamp", + data: LogData{ + Severity: "info", + Message: "test 123", + }, + expectEvent: logEvent{ + severity: "info", + message: "test 123", + }, + skipTimestamp: true, + }, + { + name: "message too large", + data: LogData{ + Timestamp: 123456, + Severity: "info", + Message: randomString(32769), + }, + expectErr: errLogMessageTooLarge, + }, + } + + for _, testcase := range testcases { + actualEvent, err := testcase.data.toLogEvent() + + if testcase.expectErr != err { + t.Error(fmt.Errorf("%s: expected error %v, got %v", testcase.name, testcase.expectErr, err)) + } + + if testcase.expectErr == nil { + expect := testcase.expectEvent + if expect.message != actualEvent.message { + t.Error(fmt.Errorf("%s: expected message %s, got %s", testcase.name, expect.message, actualEvent.message)) + } + if expect.severity != actualEvent.severity { + t.Error(fmt.Errorf("%s: expected severity %s, got %s", testcase.name, expect.severity, actualEvent.severity)) + } + if actualEvent.timestamp == 0 { + t.Errorf("timestamp was not set on test %s", testcase.name) + } + if expect.timestamp != actualEvent.timestamp && 
!testcase.skipTimestamp { + t.Error(fmt.Errorf("%s: expected timestamp %d, got %d", testcase.name, expect.timestamp, actualEvent.timestamp)) + } + } + } +} + +var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randomString(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) +} + +func TestWriteJSONWithTrace(t *testing.T) { + event := logEvent{ + severity: "INFO", + message: "test message", + timestamp: 123456, + traceID: "123Ad234", + spanID: "adf3441", + } + actual, err := event.MarshalJSON() + if err != nil { + t.Error(err) + } + + expect := `{"level":"INFO","message":"test message","span.id":"adf3441","trace.id":"123Ad234","timestamp":123456}` + actualString := string(actual) + if expect != actualString { + t.Errorf("Log json did not build correctly: expecting %s, got %s", expect, actualString) + } +} + +func BenchmarkToLogEvent(b *testing.B) { + data := LogData{ + Timestamp: 123456, + Severity: "INFO", + Message: "test message", + } + + b.ReportAllocs() + + for n := 0; n < b.N; n++ { + data.toLogEvent() + } + +} + +func recordLogBenchmarkHelper(b *testing.B, data *LogData, h *harvest) { + event, _ := data.toLogEvent() + event.MergeIntoHarvest(h) +} + +func BenchmarkRecordLog(b *testing.B) { + harvest := newHarvest(time.Now(), testHarvestCfgr) + data := LogData{ + Timestamp: 123456, + Severity: "INFO", + Message: "test message", + } + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + recordLogBenchmarkHelper(b, &data, harvest) + } +} + +func BenchmarkWriteJSON(b *testing.B) { + data := LogData{ + Timestamp: 123456, + Severity: "INFO", + Message: "This is a log message that represents an estimate for how long the average log message is. 
The average log payload is 700 bytese.", + } + + event, err := data.toLogEvent() + if err != nil { + b.Fail() + } + + buf := bytes.NewBuffer(make([]byte, 0, averageLogSizeEstimate)) + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + event.WriteJSON(buf) + } +} diff --git a/v3/newrelic/log_events.go b/v3/newrelic/log_events.go new file mode 100644 index 000000000..fccc857da --- /dev/null +++ b/v3/newrelic/log_events.go @@ -0,0 +1,240 @@ +// Copyright 2020 New Relic Corporation. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package newrelic + +import ( + "bytes" + "container/heap" + "time" + + "github.com/newrelic/go-agent/v3/internal/jsonx" +) + +type commonAttributes struct { + entityGUID string + entityName string + hostname string +} + +type logEvents struct { + numSeen int + failedHarvests int + severityCount map[string]int + commonAttributes + config loggingConfig + logs logEventHeap +} + +// NumSeen returns the number of events seen +func (events *logEvents) NumSeen() float64 { + return float64(events.numSeen) +} + +// NumSaved returns the number of events that will be harvested for this cycle +func (events *logEvents) NumSaved() float64 { + return float64(len(events.logs)) +} + +// Adds logging metrics to a harvest metric table if appropriate +func (events *logEvents) RecordLoggingMetrics(metrics *metricTable) { + // This is done to avoid accessing locks 3 times instead of once + seen := events.NumSeen() + saved := events.NumSaved() + + if events.config.collectMetrics && metrics != nil { + metrics.addCount(logsSeen, seen, forced) + for k, v := range events.severityCount { + severitySeen := logsSeen + "/" + k + metrics.addCount(severitySeen, float64(v), forced) + } + } + + if events.config.collectEvents { + metrics.addCount(logsDropped, seen-saved, forced) + } +} + +type logEventHeap []logEvent + +// TODO: when go 1.18 becomes the minimum supported version, re-write to make a generic heap implementation +// for all event 
heaps, to de-duplicate this code +//func (events *logEvents) +func (h logEventHeap) Len() int { return len(h) } +func (h logEventHeap) Less(i, j int) bool { return h[i].priority.isLowerPriority(h[j].priority) } +func (h logEventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +// To avoid using interface reflection, this function is used in place of Push() to add log events to the heap +// Please replace all of this when the minimum supported version of go is 1.18 so that we can use generics +func (h *logEventHeap) Add(event *logEvent) { + // when fewer events are in the heap than the capacity, do not heap sort + if len(*h) < cap(*h) { + // copy log event onto event heap + *h = append(*h, *event) + if len(*h) == cap(*h) { + // Delay heap initialization so that we can have + // deterministic ordering for integration tests (the max + // is not being reached). + heap.Init(*h) + } + return + } + + if event.priority.isLowerPriority((*h)[0].priority) { + return + } + + (*h)[0] = *event + heap.Fix(h, 0) +} + +// Push and Pop are unused: only heap.Init and heap.Fix are used. +func (h logEventHeap) Pop() interface{} { return nil } +func (h logEventHeap) Push(x interface{}) {} + +func newLogEvents(ca commonAttributes, loggingConfig loggingConfig) *logEvents { + return &logEvents{ + commonAttributes: ca, + config: loggingConfig, + severityCount: map[string]int{}, + logs: make(logEventHeap, 0, loggingConfig.maxLogEvents), + } +} + +func (events *logEvents) capacity() int { + return events.config.maxLogEvents +} + +func (events *logEvents) Add(e *logEvent) { + // always collect this but do not report logging metrics when disabled + events.numSeen++ + events.severityCount[e.severity]++ + + // Do not collect log events when the harvest capacity is intentionally set to 0 + // or the collection of events is explicitly disabled + if events.capacity() == 0 || !events.config.collectEvents { + // Configurable event harvest limits may be zero. 
+ return + } + + // Add logs to event heap + events.logs.Add(e) +} + +func (events *logEvents) mergeFailed(other *logEvents) { + fails := other.failedHarvests + 1 + if fails >= failedEventsAttemptsLimit { + return + } + events.failedHarvests = fails + events.Merge(other) +} + +// Merge two logEvents together +func (events *logEvents) Merge(other *logEvents) { + allSeen := events.NumSeen() + other.NumSeen() + for _, e := range other.logs { + events.Add(&e) + } + + events.numSeen = int(allSeen) +} + +func (events *logEvents) CollectorJSON(agentRunID string) ([]byte, error) { + if len(events.logs) == 0 { + return nil, nil + } + + estimate := averageLogSizeEstimate * len(events.logs) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + + if events.numSeen == 0 { + return nil, nil + } + + buf.WriteByte('[') + buf.WriteByte('{') + buf.WriteString(`"common":`) + buf.WriteByte('{') + buf.WriteString(`"attributes":`) + buf.WriteByte('{') + buf.WriteString(`"entity.guid":`) + jsonx.AppendString(buf, events.entityGUID) + buf.WriteByte(',') + buf.WriteString(`"entity.name":`) + jsonx.AppendString(buf, events.entityName) + buf.WriteByte(',') + buf.WriteString(`"hostname":`) + jsonx.AppendString(buf, events.hostname) + buf.WriteByte('}') + buf.WriteByte('}') + buf.WriteByte(',') + buf.WriteString(`"logs":`) + buf.WriteByte('[') + for i, e := range events.logs { + // If severity is empty string, then this is not a user provided entry, and is empty. + // Do not write json to buffer in this case. + if e.severity != "" { + e.WriteJSON(buf) + if i != len(events.logs)-1 { + buf.WriteByte(',') + } + } + + } + buf.WriteByte(']') + buf.WriteByte('}') + buf.WriteByte(']') + return buf.Bytes(), nil +} + +// split splits the events into two. NOTE! The two event pools are not valid +// priority queues, and should only be used to create JSON, not for adding any +// events. 
+func (events *logEvents) split() (*logEvents, *logEvents) { + // numSeen is conserved: e1.numSeen + e2.numSeen == events.numSeen. + sc1, sc2 := splitSeverityCount(events.severityCount) + e1 := &logEvents{ + numSeen: len(events.logs) / 2, + failedHarvests: events.failedHarvests / 2, + severityCount: sc1, + commonAttributes: events.commonAttributes, + logs: make([]logEvent, len(events.logs)/2), + } + e2 := &logEvents{ + numSeen: events.numSeen - e1.numSeen, + failedHarvests: events.failedHarvests - e1.failedHarvests, + severityCount: sc2, + commonAttributes: events.commonAttributes, + logs: make([]logEvent, len(events.logs)-len(e1.logs)), + } + // Note that slicing is not used to ensure that length == capacity for + // e1.events and e2.events. + copy(e1.logs, events.logs) + copy(e2.logs, events.logs[len(events.logs)/2:]) + + return e1, e2 +} + +// splits the contents and counts of the severity map +func splitSeverityCount(severityCount map[string]int) (map[string]int, map[string]int) { + count1 := map[string]int{} + count2 := map[string]int{} + for k, v := range severityCount { + count1[k] = v / 2 + count2[k] = v - count1[k] + } + return count1, count2 +} + +func (events *logEvents) MergeIntoHarvest(h *harvest) { + h.LogEvents.mergeFailed(events) +} + +func (events *logEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return events.CollectorJSON(agentRunID) +} + +func (events *logEvents) EndpointMethod() string { + return cmdLogEvents +} diff --git a/v3/newrelic/log_events_test.go b/v3/newrelic/log_events_test.go new file mode 100644 index 000000000..2a796d06a --- /dev/null +++ b/v3/newrelic/log_events_test.go @@ -0,0 +1,486 @@ +// Copyright 2020 New Relic Corporation. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package newrelic + +import ( + "fmt" + "testing" + "time" + + "github.com/newrelic/go-agent/v3/internal" +) + +var ( + testGUID = "testGUID" + testEntityName = "testEntityName" + testHostname = "testHostname" + testCommonAttributes = commonAttributes{ + entityGUID: testGUID, + entityName: testEntityName, + hostname: testHostname, + } + + commonJSON = `[{"common":{"attributes":{"entity.guid":"testGUID","entity.name":"testEntityName","hostname":"testHostname"}},"logs":[` + + infoLevel = "INFO" +) + +func loggingConfigEnabled(limit int) loggingConfig { + return loggingConfig{ + loggingEnabled: true, + localEnrichment: true, + collectEvents: true, + collectMetrics: true, + maxLogEvents: limit, + } +} + +func sampleLogEvent(priority priority, severity, message string) *logEvent { + return &logEvent{ + priority: priority, + severity: severity, + message: message, + timestamp: 123456, + } +} + +func TestBasicLogEvents(t *testing.T) { + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(5)) + events.Add(sampleLogEvent(0.5, infoLevel, "message1")) + events.Add(sampleLogEvent(0.5, infoLevel, "message2")) + + json, err := events.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + + expected := commonJSON + + `{"level":"INFO","message":"message1","timestamp":123456},` + + `{"level":"INFO","message":"message2","timestamp":123456}]}]` + + if string(json) != expected { + t.Error(string(json), expected) + } + if events.numSeen != 2 { + t.Error(events.numSeen) + } + if events.NumSaved() != 2 { + t.Error(events.NumSaved()) + } +} + +func TestEmptyLogEvents(t *testing.T) { + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) + json, err := events.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + if nil != json { + t.Error(string(json)) + } + if 0 != events.numSeen { + t.Error(events.numSeen) + } + if 0 != events.NumSaved() { + t.Error(events.NumSaved()) + } +} + +// The events 
with the highest priority should make it: a, c, e +func TestSamplingLogEvents(t *testing.T) { + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(3)) + + events.Add(sampleLogEvent(0.999999, infoLevel, "a")) + events.Add(sampleLogEvent(0.1, infoLevel, "b")) + events.Add(sampleLogEvent(0.9, infoLevel, "c")) + events.Add(sampleLogEvent(0.2, infoLevel, "d")) + events.Add(sampleLogEvent(0.8, infoLevel, "e")) + events.Add(sampleLogEvent(0.3, infoLevel, "f")) + + json, err := events.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + expect := commonJSON + + `{"level":"INFO","message":"e","timestamp":123456},` + + `{"level":"INFO","message":"a","timestamp":123456},` + + `{"level":"INFO","message":"c","timestamp":123456}]}` + + `]` + if string(json) != expect { + t.Error(string(json), expect) + } + if 6 != events.numSeen { + t.Error(events.numSeen) + } + if 3 != events.NumSaved() { + t.Error(events.NumSaved()) + } +} + +func TestMergeEmptyLogEvents(t *testing.T) { + e1 := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) + e2 := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) + e1.Merge(e2) + json, err := e1.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + if nil != json { + t.Error(string(json)) + } + if 0 != e1.numSeen { + t.Error(e1.numSeen) + } + if 0 != e1.NumSaved() { + t.Error(e1.NumSaved()) + } +} + +func TestMergeFullLogEvents(t *testing.T) { + e1 := newLogEvents(testCommonAttributes, loggingConfigEnabled(2)) + e2 := newLogEvents(testCommonAttributes, loggingConfigEnabled(3)) + + e1.Add(sampleLogEvent(0.1, infoLevel, "a")) + e1.Add(sampleLogEvent(0.15, infoLevel, "b")) + e1.Add(sampleLogEvent(0.25, infoLevel, "c")) + + e2.Add(sampleLogEvent(0.06, infoLevel, "d")) + e2.Add(sampleLogEvent(0.12, infoLevel, "e")) + e2.Add(sampleLogEvent(0.18, infoLevel, "f")) + e2.Add(sampleLogEvent(0.24, infoLevel, "g")) + + e1.Merge(e2) + json, err := e1.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + 
} + + // expect the highest priority events: c, g + expect := commonJSON + + `{"level":"INFO","message":"g","timestamp":123456},` + + `{"level":"INFO","message":"c","timestamp":123456}]}]` + + if string(json) != expect { + t.Error(string(json)) + } + if e1.numSeen != 7 { + t.Error(e1.numSeen) + } + if e1.NumSaved() != 2 { + t.Error(e1.NumSaved()) + } +} + +func TestLogEventMergeFailedSuccess(t *testing.T) { + e1 := newLogEvents(testCommonAttributes, loggingConfigEnabled(2)) + e2 := newLogEvents(testCommonAttributes, loggingConfigEnabled(3)) + + e1.Add(sampleLogEvent(0.1, infoLevel, "a")) + e1.Add(sampleLogEvent(0.15, infoLevel, "b")) + e1.Add(sampleLogEvent(0.25, infoLevel, "c")) + + e2.Add(sampleLogEvent(0.06, infoLevel, "d")) + e2.Add(sampleLogEvent(0.12, infoLevel, "e")) + e2.Add(sampleLogEvent(0.18, infoLevel, "f")) + e2.Add(sampleLogEvent(0.24, infoLevel, "g")) + + e1.mergeFailed(e2) + + json, err := e1.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + // expect the highest priority events: c, g + expect := commonJSON + + `{"level":"INFO","message":"g","timestamp":123456},` + + `{"level":"INFO","message":"c","timestamp":123456}]}]` + + if string(json) != expect { + t.Error(string(json)) + } + if 7 != e1.numSeen { + t.Error(e1.numSeen) + } + if 2 != e1.NumSaved() { + t.Error(e1.NumSaved()) + } + if 1 != e1.failedHarvests { + t.Error(e1.failedHarvests) + } +} + +func TestLogEventMergeFailedLimitReached(t *testing.T) { + e1 := newLogEvents(testCommonAttributes, loggingConfigEnabled(2)) + e2 := newLogEvents(testCommonAttributes, loggingConfigEnabled(3)) + + e1.Add(sampleLogEvent(0.1, infoLevel, "a")) + e1.Add(sampleLogEvent(0.15, infoLevel, "b")) + e1.Add(sampleLogEvent(0.25, infoLevel, "c")) + + e2.Add(sampleLogEvent(0.06, infoLevel, "d")) + e2.Add(sampleLogEvent(0.12, infoLevel, "e")) + e2.Add(sampleLogEvent(0.18, infoLevel, "f")) + e2.Add(sampleLogEvent(0.24, infoLevel, "g")) + + e2.failedHarvests = failedEventsAttemptsLimit + + 
e1.mergeFailed(e2) + + json, err := e1.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + expect := commonJSON + + `{"level":"INFO","message":"b","timestamp":123456},` + + `{"level":"INFO","message":"c","timestamp":123456}]}]` + + if string(json) != expect { + t.Error(string(json)) + } + if 3 != e1.numSeen { + t.Error(e1.numSeen) + } + if 2 != e1.NumSaved() { + t.Error(e1.NumSaved()) + } + if 0 != e1.failedHarvests { + t.Error(e1.failedHarvests) + } +} + +func TestLogEventsSplitFull(t *testing.T) { + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) + for i := 0; i < 15; i++ { + priority := priority(float32(i) / 10.0) + events.Add(sampleLogEvent(priority, "INFO", fmt.Sprint(priority))) + } + // Test that the capacity cannot exceed the max. + if 10 != events.capacity() { + t.Error(events.capacity()) + } + e1, e2 := events.split() + j1, err1 := e1.CollectorJSON(agentRunID) + j2, err2 := e2.CollectorJSON(agentRunID) + if err1 != nil || err2 != nil { + t.Fatal(err1, err2) + } + expect1 := commonJSON + + `{"level":"INFO","message":"0.5","timestamp":123456},` + + `{"level":"INFO","message":"0.7","timestamp":123456},` + + `{"level":"INFO","message":"0.6","timestamp":123456},` + + `{"level":"INFO","message":"0.8","timestamp":123456},` + + `{"level":"INFO","message":"0.9","timestamp":123456}]}]` + if string(j1) != expect1 { + t.Error(string(j1)) + } + + expect2 := commonJSON + + `{"level":"INFO","message":"1.1","timestamp":123456},` + + `{"level":"INFO","message":"1.4","timestamp":123456},` + + `{"level":"INFO","message":"1","timestamp":123456},` + + `{"level":"INFO","message":"1.3","timestamp":123456},` + + `{"level":"INFO","message":"1.2","timestamp":123456}]}]` + if string(j2) != expect2 { + t.Error(string(j2)) + } +} + +// TODO: When miniumu supported go version is 1.18, make an event heap in GO generics and remove all this duplicate code +// interfaces are too slow :( +func TestLogEventsSplitNotFullOdd(t *testing.T) { + events := 
newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) + for i := 0; i < 7; i++ { + priority := priority(float32(i) / 10.0) + events.Add(sampleLogEvent(priority, "INFO", fmt.Sprint(priority))) + } + e1, e2 := events.split() + j1, err1 := e1.CollectorJSON(agentRunID) + j2, err2 := e2.CollectorJSON(agentRunID) + if err1 != nil || err2 != nil { + t.Fatal(err1, err2) + } + expect1 := commonJSON + + `{"level":"INFO","message":"0","timestamp":123456},` + + `{"level":"INFO","message":"0.1","timestamp":123456},` + + `{"level":"INFO","message":"0.2","timestamp":123456}]}]` + if string(j1) != expect1 { + t.Error(string(j1)) + } + + expect2 := commonJSON + + `{"level":"INFO","message":"0.3","timestamp":123456},` + + `{"level":"INFO","message":"0.4","timestamp":123456},` + + `{"level":"INFO","message":"0.5","timestamp":123456},` + + `{"level":"INFO","message":"0.6","timestamp":123456}]}]` + if string(j2) != expect2 { + t.Error(string(j2)) + } +} + +func TestLogEventsSplitNotFullEven(t *testing.T) { + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(10)) + for i := 0; i < 8; i++ { + priority := priority(float32(i) / 10.0) + events.Add(sampleLogEvent(priority, "INFO", fmt.Sprint(priority))) + } + e1, e2 := events.split() + j1, err1 := e1.CollectorJSON(agentRunID) + j2, err2 := e2.CollectorJSON(agentRunID) + if err1 != nil || err2 != nil { + t.Fatal(err1, err2) + } + expect1 := commonJSON + + `{"level":"INFO","message":"0","timestamp":123456},` + + `{"level":"INFO","message":"0.1","timestamp":123456},` + + `{"level":"INFO","message":"0.2","timestamp":123456},` + + `{"level":"INFO","message":"0.3","timestamp":123456}]}]` + if string(j1) != expect1 { + t.Error(string(j1)) + } + + expect2 := commonJSON + + `{"level":"INFO","message":"0.4","timestamp":123456},` + + `{"level":"INFO","message":"0.5","timestamp":123456},` + + `{"level":"INFO","message":"0.6","timestamp":123456},` + + `{"level":"INFO","message":"0.7","timestamp":123456}]}]` + if string(j2) != 
expect2 { + t.Error(string(j2)) + } +} + +func TestLogEventsZeroCapacity(t *testing.T) { + // Analytics events methods should be safe when configurable harvest + // settings have an event limit of zero. + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(0)) + if 0 != events.NumSeen() || 0 != events.NumSaved() || 0 != events.capacity() { + t.Error(events.NumSeen(), events.NumSaved(), events.capacity()) + } + events.Add(sampleLogEvent(0.5, "INFO", "TEST")) + if 1 != events.NumSeen() || 0 != events.NumSaved() || 0 != events.capacity() { + t.Error(events.NumSeen(), events.NumSaved(), events.capacity()) + } + js, err := events.CollectorJSON("agentRunID") + if err != nil || js != nil { + t.Error(err, string(js)) + } +} + +func TestLogEventCollectionDisabled(t *testing.T) { + // Analytics events methods should be safe when configurable harvest + // settings have an event limit of zero. + config := loggingConfigEnabled(5) + config.collectEvents = false + events := newLogEvents(testCommonAttributes, config) + if 0 != events.NumSeen() || 0 != len(events.severityCount) || 0 != events.NumSaved() || 5 != events.capacity() { + t.Error(events.NumSeen(), len(events.severityCount), events.NumSaved(), events.capacity()) + } + events.Add(sampleLogEvent(0.5, "INFO", "TEST")) + if 1 != events.NumSeen() || 1 != len(events.severityCount) || 0 != events.NumSaved() || 5 != events.capacity() { + t.Error(events.NumSeen(), len(events.severityCount), events.NumSaved(), events.capacity()) + } + js, err := events.CollectorJSON("agentRunID") + if err != nil || js != nil { + t.Error(err, string(js)) + } +} + +func BenchmarkLogEventsAdd(b *testing.B) { + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) + event := &logEvent{ + priority: newPriority(), + timestamp: 123456, + severity: "INFO", + message: "test message", + spanID: "Ad300dra7re89", + traceID: "2234iIhfLlejrJ0", + } + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; 
i++ { + events.Add(event) + } +} + +func BenchmarkLogEventsCollectorJSON(b *testing.B) { + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) + for i := 0; i < internal.MaxLogEvents; i++ { + event := &logEvent{ + priority: newPriority(), + timestamp: 123456, + severity: "INFO", + message: "This is a log message that represents an estimate for how long the average log message is. The average log payload is 700 bytese.", + spanID: "Ad300dra7re89", + traceID: "2234iIhfLlejrJ0", + } + + events.Add(event) + } + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + js, err := events.CollectorJSON(agentRunID) + if nil != err { + b.Fatal(err, js) + } + } +} + +func BenchmarkLogEventCollectorJSON_OneEvent(b *testing.B) { + events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents)) + event := &logEvent{ + priority: newPriority(), + timestamp: 123456, + severity: "INFO", + message: "This is a log message that represents an estimate for how long the average log message is. 
The average log payload is 700 bytes.", + spanID: "Ad300dra7re89", + traceID: "2234iIhfLlejrJ0", + } + + events.Add(event) + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + js, err := events.CollectorJSON(agentRunID) + if nil != err { + b.Fatal(err, js) + } + } +} + +func BenchmarkRecordLoggingMetrics(b *testing.B) { + now := time.Now() + fixedHarvestTypes := harvestMetricsTraces & harvestTxnEvents & harvestSpanEvents & harvestLogEvents + h := newHarvest(now, harvestConfig{ + ReportPeriods: map[harvestTypes]time.Duration{ + fixedHarvestTypes: fixedHarvestPeriod, + harvestLogEvents: time.Second * 5, + }, + LoggingConfig: loggingConfigEnabled(3), + }) + + for i := 0; i < internal.MaxLogEvents; i++ { + logEvent := logEvent{ + newPriority(), + 123456, + "INFO", + fmt.Sprintf("User 'xyz' logged in %d", i), + "123456789ADF", + "ADF09876565", + } + + h.LogEvents.Add(&logEvent) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.ReportAllocs() + h.LogEvents.RecordLoggingMetrics(h.Metrics) + } +} diff --git a/v3/newrelic/metric_names.go b/v3/newrelic/metric_names.go index e7a6856fb..7bb053e23 100644 --- a/v3/newrelic/metric_names.go +++ b/v3/newrelic/metric_names.go @@ -3,6 +3,8 @@ package newrelic +import "fmt" + const ( apdexRollup = "Apdex" apdexPrefix = "Apdex/" @@ -61,8 +63,54 @@ const ( supportCustomEventLimit = "Supportability/EventHarvest/CustomEventData/HarvestLimit" supportErrorEventLimit = "Supportability/EventHarvest/ErrorEventData/HarvestLimit" supportSpanEventLimit = "Supportability/EventHarvest/SpanEventData/HarvestLimit" + supportLogEventLimit = "Supportability/EventHarvest/LogEventData/HarvestLimit" + + // Logging Metrics https://source.datanerd.us/agents/agent-specs/pull/570/files + // User Facing + logsSeen = "Logging/lines" + logsDropped = "Logging/Forwarding/Dropped" + + // Supportability (at connect) + supportLogging = "Supportability/Logging/Golang" + supportLoggingMetrics = "Supportability/Logging/Metrics/Golang" + 
supportLogForwarding = "Supportability/Logging/Forwarding/Golang" + supportLogDecorating = "Supportability/Logging/LocalDecorating/Golang" + + // Supportability (once per harvest) + logEventsSeen = "Supportability/Logging/Forwarding/Seen" + logEventsSent = "Supportability/Logging/Forwarding/Sent" ) +func supportMetric(metrics *metricTable, b bool, metricName string) { + if b { + metrics.addSingleCount(metricName, forced) + } +} + +// logSupport contains final configuration settings for +// logging features for log data generation and supportability +// metrics generation. +type loggingConfig struct { + loggingEnabled bool // application logging features are enabled + collectEvents bool // collection of log event data is enabled + collectMetrics bool // collection of log metric data is enabled + localEnrichment bool // local log enrichment is enabled + maxLogEvents int // maximum number of log events allowed to be collected +} + +// Logging metrics that are generated at connect response +func (cfg loggingConfig) connectMetrics(ms *metricTable) { + supportMetric(ms, cfg.loggingEnabled, supportLogging) + supportMetric(ms, cfg.collectEvents, supportLogForwarding) + supportMetric(ms, cfg.collectMetrics, supportLoggingMetrics) + supportMetric(ms, cfg.localEnrichment, supportLogDecorating) +} + +func loggingFrameworkMetric(ms *metricTable, framework string) { + name := fmt.Sprintf("%s/%s", supportLogging, framework) + supportMetric(ms, true, name) +} + // distributedTracingSupport is used to track distributed tracing activity for // supportability. 
type distributedTracingSupport struct { @@ -93,12 +141,6 @@ func (dts distributedTracingSupport) isEmpty() bool { return (distributedTracingSupport{}) == dts } -func supportMetric(metrics *metricTable, b bool, metricName string) { - if b { - metrics.addSingleCount(metricName, forced) - } -} - func (dts distributedTracingSupport) createMetrics(ms *metricTable) { // Distributed Tracing Supportability Metrics supportMetric(ms, dts.AcceptPayloadSuccess, "Supportability/DistributedTrace/AcceptPayload/Success") diff --git a/v3/newrelic/sampler_test.go b/v3/newrelic/sampler_test.go index 43ca42eb5..4a4a2ce8d 100644 --- a/v3/newrelic/sampler_test.go +++ b/v3/newrelic/sampler_test.go @@ -33,7 +33,7 @@ func TestGetSample(t *testing.T) { func TestMetricsCreated(t *testing.T) { now := time.Now() - h := newHarvest(now, dfltHarvestCfgr) + h := newHarvest(now, testHarvestCfgr) stats := systemStats{ heapObjects: 5 * 1000, @@ -71,7 +71,7 @@ func TestMetricsCreated(t *testing.T) { func TestMetricsCreatedEmpty(t *testing.T) { now := time.Now() - h := newHarvest(now, dfltHarvestCfgr) + h := newHarvest(now, testHarvestCfgr) stats := systemStats{} stats.MergeIntoHarvest(h) diff --git a/v3/newrelic/tracing.go b/v3/newrelic/tracing.go index ee8db4983..c17f2a2ad 100644 --- a/v3/newrelic/tracing.go +++ b/v3/newrelic/tracing.go @@ -21,32 +21,32 @@ import ( // https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Events-PORTED.md // https://newrelic.atlassian.net/wiki/display/eng/Agent+Support+for+Synthetics%3A+Forced+Transaction+Traces+and+Analytic+Events type txnEvent struct { + HasError bool FinalName string + Attrs *attributes + CrossProcess txnCrossProcess + BetterCAT betterCAT Start time.Time Duration time.Duration TotalTime time.Duration Queuing time.Duration Zone apdexZone - Attrs *attributes externalCallCount uint64 externalDuration time.Duration datastoreCallCount uint64 datastoreDuration time.Duration - CrossProcess txnCrossProcess - BetterCAT betterCAT - HasError 
bool } // betterCAT stores the transaction's priority and all fields related // to a DistributedTracer's Cross-Application Trace. type betterCAT struct { Enabled bool - Priority priority Sampled bool - Inbound *payload + Priority priority TxnID string TraceID string TransportType string + Inbound *payload } // SetTraceAndTxnIDs takes a single 32 character ID and uses it to @@ -79,6 +79,7 @@ type txnData struct { rootSpanID string rootSpanErrData *errorData SpanEvents []*spanEvent + logs logEventHeap customSegments map[string]*metricData datastoreSegments map[datastoreMetricKey]*metricData diff --git a/v3/newrelic/transaction.go b/v3/newrelic/transaction.go index 82e738ed1..39a769862 100644 --- a/v3/newrelic/transaction.go +++ b/v3/newrelic/transaction.go @@ -119,6 +119,29 @@ func (txn *Transaction) AddAttribute(key string, value interface{}) { txn.thread.logAPIError(txn.thread.AddAttribute(key, value), "add attribute", nil) } +// RecordLog records the data from a single log line. +// This consumes a LogData object that should be configured +// with data taken from a logging framework. +// +// Certian parts of this feature can be turned off based on your +// config settings. Record log is capable of recording log events, +// as well as log metrics depending on how your application is +// configured. +func (txn *Transaction) RecordLog(log LogData) { + event, err := log.toLogEvent() + if err != nil { + txn.Application().app.Error("unable to record log", map[string]interface{}{ + "reason": err.Error(), + }) + return + } + + metadata := txn.GetTraceMetadata() + event.spanID = metadata.SpanID + event.traceID = metadata.TraceID + txn.thread.StoreLog(&event) +} + // SetWebRequestHTTP marks the transaction as a web transaction. If // the request is non-nil, SetWebRequestHTTP will additionally collect // details on request attributes, url, and method. If headers are