Commit: Safe Reads
iamemilio committed May 31, 2022
1 parent 635f319 commit 2a78e04
Showing 6 changed files with 49 additions and 84 deletions.
2 changes: 1 addition & 1 deletion v3/examples/server/main.go
@@ -289,7 +289,7 @@ func main() {
// For go versions 1.17+ use time.Now().UnixMilli() to generate timestamps
timestamp := time.Now().UnixNano() / int64(time.Millisecond)

-data := &newrelic.LogData{
+data := newrelic.LogData{
Timestamp: timestamp,
Message: "Log Message",
Severity: "info",
2 changes: 1 addition & 1 deletion v3/integrations/logcontext-v2/nrzerolog/hook.go
@@ -32,5 +32,5 @@ func (h NewRelicHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
Context: h.Context,
}

-h.App.RecordLog(&data)
+h.App.RecordLog(data)
}
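For context, a minimal sketch of how this hook is attached to a zerolog logger so that every log event is forwarded to RecordLog; the helper function and package name are illustrative, and app is assumed to be an already-configured application:

package example

import (
	"os"

	"github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrzerolog"
	"github.com/newrelic/go-agent/v3/newrelic"
	"github.com/rs/zerolog"
)

// newLogger wires the New Relic hook into a zerolog logger. Every event
// written through the returned logger is passed to app.RecordLog by the
// hook's Run method shown in the diff above.
func newLogger(app *newrelic.Application) zerolog.Logger {
	return zerolog.New(os.Stdout).Hook(nrzerolog.NewRelicHook{App: app})
}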
4 changes: 2 additions & 2 deletions v3/newrelic/application.go
@@ -82,14 +82,14 @@ func (app *Application) RecordCustomMetric(name string, value float64) {
// config settings. Record log is capable of recording log events,
// as well as log metrics depending on how your application is
// configured.
-func (app *Application) RecordLog(logEvent *LogData) {
+func (app *Application) RecordLog(logEvent LogData) {
if nil == app {
return
}
if nil == app.app {
return
}
-err := app.app.RecordLog(logEvent)
+err := app.app.RecordLog(&logEvent)
if err != nil {
app.app.Error("unable to record log", map[string]interface{}{
"reason": err.Error(),
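Because RecordLog now takes LogData by value, the agent records a copy of the event and the caller keeps ownership of its own struct. A minimal sketch of recording a log event directly; the app name, license lookup, and the log-forwarding config option are assumptions for illustration:

package main

import (
	"log"
	"os"
	"time"

	"github.com/newrelic/go-agent/v3/newrelic"
)

func main() {
	app, err := newrelic.NewApplication(
		newrelic.ConfigAppName("record-log-example"),
		newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")),
		newrelic.ConfigAppLogForwardingEnabled(true), // assumed available in this agent version
	)
	if err != nil {
		log.Fatal(err)
	}

	// RecordLog now accepts LogData by value rather than *LogData.
	app.RecordLog(newrelic.LogData{
		Timestamp: time.Now().UnixMilli(), // Go 1.17+; see the server example above
		Message:   "Log Message",
		Severity:  "info",
	})

	app.Shutdown(10 * time.Second)
}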
2 changes: 1 addition & 1 deletion v3/newrelic/harvest.go
@@ -95,7 +95,7 @@ func (h *harvest) Ready(now time.Time) *harvest {
h.CustomEvents = newCustomEvents(h.CustomEvents.capacity())
}
if 0 != types&harvestLogEvents {
-h.LogEvents.RecordLoggingMetrics(h.Metrics, forced)
+h.LogEvents.RecordLoggingMetrics(h.Metrics)
ready.LogEvents = h.LogEvents
h.LogEvents = newLogEvents(h.LogEvents.commonAttributes, h.LogEvents.config)
}
31 changes: 24 additions & 7 deletions v3/newrelic/log_events.go
@@ -31,25 +31,38 @@ type logEvents struct {
}

// NumSeen returns the number of events seen
-func (events *logEvents) NumSeen() float64 { return float64(events.numSeen) }
+func (events *logEvents) NumSeen() int {
+events.rwMutex.RLock()
+defer events.rwMutex.RUnlock()
+return events.numSeen
+}

// NumSaved returns the number of events that will be harvested for this cycle
-func (events *logEvents) NumSaved() float64 { return float64(len(events.logs)) }
+func (events *logEvents) NumSaved() int {
+events.rwMutex.RLock()
+defer events.rwMutex.RUnlock()
+return len(events.logs)
+}

// Adds logging metrics to a harvest metric table if appropriate
-func (events *logEvents) RecordLoggingMetrics(metrics *metricTable, forced metricForce) {
+func (events *logEvents) RecordLoggingMetrics(metrics *metricTable) {
+events.rwMutex.RLock()
+defer events.rwMutex.RUnlock()

+// Read these once so the lock is taken one time instead of three
+seen := float64(events.numSeen)
+saved := float64(len(events.logs))

if events.config.collectMetrics && metrics != nil {
-metrics.addCount(logsSeen, events.NumSeen(), forced)
+metrics.addCount(logsSeen, seen, forced)
for k, v := range events.severityCount {
severitySeen := logsSeen + "/" + k
metrics.addCount(severitySeen, float64(v), forced)
}
}

if events.config.collectEvents {
-metrics.addCount(logsDropped, events.NumSeen()-events.NumSaved(), forced)
+metrics.addCount(logsDropped, seen-saved, forced)
}
}

@@ -120,16 +133,20 @@ func (events *logEvents) mergeFailed(other *logEvents) {
events.Merge(other)
}

// Merge two logEvents together
func (events *logEvents) Merge(other *logEvents) {
-allSeen := events.numSeen + other.numSeen

+allSeen := events.NumSeen() + other.NumSeen()
for _, e := range other.logs {
events.Add(&e)
}

events.numSeen = allSeen
}

func (events *logEvents) CollectorJSON(agentRunID string) ([]byte, error) {
+events.rwMutex.RLock()
+defer events.rwMutex.RUnlock()

if 0 == len(events.logs) {
return nil, nil
}
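The changes above guard every read of the shared counters with the events' read-write mutex, so harvest-time reads are safe while other goroutines are still adding events. A standalone sketch of the same read-lock accessor pattern; the eventStore type and its fields are illustrative, not the agent's internals:

package main

import (
	"fmt"
	"sync"
)

// eventStore stands in for logEvents: a shared buffer whose counters can be
// read while other goroutines are adding entries.
type eventStore struct {
	mu      sync.RWMutex
	numSeen int
	logs    []string
}

// Add takes the write lock because it mutates shared state.
func (s *eventStore) Add(msg string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.numSeen++
	s.logs = append(s.logs, msg)
}

// NumSeen and NumSaved take only the read lock, so concurrent readers do not
// block each other but still observe a consistent value.
func (s *eventStore) NumSeen() int {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.numSeen
}

func (s *eventStore) NumSaved() int {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return len(s.logs)
}

func main() {
	store := &eventStore{}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 100; j++ {
				store.Add("log line")
			}
		}()
	}
	wg.Wait()
	fmt.Println(store.NumSeen(), store.NumSaved()) // 400 400
}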
92 changes: 20 additions & 72 deletions v3/newrelic/log_events_test.go
@@ -145,6 +145,7 @@ func TestMergeFullLogEvents(t *testing.T) {
e1.Add(sampleLogEvent(0.1, infoLevel, "a"))
e1.Add(sampleLogEvent(0.15, infoLevel, "b"))
e1.Add(sampleLogEvent(0.25, infoLevel, "c"))

e2.Add(sampleLogEvent(0.06, infoLevel, "d"))
e2.Add(sampleLogEvent(0.12, infoLevel, "e"))
e2.Add(sampleLogEvent(0.18, infoLevel, "f"))
@@ -164,10 +165,10 @@
if string(json) != expect {
t.Error(string(json))
}
-if 7 != e1.numSeen {
+if e1.numSeen != 7 {
t.Error(e1.numSeen)
}
-if 2 != e1.NumSaved() {
+if e1.NumSaved() != 2 {
t.Error(e1.NumSaved())
}
}
@@ -529,58 +530,26 @@ func assertInt(expect int, actual int) error {
return nil
}

func BenchmarkAddMaximumLogEvent(b *testing.B) {
eventList := make([]*logEvent, internal.MaxLogEvents)
for n := 0; n < internal.MaxTxnEvents; n++ {
eventList[n] = &logEvent{
priority: newPriority(),
timestamp: 123456,
severity: "INFO",
message: "test message",
spanID: "Ad300dra7re89",
traceID: "2234iIhfLlejrJ0",
}
}
func BenchmarkLogEventsAdd(b *testing.B) {
events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents))

b.ReportAllocs()
b.ResetTimer()

for n := 0; n < internal.MaxTxnEvents; n++ {
events.Add(eventList[n])
}
}

func BenchmarkWriteMaximumLogEventJSON(b *testing.B) {
eventList := make([]*logEvent, internal.MaxLogEvents)
for n := 0; n < internal.MaxTxnEvents; n++ {
eventList[n] = &logEvent{
priority: newPriority(),
timestamp: 123456,
severity: "INFO",
message: "test message",
spanID: "Ad300dra7re89",
traceID: "2234iIhfLlejrJ0",
}
}
events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents))

for n := 0; n < internal.MaxTxnEvents; n++ {
events.Add(eventList[n])
event := &logEvent{
priority: newPriority(),
timestamp: 123456,
severity: "INFO",
message: "test message",
spanID: "Ad300dra7re89",
traceID: "2234iIhfLlejrJ0",
}

b.ReportAllocs()
b.ResetTimer()

js, err := events.CollectorJSON(agentRunID)
if nil != err {
b.Fatal(err, js)
for i := 0; i < b.N; i++ {
events.Add(event)
}
}

func BenchmarkAddAndWriteLogEvent(b *testing.B) {
b.ReportAllocs()

func BenchmarkLogEventsCollectorJSON(b *testing.B) {
events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents))
event := &logEvent{
priority: newPriority(),
@@ -592,35 +561,14 @@ func BenchmarkAddAndWriteLogEvent(b *testing.B) {
}

events.Add(event)
js, err := events.CollectorJSON(agentRunID)
if nil != err {
b.Fatal(err, js)
}
}

func BenchmarkAddAndWriteMaximumLogEvents(b *testing.B) {

eventList := make([]*logEvent, internal.MaxLogEvents)
events := newLogEvents(testCommonAttributes, loggingConfigEnabled(internal.MaxLogEvents))
for n := 0; n < internal.MaxTxnEvents; n++ {
eventList[n] = &logEvent{
priority: newPriority(),
timestamp: 123456,
severity: "INFO",
message: "test message",
spanID: "Ad300dra7re89",
traceID: "2234iIhfLlejrJ0",
}
}

b.ReportAllocs()
b.ResetTimer()

for n := 0; n < internal.MaxTxnEvents; n++ {
events.Add(eventList[n])
}

js, err := events.CollectorJSON(agentRunID)
if nil != err {
b.Fatal(err, js)
for i := 0; i < b.N; i++ {
js, err := events.CollectorJSON(agentRunID)
if nil != err {
b.Fatal(err, js)
}
}
}
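
The rewritten benchmarks follow the standard testing.B shape: build fixtures once, call b.ReportAllocs and b.ResetTimer, then drive only the measured operation inside the b.N loop so Go's benchmark runner controls the iteration count. A standalone sketch of that pattern; the appendOnly fixture is illustrative, while the real benchmarks exercise logEvents.Add and logEvents.CollectorJSON:

package example

import "testing"

// appendOnly is a tiny illustrative fixture standing in for logEvents.
type appendOnly struct {
	items []int
}

func (a *appendOnly) Add(v int) { a.items = append(a.items, v) }

func BenchmarkAppendOnlyAdd(b *testing.B) {
	a := &appendOnly{} // setup happens before the timer starts

	b.ReportAllocs() // report allocations per operation
	b.ResetTimer()   // exclude the setup above from the measurement

	for i := 0; i < b.N; i++ {
		a.Add(i)
	}
}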
