-
Notifications
You must be signed in to change notification settings - Fork 0
/
main.go
153 lines (127 loc) · 3.9 KB
/
main.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"path/filepath"
"sync"
"syscall"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/joho/godotenv"
"github.com/kristijorgji/docker_metrics_processor/models"
"github.com/kristijorgji/docker_metrics_processor/parser"
"github.com/kristijorgji/docker_metrics_processor/repositories"
)
// Defaults for the CLI flags registered in setupEnv.
const (
	defaultBatchSize          = 1000
	defaultMaxFilesInParallel = 10
)

var (
	// Flag-backed configuration, populated by setupEnv.
	batchSize          int
	maxFilesInParallel int
	inputPath          string
	shouldDeleteOnEnd  bool // zero value (false) doubles as the flag default

	// Shared storage handle, initialized in main.
	metricsRepository *repositories.MetricsRepository

	// Counter for the crude parallelism throttle in main.
	filesBeingProcessed = 0
)
// main walks inputPath, parses every log file found, and inserts the
// resulting metrics into storage, processing up to maxFilesInParallel
// files concurrently.
func main() {
	start := time.Now()
	setupEnv()

	// bye reports total runtime; it runs both on normal return (defer)
	// and on SIGINT/SIGTERM (goroutine below).
	bye := func() {
		log.Printf("Total execution took %s\n", time.Since(start))
	}
	defer bye()

	// Buffered channel: signal.Notify does not block sending, so an
	// unbuffered channel can drop a signal delivered before the receiver
	// is ready (go vet reports this).
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-c
		bye()
		os.Exit(0)
	}()

	log.Printf("Starting with batch size of %d, processing max %d files in parallel, input path %s, shouldDeleteOnEnd %t\n", batchSize, maxFilesInParallel, inputPath, shouldDeleteOnEnd)

	metricsRepository = &repositories.MetricsRepository{}
	metricsRepository.Init()
	defer metricsRepository.Close()

	// NOTE(review): this map is written by every processLog goroutine
	// without synchronization — a data race; it should be guarded by a
	// mutex or replaced by a results channel.
	insertedRowsPerUnitOfWork := make(map[string]int)

	var wg sync.WaitGroup
	err := filepath.Walk(inputPath,
		func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if info.IsDir() {
				return nil
			}
			// Count the file BEFORE spawning so the cap includes it.
			// (Previously the counter reset to 0 after waiting without
			// counting the triggering file, so the count drifted and the
			// cap was effectively exceeded by one per batch.)
			filesBeingProcessed++
			wg.Add(1)
			log.Printf("Currently processing %d files in parallel\n", filesBeingProcessed)
			go processLog(&wg, insertedRowsPerUnitOfWork, path)
			// Crude throttle: once the cap is reached, drain the whole
			// batch before admitting more files.
			if filesBeingProcessed >= maxFilesInParallel {
				wg.Wait()
				filesBeingProcessed = 0
			}
			return nil
		})
	if err != nil {
		log.Println(err)
	}
	wg.Wait()

	totalInsertedRows := 0
	for _, v := range insertedRowsPerUnitOfWork {
		totalInsertedRows += v
	}
	log.Printf("Inserted total %d rows", totalInsertedRows)
}
// processLog parses the log file at path, inserts the resulting metrics
// into storage in batches of batchSize, records the row count in
// insertedRowsPerUnitOfWork, and optionally deletes the file afterwards.
// Intended to run as a goroutine; wg.Done is deferred.
func processLog(wg *sync.WaitGroup, insertedRowsPerUnitOfWork map[string]int, path string) {
	defer timeTrack(time.Now(), fmt.Sprintf("[%s] Finished processing", path))
	defer wg.Done()

	log.Printf("[%s] Started processing\n", path)
	metrics := parser.Parse(path)
	log.Printf("[%s] Parsed, will insert into storage\n", path)

	totalInsertedRows := 0
	// Pre-size to the batch cap so append never reallocates within a batch.
	batch := make([]*models.ServiceMetrics, 0, batchSize)
	for i := range metrics {
		batch = append(batch, &metrics[i])
		if len(batch) == batchSize {
			metricsRepository.InsertBatch(batch)
			log.Printf("[%s] Inserted %d rows", path, len(batch))
			totalInsertedRows += len(batch)
			// Fresh slice rather than batch[:0]: InsertBatch may retain
			// the slice, so the backing array must not be reused.
			batch = make([]*models.ServiceMetrics, 0, batchSize)
		}
	}
	// Flush the final partial batch.
	if len(batch) > 0 {
		metricsRepository.InsertBatch(batch)
		totalInsertedRows += len(batch)
		// BUG FIX: this previously logged batchSize instead of the actual
		// size of the final partial batch.
		log.Printf("[%s] Inserted %d rows", path, len(batch))
	}

	log.Printf("[%s] Inserted total %d rows", path, totalInsertedRows)
	// NOTE(review): concurrent map write — every processLog goroutine
	// writes this map unsynchronized; needs a mutex or a results channel.
	insertedRowsPerUnitOfWork[path] = totalInsertedRows

	if shouldDeleteOnEnd {
		log.Printf("[%s] Deleting now the log file", path)
		err := os.Remove(path)
		if err != nil {
			log.Print(err)
		} else {
			log.Printf("Deleted %s", path)
		}
	}
}
// setupEnv loads the .env file (panicking if absent, since the repository
// credentials come from it) and parses the CLI flags into the package-level
// configuration variables.
func setupEnv() {
	if err := godotenv.Load(); err != nil {
		log.Panic("Error loading .env file")
	}

	flag.BoolVar(&shouldDeleteOnEnd, "deleteOnEnd", shouldDeleteOnEnd, "if set all logs will be deleted upon successful parsing")
	flag.IntVar(&batchSize, "batchSize", defaultBatchSize, "the number of metrics to insert in storage in one batch")
	// BUG FIX: the original did `maxFilesInParallel = *flag.Int(...)`,
	// dereferencing the pointer BEFORE flag.Parse(), so the flag value was
	// silently ignored and the default was always used. IntVar binds the
	// variable so Parse fills it in correctly.
	flag.IntVar(&maxFilesInParallel, "maxFilesInParallel", defaultMaxFilesInParallel, "the number of files to process in parallel. Ex mysql allows 10 connections in parallel so makes no sense process more then 10 files in parallel")
	flag.StringVar(&inputPath, "inputPath", "input/", "folder path where logs are located")
	flag.Parse()
}
func timeTrack(start time.Time, name string) {
elapsed := time.Since(start)
log.Printf("%s. Took %s", name, elapsed)
}