
Commit

cleanup
Follow-up of #46 (#46 (comment))
gesellix committed Aug 24, 2019
1 parent 4ea8210 commit 742bac7
Showing 2 changed files with 28 additions and 32 deletions.
26 changes: 13 additions & 13 deletions couchdb-exporter.go

@@ -180,37 +180,37 @@ func init() {
 func main() {
 	var appAction = func(c *cli.Context) error {
 		var databases []string
-		if *&exporterConfig.databases != "" {
-			databases = strings.Split(*&exporterConfig.databases, ",")
+		if exporterConfig.databases != "" {
+			databases = strings.Split(exporterConfig.databases, ",")
 		}
 
 		exporter := lib.NewExporter(
-			*&exporterConfig.couchdbURI,
+			exporterConfig.couchdbURI,
 			lib.BasicAuth{
-				Username: *&exporterConfig.couchdbUsername,
-				Password: *&exporterConfig.couchdbPassword},
+				Username: exporterConfig.couchdbUsername,
+				Password: exporterConfig.couchdbPassword},
 			lib.CollectorConfig{
 				Databases:            databases,
-				CollectViews:         *&exporterConfig.databaseViews,
-				CollectSchedulerJobs: *&exporterConfig.schedulerJobs,
-				ConcurrentRequests:   *&exporterConfig.databaseConcurrentRequests,
+				CollectViews:         exporterConfig.databaseViews,
+				CollectSchedulerJobs: exporterConfig.schedulerJobs,
+				ConcurrentRequests:   exporterConfig.databaseConcurrentRequests,
 			},
-			*&exporterConfig.couchdbInsecure)
+			exporterConfig.couchdbInsecure)
 		prometheus.MustRegister(exporter)
 
-		http.Handle(*&exporterConfig.metricsEndpoint, promhttp.Handler())
+		http.Handle(exporterConfig.metricsEndpoint, promhttp.Handler())
 		http.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) {
 			_, err := fmt.Fprint(w, "OK")
 			if err != nil {
 				klog.Error(err)
 			}
 		})
 		http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
-			http.Error(w, fmt.Sprintf("Please GET %s", *&exporterConfig.metricsEndpoint), http.StatusNotFound)
+			http.Error(w, fmt.Sprintf("Please GET %s", exporterConfig.metricsEndpoint), http.StatusNotFound)
 		})
 
-		klog.Infof("Starting exporter at '%s' to read from CouchDB at '%s'", *&exporterConfig.listenAddress, *&exporterConfig.couchdbURI)
-		err := http.ListenAndServe(*&exporterConfig.listenAddress, nil)
+		klog.Infof("Starting exporter at '%s' to read from CouchDB at '%s'", exporterConfig.listenAddress, exporterConfig.couchdbURI)
+		err := http.ListenAndServe(exporterConfig.listenAddress, nil)
 		if err != nil {
 			klog.Fatal(err)
 		}
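The removed *& prefix is a no-op: & takes the address of the field and * immediately dereferences it again, so both spellings read the same value and the cleanup cannot change behavior. A minimal, self-contained sketch to illustrate this; the config struct below is hypothetical, not the exporter's actual configuration type:

// no_op_deref.go - minimal sketch; the "config" struct is hypothetical and
// only illustrates why dropping the "*&" prefix is behavior-preserving.
package main

import "fmt"

type config struct {
	databases string
}

func main() {
	exporterConfig := config{databases: "db1,db2"}

	// "&" takes the field's address, "*" dereferences it again:
	// the result is the same string value as reading the field directly.
	fmt.Println(*&exporterConfig.databases == exporterConfig.databases) // true
}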
34 changes: 15 additions & 19 deletions lib/couchdb-client.go

@@ -254,15 +254,16 @@ func (c *CouchdbClient) getStats(config CollectorConfig) (Stats, error) {
 	}
 }
 
+type dbStatsResult struct {
+	dbName  string
+	dbStats DatabaseStats
+	err     error
+}
+
 func (c *CouchdbClient) getDatabasesStatsByDbName(databases []string, concurrency uint) (map[string]DatabaseStats, error) {
 	dbStatsByDbName := make(map[string]DatabaseStats)
-	type result struct {
-		dbName  string
-		dbStats DatabaseStats
-		err     error
-	}
 	// Setup for concurrent scatter/gather scrapes, with concurrency limit
-	r := make(chan result, len(databases))
+	r := make(chan dbStatsResult, len(databases))
 	semaphore := NewSemaphore(concurrency) // semaphore to limit concurrency
 
 	// scatter
@@ -277,13 +278,13 @@ func (c *CouchdbClient) getDatabasesStatsByDbName(databases []string, concurrenc
 			data, err := c.Request("GET", fmt.Sprintf("%s/%s", c.BaseUri, dbName), nil)
 			semaphore.Release()
 			if err != nil {
-				r <- result{err: fmt.Errorf("error reading database '%s' stats: %v", dbName, err)}
+				r <- dbStatsResult{err: fmt.Errorf("error reading database '%s' stats: %v", dbName, err)}
 				return
 			}
 
 			err = json.Unmarshal(data, &dbStats)
 			if err != nil {
-				r <- result{err: fmt.Errorf("error unmarshalling database '%s' stats: %v", dbName, err)}
+				r <- dbStatsResult{err: fmt.Errorf("error unmarshalling database '%s' stats: %v", dbName, err)}
 				return
 			}
 			dbStats.DiskSizeOverhead = dbStats.DiskSize - dbStats.DataSize
@@ -292,7 +293,7 @@ func (c *CouchdbClient) getDatabasesStatsByDbName(databases []string, concurrenc
 			} else {
 				dbStats.CompactRunning = 0
 			}
-			r <- result{dbName, dbStats, nil}
+			r <- dbStatsResult{dbName, dbStats, nil}
 		}()
 	}
 	// gather
@@ -308,13 +309,8 @@ func (c *CouchdbClient) getDatabasesStatsByDbName(databases []string, concurrenc
 }
 
 func (c *CouchdbClient) enhanceWithViewUpdateSeq(dbStatsByDbName map[string]DatabaseStats, concurrency uint) error {
-	type result struct {
-		dbName  string
-		dbStats DatabaseStats
-		err     error
-	}
 	// Setup for concurrent scatter/gather scrapes, with concurrency limit
-	r := make(chan result, len(dbStatsByDbName))
+	r := make(chan dbStatsResult, len(dbStatsByDbName))
 	semaphore := NewSemaphore(concurrency) // semaphore to limit concurrency
 
 	// scatter
@@ -334,14 +330,14 @@ func (c *CouchdbClient) enhanceWithViewUpdateSeq(dbStatsByDbName map[string]Data
 			designDocData, err := c.Request("GET", fmt.Sprintf("%s/%s/_all_docs?%s", c.BaseUri, dbName, query), nil)
 			semaphore.Release()
 			if err != nil {
-				r <- result{err: fmt.Errorf("error reading database '%s' stats: %v", dbName, err)}
+				r <- dbStatsResult{err: fmt.Errorf("error reading database '%s' stats: %v", dbName, err)}
 				return
 			}
 
 			var designDocs DocsResponse
 			err = json.Unmarshal(designDocData, &designDocs)
 			if err != nil {
-				r <- result{err: fmt.Errorf("error unmarshalling design docs for database '%s': %v", dbName, err)}
+				r <- dbStatsResult{err: fmt.Errorf("error unmarshalling design docs for database '%s': %v", dbName, err)}
 				return
 			}
 			views := make(ViewStatsByDesignDocName)
@@ -393,7 +389,7 @@ func (c *CouchdbClient) enhanceWithViewUpdateSeq(dbStatsByDbName map[string]Data
 				for range row.Doc.Views {
 					res := <-v
 					if res.err != nil {
-						r <- result{err: res.err}
+						r <- dbStatsResult{err: res.err}
 						return
 					}
 					updateSeqByView[res.viewName] = res.updateSeq
@@ -408,7 +404,7 @@ func (c *CouchdbClient) enhanceWithViewUpdateSeq(dbStatsByDbName map[string]Data
 				<-done
 			}
 			dbStats.Views = views
-			r <- result{dbName, dbStats, nil}
+			r <- dbStatsResult{dbName, dbStats, nil}
 		}()
 	}
 	// gather
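The only change in this file is that the two identical, locally declared result structs are replaced by one package-level dbStatsResult type; the scatter/gather flow itself is untouched: every database gets its own goroutine, a semaphore caps the number of concurrent requests, and a buffered channel collects exactly one result per database. A simplified, self-contained sketch of that pattern follows; the names are hypothetical and a plain buffered channel stands in for the exporter's Semaphore helper:

// scatter_gather.go - simplified sketch of the pattern used above; names are
// hypothetical and a buffered channel replaces the exporter's Semaphore type.
package main

import "fmt"

// one shared result type for all scatter/gather scrapes, like dbStatsResult
type fetchResult struct {
	name string
	size int
	err  error
}

func fetchAll(names []string, concurrency int) (map[string]int, error) {
	r := make(chan fetchResult, len(names)) // buffered: senders never block
	sem := make(chan struct{}, concurrency) // caps in-flight work

	// scatter: one goroutine per name, limited by the semaphore
	for _, name := range names {
		name := name // capture loop variable (needed before Go 1.22)
		go func() {
			sem <- struct{}{}
			size := len(name) // stand-in for the real HTTP request
			<-sem
			r <- fetchResult{name: name, size: size}
		}()
	}

	// gather: read exactly one result per name, fail on the first error
	sizes := make(map[string]int, len(names))
	for range names {
		res := <-r
		if res.err != nil {
			return nil, res.err
		}
		sizes[res.name] = res.size
	}
	return sizes, nil
}

func main() {
	sizes, err := fetchAll([]string{"users", "orders", "_replicator"}, 2)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(sizes)
}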
