Update code to fix almost all pre-existing lint errors #2008

Merged: 31 commits, merged on Feb 24, 2020
Commits (31)
0fc660e
Cleanup linting errors around deadcode
zendern Jan 19, 2020
43520a2
Replaced by keysFn and if the config defines it then it will distribute
zendern Jan 19, 2020
c3564f3
Hold over from when readBatch was also the iterator
zendern Jan 19, 2020
782bd54
Necessary to fix the false sharing problem
zendern Jan 19, 2020
18d997f
Removing unused code
zendern Jan 19, 2020
a60dcbb
Cleanup all gosimple suggestions
zendern Jan 19, 2020
2a3a929
Fixing all errcheck
zendern Jan 20, 2020
e77ea36
Fix most staticcheck lint issues
zendern Jan 20, 2020
62b7efd
Cleanup deprecated call to snappy.NewWriter
zendern Jan 20, 2020
ff69505
Remove rev from Makefile
zendern Jan 20, 2020
b666336
Reorder imports
zendern Jan 21, 2020
fb4161c
Ignoring this for now
zendern Jan 21, 2020
ac3f181
Explicitly ignoring this error
zendern Jan 21, 2020
e8413ff
Require noerror since this is a test
zendern Jan 23, 2020
9bfd5ef
Switch over to use require.NoError
zendern Jan 23, 2020
2dd3252
Move func to test class since that is only place it was used
zendern Jan 23, 2020
4dfa046
Log warning if save to cache errors
zendern Jan 23, 2020
a8e0f53
Condense a little
zendern Jan 23, 2020
0e81880
Use returned error instead of capturing it
zendern Jan 23, 2020
4d3838f
Bringing back ctx and adding comment
zendern Jan 24, 2020
c8e8f56
Log error if changing ring state fails when Leaving
zendern Jan 24, 2020
aa2ecc6
If context deadline exceeded return the error
zendern Jan 24, 2020
5d857af
Can't defer this otherwise we will have no data
zendern Jan 24, 2020
f48805a
Comment to make it clear why this nolint was added
zendern Jan 24, 2020
6e5b06f
Refactor method out
zendern Jan 24, 2020
f910457
io.Copy added to global errcheck exclude
zendern Jan 24, 2020
7d4ef02
If error, don't do anything else
zendern Jan 28, 2020
eea746e
Cleanup unused function
zendern Jan 28, 2020
71c71e2
Adding tracer to global excludes
zendern Jan 28, 2020
2e09e2c
Cleanup post rebase
zendern Feb 13, 2020
fec6e23
Ratelimiter returns resource exhausted error
zendern Feb 24, 2020
2 changes: 2 additions & 0 deletions .errcheck-exclude
@@ -1,3 +1,5 @@
io/ioutil.WriteFile
io/ioutil.ReadFile
(github.com/go-kit/kit/log.Logger).Log
io.Copy
(github.com/opentracing/opentracing-go.Tracer).Inject
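For context, each line in .errcheck-exclude names a function whose dropped error return errcheck should not flag; adding io.Copy and Tracer.Inject here is what lets later commits remove the explicit discards. A minimal sketch (illustrative only, not code from this PR) of the kind of call the io.Copy entry now permits:

package main

import (
	"bytes"
	"io"
	"strings"
)

func main() {
	var buf bytes.Buffer
	// Without the exclude entry, errcheck would demand `_, _ = io.Copy(...)`;
	// with io.Copy excluded globally, the bare call passes the lint run.
	io.Copy(&buf, strings.NewReader("example"))
}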
2 changes: 1 addition & 1 deletion Makefile
@@ -122,7 +122,7 @@ protos: $(PROTO_GOS)

lint:
misspell -error docs
golangci-lint run --new-from-rev ed7c302fd968 --build-tags netgo --timeout=5m --enable golint --enable misspell --enable gofmt
golangci-lint run --build-tags netgo --timeout=5m --enable golint --enable misspell --enable gofmt

# Validate Kubernetes spec files. Requires:
# https://kubeval.instrumenta.dev
4 changes: 2 additions & 2 deletions cmd/cortex/main_test.go
@@ -161,13 +161,13 @@ func captureOutput(t *testing.T) *capturedOutput {
co.wg.Add(1)
go func() {
defer co.wg.Done()
_, _ = io.Copy(&co.stdoutBuf, stdoutR)
io.Copy(&co.stdoutBuf, stdoutR)
}()

co.wg.Add(1)
go func() {
defer co.wg.Done()
_, _ = io.Copy(&co.stderrBuf, stderrR)
io.Copy(&co.stderrBuf, stderrR)
}()

return co
3 changes: 2 additions & 1 deletion cmd/test-exporter/main.go
@@ -52,5 +52,6 @@ func main() {
}))

prometheus.MustRegister(runner)
server.Run()
err = server.Run()
util.CheckFatal("running server", err)
}
4 changes: 3 additions & 1 deletion pkg/alertmanager/alertmanager.go
@@ -78,6 +78,8 @@ func init() {
// from disk, we just ignore web-based reload signals. Config updates are
// only applied externally via ApplyConfig().
case <-webReload:
default:
continue
}
}
}()
@@ -176,7 +178,7 @@ func clusterWait(p *cluster.Peer, timeout time.Duration) func() time.Duration {

// ApplyConfig applies a new configuration to an Alertmanager.
func (am *Alertmanager) ApplyConfig(userID string, conf *config.Config) error {
templateFiles := make([]string, len(conf.Templates), len(conf.Templates))
templateFiles := make([]string, len(conf.Templates))
if len(conf.Templates) > 0 {
for i, t := range conf.Templates {
templateFiles[i] = filepath.Join(am.cfg.DataDir, "templates", userID, t)
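The templateFiles line above is a gosimple fix (S1019): when the requested capacity equals the length, the third argument to make is redundant. A small illustrative sketch, not taken from the PR:

package main

import "fmt"

func main() {
	templates := []string{"a.tmpl", "b.tmpl"}

	// Before: make([]string, len(templates), len(templates)) — capacity equals
	// length, so the explicit capacity argument adds nothing.
	files := make([]string, len(templates))
	for i, t := range templates {
		files[i] = "templates/" + t
	}
	fmt.Println(files)
}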
4 changes: 2 additions & 2 deletions pkg/chunk/aws/dynamodb_storage_client.go
@@ -245,7 +245,7 @@ func (a dynamoDBStorageClient) BatchWrite(ctx context.Context, input chunk.Write
if awsErr, ok := err.(awserr.Error); ok && ((awsErr.Code() == dynamodb.ErrCodeProvisionedThroughputExceededException) || request.Retryable()) {
logWriteRetry(ctx, requests)
unprocessed.TakeReqs(requests, -1)
a.writeThrottle.WaitN(ctx, len(requests))
_ = a.writeThrottle.WaitN(ctx, len(requests))
backoff.Wait()
continue
} else if ok && awsErr.Code() == validationException {
@@ -269,7 +269,7 @@ func (a dynamoDBStorageClient) BatchWrite(ctx context.Context, input chunk.Write
unprocessedItems := dynamoDBWriteBatch(resp.UnprocessedItems)
if len(unprocessedItems) > 0 {
logWriteRetry(ctx, unprocessedItems)
a.writeThrottle.WaitN(ctx, unprocessedItems.Len())
_ = a.writeThrottle.WaitN(ctx, unprocessedItems.Len())
unprocessed.TakeReqs(unprocessedItems, -1)
}

3 changes: 2 additions & 1 deletion pkg/chunk/aws/dynamodb_storage_client_test.go
@@ -17,7 +17,8 @@ const (

func TestChunksPartialError(t *testing.T) {
fixture := dynamoDBFixture(0, 10, 20)
defer fixture.Teardown()
defer testutils.TeardownFixture(t, fixture)

_, client, err := testutils.Setup(fixture, tableName)
require.NoError(t, err)

2 changes: 1 addition & 1 deletion pkg/chunk/aws/dynamodb_table_client.go
@@ -80,7 +80,7 @@ func (d dynamoTableClient) backoffAndRetry(ctx context.Context, fn func(context.

func (d callManager) backoffAndRetry(ctx context.Context, fn func(context.Context) error) error {
if d.limiter != nil { // Tests will have a nil limiter.
d.limiter.Wait(ctx)
_ = d.limiter.Wait(ctx)
}

backoff := util.NewBackoff(ctx, d.backoffConfig)
1 change: 1 addition & 0 deletions pkg/chunk/aws/mock.go
@@ -56,6 +56,7 @@ func (a dynamoDBStorageClient) setErrorParameters(provisionedErr, errAfter int)
}
}

//nolint:unused //Leaving this around in the case we need to create a table via mock this is useful.
func (m *mockDynamoDBClient) createTable(name string) {
m.mtx.Lock()
defer m.mtx.Unlock()
2 changes: 1 addition & 1 deletion pkg/chunk/cache/fifo_cache.go
@@ -228,7 +228,7 @@ func (c *FifoCache) Get(ctx context.Context, key string) (interface{}, bool) {
index, ok := c.index[key]
if ok {
updated := c.entries[index].updated
if c.validity == 0 || time.Now().Sub(updated) < c.validity {
if c.validity == 0 || time.Since(updated) < c.validity {
return c.entries[index].value, true
}

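time.Since(t) is shorthand for time.Now().Sub(t), which is the gosimple/staticcheck suggestion (S1012) applied here and again in memcached.go below. A tiny sketch, illustrative only:

package main

import (
	"fmt"
	"time"
)

func main() {
	updated := time.Now().Add(-30 * time.Second)
	validity := time.Minute

	// Equivalent to time.Now().Sub(updated) < validity, just more idiomatic.
	if time.Since(updated) < validity {
		fmt.Println("entry is still valid")
	}
}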
8 changes: 0 additions & 8 deletions pkg/chunk/cache/fifo_cache_test.go
@@ -2,7 +2,6 @@ package cache

import (
"context"
"fmt"
"strconv"
"testing"
"time"
@@ -88,10 +87,3 @@ func TestFifoCacheExpiry(t *testing.T) {
_, ok = c.Get(ctx, strconv.Itoa(0))
require.False(t, ok)
}

func (c *FifoCache) print() {
fmt.Println("first", c.first, "last", c.last)
for i, entry := range c.entries {
fmt.Printf(" %d -> key: %s, value: %v, next: %d, prev: %d\n", i, entry.key, entry.value, entry.next, entry.prev)
}
}
4 changes: 2 additions & 2 deletions pkg/chunk/cache/instrumented.go
@@ -75,7 +75,7 @@ func (i *instrumentedCache) Store(ctx context.Context, keys []string, bufs [][]b
}

method := i.name + ".store"
instr.CollectedRequest(ctx, method, requestDuration, instr.ErrorCode, func(ctx context.Context) error {
_ = instr.CollectedRequest(ctx, method, requestDuration, instr.ErrorCode, func(ctx context.Context) error {
Contributor:
I question why Store() has no error return.

Contributor:
FWIW from pkg/chunk/cache/cache.go

// Cache byte arrays by key.
//
// NB we intentionally do not return errors in this interface - caching is best
// effort by definition.  We found that when these methods did return errors,
// the caller would just log them - so its easier for implementation to do that.
// Whats more, we found partially successful Fetchs were often treated as failed
// when they returned an error.
type Cache interface {
	Store(ctx context.Context, key []string, buf [][]byte)
	Fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missing []string)
	Stop() error
}

Contributor Author:
@bboreham just double checking: are we good with this? As per the comment above, it was very intentional.

sp := ot.SpanFromContext(ctx)
sp.LogFields(otlog.Int("keys", len(keys)))
i.Cache.Store(ctx, keys, bufs)
@@ -91,7 +91,7 @@ func (i *instrumentedCache) Fetch(ctx context.Context, keys []string) ([]string,
method = i.name + ".fetch"
)

instr.CollectedRequest(ctx, method, requestDuration, instr.ErrorCode, func(ctx context.Context) error {
_ = instr.CollectedRequest(ctx, method, requestDuration, instr.ErrorCode, func(ctx context.Context) error {
sp := ot.SpanFromContext(ctx)
sp.LogFields(otlog.Int("keys requested", len(keys)))

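Because the Cache interface quoted in the discussion above deliberately returns no error (caching is best effort), the wrapper methods have nowhere to propagate the error from CollectedRequest, hence the explicit `_ =`. A minimal self-contained sketch of that shape — collectedRequest below is a stand-in for the real instrument helper, not its actual implementation:

package sketch

import "context"

// collectedRequest stands in for instr.CollectedRequest: run f, record metrics,
// and hand back f's error (which a best-effort cache has no way to return).
func collectedRequest(ctx context.Context, method string, f func(context.Context) error) error {
	return f(ctx)
}

type bestEffortCache struct {
	name  string
	inner interface {
		Store(ctx context.Context, keys []string, bufs [][]byte)
	}
}

// Store has no error return, so the wrapper's error is discarded explicitly,
// which satisfies errcheck while leaving the interface unchanged.
func (c *bestEffortCache) Store(ctx context.Context, keys []string, bufs [][]byte) {
	_ = collectedRequest(ctx, c.name+".store", func(ctx context.Context) error {
		c.inner.Store(ctx, keys, bufs)
		return nil
	})
}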
12 changes: 8 additions & 4 deletions pkg/chunk/cache/memcached.go
@@ -36,7 +36,7 @@ type observableVecCollector struct {
func (observableVecCollector) Register() {}
func (observableVecCollector) Before(method string, start time.Time) {}
func (o observableVecCollector) After(method, statusCode string, start time.Time) {
o.v.WithLabelValues(method, statusCode).Observe(time.Now().Sub(start).Seconds())
o.v.WithLabelValues(method, statusCode).Observe(time.Since(start).Seconds())
}

// MemcachedConfig is config to make a Memcached
@@ -135,7 +135,7 @@ func memcacheStatusCode(err error) string {

// Fetch gets keys from the cache. The keys that are found must be in the order of the keys requested.
func (c *Memcached) Fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missed []string) {
instr.CollectedRequest(ctx, "Memcache.Get", c.requestDuration, memcacheStatusCode, func(ctx context.Context) error {
_ = instr.CollectedRequest(ctx, "Memcache.Get", c.requestDuration, memcacheStatusCode, func(ctx context.Context) error {
if c.cfg.BatchSize == 0 {
found, bufs, missed = c.fetch(ctx, keys)
return nil
@@ -149,7 +149,7 @@ func (c *Memcached) Fetch(ctx context.Context, keys []string) (found []string, b

func (c *Memcached) fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missed []string) {
var items map[string]*memcache.Item
instr.CollectedRequest(ctx, "Memcache.GetMulti", c.requestDuration, memcacheStatusCode, func(_ context.Context) error {
err := instr.CollectedRequest(ctx, "Memcache.GetMulti", c.requestDuration, memcacheStatusCode, func(_ context.Context) error {
sp := opentracing.SpanFromContext(ctx)
sp.LogFields(otlog.Int("keys requested", len(keys)))

@@ -166,6 +166,10 @@ func (c *Memcached) fetch(ctx context.Context, keys []string) (found []string, b
return err
})

if err != nil {
return found, bufs, keys
}

for _, key := range keys {
item, ok := items[key]
if ok {
@@ -248,7 +252,7 @@ func (c *Memcached) Stop() {
// HashKey hashes key into something you can store in memcached.
func HashKey(key string) string {
hasher := fnv.New64a()
hasher.Write([]byte(key)) // This'll never error.
_, _ = hasher.Write([]byte(key)) // This'll never error.

// Hex because memcache errors for the bytes produced by the hash.
return hex.EncodeToString(hasher.Sum(nil))
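The net effect of the fetch change above: the error from the instrumented GetMulti call is now captured, and when it is non-nil every requested key is reported as missed rather than the whole batch being treated as found. A condensed sketch of that flow against the gomemcache client (assumed shape, not the verbatim Cortex code):

package sketch

import "github.com/bradfitz/gomemcache/memcache"

// fetchSketch mirrors the new behaviour: if GetMulti fails outright, every key is
// reported as missed so callers fall back to the backing store.
func fetchSketch(client *memcache.Client, keys []string) (found []string, bufs [][]byte, missed []string) {
	items, err := client.GetMulti(keys)
	if err != nil {
		return nil, nil, keys // whole batch missed
	}
	for _, key := range keys {
		if item, ok := items[key]; ok {
			found = append(found, key)
			bufs = append(bufs, item.Value)
		} else {
			missed = append(missed, key)
		}
	}
	return found, bufs, missed
}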
4 changes: 2 additions & 2 deletions pkg/chunk/cache/redis_cache_test.go
@@ -19,9 +19,9 @@ func TestRedisCache(t *testing.T) {

conn := redigomock.NewConn()
conn.Clear()
pool := redis.NewPool(func() (redis.Conn, error) {
pool := &redis.Pool{Dial: func() (redis.Conn, error) {
return conn, nil
}, 10)
}, MaxIdle: 10}

keys := []string{"key1", "key2", "key3"}
bufs := [][]byte{[]byte("data1"), []byte("data2"), []byte("data3")}
1 change: 0 additions & 1 deletion pkg/chunk/cassandra/storage_client.go
@@ -294,7 +294,6 @@ func (s *StorageClient) query(ctx context.Context, query chunk.IndexQuery, callb

// readBatch represents a batch of rows read from Cassandra.
type readBatch struct {
consumed bool
rangeValue []byte
value []byte
}
6 changes: 5 additions & 1 deletion pkg/chunk/chunk_store.go
@@ -118,14 +118,17 @@ func (c *store) Put(ctx context.Context, chunks []Chunk) error {

// PutOne implements ChunkStore
func (c *store) PutOne(ctx context.Context, from, through model.Time, chunk Chunk) error {
log, ctx := spanlogger.New(ctx, "ChunkStore.PutOne")
chunks := []Chunk{chunk}

err := c.storage.PutChunks(ctx, chunks)
if err != nil {
return err
}

c.writeBackCache(ctx, chunks)
if cacheErr := c.writeBackCache(ctx, chunks); cacheErr != nil {
level.Warn(log).Log("msg", "could not store chunks in chunk cache", "err", cacheErr)
}

writeReqs, err := c.calculateIndexEntries(chunk.UserID, from, through, chunk)
if err != nil {
@@ -252,6 +255,7 @@ func (c *store) LabelNamesForMetricName(ctx context.Context, userID string, from
}

func (c *store) validateQueryTimeRange(ctx context.Context, userID string, from *model.Time, through *model.Time) (bool, error) {
//nolint:ineffassign,staticcheck //Leaving ctx even though we don't currently use it, we want to make it available for when we might need it and hopefully will ensure us using the correct context at that time
log, ctx := spanlogger.New(ctx, "store.validateQueryTimeRange")
defer log.Span.Finish()

5 changes: 3 additions & 2 deletions pkg/chunk/chunk_store_test.go
@@ -711,7 +711,8 @@ func BenchmarkIndexCaching(b *testing.B) {
b.ResetTimer()

for i := 0; i < b.N; i++ {
store.Put(ctx, []Chunk{fooChunk1})
err := store.Put(ctx, []Chunk{fooChunk1})
require.NoError(b, err)
}
}

@@ -813,7 +814,7 @@ func TestStoreMaxLookBack(t *testing.T) {
chunks, err = storeWithLookBackLimit.Get(ctx, userID, now.Add(-time.Hour), now, matchers...)
require.NoError(t, err)
require.Equal(t, 1, len(chunks))
chunks[0].Through.Equal(now)
require.Equal(t, now, chunks[0].Through)
}

func benchmarkParseIndexEntries(i int64, b *testing.B) {
3 changes: 2 additions & 1 deletion pkg/chunk/chunk_test.go
@@ -268,7 +268,8 @@ func BenchmarkEncode(b *testing.B) {

for i := 0; i < b.N; i++ {
chunk.encoded = nil
chunk.Encode()
err := chunk.Encode()
require.NoError(b, err)
}
}

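The benchmark fixes in chunk_store_test.go and chunk_test.go rely on require.NoError accepting any require.TestingT, which *testing.B satisfies, so a benchmark can fail fast on an unexpected error. A small sketch with a hypothetical helper, not code from the PR:

package sketch

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// doEncode is a hypothetical stand-in for the chunk.Encode() call being benchmarked.
func doEncode() error { return nil }

func BenchmarkEncodeSketch(b *testing.B) {
	for i := 0; i < b.N; i++ {
		// *testing.B implements require.TestingT, so require works in benchmarks too.
		require.NoError(b, doEncode())
	}
}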
13 changes: 6 additions & 7 deletions pkg/chunk/composite_store_test.go
@@ -6,6 +6,8 @@ import (
"reflect"
"testing"

"github.com/stretchr/testify/require"

"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/weaveworks/common/test"
@@ -180,7 +182,8 @@ func TestCompositeStore(t *testing.T) {
} {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
have := []result{}
tc.cs.forStores(model.TimeFromUnix(tc.from), model.TimeFromUnix(tc.through), collect(&have))
err := tc.cs.forStores(model.TimeFromUnix(tc.from), model.TimeFromUnix(tc.through), collect(&have))
require.NoError(t, err)
if !reflect.DeepEqual(tc.want, have) {
t.Fatalf("wrong stores - %s", test.Diff(tc.want, have))
}
@@ -231,16 +234,12 @@ func TestCompositeStoreLabels(t *testing.T) {
} {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
have, err := cs.LabelNamesForMetricName(context.Background(), "", model.TimeFromUnix(tc.from), model.TimeFromUnix(tc.through), "")
if err != nil {
t.Fatalf("err - %s", err)
}
require.NoError(t, err)
if !reflect.DeepEqual(tc.want, have) {
t.Fatalf("wrong label names - %s", test.Diff(tc.want, have))
}
have, err = cs.LabelValuesForMetricName(context.Background(), "", model.TimeFromUnix(tc.from), model.TimeFromUnix(tc.through), "", "")
if err != nil {
t.Fatalf("err - %s", err)
}
require.NoError(t, err)
if !reflect.DeepEqual(tc.want, have) {
t.Fatalf("wrong label values - %s", test.Diff(tc.want, have))
}
2 changes: 1 addition & 1 deletion pkg/chunk/encoding/bigchunk_test.go
@@ -84,7 +84,7 @@ func BenchmarkBiggerChunkMemory(b *testing.B) {
// printSize calculates various sizes of the chunk when encoded, and in memory.
func (b *bigchunk) printSize() {
var buf bytes.Buffer
b.Marshal(&buf)
_ = b.Marshal(&buf)

var size, allocd int
for _, c := range b.chunks {
2 changes: 2 additions & 0 deletions pkg/chunk/encoding/chunk_test.go
@@ -117,6 +117,8 @@ func testChunkEncoding(t *testing.T, encoding Encoding, samples int) {

bs1 := buf.Bytes()
chunk, err = NewForEncoding(encoding)
require.NoError(t, err)

err = chunk.UnmarshalFromBuf(bs1)
require.NoError(t, err)

2 changes: 1 addition & 1 deletion pkg/chunk/encoding/varbit.go
@@ -13,7 +13,7 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//nolint //Since this was copied from Prometheus leave it as is
package encoding

import (
2 changes: 1 addition & 1 deletion pkg/chunk/encoding/varbit_helpers.go
@@ -13,7 +13,7 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//nolint //Since this was copied from Prometheus leave it as is
package encoding

import "github.com/prometheus/common/model"
2 changes: 0 additions & 2 deletions pkg/chunk/gcp/bigtable_index_client.go
@@ -60,8 +60,6 @@ type storageClientColumnKey struct {
schemaCfg chunk.SchemaConfig
client *bigtable.Client
keysFn keysFn

distributeKeys bool
}

// storageClientV1 implements chunk.storageClient for GCP.
6 changes: 0 additions & 6 deletions pkg/chunk/schema_config.go
@@ -320,12 +320,6 @@ func (cfg *SchemaConfig) Load() error {
return cfg.Validate()
}

// PrintYaml dumps the yaml to stdout, to aid in migration
func (cfg SchemaConfig) PrintYaml() {
encoder := yaml.NewEncoder(os.Stdout)
encoder.Encode(cfg)
}

// Bucket describes a range of time with a tableName and hashKey
type Bucket struct {
from uint32