Skip to content

Commit

Permalink
Make analysis incremental (#808)
Browse files Browse the repository at this point in the history
Cache linting results. Reanalyze only changed packages
and the tree of packages that depend on them.

Fixes #768, fixes #809
  • Loading branch information
jirfag committed Oct 13, 2019
1 parent ca6effb commit 9ba730e
Show file tree
Hide file tree
Showing 59 changed files with 931 additions and 420 deletions.
14 changes: 13 additions & 1 deletion .golangci.yml
Expand Up @@ -43,6 +43,8 @@ linters-settings:
disabled-checks:
- wrapperFunc
- dupImport # https://github.com/go-critic/go-critic/issues/845
- ifElseChain
- octalLiteral
funlen:
lines: 100
statements: 50
Expand Down Expand Up @@ -95,7 +97,17 @@ linters:
run:
skip-dirs:
- test/testdata_etc
- internal/(cache|renameio|robustio)
skip-files:
- internal/cache/.*_test.go

issues:
exclude-rules:
- path: internal/(cache|renameio)/
linters:
- lll
- gochecknoinits
- gocyclo
- funlen

# golangci.com configuration
# https://github.com/golangci/golangci/wiki/Configuration
Expand Down
14 changes: 13 additions & 1 deletion README.md
Expand Up @@ -945,6 +945,8 @@ linters-settings:
disabled-checks:
- wrapperFunc
- dupImport # https://github.com/go-critic/go-critic/issues/845
- ifElseChain
- octalLiteral
funlen:
lines: 100
statements: 50
Expand Down Expand Up @@ -997,7 +999,17 @@ linters:
run:
skip-dirs:
- test/testdata_etc
- internal/(cache|renameio|robustio)
skip-files:
- internal/cache/.*_test.go
issues:
exclude-rules:
- path: internal/(cache|renameio)/
linters:
- lll
- gochecknoinits
- gocyclo
- funlen
# golangci.com configuration
# https://github.com/golangci/golangci/wiki/Configuration
Expand Down
123 changes: 84 additions & 39 deletions internal/cache/cache.go
Expand Up @@ -12,7 +12,6 @@ import (
"bytes"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
Expand All @@ -22,6 +21,8 @@ import (
"strings"
"time"

"github.com/pkg/errors"

"github.com/golangci/golangci-lint/internal/renameio"
)

Expand Down Expand Up @@ -144,47 +145,56 @@ func (c *Cache) get(id ActionID) (Entry, error) {
missing := func() (Entry, error) {
return Entry{}, errMissing
}
f, err := os.Open(c.fileName(id, "a"))
failed := func(err error) (Entry, error) {
return Entry{}, err
}
fileName := c.fileName(id, "a")
f, err := os.Open(fileName)
if err != nil {
return missing()
if os.IsNotExist(err) {
return missing()
}
return failed(err)
}
defer f.Close()
entry := make([]byte, entrySize+1) // +1 to detect whether f is too long
if n, err := io.ReadFull(f, entry); n != entrySize || err != io.ErrUnexpectedEOF {
return missing()
if n, readErr := io.ReadFull(f, entry); n != entrySize || readErr != io.ErrUnexpectedEOF {
return failed(fmt.Errorf("read %d/%d bytes from %s with error %s", n, entrySize, fileName, readErr))
}
if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' {
return missing()
return failed(fmt.Errorf("bad data in %s", fileName))
}
eid, entry := entry[3:3+hexSize], entry[3+hexSize:]
eout, entry := entry[1:1+hexSize], entry[1+hexSize:]
esize, entry := entry[1:1+20], entry[1+20:]
etime, entry := entry[1:1+20], entry[1+20:]
etime := entry[1 : 1+20]
var buf [HashSize]byte
if _, err := hex.Decode(buf[:], eid); err != nil || buf != id {
return missing()
if _, err = hex.Decode(buf[:], eid); err != nil || buf != id {
return failed(errors.Wrapf(err, "failed to hex decode eid data in %s", fileName))
}
if _, err := hex.Decode(buf[:], eout); err != nil {
return missing()
if _, err = hex.Decode(buf[:], eout); err != nil {
return failed(errors.Wrapf(err, "failed to hex decode eout data in %s", fileName))
}
i := 0
for i < len(esize) && esize[i] == ' ' {
i++
}
size, err := strconv.ParseInt(string(esize[i:]), 10, 64)
if err != nil || size < 0 {
return missing()
return failed(fmt.Errorf("failed to parse esize int from %s with error %s", fileName, err))
}
i = 0
for i < len(etime) && etime[i] == ' ' {
i++
}
tm, err := strconv.ParseInt(string(etime[i:]), 10, 64)
if err != nil || tm < 0 {
return missing()
return failed(fmt.Errorf("failed to parse etime int from %s with error %s", fileName, err))
}

c.used(c.fileName(id, "a"))
if err = c.used(fileName); err != nil {
return failed(errors.Wrapf(err, "failed to mark %s as used", fileName))
}

return Entry{buf, size, time.Unix(0, tm)}, nil
}
Expand All @@ -196,7 +206,12 @@ func (c *Cache) GetFile(id ActionID) (file string, entry Entry, err error) {
if err != nil {
return "", Entry{}, err
}
file = c.OutputFile(entry.OutputID)

file, err = c.OutputFile(entry.OutputID)
if err != nil {
return "", Entry{}, err
}

info, err := os.Stat(file)
if err != nil || info.Size() != entry.Size {
return "", Entry{}, errMissing
Expand All @@ -212,18 +227,29 @@ func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) {
if err != nil {
return nil, entry, err
}
data, _ := ioutil.ReadFile(c.OutputFile(entry.OutputID))
outputFile, err := c.OutputFile(entry.OutputID)
if err != nil {
return nil, entry, err
}

data, err := ioutil.ReadFile(outputFile)
if err != nil {
return nil, entry, err
}

if sha256.Sum256(data) != entry.OutputID {
return nil, entry, errMissing
}
return data, entry, nil
}

// OutputFile returns the name of the cache file storing output with the given OutputID.
func (c *Cache) OutputFile(out OutputID) string {
func (c *Cache) OutputFile(out OutputID) (string, error) {
file := c.fileName(out, "d")
c.used(file)
return file
if err := c.used(file); err != nil {
return "", err
}
return file, nil
}

// Time constants for cache expiration.
Expand Down Expand Up @@ -253,12 +279,21 @@ const (
// mtime is more than an hour old. This heuristic eliminates
// nearly all of the mtime updates that would otherwise happen,
// while still keeping the mtimes useful for cache trimming.
func (c *Cache) used(file string) {
func (c *Cache) used(file string) error {
info, err := os.Stat(file)
if err == nil && c.now().Sub(info.ModTime()) < mtimeInterval {
return
if err != nil {
return errors.Wrapf(err, "failed to stat file %s", file)
}
os.Chtimes(file, c.now(), c.now())

if c.now().Sub(info.ModTime()) < mtimeInterval {
return nil
}

if err := os.Chtimes(file, c.now(), c.now()); err != nil {
return errors.Wrapf(err, "failed to change time of file %s", file)
}

return nil
}

// Trim removes old cache entries that are likely not to be reused.
Expand All @@ -285,7 +320,7 @@ func (c *Cache) Trim() {

// Ignore errors from here: if we don't write the complete timestamp, the
// cache will appear older than it is, and we'll trim it again next time.
renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666)
_ = renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666)
}

// trimSubdir trims a single cache subdirectory.
Expand Down Expand Up @@ -367,7 +402,9 @@ func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify
os.Remove(file)
return err
}
os.Chtimes(file, c.now(), c.now()) // mainly for tests
if err = os.Chtimes(file, c.now(), c.now()); err != nil { // mainly for tests
return errors.Wrapf(err, "failed to change time of file %s", file)
}

return nil
}
Expand Down Expand Up @@ -421,9 +458,12 @@ func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error {
info, err := os.Stat(name)
if err == nil && info.Size() == size {
// Check hash.
if f, err := os.Open(name); err == nil {
if f, openErr := os.Open(name); openErr == nil {
h := sha256.New()
io.Copy(h, f)
if _, copyErr := io.Copy(h, f); copyErr != nil {
return errors.Wrap(copyErr, "failed to copy to sha256")
}

f.Close()
var out2 OutputID
h.Sum(out2[:0])
Expand Down Expand Up @@ -456,44 +496,49 @@ func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error {
// before returning, to avoid leaving bad bytes in the file.

// Copy file to f, but also into h to double-check hash.
if _, err := file.Seek(0, 0); err != nil {
f.Truncate(0)
if _, err = file.Seek(0, 0); err != nil {
_ = f.Truncate(0)
return err
}
h := sha256.New()
w := io.MultiWriter(f, h)
if _, err := io.CopyN(w, file, size-1); err != nil {
f.Truncate(0)
if _, err = io.CopyN(w, file, size-1); err != nil {
_ = f.Truncate(0)
return err
}
// Check last byte before writing it; writing it will make the size match
// what other processes expect to find and might cause them to start
// using the file.
buf := make([]byte, 1)
if _, err := file.Read(buf); err != nil {
f.Truncate(0)
if _, err = file.Read(buf); err != nil {
_ = f.Truncate(0)
return err
}
h.Write(buf)
if n, wErr := h.Write(buf); n != len(buf) {
return fmt.Errorf("wrote to hash %d/%d bytes with error %s", n, len(buf), wErr)
}

sum := h.Sum(nil)
if !bytes.Equal(sum, out[:]) {
f.Truncate(0)
_ = f.Truncate(0)
return fmt.Errorf("file content changed underfoot")
}

// Commit cache file entry.
if _, err := f.Write(buf); err != nil {
f.Truncate(0)
if _, err = f.Write(buf); err != nil {
_ = f.Truncate(0)
return err
}
if err := f.Close(); err != nil {
if err = f.Close(); err != nil {
// Data might not have been written,
// but file may look like it is the right size.
// To be extra careful, remove cached file.
os.Remove(name)
return err
}
os.Chtimes(name, c.now(), c.now()) // mainly for tests
if err = os.Chtimes(name, c.now(), c.now()); err != nil { // mainly for tests
return errors.Wrapf(err, "failed to change time of file %s", name)
}

return nil
}
4 changes: 3 additions & 1 deletion internal/cache/default.go
Expand Up @@ -39,7 +39,9 @@ func initDefaultCache() {
}
if _, err := os.Stat(filepath.Join(dir, "README")); err != nil {
// Best effort.
ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666)
if wErr := ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666); wErr != nil {
log.Fatalf("Failed to write README file to cache dir %s: %s", dir, err)
}
}

c, err := Open(dir)
Expand Down
26 changes: 18 additions & 8 deletions internal/cache/hash.go
Expand Up @@ -42,11 +42,19 @@ func SetSalt(b []byte) {

// Subkey returns an action ID corresponding to mixing a parent
// action ID with a string description of the subkey.
func Subkey(parent ActionID, desc string) ActionID {
func Subkey(parent ActionID, desc string) (ActionID, error) {
h := sha256.New()
h.Write([]byte("subkey:"))
h.Write(parent[:])
h.Write([]byte(desc))
const subkeyPrefix = "subkey:"
if n, err := h.Write([]byte(subkeyPrefix)); n != len(subkeyPrefix) {
return ActionID{}, fmt.Errorf("wrote %d/%d bytes of subkey prefix with error %s", n, len(subkeyPrefix), err)
}
if n, err := h.Write(parent[:]); n != len(parent) {
return ActionID{}, fmt.Errorf("wrote %d/%d bytes of parent with error %s", n, len(parent), err)
}
if n, err := h.Write([]byte(desc)); n != len(desc) {
return ActionID{}, fmt.Errorf("wrote %d/%d bytes of desc with error %s", n, len(desc), err)
}

var out ActionID
h.Sum(out[:0])
if debugHash {
Expand All @@ -57,21 +65,23 @@ func Subkey(parent ActionID, desc string) ActionID {
hashDebug.m[out] = fmt.Sprintf("subkey %x %q", parent, desc)
hashDebug.Unlock()
}
return out
return out, nil
}

// NewHash returns a new Hash.
// The caller is expected to Write data to it and then call Sum.
func NewHash(name string) *Hash {
func NewHash(name string) (*Hash, error) {
h := &Hash{h: sha256.New(), name: name}
if debugHash {
fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name)
}
h.Write(hashSalt)
if n, err := h.Write(hashSalt); n != len(hashSalt) {
return nil, fmt.Errorf("wrote %d/%d bytes of hash salt with error %s", n, len(hashSalt), err)
}
if verify {
h.buf = new(bytes.Buffer)
}
return h
return h, nil
}

// Write writes data to the running hash.
Expand Down
2 changes: 1 addition & 1 deletion internal/cache/hash_test.go
Expand Up @@ -18,7 +18,7 @@ func TestHash(t *testing.T) {
hashSalt = oldSalt
}()

h := NewHash("alice")
h, _ := NewHash("alice")
h.Write([]byte("hello world"))
sum := fmt.Sprintf("%x", h.Sum())
want := "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"
Expand Down

0 comments on commit 9ba730e

Please sign in to comment.