From 4348caf720ef46e508bfced4c291ed6cde1ee27f Mon Sep 17 00:00:00 2001 From: Asra Ali Date: Mon, 23 May 2022 11:34:25 -0500 Subject: [PATCH] fix: fix fetching updated targets from TUF root Signed-off-by: Asra Ali add comment Signed-off-by: Asra Ali update Signed-off-by: Asra Ali update Signed-off-by: Asra Ali possible fix windows Signed-off-by: Asra Ali lint Signed-off-by: Asra Ali fix windows maybe Signed-off-by: Asra Ali fix close Signed-off-by: Asra Ali --- Makefile | 2 +- .../cli/fulcio/fulcioroots/fulcioroots.go | 2 +- pkg/cosign/tuf/client.go | 311 +++++++++++++----- pkg/cosign/tuf/client_test.go | 117 ++++++- 4 files changed, 340 insertions(+), 92 deletions(-) diff --git a/Makefile b/Makefile index e7e4fc91e74..a47074f75ca 100644 --- a/Makefile +++ b/Makefile @@ -118,7 +118,7 @@ cross: golangci-lint: rm -f $(GOLANGCI_LINT_BIN) || : set -e ;\ - GOBIN=$(GOLANGCI_LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.43.0 ;\ + GOBIN=$(GOLANGCI_LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.46.0 ;\ lint: golangci-lint ## Run golangci-lint linter $(GOLANGCI_LINT_BIN) run -n diff --git a/cmd/cosign/cli/fulcio/fulcioroots/fulcioroots.go b/cmd/cosign/cli/fulcio/fulcioroots/fulcioroots.go index c77378a9b3a..cba25ffe376 100644 --- a/cmd/cosign/cli/fulcio/fulcioroots/fulcioroots.go +++ b/cmd/cosign/cli/fulcio/fulcioroots/fulcioroots.go @@ -121,7 +121,7 @@ func initRoots() (*x509.CertPool, *x509.CertPool, error) { // call is made to update the root. targets, err := tufClient.GetTargetsByMeta(tuf.Fulcio, []string{fulcioTargetStr, fulcioV1TargetStr}) if err != nil { - return nil, nil, errors.New("error getting targets") + return nil, nil, fmt.Errorf("error getting targets: %w", err) } if len(targets) == 0 { return nil, nil, errors.New("none of the Fulcio roots have been found") diff --git a/pkg/cosign/tuf/client.go b/pkg/cosign/tuf/client.go index 9994d981c4c..cb3abddb180 100644 --- a/pkg/cosign/tuf/client.go +++ b/pkg/cosign/tuf/client.go @@ -20,18 +20,20 @@ import ( "context" "embed" "encoding/json" + "errors" "fmt" "io" + "io/fs" "io/ioutil" "net/url" "os" - "path" "path/filepath" "runtime" "strconv" "strings" "time" + "github.com/secure-systems-lab/go-securesystemslib/cjson" "github.com/theupdateframework/go-tuf/client" tuf_leveldbstore "github.com/theupdateframework/go-tuf/client/leveldbstore" "github.com/theupdateframework/go-tuf/data" @@ -44,6 +46,10 @@ const ( SigstoreNoCache = "SIGSTORE_NO_CACHE" ) +var GetRemoteRoot = func() string { + return DefaultRemoteRoot +} + type TUF struct { client *client.Client targets targetImpl @@ -186,7 +192,11 @@ func getRoot(meta map[string]json.RawMessage) (json.RawMessage, error) { return trustedRoot, nil } // On first initialize, there will be no root in the TUF DB, so read from embedded. - trustedRoot, err := embeddedRootRepo.ReadFile(path.Join("repository", "root.json")) + rd, ok := GetEmbedded().(fs.ReadFileFS) + if !ok { + return nil, errors.New("fs.ReadFileFS unimplemented for embedded repo") + } + trustedRoot, err := rd.ReadFile(filepath.FromSlash(filepath.Join("repository", "root.json"))) if err != nil { return nil, err } @@ -222,20 +232,18 @@ func initializeTUF(ctx context.Context, embed bool, mirror string, root []byte, embedded: embed, } + t.targets = newFileImpl() + // Lazily init the local store in case we start with an embedded. 
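+ // When starting from the embedded root, wrapEmbeddedLocal defers calling
+ // initLocal until the first SetMeta that carries genuinely new metadata, so a
+ // purely read-only run never creates tuf.db. newLocalStore itself falls back
+ // to client.MemoryLocalStore() when SIGSTORE_NO_CACHE is set.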
+ var initLocal = newLocalStore var err error if t.embedded { - t.local, err = embeddedLocalStore() - if err != nil { - return nil, err - } - t.targets = newEmbeddedImpl() + t.targets = wrapEmbedded(t.targets) + t.local = wrapEmbeddedLocal(initLocal) } else { - tufDB := filepath.Join(rootCacheDir(), "tuf.db") - t.local, err = localStore(tufDB) + t.local, err = initLocal() if err != nil { return nil, err } - t.targets = newFileImpl() } t.remote, err = remoteFromMirror(ctx, t.mirror) @@ -283,7 +291,7 @@ func initializeTUF(ctx context.Context, embed bool, mirror string, root []byte, func NewFromEnv(ctx context.Context) (*TUF, error) { // Get local and mirror from env - tufDB := filepath.Join(rootCacheDir(), "tuf.db") + tufDB := filepath.FromSlash(filepath.Join(rootCacheDir(), "tuf.db")) var embed bool // Check for the current local. @@ -296,12 +304,12 @@ func NewFromEnv(ctx context.Context) (*TUF, error) { // Some other error, bail return nil, statErr default: - // There is a root! Happy path. + // There is a local root! Happy path. embed = false } // Check for the current remote mirror. - mirror := DefaultRemoteRoot + mirror := GetRemoteRoot() b, err := os.ReadFile(cachedRemote(rootCacheDir())) if err == nil { remoteInfo := remoteCache{} @@ -340,7 +348,6 @@ func (t *TUF) GetTarget(name string) ([]byte, error) { if err != nil { return nil, fmt.Errorf("error verifying local metadata; local cache may be corrupt: %w", err) } - targetBytes, err := t.targets.Get(name) if err != nil { return nil, err @@ -379,7 +386,7 @@ func (t *TUF) GetTargetsByMeta(usage UsageKind, fallbacks []string) ([]TargetFil if scm.Sigstore.Usage == usage { target, err := t.GetTarget(name) if err != nil { - return nil, fmt.Errorf("error getting target by usage: %w", err) + return nil, fmt.Errorf("error getting target %s by usage: %w", name, err) } matchedTargets = append(matchedTargets, TargetFile{Target: target, Status: scm.Sigstore.Status}) } @@ -400,30 +407,9 @@ func (t *TUF) GetTargetsByMeta(usage UsageKind, fallbacks []string) ([]TargetFil return matchedTargets, nil } -func localStore(cacheRoot string) (client.LocalStore, error) { - local, err := tuf_leveldbstore.FileLocalStore(cacheRoot) - if err != nil { - return nil, fmt.Errorf("creating cached local store: %w", err) - } - return local, nil -} - -func embeddedLocalStore() (client.LocalStore, error) { - local := client.MemoryLocalStore() - for _, mdFilename := range []string{"root.json", "targets.json", "snapshot.json", "timestamp.json"} { - b, err := embeddedRootRepo.ReadFile(path.Join("repository", mdFilename)) - if err != nil { - return nil, fmt.Errorf("reading embedded file: %w", err) - } - if err := local.SetMeta(mdFilename, b); err != nil { - return nil, fmt.Errorf("setting local meta: %w", err) - } - } - return local, nil -} - func (t *TUF) updateMetadataAndDownloadTargets() error { // Download updated targets and cache new metadata and targets in ${TUF_ROOT}. + // NOTE: This only returns *updated* targets. targetFiles, err := t.client.Update() if err != nil { // Get some extra information for debugging. What was the state of the metadata @@ -459,8 +445,8 @@ func (t *TUF) updateMetadataAndDownloadTargets() error { return fmt.Errorf("error updating to TUF remote mirror: %w\nremote status:%s", err, string(b)) } - // Update the in-memory targets. - // If the cache directory is enabled, update that too. + // Download newly updated targets. + // TODO: Consider lazily downloading these -- be careful with embedded targets if so. 
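+ // targetFiles only lists targets whose hashes changed in this update, so each
+ // one is fetched from the remote here and cached through t.targets; for the
+ // embedded wrapper that first write also migrates the baked-in targets into
+ // the writeable cache (see embeddedWrapper.Set below).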
for name := range targetFiles { buf := bytes.Buffer{} if err := downloadRemoteTarget(name, t.client, &buf); err != nil { @@ -503,49 +489,175 @@ func rootCacheDir() string { if err != nil { home = "" } - return filepath.Join(home, ".sigstore", "root") + return filepath.FromSlash(filepath.Join(home, ".sigstore", "root")) } return rootDir } func cachedRemote(cacheRoot string) string { - return filepath.Join(cacheRoot, "remote.json") + return filepath.FromSlash(filepath.Join(cacheRoot, "remote.json")) } func cachedTargetsDir(cacheRoot string) string { - return filepath.Join(cacheRoot, "targets") + return filepath.FromSlash(filepath.Join(cacheRoot, "targets")) } -type targetImpl interface { - Get(string) ([]byte, error) - setImpl +// Local store implementations +func newLocalStore() (client.LocalStore, error) { + if noCache() { + return client.MemoryLocalStore(), nil + } + tufDB := filepath.FromSlash(filepath.Join(rootCacheDir(), "tuf.db")) + local, err := tuf_leveldbstore.FileLocalStore(tufDB) + if err != nil { + return nil, fmt.Errorf("creating cached local store: %w", err) + } + return local, nil } -type setImpl interface { - Set(string, []byte) error +type localInitFunc func() (client.LocalStore, error) + +type embeddedLocalStore struct { + // This is where updated repository info will be written to, if needed. + writeable client.LocalStore + writeableFunc localInitFunc } -type memoryCache struct { - targets map[string][]byte +var GetEmbedded = func() fs.FS { + return embeddedRootRepo } -func (m *memoryCache) Set(p string, b []byte) error { - if m.targets == nil { - m.targets = map[string][]byte{} +func wrapEmbeddedLocal(s localInitFunc) client.LocalStore { + return &embeddedLocalStore{writeableFunc: s, writeable: nil} +} + +func (e embeddedLocalStore) GetMeta() (map[string]json.RawMessage, error) { + if e.writeable != nil { + // We are using a writeable store, so use that. + return e.writeable.GetMeta() + } + // We haven't needed to create or write new metadata, so get the embedded metadata. + meta := make(map[string]json.RawMessage) + ed, ok := GetEmbedded().(fs.ReadDirFS) + if !ok { + return nil, errors.New("fs.ReadDirFS unimplemented for embedded repo") + } + entries, err := ed.ReadDir("repository") + if err != nil { + return nil, err + } + rd, ok := GetEmbedded().(fs.ReadFileFS) + if !ok { + return nil, errors.New("fs.ReadFileFS unimplemented for embedded repo") + } + for _, entry := range entries { + if entry.Type().IsRegular() { + b, err := rd.ReadFile(filepath.FromSlash(filepath.Join("repository", entry.Name()))) + if err != nil { + return nil, fmt.Errorf("reading embedded file: %w", err) + } + meta[entry.Name()] = b + } + } + return meta, err +} + +func (e *embeddedLocalStore) SetMeta(name string, meta json.RawMessage) error { + // Only write to the underlying writeable store if the metadata is new. + embeddedMeta, err := e.GetMeta() + if err != nil { + return err + } + if metaContent, ok := embeddedMeta[name]; ok { + stored, err := cjson.EncodeCanonical(metaContent) + if err != nil { + return err + } + toSet, err := cjson.EncodeCanonical(meta) + if err != nil { + return err + } + if bytes.EqualFold(stored, toSet) { + return nil + } + } + // If we reach here, we have to update metadata. + if e.writeable == nil { + // We haven't needed an update yet, so create and populate the writeable store! 
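+ // Copy all of the embedded metadata into the fresh store first so that the
+ // writeable store starts from the same trusted state the client has been
+ // reading, then apply the new metadata on top of it.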
+ meta, err := e.GetMeta() + if err != nil { + return fmt.Errorf("error retrieving embedded repo: %w", err) + } + e.writeable, err = e.writeableFunc() + if err != nil { + return fmt.Errorf("initializing local: %w", err) + } + for m, md := range meta { + if err := e.writeable.SetMeta(m, md); err != nil { + return fmt.Errorf("error transferring to cached repo: %w", err) + } + } + } + // We have a writeable store, so set the metadata. + return e.writeable.SetMeta(name, meta) +} + +func (e embeddedLocalStore) DeleteMeta(name string) error { + if e.writeable != nil { + return e.writeable.DeleteMeta(name) + } + return nil +} + +func (e embeddedLocalStore) Close() error { + if e.writeable != nil { + return e.writeable.Close() } - m.targets[p] = b return nil } //go:embed repository var embeddedRootRepo embed.FS -type embedded struct { - setImpl +// Target Implementations +type targetImpl interface { + Set(string, []byte) error + Get(string) ([]byte, error) +} + +func newFileImpl() targetImpl { + if noCache() { + return &memoryCache{} + } + return &diskCache{base: cachedTargetsDir(rootCacheDir())} } -func (e *embedded) Get(p string) ([]byte, error) { - b, err := embeddedRootRepo.ReadFile(path.Join("repository", "targets", p)) +func wrapEmbedded(t targetImpl) targetImpl { + return &embeddedWrapper{writeable: t, embedded: true} +} + +type embeddedWrapper struct { + // If we have an embedded fallback that needs updates, use + // the writeable targetImpl + writeable targetImpl + // Whether we are using the embedded or writeable + embedded bool +} + +func (e *embeddedWrapper) Get(p string) ([]byte, error) { + if !e.embedded { + // Get it from the writeable target store since there's been updates. + b, err := e.writeable.Get(p) + if err == nil { + return b, nil + } + fmt.Fprintf(os.Stderr, "**Warning** Updated target not found; falling back on embedded target %s\n", p) + } + rd, ok := GetEmbedded().(fs.ReadFileFS) + if !ok { + return nil, errors.New("fs.ReadFileFS unimplemented for embedded repo") + } + b, err := rd.ReadFile(filepath.FromSlash(filepath.Join("repository", "targets", p))) if err != nil { return nil, err } @@ -558,25 +670,75 @@ func (e *embedded) Get(p string) ([]byte, error) { return b, nil } -type file struct { - base string - setImpl +func (e *embeddedWrapper) Set(name string, b []byte) error { + // An embedded targetImpl needs to set a file, copy files to cache. 
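+ // On the first Set we copy every embedded target into the writeable store and
+ // flip e.embedded to false; after that, Get prefers the writeable cache and
+ // only falls back to the embedded copies when a target is missing from it.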
+ if e.embedded { + ed, ok := GetEmbedded().(fs.ReadDirFS) + if !ok { + return errors.New("fs.ReadFileFS unimplemented for embedded repo") + } + entries, err := ed.ReadDir(filepath.FromSlash(filepath.Join("repository", "targets"))) + if err != nil { + return err + } + rd, ok := GetEmbedded().(fs.ReadFileFS) + if !ok { + return errors.New("fs.ReadFileFS unimplemented for embedded repo") + } + for _, entry := range entries { + b, err := rd.ReadFile(filepath.FromSlash(filepath.Join("repository", "targets", entry.Name()))) + if err != nil { + return fmt.Errorf("reading embedded file: %w", err) + } + if err := e.writeable.Set(entry.Name(), b); err != nil { + return fmt.Errorf("setting embedded file: %w", err) + } + } + e.embedded = false + } + return e.writeable.Set(name, b) } -func (f *file) Get(p string) ([]byte, error) { - fp := filepath.Join(f.base, p) - return os.ReadFile(fp) +// In-memory cache for targets +type memoryCache struct { + targets map[string][]byte +} + +func (m *memoryCache) Set(p string, b []byte) error { + if m.targets == nil { + m.targets = map[string][]byte{} + } + m.targets[p] = b + return nil +} + +func (m *memoryCache) Get(p string) ([]byte, error) { + if m.targets == nil { + // This should never happen, a memory cache is used only after items are Set. + return nil, fmt.Errorf("no cached targets available, cannot retrieve %s", p) + } + b, ok := m.targets[p] + if !ok { + return nil, fmt.Errorf("missing cached target %s", p) + } + return b, nil } +// On-disk cache for targets type diskCache struct { base string } +func (d *diskCache) Get(p string) ([]byte, error) { + fp := filepath.FromSlash(filepath.Join(d.base, p)) + return os.ReadFile(fp) +} + func (d *diskCache) Set(p string, b []byte) error { if err := os.MkdirAll(d.base, 0700); err != nil { return fmt.Errorf("creating targets dir: %w", err) } - fp := filepath.Join(d.base, p) + fp := filepath.FromSlash(filepath.Join(d.base, p)) return os.WriteFile(fp, b, 0600) } @@ -588,27 +750,6 @@ func noCache() bool { return b } -func newEmbeddedImpl() targetImpl { - e := &embedded{} - if noCache() { - e.setImpl = &memoryCache{} - } else { - e.setImpl = &diskCache{base: cachedTargetsDir(rootCacheDir())} - } - return e -} - -func newFileImpl() targetImpl { - base := cachedTargetsDir(rootCacheDir()) - f := &file{base: base} - if noCache() { - f.setImpl = &memoryCache{} - } else { - f.setImpl = &diskCache{base: base} - } - return f -} - func remoteFromMirror(ctx context.Context, mirror string) (client.RemoteStore, error) { if _, parseErr := url.ParseRequestURI(mirror); parseErr != nil { return GcsRemoteStore(ctx, mirror, nil, nil) diff --git a/pkg/cosign/tuf/client_test.go b/pkg/cosign/tuf/client_test.go index 380441a319e..1a9c7206731 100644 --- a/pkg/cosign/tuf/client_test.go +++ b/pkg/cosign/tuf/client_test.go @@ -19,6 +19,7 @@ import ( "bytes" "context" "encoding/json" + "io/fs" "io/ioutil" "net/http" "net/http/httptest" @@ -28,6 +29,7 @@ import ( "sort" "strings" "testing" + "testing/fstest" "time" "github.com/google/go-cmp/cmp" @@ -66,8 +68,8 @@ func TestNewFromEnv(t *testing.T) { if err != nil { t.Fatal(err) } - tuf.Close() checkTargetsAndMeta(t, tuf) + tuf.Close() if err := Initialize(ctx, DefaultRemoteRoot, nil); err != nil { t.Error() @@ -137,12 +139,12 @@ func TestCache(t *testing.T) { if err != nil { t.Fatal(err) } - tuf.Close() if l := dirLen(t, td); l == 0 { t.Errorf("expected filesystem writes, got %d entries", l) } checkTargetsAndMeta(t, tuf) + tuf.Close() } func TestCustomRoot(t *testing.T) { @@ -308,6 +310,84 @@ 
func TestGetTargetsByMeta(t *testing.T) { } } +func makeMapFS(repo string) (fs fstest.MapFS) { + fs = make(fstest.MapFS) + _ = filepath.Walk(repo, + func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, _ := filepath.Rel(repo, path) + if info.IsDir() { + fs[filepath.FromSlash(filepath.Join("repository", rel))] = &fstest.MapFile{Mode: os.ModeDir} + } else { + b, _ := os.ReadFile(path) + fs[filepath.FromSlash(filepath.Join("repository", rel))] = &fstest.MapFile{Data: b} + } + return nil + }) + return +} + +// Regression test for failure to locate newly added/renamed targets from a TUF update. +// Previously, the embedded or in-memory targetImpl did not fetch any updated targets. +func TestUpdatedTargetNamesEmbedded(t *testing.T) { + td := t.TempDir() + // Set the TUF_ROOT so we don't interact with other tests and local TUF roots. + t.Setenv("TUF_ROOT", td) + + origEmbedded := GetEmbedded + origDefaultRemote := GetRemoteRoot + defer func() { + GetEmbedded = origEmbedded + GetRemoteRoot = origDefaultRemote + }() + + // Create an "expired" embedded repository. + ctx := context.Background() + store, r := newTufCustomRepo(t, td, "foo") + repository := filepath.FromSlash(filepath.Join(td, "repository")) + mapfs := makeMapFS(repository) + GetEmbedded = func() fs.FS { return mapfs } + + oldIsExpired := verify.IsExpired + isExpiredTimestamp = func(metadata []byte) bool { + m, _ := store.GetMeta() + timestampExpires, _ := getExpiration(m["timestamp.json"]) + metadataExpires, _ := getExpiration(metadata) + return metadataExpires.Sub(*timestampExpires) <= 0 + } + + // Serve an updated remote repository with a new target. + addNewCustomTarget(t, td, r, "newdata") + s := httptest.NewServer(http.FileServer(http.Dir(repository))) + defer s.Close() + GetRemoteRoot = func() string { return s.URL } + + // Initialize. + tufObj, err := NewFromEnv(ctx) + if err != nil { + t.Fatal(err) + } + defer tufObj.Close() + + // Try to retrieve the newly added target. 
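+ // GetTargetsByMeta should return every target whose custom metadata is marked
+ // with Fulcio usage, including fooNew.txt which only exists in the updated
+ // remote; the fooNoCustom.txt fallback is ignored because matches were found.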
+ targets, err := tufObj.GetTargetsByMeta(Fulcio, []string{"fooNoCustom.txt"}) + if err != nil { + t.Fatal(err) + } + if len(targets) != 3 { + t.Fatalf("expected three target without custom metadata, got %d targets", len(targets)) + } + targetBytes := []string{string(targets[0].Target), string(targets[1].Target), string(targets[2].Target)} + expectedTB := []string{"foo", "foo", "newdata"} + if !cmp.Equal(targetBytes, expectedTB, + cmpopts.SortSlices(func(a, b string) bool { return a < b })) { + t.Fatalf("target data mismatched, expected: %v, got: %v", expectedTB, targetBytes) + } + verify.IsExpired = oldIsExpired +} + func checkTargetsAndMeta(t *testing.T, tuf *TUF) { // Check the targets t.Helper() @@ -399,7 +479,7 @@ func newTufCustomRepo(t *testing.T, td string, targetData string) (tuf.LocalStor for name, scm := range map[string]json.RawMessage{ "fooNoCustom.txt": nil, "fooNoCustomOther.txt": nil, "fooActive.txt": scmActive, "fooExpired.txt": scmExpired} { - targetPath := filepath.Join(td, "staged", "targets", name) + targetPath := filepath.FromSlash(filepath.Join(td, "staged", "targets", name)) if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil { t.Error(err) } @@ -422,6 +502,33 @@ func newTufCustomRepo(t *testing.T, td string, targetData string) (tuf.LocalStor return remote, r } +func addNewCustomTarget(t *testing.T, td string, r *tuf.Repo, targetData string) { + scmActive, err := json.Marshal(&sigstoreCustomMetadata{Sigstore: customMetadata{Usage: Fulcio, Status: Active}}) + if err != nil { + t.Error(err) + } + + targetPath := filepath.FromSlash(filepath.Join(td, "staged", "targets", "fooNew.txt")) + if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil { + t.Error(err) + } + if err := ioutil.WriteFile(targetPath, []byte(targetData), 0600); err != nil { + t.Error(err) + } + if err := r.AddTarget("fooNew.txt", scmActive); err != nil { + t.Error(err) + } + if err := r.Snapshot(); err != nil { + t.Error(err) + } + if err := r.Timestamp(); err != nil { + t.Error(err) + } + if err := r.Commit(); err != nil { + t.Error(err) + } +} + func newTufRepo(t *testing.T, td string, targetData string) (tuf.LocalStore, *tuf.Repo) { remote := tuf.FileSystemStore(td, nil) r, err := tuf.NewRepo(remote) @@ -436,7 +543,7 @@ func newTufRepo(t *testing.T, td string, targetData string) (tuf.LocalStore, *tu t.Error(err) } } - targetPath := filepath.Join(td, "staged", "targets", "foo.txt") + targetPath := filepath.FromSlash(filepath.Join(td, "staged", "targets", "foo.txt")) if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil { t.Error(err) } @@ -459,7 +566,7 @@ func newTufRepo(t *testing.T, td string, targetData string) (tuf.LocalStore, *tu } func updateTufRepo(t *testing.T, td string, r *tuf.Repo, targetData string) { - targetPath := filepath.Join(td, "staged", "targets", "foo.txt") + targetPath := filepath.FromSlash(filepath.Join(td, "staged", "targets", "foo.txt")) if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil { t.Error(err) }