WIP missing implementations
also see the added FIXME

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
mtrmac committed Apr 11, 2024
1 parent a1a4b68 commit 6dae67d
Showing 3 changed files with 89 additions and 13 deletions.
43 changes: 41 additions & 2 deletions pkg/blobinfocache/boltdb/boltdb.go
@@ -25,6 +25,8 @@ var (

// uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
uncompressedDigestBucket = []byte("uncompressedDigest")
// uncompressedDigestByTOCBucket stores a mapping from a TOC digest to an uncompressed digest.
uncompressedDigestByTOCBucket = []byte("uncompressedDigestByTOC")
// digestCompressorBucket stores a mapping from any digest to a compressor, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression).
// It may not exist in caches created by older versions, even if uncompressedDigestBucket is present.
digestCompressorBucket = []byte("digestCompressor")
@@ -243,19 +245,56 @@ func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}

// UncompressedDigestForTOC returns the uncompressed digest corresponding to tocDigest.
// Returns "" if the uncompressed digest is unknown.
// FIXME: Does this need to record TOC/compression type?
func (bdc *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
return "" // FIXME: UNIMPLEMENTED
var res digest.Digest
if err := bdc.view(func(tx *bolt.Tx) error {
if b := tx.Bucket(uncompressedDigestByTOCBucket); b != nil {
if uncompressedBytes := b.Get([]byte(tocDigest.String())); uncompressedBytes != nil {
d, err := digest.Parse(string(uncompressedBytes))
if err == nil {
res = d
return nil
}
// FIXME? Log err (but throttle the log volume on repeated accesses)?
}
}
res = ""
return nil
}); err != nil { // Including os.IsNotExist(err)
return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
}
return res
}

// RecordTOCUncompressedPair records that tocDigest corresponds to uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (bdc *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
// FIXME: UNIMPLEMENTED
_ = bdc.update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists(uncompressedDigestByTOCBucket)
if err != nil {
return err
}
key := []byte(tocDigest.String())
if previousBytes := b.Get(key); previousBytes != nil {
previous, err := digest.Parse(string(previousBytes))
if err != nil {
return err
}
if previous != uncompressed {
logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed)
}
}
if err := b.Put(key, []byte(uncompressed.String())); err != nil {
return err
}
return nil
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}

// RecordDigestCompressorName records that the blob with digest anyDigest was compressed with the specified
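For context, the two new boltdb methods follow the standard bbolt transaction pattern; bdc.view and bdc.update appear to be thin wrappers over bbolt's View and Update transactions. Below is a minimal standalone sketch of that pattern, reusing the bucket name added in this commit; the database path and digest strings are made-up examples and the real code additionally goes through the cache's helpers.

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

// Bucket name as added in this commit; everything else here is illustrative.
var uncompressedDigestByTOCBucket = []byte("uncompressedDigestByTOC")

func main() {
	db, err := bolt.Open("/tmp/blobinfocache-example.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	tocDigest := "sha256:not-a-real-toc-digest"
	uncompressed := "sha256:not-a-real-uncompressed-digest"

	// Write path, as in RecordTOCUncompressedPair: create the bucket if needed, then Put.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(uncompressedDigestByTOCBucket)
		if err != nil {
			return err
		}
		return b.Put([]byte(tocDigest), []byte(uncompressed))
	}); err != nil {
		log.Fatal(err)
	}

	// Read path, as in UncompressedDigestForTOC: a missing bucket or key simply means "unknown".
	if err := db.View(func(tx *bolt.Tx) error {
		if b := tx.Bucket(uncompressedDigestByTOCBucket); b != nil {
			if v := b.Get([]byte(tocDigest)); v != nil {
				fmt.Printf("uncompressed digest for TOC %s: %s\n", tocDigest, v)
			}
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}

The real implementation additionally runs digest.Parse on the stored value and only returns it if parsing succeeds.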
24 changes: 24 additions & 0 deletions pkg/blobinfocache/internal/test/test.go
@@ -43,6 +43,8 @@ func GenericCache(t *testing.T, newTestCache func(t *testing.T) blobinfocache.Bl
}{
{"UncompressedDigest", testGenericUncompressedDigest},
{"RecordDigestUncompressedPair", testGenericRecordDigestUncompressedPair},
{"UncompressedDigestForTOC", testGenericUncompressedDigestForTOC},
{"RecordTOCUncompressedPair", testGenericRecordTOCUncompressedPair},
{"RecordKnownLocations", testGenericRecordKnownLocations},
{"CandidateLocations", testGenericCandidateLocations},
{"CandidateLocations2", testGenericCandidateLocations2},
@@ -99,6 +101,28 @@ func testGenericRecordDigestUncompressedPair(t *testing.T, cache blobinfocache.B
}
}

func testGenericUncompressedDigestForTOC(t *testing.T, cache blobinfocache.BlobInfoCache2) {
// Nothing is known.
assert.Equal(t, digest.Digest(""), cache.UncompressedDigestForTOC(digestUnknown))

cache.RecordTOCUncompressedPair(digestCompressedA, digestUncompressed)
cache.RecordTOCUncompressedPair(digestCompressedB, digestUncompressed)
// Known TOC→uncompressed mapping
assert.Equal(t, digestUncompressed, cache.UncompressedDigestForTOC(digestCompressedA))
assert.Equal(t, digestUncompressed, cache.UncompressedDigestForTOC(digestCompressedB))
}

func testGenericRecordTOCUncompressedPair(t *testing.T, cache blobinfocache.BlobInfoCache2) {
for i := 0; i < 2; i++ { // Record the same data twice to ensure redundant writes don’t break things.
// Known TOC→uncompressed mapping
cache.RecordTOCUncompressedPair(digestCompressedA, digestUncompressed)
assert.Equal(t, digestUncompressed, cache.UncompressedDigestForTOC(digestCompressedA))
// Two mappings to the same uncompressed digest
cache.RecordTOCUncompressedPair(digestCompressedB, digestUncompressed)
assert.Equal(t, digestUncompressed, cache.UncompressedDigestForTOC(digestCompressedB))
}
}

func testGenericRecordKnownLocations(t *testing.T, cache blobinfocache.BlobInfoCache2) {
transport := mocks.NameImageTransport("==BlobInfocache transport mock")
for i := 0; i < 2; i++ { // Record the same data twice to ensure redundant writes don’t break things.
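The two new cases plug into the table-driven GenericCache suite, so every backend exercises them automatically. A sketch of how a backend's test file is expected to wire in, using the in-memory cache as an example; the import paths are assumed from the repository layout and the actual test files may differ:

package memory

import (
	"testing"

	"github.com/containers/image/v5/internal/blobinfocache"
	"github.com/containers/image/v5/pkg/blobinfocache/internal/test"
)

// newTestCache returns a fresh cache for each sub-test run by the generic suite.
func newTestCache(t *testing.T) blobinfocache.BlobInfoCache2 {
	return new2()
}

func TestNew(t *testing.T) {
	test.GenericCache(t, newTestCache)
}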
35 changes: 24 additions & 11 deletions pkg/blobinfocache/memory/memory.go
@@ -24,10 +24,11 @@ type locationKey struct {
type cache struct {
mutex sync.Mutex
// The following fields can only be accessed with mutex held.
uncompressedDigests map[digest.Digest]digest.Digest
digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown (not blobinfocache.UnknownCompression), for each digest
uncompressedDigests map[digest.Digest]digest.Digest
uncompressedDigestsByTOC map[digest.Digest]digest.Digest
digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown (not blobinfocache.UnknownCompression), for each digest
}

// New returns a BlobInfoCache implementation which is in-memory only.
@@ -44,10 +45,11 @@ func New() types.BlobInfoCache {

func new2() *cache {
return &cache{
uncompressedDigests: map[digest.Digest]digest.Digest{},
digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{},
knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
compressors: map[digest.Digest]string{},
uncompressedDigests: map[digest.Digest]digest.Digest{},
uncompressedDigestsByTOC: map[digest.Digest]digest.Digest{},
digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{},
knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
compressors: map[digest.Digest]string{},
}
}

@@ -104,19 +106,30 @@ func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre
anyDigestSet.Add(anyDigest)
}

// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// UncompressedDigestForTOC returns the uncompressed digest corresponding to tocDigest.
// Returns "" if the uncompressed digest is unknown.
// FIXME: Does this need to record TOC/compression type?
func (mem *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
return "" // FIXME: UNIMPLEMENTED
mem.mutex.Lock()
defer mem.mutex.Unlock()
if d, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok {
return d
}
return ""
}

// RecordTOCUncompressedPair records that tocDigest corresponds to uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (mem *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
// FIXME: UNIMPLEMENTED
mem.mutex.Lock()
defer mem.mutex.Unlock()
if previous, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok && previous != uncompressed {
logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed)
}
mem.uncompressedDigestsByTOC[tocDigest] = uncompressed
}

// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
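Taken together, the intended calling pattern is: consult UncompressedDigestForTOC first, and only call RecordTOCUncompressedPair with a digest the caller has computed locally, per the WARNING above. A minimal sketch under those assumptions; the interface is restated locally for illustration, and the helper function and digest values are hypothetical:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// tocCache restates just the two methods added in this commit; the real
// interface lives in the repository's internal blobinfocache package.
type tocCache interface {
	UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest
	RecordTOCUncompressedPair(tocDigest, uncompressed digest.Digest)
}

// reuseOrComputeDiffID is a hypothetical caller: it checks the cache first and
// records only a locally computed result, never a remotely claimed one.
func reuseOrComputeDiffID(cache tocCache, tocDigest digest.Digest, decompressLocally func() digest.Digest) digest.Digest {
	if d := cache.UncompressedDigestForTOC(tocDigest); d != "" {
		return d // already known; skip decompression
	}
	uncompressed := decompressLocally() // locally verified result
	cache.RecordTOCUncompressedPair(tocDigest, uncompressed)
	return uncompressed
}

// mapCache is a throwaway stand-in so the example runs without the real caches.
type mapCache map[digest.Digest]digest.Digest

func (m mapCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
	return m[tocDigest] // zero value "" means unknown
}

func (m mapCache) RecordTOCUncompressedPair(tocDigest, uncompressed digest.Digest) {
	m[tocDigest] = uncompressed
}

func main() {
	cache := mapCache{}
	toc := digest.FromString("placeholder TOC")
	diffID := func() digest.Digest { return digest.FromString("placeholder layer") }
	fmt.Println(reuseOrComputeDiffID(cache, toc, diffID)) // computed and recorded
	fmt.Println(reuseOrComputeDiffID(cache, toc, diffID)) // served from the cache
}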
