Remove users of pkg/errors.WithStack, pkg/errors.Cause #1588

Merged: 15 commits, Jul 1, 2022
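For context, the hunks below follow a single pattern: the github.com/pkg/errors import is aliased to perrors, the standard library errors and fmt packages are imported, and errors.Errorf calls become fmt.Errorf, while Wrap/Wrapf calls stay on perrors for now. The eventual stdlib replacement for Wrapf- and Cause-style code generally looks like the sketch below (the file name and identifiers in it are illustrative, not taken from this PR):

```go
// Sketch of the general pkg/errors -> stdlib migration; names are illustrative.
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func openConfig(path string) (*os.File, error) {
	f, err := os.Open(path)
	if err != nil {
		// Before: perrors.Wrapf(err, "opening config %s", path)
		// After:  %w keeps the underlying error reachable via errors.Is/As.
		return nil, fmt.Errorf("opening config %s: %w", path, err)
	}
	return f, nil
}

func main() {
	_, err := openConfig("/does/not/exist.json")
	// Before: a check against perrors.Cause(err)
	// After:  errors.Is walks the %w chain for us.
	if errors.Is(err, fs.ErrNotExist) {
		fmt.Println("config missing:", err)
	}
}
```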
copy/blob.go (16 changes: 9 additions & 7 deletions)
@@ -2,12 +2,14 @@ package copy

import (
"context"
"errors"
"fmt"
"io"

"github.com/containers/image/v5/internal/private"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
perrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -34,7 +36,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
// read stream to the end, and validation does not happen.
digestingReader, err := newDigestingReader(stream.reader, srcInfo.Digest)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest)
return types.BlobInfo{}, perrors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest)
}
stream.reader = digestingReader

@@ -105,7 +107,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
}
uploadedInfo, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "writing blob")
return types.BlobInfo{}, perrors.Wrap(err, "writing blob")
}

uploadedInfo.Annotations = stream.info.Annotations
@@ -124,15 +126,15 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
_, err := io.Copy(io.Discard, originalLayerReader)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
return types.BlobInfo{}, perrors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
}
}

if digestingReader.validationFailed { // Coverage: This should never happen.
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
}
if stream.info.Digest != "" && uploadedInfo.Digest != stream.info.Digest {
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, stream.info.Digest, uploadedInfo.Digest)
return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, stream.info.Digest, uploadedInfo.Digest)
}
if digestingReader.validationSucceeded {
if err := compressionStep.recordValidatedDigestData(ic.c, uploadedInfo, srcInfo, encryptionStep, decryptionStep); err != nil {
@@ -164,7 +166,7 @@ type errorAnnotationReader struct {
func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
n, err = r.reader.Read(b)
if err != io.EOF {
return n, errors.Wrapf(err, "happened during read")
return n, perrors.Wrapf(err, "happened during read")
}
return n, err
}
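
One plausible reason the Read method above still uses perrors.Wrapf rather than fmt.Errorf: the guard err != io.EOF also lets err == nil through, and perrors.Wrapf returns nil for a nil error, while fmt.Errorf always returns a non-nil error. A stdlib version would therefore need an explicit nil check; a minimal sketch (not the code merged in this PR):

```go
package copy // sketch only, not the code merged in this PR

import (
	"fmt"
	"io"
)

// errorAnnotationReader mirrors the type above purely for illustration.
type errorAnnotationReader struct {
	reader io.Reader
}

// Read annotates real (non-EOF, non-nil) errors from the underlying reader.
func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
	n, err = r.reader.Read(b)
	if err != nil && err != io.EOF {
		// Unlike perrors.Wrapf, fmt.Errorf never returns nil, so the nil
		// case has to be excluded explicitly to keep Read's contract intact.
		return n, fmt.Errorf("happened during read: %w", err)
	}
	return n, err
}
```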
copy/compression.go (8 changes: 5 additions & 3 deletions)
@@ -1,13 +1,15 @@
package copy

import (
"errors"
"fmt"
"io"

internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/pkg/compression"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
perrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -26,7 +28,7 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
format, decompressor, reader, err := compression.DetectCompressionFormat(stream.reader) // We could skip this in some cases, but let's keep the code path uniform
if err != nil {
return bpDetectCompressionStepData{}, errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
return bpDetectCompressionStepData{}, perrors.Wrapf(err, "reading blob %s", srcInfo.Digest)
}
stream.reader = reader
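
The comment a few lines up about "peeking ahead" describes a common pattern: inspect the first bytes of a stream without consuming them, then hand back a reader that still yields the full stream. A generic stdlib sketch of that idea (this is not the DetectCompressionFormat implementation):

```go
// Sketch of the "peek ahead, then keep reading from the start" pattern.
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
)

func sniffGzip(r io.Reader) (bool, io.Reader, error) {
	br := bufio.NewReader(r)
	magic, err := br.Peek(2) // gzip streams start with 0x1f 0x8b
	if err != nil && err != io.EOF {
		return false, nil, err
	}
	isGzip := len(magic) == 2 && magic[0] == 0x1f && magic[1] == 0x8b
	// br still holds the peeked bytes, so it replaces the original reader.
	return isGzip, br, nil
}

func main() {
	gz, reader, err := sniffGzip(bytes.NewReader([]byte("\x1f\x8bhello")))
	if err != nil {
		panic(err)
	}
	rest, _ := io.ReadAll(reader)
	fmt.Println(gz, len(rest)) // true 7: nothing was lost to the peek
}
```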

@@ -259,7 +261,7 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
case types.Decompress:
c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
default:
return errors.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
}
}
if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
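The hunks above cover only part of the change; per the PR title, the rest removes callers of pkg/errors.WithStack and pkg/errors.Cause. The usual stdlib replacement for a Cause type assertion is errors.As (or errors.Is for sentinel values), roughly as in this illustrative sketch, which is not taken from the PR:

```go
// Sketch of replacing a pkg/errors.Cause type assertion; not code from this PR.
package main

import (
	"errors"
	"fmt"
	"net"
)

func main() {
	base := &net.DNSError{Name: "registry.example.com", IsTimeout: true}
	err := fmt.Errorf("pinging registry: %w", base)

	// Before: if dnsErr, ok := perrors.Cause(err).(*net.DNSError); ok { ... }
	// After:  errors.As walks the %w chain to find the concrete type.
	var dnsErr *net.DNSError
	if errors.As(err, &dnsErr) && dnsErr.IsTimeout {
		fmt.Println("DNS lookup timed out for", dnsErr.Name)
	}
}
```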