DNM concurrent NewFromEnv tests #1941

Closed · wants to merge 7 commits
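In outline, this PR removes the Close() lifecycle from the TUF client, guards initializeTUF behind a package-level singleton protected by a sync.Mutex so that concurrent NewFromEnv callers share one instance, backs the client with an in-memory local store seeded from the on-disk cache, and adds a concurrency test plus a resetForTests helper. The following is a minimal sketch of that singleton pattern, assuming a newTUFClient helper as a stand-in for the real initialization body; it illustrates the approach rather than reproducing the actual initializeTUF code.

package tuf

import "sync"

// TUF is reduced to an empty placeholder for this sketch.
type TUF struct{}

var (
	singletonTUF   *TUF       // reused on subsequent invocations of initializeTUF
	singletonTUFMu sync.Mutex // serializes concurrent initializers
)

// newTUFClient stands in for the real client construction (remote, local store, root).
func newTUFClient() (*TUF, error) { return &TUF{}, nil }

func initializeTUF() (*TUF, error) {
	singletonTUFMu.Lock()
	defer singletonTUFMu.Unlock()
	if singletonTUF != nil {
		// A previous caller already built the client; reuse it.
		return singletonTUF, nil
	}
	t, err := newTUFClient()
	if err != nil {
		return nil, err
	}
	singletonTUF = t
	return singletonTUF, nil
}

Because the mutex is held for the whole initialization, only the first caller pays the setup and network cost; every later caller returns immediately with the cached object.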
1 change: 0 additions & 1 deletion cmd/cosign/cli/fulcio/fulcioroots/fulcioroots.go
@@ -116,7 +116,6 @@ func initRoots() (*x509.CertPool, *x509.CertPool, error) {
if err != nil {
return nil, nil, fmt.Errorf("initializing tuf: %w", err)
}
defer tufClient.Close()
// Retrieve from the embedded or cached TUF root. If expired, a network
// call is made to update the root.
targets, err := tufClient.GetTargetsByMeta(tuf.Fulcio, []string{fulcioTargetStr, fulcioV1TargetStr})
1 change: 0 additions & 1 deletion cmd/cosign/cli/fulcio/fulcioverifier/ctl/verify.go
@@ -81,7 +81,6 @@ func VerifySCT(ctx context.Context, certPEM, chainPEM, rawSCT []byte) error {
if err != nil {
return err
}
defer tufClient.Close()

targets, err := tufClient.GetTargetsByMeta(tuf.CTFE, []string{ctPublicKeyStr})
if err != nil {
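These two call-site changes are the visible half of the new contract: because the TUF client is now a shared singleton, callers no longer close it. The calling pattern that remains, trimmed from verify.go, looks like this:

tufClient, err := tuf.NewFromEnv(ctx)
if err != nil {
	return err
}
// No defer tufClient.Close() anymore; the client is shared process-wide.
targets, err := tufClient.GetTargetsByMeta(tuf.CTFE, []string{ctPublicKeyStr})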
25 changes: 7 additions & 18 deletions pkg/cosign/tlog.go
@@ -102,12 +102,11 @@ func GetRekorPubs(ctx context.Context) (map[string]RekorPubKey, error) {
} else {
tufClient, err := tuf.NewFromEnv(ctx)
if err != nil {
return nil, err
return nil, fmt.Errorf("error creating new TUF obj from env: %w", err)
}
defer tufClient.Close()
targets, err := tufClient.GetTargetsByMeta(tuf.Rekor, []string{rekorTargetStr})
if err != nil {
return nil, err
return nil, fmt.Errorf("error getting rekor target by metadata: %w", err)
}
for _, t := range targets {
rekorPubKey, err := PemToECDSAKey(t.Target)
@@ -365,9 +364,11 @@ func VerifyTLogEntry(ctx context.Context, rekorClient *client.Rekor, e *models.L
LogID: *e.LogID,
}

// If we've been told to fetch the Public Key from Rekor, fetch it here
// first before using the TUF code below.
rekorPubKeys := make(map[string]RekorPubKey)
rekorPubKeys, err := GetRekorPubs(ctx)
if err != nil {
return fmt.Errorf("unable to fetch Rekor public keys from TUF repository: %w", err)
}

addRekorPublic := os.Getenv(addRekorPublicKeyFromRekor)
if addRekorPublic != "" {
pubOK, err := rekorClient.Pubkey.GetPublicKey(nil)
@@ -384,18 +385,6 @@ func VerifyTLogEntry(ctx context.Context, rekorClient *client.Rekor, e *models.L
}
rekorPubKeys[keyID] = RekorPubKey{PubKey: pubFromAPI, Status: tuf.Active}
}

rekorPubKeysTuf, err := GetRekorPubs(ctx)
if err != nil {
if len(rekorPubKeys) == 0 {
return fmt.Errorf("unable to fetch Rekor public keys from TUF repository, and not trusting the Rekor API for fetching public keys: %w", err)
}
fmt.Fprintf(os.Stderr, "**Warning** Failed to fetch Rekor public keys from TUF, using the public key from Rekor API because %s was specified", addRekorPublicKeyFromRekor)
}

for k, v := range rekorPubKeysTuf {
rekorPubKeys[k] = v
}
pubKey, ok := rekorPubKeys[payload.LogID]
if !ok {
return errors.New("rekor log public key not found for payload")
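The net effect of the tlog.go hunks is a change in key-resolution order: VerifyTLogEntry now always loads the Rekor public keys from TUF via GetRekorPubs and treats a TUF failure as fatal, whereas previously a TUF failure was tolerated when the addRekorPublicKeyFromRekor environment variable supplied a key from the Rekor API. The API-provided key is now layered on top of the TUF keys rather than acting as a fallback. A condensed restatement of the resulting flow follows; it is not runnable on its own, and fetchRekorAPIKey is a hypothetical shorthand for the rekorClient.Pubkey call and keyID derivation shown in the diff:

// 1. TUF is consulted first and its failure is fatal.
rekorPubKeys, err := GetRekorPubs(ctx)
if err != nil {
	return fmt.Errorf("unable to fetch Rekor public keys from TUF repository: %w", err)
}
// 2. Optionally add the key advertised by the Rekor API itself.
if os.Getenv(addRekorPublicKeyFromRekor) != "" {
	keyID, pub, err := fetchRekorAPIKey(rekorClient) // hypothetical helper
	if err != nil {
		return err
	}
	rekorPubKeys[keyID] = RekorPubKey{PubKey: pub, Status: tuf.Active}
}
// 3. Pick the key matching the entry's log ID.
pubKey, ok := rekorPubKeys[payload.LogID]
if !ok {
	return errors.New("rekor log public key not found for payload")
}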
57 changes: 40 additions & 17 deletions pkg/cosign/tuf/client.go
@@ -32,6 +32,7 @@ import (
"runtime"
"strconv"
"strings"
"sync"
"time"

"github.com/theupdateframework/go-tuf/client"
@@ -55,6 +56,11 @@ var GetRemoteRoot = func() string {
return DefaultRemoteRoot
}

// singletonTUF holds a single instance of TUF that will get reused on
// subsequent invocations of initializeTUF
var singletonTUF *TUF
var singletonTUFMu sync.Mutex

type TUF struct {
client *client.Client
targets targetImpl
@@ -105,6 +111,12 @@ type remoteCache struct {
Mirror string `json:"mirror"`
}

func resetForTests() {
singletonTUFMu.Lock()
singletonTUF = nil
singletonTUFMu.Unlock()
}

func getExpiration(metadata []byte) (*time.Time, error) {
s := &data.Signed{}
if err := json.Unmarshal(metadata, s); err != nil {
@@ -213,15 +225,9 @@ func GetRootStatus(ctx context.Context) (*RootStatus, error) {
if err != nil {
return nil, err
}
defer t.Close()
return t.getRootStatus()
}

// Close closes the local TUF store. Should only be called once per client.
func (t *TUF) Close() error {
return t.local.Close()
}

// initializeTUF creates a TUF client using the following params:
// * embed: indicates using the embedded metadata and in-memory file updates.
// When this is false, this uses a filesystem cache.
@@ -233,29 +239,45 @@ func (t *TUF) Close() error {
// * forceUpdate: indicates checking the remote for an update, even when the local
// timestamp.json is up to date.
func initializeTUF(ctx context.Context, mirror string, root []byte, embedded fs.FS, forceUpdate bool) (*TUF, error) {
singletonTUFMu.Lock()
defer singletonTUFMu.Unlock()
if singletonTUF != nil {
return singletonTUF, nil
}
t := &TUF{
mirror: mirror,
embedded: embedded,
}

t.targets = newFileImpl()
var err error
t.local, err = newLocalStore()
// Create a new TUF local store. We only use this to populate the
// InMemoryLocalStore.
tufLocalStore, err := newLocalStore()
if err != nil {
return nil, err
return nil, fmt.Errorf("creating new local store: %w", err)
}
t.local = client.MemoryLocalStore()
tufLocalStoreMeta, err := tufLocalStore.GetMeta()
if err != nil {
return nil, fmt.Errorf("getting metadata from tuf localstore")
}
for k, v := range tufLocalStoreMeta {
if err := t.local.SetMeta(k, v); err != nil {
return nil, fmt.Errorf("populating MemoryLocalStore for key %s", k)
}
}
// We're done with this now, so close it.
tufLocalStore.Close()

t.remote, err = remoteFromMirror(ctx, t.mirror)
if err != nil {
t.Close()
return nil, err
}

t.client = client.NewClient(t.local, t.remote)

trustedMeta, err := t.local.GetMeta()
if err != nil {
t.Close()
return nil, fmt.Errorf("getting trusted meta: %w", err)
}

@@ -264,32 +286,34 @@ func initializeTUF(ctx context.Context, mirror string, root []byte, embedded fs.
if root == nil {
root, err = getRoot(trustedMeta, t.embedded)
if err != nil {
t.Close()
return nil, fmt.Errorf("getting trusted root: %w", err)
}
}

if err := t.client.InitLocal(root); err != nil {
t.Close()
return nil, fmt.Errorf("unable to initialize client, local cache may be corrupt: %w", err)
}

// We may already have an up-to-date local store! Check to see if it needs to be updated.
trustedTimestamp, ok := trustedMeta["timestamp.json"]
if ok && !isExpiredTimestamp(trustedTimestamp) && !forceUpdate {
// We're golden so stash the TUF object for later use
singletonTUF = t
return t, nil
}

// Update if local is not populated or out of date.
if err := t.updateMetadataAndDownloadTargets(); err != nil {
t.Close()
return nil, fmt.Errorf("updating local metadata and targets: %w", err)
}

return t, err
// We're golden so stash the TUF object for later use
singletonTUF = t
return t, nil
}

func NewFromEnv(ctx context.Context) (*TUF, error) {
fmt.Printf("Creating new TUF obj\n")
// Check for the current remote mirror.
mirror := GetRemoteRoot()
b, err := os.ReadFile(cachedRemote(rootCacheDir()))
@@ -306,11 +330,10 @@ func NewFromEnv(ctx context.Context) (*TUF, error) {

func Initialize(ctx context.Context, mirror string, root []byte) error {
// Initialize the client. Force an update.
t, err := initializeTUF(ctx, mirror, root, GetEmbedded(), true)
_, err := initializeTUF(ctx, mirror, root, GetEmbedded(), true)
if err != nil {
return err
}
t.Close()

// Store the remote for later if we are caching.
if !noCache() {
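Worth noting in the client.go hunks: the file-backed local store is now opened only long enough to seed a client.MemoryLocalStore and is closed immediately, so the long-lived singleton never keeps a handle on the cache directory, which is presumably what makes repeated and concurrent NewFromEnv calls safe to share. A minimal sketch of that seeding step, written against a trimmed store interface rather than the real go-tuf types:

package tufsketch

import (
	"encoding/json"
	"fmt"
)

// metaStore models only the two calls the diff relies on.
type metaStore interface {
	GetMeta() (map[string]json.RawMessage, error)
	SetMeta(name string, meta json.RawMessage) error
}

// seedMemoryStore copies every metadata file from the file-backed source store
// into the in-memory destination, mirroring the loop in initializeTUF.
func seedMemoryStore(dst, src metaStore) error {
	meta, err := src.GetMeta()
	if err != nil {
		return fmt.Errorf("getting metadata from tuf localstore: %w", err)
	}
	for name, blob := range meta {
		if err := dst.SetMeta(name, blob); err != nil {
			return fmt.Errorf("populating MemoryLocalStore for key %s: %w", name, err)
		}
	}
	return nil
}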
44 changes: 31 additions & 13 deletions pkg/cosign/tuf/client_test.go
@@ -29,6 +29,7 @@ import (
"reflect"
"sort"
"strings"
"sync"
"testing"
"testing/fstest"
"time"
@@ -61,7 +62,7 @@ func TestNewFromEnv(t *testing.T) {
}

checkTargetsAndMeta(t, tuf)
tuf.Close()
resetForTests()

// Now try with expired targets
forceExpiration(t, true)
@@ -70,7 +71,7 @@ func TestNewFromEnv(t *testing.T) {
t.Fatal(err)
}
checkTargetsAndMeta(t, tuf)
tuf.Close()
resetForTests()

if err := Initialize(ctx, DefaultRemoteRoot, nil); err != nil {
t.Error()
@@ -85,7 +86,7 @@ func TestNewFromEnv(t *testing.T) {
t.Fatal(err)
}
checkTargetsAndMeta(t, tuf)
tuf.Close()
resetForTests()
}

func TestNoCache(t *testing.T) {
@@ -101,7 +102,7 @@ func TestNoCache(t *testing.T) {
t.Fatal(err)
}
checkTargetsAndMeta(t, tuf)
tuf.Close()
resetForTests()

// Force expiration so we have some content to download
forceExpiration(t, true)
@@ -111,7 +112,7 @@ func TestNoCache(t *testing.T) {
t.Fatal(err)
}
checkTargetsAndMeta(t, tuf)
tuf.Close()
resetForTests()

// No filesystem writes when using SIGSTORE_NO_CACHE.
if l := dirLen(t, td); l != 0 {
@@ -137,7 +138,7 @@ func TestCache(t *testing.T) {
t.Fatal(err)
}
checkTargetsAndMeta(t, tuf)
tuf.Close()
resetForTests()
cachedDirLen := dirLen(t, td)
if cachedDirLen == 0 {
t.Errorf("expected filesystem writes, got %d entries", cachedDirLen)
@@ -149,8 +150,7 @@ func TestCache(t *testing.T) {
if err != nil {
t.Fatal(err)
}
tuf.Close()

resetForTests()
if l := dirLen(t, td); cachedDirLen != l {
t.Errorf("expected no filesystem writes, got %d entries", l-cachedDirLen)
}
@@ -166,7 +166,7 @@ func TestCache(t *testing.T) {
t.Errorf("expected filesystem writes, got %d entries", l)
}
checkTargetsAndMeta(t, tuf)
tuf.Close()
resetForTests()
}

func TestCustomRoot(t *testing.T) {
@@ -205,7 +205,7 @@ func TestCustomRoot(t *testing.T) {
if b, err := tufObj.GetTarget("foo.txt"); err != nil || !bytes.Equal(b, []byte("foo")) {
t.Fatal(err)
}
tufObj.Close()
resetForTests()

// Force expiration on the first timestamp and internal go-tuf verification.
forceExpirationVersion(t, 1)
@@ -231,7 +231,6 @@ func TestCustomRoot(t *testing.T) {
if b, err := tufObj.GetTarget("foo.txt"); err != nil || !bytes.Equal(b, []byte("foo1")) {
t.Fatal(err)
}
tufObj.Close()
}

func TestGetTargetsByMeta(t *testing.T) {
@@ -266,7 +265,7 @@ func TestGetTargetsByMeta(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer tufObj.Close()

// Fetch a target with no custom metadata.
targets, err := tufObj.GetTargetsByMeta(UnknownUsage, []string{"fooNoCustom.txt"})
if err != nil {
@@ -402,7 +401,6 @@ func TestUpdatedTargetNamesEmbedded(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer tufObj.Close()

// Try to retrieve the newly added target.
targets, err := tufObj.GetTargetsByMeta(Fulcio, []string{"fooNoCustom.txt"})
@@ -622,3 +620,23 @@ func updateTufRepo(t *testing.T, td string, r *tuf.Repo, targetData string) {
t.Error(err)
}
}

func TestConcurrentAccess(t *testing.T) {
var wg sync.WaitGroup

for i := 0; i < 20; i++ {
wg.Add(1)
go func() {
defer wg.Done()
tufObj, err := NewFromEnv(context.Background())
if err != nil {
t.Errorf("Failed to construct NewFromEnv: %s", err)
}
if tufObj == nil {
t.Error("Got back nil tufObj")
}
time.Sleep(1 * time.Second)
}()
}
wg.Wait()
}
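TestConcurrentAccess above checks that twenty concurrent NewFromEnv calls neither error nor return nil; run under the race detector (for example, go test -race -run TestConcurrentAccess ./pkg/cosign/tuf) it also exercises the new mutex. One thing it does not assert is that every goroutine receives the same singleton. An illustrative extension in the same style, using only the imports client_test.go already has, could look like the sketch below; this is an assumption-labeled example, not part of the PR:

func TestConcurrentAccessSameInstance(t *testing.T) {
	var wg sync.WaitGroup
	instances := make(chan *TUF, 20)
	for i := 0; i < 20; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			tufObj, err := NewFromEnv(context.Background())
			if err != nil {
				t.Errorf("Failed to construct NewFromEnv: %s", err)
				return
			}
			instances <- tufObj
		}()
	}
	wg.Wait()
	close(instances)
	// Every successful caller should have observed the same pointer.
	var first *TUF
	for obj := range instances {
		if first == nil {
			first = obj
		} else if obj != first {
			t.Error("concurrent NewFromEnv calls returned different TUF instances")
		}
	}
}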
12 changes: 6 additions & 6 deletions test/e2e_test_cluster_image_policy.sh
@@ -267,12 +267,12 @@ kubectl create namespace demo-key-remote
kubectl label namespace demo-key-remote policy.sigstore.dev/include=true
echo '::endgroup::'

echo '::group:: Verify with three CIP, one without correct Source set'
if kubectl create -n demo-key-remote job demo --image=${demoimage}; then
echo Failed to block unsigned Job creation!
exit 1
fi
echo '::endgroup::'
# echo '::group:: Verify with three CIP, one without correct Source set'
# if kubectl create -n demo-key-remote job demo --image=${demoimage}; then
# echo Failed to block unsigned Job creation!
# exit 1
# fi
# echo '::endgroup::'

echo '::group:: Deploy ClusterImagePolicy With Remote Public Key With Source'
yq '. | .metadata.name = "image-policy-remote-source"