diff --git a/Makefile b/Makefile index 9aac36dc0b..e2cf39b15e 100644 --- a/Makefile +++ b/Makefile @@ -56,9 +56,13 @@ help: ## Display this help ## -------------------------------------- .PHONY: test -test: ## Run the script check-everything.sh which will check all. +test: test-tools ## Run the script check-everything.sh which will check all. TRACE=1 ./hack/check-everything.sh +.PHONY: test-tools +test-tools: ## tests the tools codebase (setup-envtest) + cd tools/setup-envtest && go test ./... + ## -------------------------------------- ## Binaries ## -------------------------------------- @@ -76,10 +80,17 @@ $(CONTROLLER_GEN): $(TOOLS_DIR)/go.mod # Build controller-gen from tools folder. ## Linting ## -------------------------------------- -.PHONY: lint -lint: $(GOLANGCI_LINT) ## Lint codebase. +.PHONY: lint-libs +lint-libs: $(GOLANGCI_LINT) ## Lint library codebase. $(GOLANGCI_LINT) run -v +.PHONY: lint-tools +lint-tools: $(GOLANGCI_LINT) ## Lint tools codebase. + cd tools/setup-envtest && $(GOLANGCI_LINT) run -v + +.PHONY: lint +lint: lint-libs lint-tools + ## -------------------------------------- ## Generate ## -------------------------------------- diff --git a/hack/check-everything.sh b/hack/check-everything.sh index a444f6206b..f0367ac17f 100755 --- a/hack/check-everything.sh +++ b/hack/check-everything.sh @@ -20,16 +20,27 @@ set -o pipefail hack_dir=$(dirname ${BASH_SOURCE}) source ${hack_dir}/common.sh -source ${hack_dir}/setup-envtest.sh tmp_root=/tmp kb_root_dir=$tmp_root/kubebuilder ENVTEST_K8S_VERSION=${ENVTEST_K8S_VERSION:-"1.19.2"} -fetch_envtest_tools "$kb_root_dir" -fetch_envtest_tools "${hack_dir}/../pkg/internal/testing/integration/assets" -setup_envtest_env "$kb_root_dir" +# set up envtest tools if necessary + +header_text "installing envtest tools@${ENVTEST_K8S_VERSION} with setup-envtest if necessary" +tmp_bin=/tmp/cr-tests-bin +( + # don't presume to install for the user + cd ${hack_dir}/../tools/setup-envtest + GOBIN=${tmp_bin} go install . +) +source <(${tmp_bin}/setup-envtest use --use-env -p env ${ENVTEST_K8S_VERSION}) + +# link the assets into integration +for tool in kube-apiserver etcd kubectl; do + ln -f -s "${KUBEBUILDER_ASSETS:?unable find envtest assets}/${tool}" "${hack_dir}/../pkg/internal/testing/integration/assets/bin/${tool}" +done ${hack_dir}/verify.sh ${hack_dir}/test-all.sh diff --git a/hack/setup-envtest.sh b/hack/setup-envtest.sh deleted file mode 100755 index dbbd206352..0000000000 --- a/hack/setup-envtest.sh +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2020 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -o errexit -set -o pipefail - -# Turn colors in this script off by setting the NO_COLOR variable in your -# environment to any value: -# -# $ NO_COLOR=1 test.sh -NO_COLOR=${NO_COLOR:-""} -if [ -z "$NO_COLOR" ]; then - header=$'\e[1;33m' - reset=$'\e[0m' -else - header='' - reset='' -fi - -function header_text { - echo "$header$*$reset" -} - -function setup_envtest_env { - header_text "setting up env vars" - - # Setup env vars - KUBEBUILDER_ASSETS=${KUBEBUILDER_ASSETS:-""} - if [[ -z "${KUBEBUILDER_ASSETS}" ]]; then - export KUBEBUILDER_ASSETS=$1/bin - fi -} - -# fetch k8s API gen tools and make it available under envtest_root_dir/bin. -# -# Skip fetching and untaring the tools by setting the SKIP_FETCH_TOOLS variable -# in your environment to any value: -# -# $ SKIP_FETCH_TOOLS=1 ./check-everything.sh -# -# If you skip fetching tools, this script will use the tools already on your -# machine. -function fetch_envtest_tools { - SKIP_FETCH_TOOLS=${SKIP_FETCH_TOOLS:-""} - if [ -n "$SKIP_FETCH_TOOLS" ]; then - return 0 - fi - - tmp_root=/tmp - envtest_root_dir=$tmp_root/envtest - - k8s_version="${ENVTEST_K8S_VERSION:-1.19.2}" - goarch="$(go env GOARCH)" - goos="$(go env GOOS)" - - if [[ ("$goos" != "linux" && "$goos" != "darwin") || ("$goos" == "darwin" && "$goarch" != "amd64") ]]; then - echo "OS '$goos' with '$goarch' arch is not supported. Aborting." >&2 - return 1 - fi - - local dest_dir="${1}" - - # use the pre-existing version in the temporary folder if it matches our k8s version - if [[ -x "${dest_dir}/bin/kube-apiserver" ]]; then - version=$("${dest_dir}"/bin/kube-apiserver --version) - if [[ $version == *"${k8s_version}"* ]]; then - header_text "Using cached envtest tools from ${dest_dir}" - return 0 - fi - fi - - header_text "fetching envtest tools@${k8s_version} (into '${dest_dir}')" - envtest_tools_archive_name="kubebuilder-tools-$k8s_version-$goos-$goarch.tar.gz" - envtest_tools_download_url="https://storage.googleapis.com/kubebuilder-tools/$envtest_tools_archive_name" - - envtest_tools_archive_path="$tmp_root/$envtest_tools_archive_name" - if [ ! -f $envtest_tools_archive_path ]; then - curl -sL ${envtest_tools_download_url} -o "$envtest_tools_archive_path" - fi - - mkdir -p "${dest_dir}" - tar -C "${dest_dir}" --strip-components=1 -zvxf "$envtest_tools_archive_path" -} diff --git a/tools/setup-envtest/README.md b/tools/setup-envtest/README.md new file mode 100644 index 0000000000..0a497c3ec4 --- /dev/null +++ b/tools/setup-envtest/README.md @@ -0,0 +1,125 @@ +# Envtest Binaries Manager + +This is a small tool that manages binaries for envtest. It can be used to +download new binaries, list currently installed and available ones, and +clean up versions. + +To use it, just go-install it on 1.16+ (it's a separate, self-contained +module): + +```shell +go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest +``` + +For full documentation, run it with the `--help` flag, but here are some +examples: + +```shell +# download the latest envtest, and print out info about it +setup-envtest use + +# download the latest 1.19 envtest, and print out the path +setup-envtest use -p path 1.19.x! 
+
+# switch to the most recent 1.21 envtest on disk
+source <(setup-envtest use -i -p env 1.21.x)
+
+# list all available local versions for darwin/amd64
+setup-envtest list -i --os darwin --arch amd64
+
+# remove all versions older than 1.16 from disk
+setup-envtest cleanup <1.16
+
+# use the value from $KUBEBUILDER_ASSETS if set, otherwise follow the normal
+# logic for 'use'
+setup-envtest --use-env
+
+# use the value from $KUBEBUILDER_ASSETS if set, otherwise use the latest
+# installed version
+setup-envtest use -i --use-env
+
+# sideload a pre-downloaded tarball as Kubernetes 1.16.2 into our store
+setup-envtest sideload 1.16.2 < downloaded-envtest.tar.gz
+```
+
+## Where does it put all those binaries?
+
+By default, binaries are stored in a subdirectory of an OS-specific data
+directory, as per the OS's conventions.
+
+On Linux, this is `$XDG_DATA_HOME`; on Windows, `%LocalAppData%`; and on
+OSX, `~/Library/Application Support`.
+
+There's an overall folder that holds all files, and inside that is
+a folder for each version/platform pair. The exact directory structure is
+not guaranteed, except that the leaf directory will contain the names
+expected by envtest. You should always use `setup-envtest use` (generally
+with the `-p path` or `-p env` flags) to get the directory that you should
+use.
+
+## Why do I have to do that `source <(blah blah blah)` thing?
+
+This is a normal binary, not a shell script, so it can't set its parent
+process's environment variables. If you use it by hand a lot and want
+to save the typing, you could put something like the following in your
+`~/.zshrc` (or the equivalent for bash/fish/etc., adapted to that shell's
+syntax):
+
+```shell
+setup-envtest() {
+  if (($@[(Ie)use])); then
+    source <($GOPATH/bin/setup-envtest "$@" -p env)
+  else
+    $GOPATH/bin/setup-envtest "$@"
+  fi
+}
+```
+
+## What if I don't want to talk to the internet?
+
+There are a few options.
+
+First, you'll probably want to set the `-i/--installed-only` flag. If you
+want to avoid forgetting to set this flag, set the `ENVTEST_INSTALLED_ONLY`
+env variable, which will switch that flag on by default.
+
+Then, you have a few options for managing your binaries:
+
+- If you don't *really* want to manage binaries with this tool, or you want
+  to respect the $KUBEBUILDER_ASSETS variable if it's set to something
+  outside the store, use the `use --use-env -i` command.
+
+  `--use-env` makes the command unconditionally use the value of
+  KUBEBUILDER_ASSETS as long as it contains the required binaries, and
+  `-i` indicates that we only ever want to work with installed binaries
+  (no reaching out to the remote GCS storage).
+
+  As noted above, you can use `ENVTEST_INSTALLED_ONLY=true` to switch `-i`
+  on by default, and you can use `ENVTEST_USE_ENV=true` to switch
+  `--use-env` on by default.
+
+- If you want to use this tool, but download your gzipped tarballs
+  separately, you can use the `sideload` command. You'll need to pass the
+  version you're sideloading as an argument (for example, `sideload 1.16.2`),
+  so the store knows what it's recording.
+
+  After that, it'll be as if you'd installed the binaries with `use`.
+
+- If you want to talk to some internal source, you can use the
+  `--remote-bucket` and `--remote-server` options. The former sets which
+  GCS bucket to download from, and the latter sets the host to talk to as
+  if it were a GCS endpoint.
Theoretically, you could use the latter + version to run an internal "mirror" -- the tool expects + + - `HOST/storage/v1/b/BUCKET/o` to return JSON like + + ```json + {"items": [ + {"name": "kubebuilder-tools-X.Y.Z-os-arch.tar.gz", "md5Hash": ""}, + {"name": "kubebuilder-tools-X.Y.Z-os-arch.tar.gz", "md5Hash": ""}, + ]} + ``` + + - `HOST/storage/v1/b/BUCKET/o/TARBALL_NAME` to return JSON like + `{"name": "kubebuilder-tools-X.Y.Z-os-arch.tar.gz", "md5Hash": ""}` + + - `HOST/storage/v1/b/BUCKET/o/TARBALL_NAME?alt=media` to return the + actual file contents diff --git a/tools/setup-envtest/env/env.go b/tools/setup-envtest/env/env.go new file mode 100644 index 0000000000..0abea29597 --- /dev/null +++ b/tools/setup-envtest/env/env.go @@ -0,0 +1,471 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package env + +import ( + "context" + "errors" + "fmt" + "io" + "io/fs" + "path/filepath" + "sort" + "text/tabwriter" + + "github.com/go-logr/logr" + "github.com/spf13/afero" // too bad fs.FS isn't writable :-/ + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/remote" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// Env represents an environment for downloading and otherwise manipulating +// envtest binaries. +// +// In general, the methods will use the Exit{,Cause} functions from this package +// to indicate errors. Catch them with a `defer HandleExitWithCode()`. +type Env struct { + // the following *must* be set on input + + // Platform is our current platform + Platform versions.PlatformItem + + // VerifiySum indicates whether or not we should run checksums. + VerifySum bool + // NoDownload forces us to not contact GCS, looking only + // at local files instead. + NoDownload bool + // ForceDownload forces us to ignore local files and always + // contact GCS & re-download. + ForceDownload bool + + // Client is our remote client for contacting GCS. + Client *remote.Client + + // Log allows us to log. + Log logr.Logger + + // the following *may* be set on input, or may be discovered + + // Version is the version(s) that we want to download + // (may be automatically retrieved later on). + Version versions.Spec + + // Store is used to load/store entries to/from disk. + Store *store.Store + + // FS is the file system to read from/write to for provisioning temp files + // for storing the archives temporarily. + FS afero.Afero + + // Out is the place to write output text to + Out io.Writer + + // manualPath is the manually discovered path from PathMatches, if + // a non-store path was used. It'll be printed by PrintInfo if present. + manualPath string +} + +// CheckCoherence checks that this environment has filled-out, coherent settings +// (e.g. NoDownload & ForceDownload aren't both set). 
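For reference, here is a minimal sketch of the wiring an `Env` needs before any of its methods can be called -- essentially what `setupEnv` in `main.go` further down in this change does. The log setup and the binaries directory here are illustrative placeholders, not something this change prescribes:

```go
package main

import (
	"os"
	"runtime"

	"github.com/go-logr/zapr"
	"github.com/spf13/afero"
	"go.uber.org/zap"

	envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env"
	"sigs.k8s.io/controller-runtime/tools/setup-envtest/remote"
	"sigs.k8s.io/controller-runtime/tools/setup-envtest/store"
	"sigs.k8s.io/controller-runtime/tools/setup-envtest/versions"
)

func main() {
	// convert Exit/ExitCause panics into real exit codes (see env/exit.go)
	defer envp.HandleExitWithCode()

	zapLog, err := zap.NewDevelopment()
	if err != nil {
		panic(err)
	}
	log := zapr.NewLogger(zapLog)

	env := &envp.Env{
		Log: log,
		Client: &remote.Client{
			Log:    log.WithName("storage-client"),
			Bucket: "kubebuilder-tools",      // default GCS bucket
			Server: "storage.googleapis.com", // default GCS host
		},
		Version: versions.LatestVersion,
		Platform: versions.PlatformItem{
			Platform: versions.Platform{OS: runtime.GOOS, Arch: runtime.GOARCH},
		},
		Store: store.NewAt("/tmp/envtest-binaries"), // placeholder directory
		FS:    afero.Afero{Fs: afero.NewOsFs()},
		Out:   os.Stdout,
	}
	env.CheckCoherence()
}
```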
+func (e *Env) CheckCoherence() { + if e.NoDownload && e.ForceDownload { + Exit(2, "cannot both skip downloading *and* force re-downloading") + } + + if e.Platform.OS == "" || e.Platform.Arch == "" { + Exit(2, "must specify non-empty OS and arch (did you specify bad --os or --arch values?)") + } +} + +func (e *Env) filter() store.Filter { + return store.Filter{Version: e.Version, Platform: e.Platform.Platform} +} + +func (e *Env) item() store.Item { + concreteVer := e.Version.AsConcrete() + if concreteVer == nil || e.Platform.IsWildcard() { + panic("no platform/version set") // unexpected, print stack trace + } + return store.Item{Version: *concreteVer, Platform: e.Platform.Platform} +} + +// ListVersions prints out all available versions matching this Env's +// platform & version selector (respecting NoDownload to figure +// out whether or not to match remote versions) +func (e *Env) ListVersions(ctx context.Context) { + out := tabwriter.NewWriter(e.Out, 4, 4, 2, ' ', 0) + defer out.Flush() + localVersions, err := e.Store.List(ctx, e.filter()) + if err != nil { + ExitCause(2, err, "unable to list installed versions") + } + for _, item := range localVersions { + // already filtered by onDiskVersions + fmt.Fprintf(out, "(installed)\tv%s\t%s\n", item.Version, item.Platform) + } + + if e.NoDownload { + return + } + + remoteVersions, err := e.Client.ListVersions(ctx) + if err != nil { + ExitCause(2, err, "unable list to available versions") + } + + for _, set := range remoteVersions { + if !e.Version.Matches(set.Version) { + continue + } + sort.Slice(set.Platforms, func(i, j int) bool { + return orderPlatforms(set.Platforms[i].Platform, set.Platforms[j].Platform) + }) + for _, plat := range set.Platforms { + if e.Platform.Matches(plat.Platform) { + fmt.Fprintf(out, "(available)\tv%s\t%s\n", set.Version, plat) + } + } + } + +} + +// LatestVersion returns the latest version matching our version selector and +// platform from the remote server, with the correspoding checksum for later +// use as well. +func (e *Env) LatestVersion(ctx context.Context) (versions.Concrete, versions.PlatformItem) { + vers, err := e.Client.ListVersions(ctx) + if err != nil { + ExitCause(2, err, "unable to list versions to find latest one") + } + for _, set := range vers { + if !e.Version.Matches(set.Version) { + e.Log.V(1).Info("skipping non-matching version", "version", set.Version) + continue + } + // double-check that our platform is supported + for _, plat := range set.Platforms { + // NB(directxman12): we're already iterating in order, so no + // need to check if the wildcard is latest vs any + if e.Platform.Matches(plat.Platform) && e.Version.Matches(set.Version) { + return set.Version, plat + } + } + e.Log.Info("latest version not supported for your platform, checking older ones", "version", set.Version, "platform", e.Platform) + } + + Exit(2, "unable to find a version that was supported for platform %s", e.Platform) + return versions.Concrete{}, versions.PlatformItem{} // unreachable, but Go's type system can't express the "never" type +} + +// ExistsAndValid checks if our current (concrete) version & platform +// exist on disk (unless ForceDownload is set, in which cause it always +// returns false). +// +// Must be called after EnsureVersionIsSet so that we have a concrete +// Version selected. Must have a concrete platform, or ForceDownload +// must be set. 
+func (e *Env) ExistsAndValid() bool { + if e.ForceDownload { + // we always want to download, so don't check here + return false + } + + if e.Platform.IsWildcard() { + Exit(2, "you must have a concrete platform with this command -- you cannot use wildcard platforms with fetch or switch") + } + + exists, err := e.Store.Has(e.item()) + if err != nil { + ExitCause(2, err, "unable to check if existing version exists") + } + + if exists { + e.Log.Info("applicable version found on disk", "version", e.Version) + } + return exists +} + +// EnsureVersionIsSet ensures that we have a non-wildcard version +// configured. +// +// If necessary, it will enumerate on-disk and remote versions to accomplish +// this, finding a version that matches our version selector and platform. +// It will always yield a concrete version, it *may* yield a concrete platorm +// as well. +func (e *Env) EnsureVersionIsSet(ctx context.Context) { + if e.Version.AsConcrete() != nil { + return + } + var localVer *versions.Concrete + var localPlat versions.Platform + + items, err := e.Store.List(ctx, e.filter()) + if err != nil { + ExitCause(2, err, "unable to determine installed versions") + } + + for _, item := range items { + if !e.Version.Matches(item.Version) || !e.Platform.Matches(item.Platform) { + e.Log.V(1).Info("skipping version, doesn't match", "version", item.Version, "platform", item.Platform) + continue + } + // NB(directxman12): we're already iterating in order, so no + // need to check if the wildcard is latest vs any + ver := item.Version // copy to avoid referencing iteration variable + localVer = &ver + localPlat = item.Platform + break + } + + if e.NoDownload || !e.Version.CheckLatest { + // no version specified, but we either + // + // a) shouldn't contact remote + // b) don't care to find the absolute latest + // + // so just find the latest local version + if localVer != nil { + e.Version.MakeConcrete(*localVer) + e.Platform.Platform = localPlat + return + } + if e.NoDownload { + Exit(2, "no applicable on-disk versions for %s found, you'll have to download one, or run list -i to see what you do have", e.Platform) + } + // if we didn't ask for the latest version, but don't have anything + // available, try the internet ;-) + } + + // no version specified and we need the latest in some capacity, so find latest from remote + // so find the latest local first, then compare it to the latest remote, and use whichever + // of the two is more recent. + e.Log.Info("no version specified, finding latest") + serverVer, platform := e.LatestVersion(ctx) + + // if we're not forcing a download, and we have a newer local version, just use that + if !e.ForceDownload && localVer != nil && localVer.NewerThan(serverVer) { + e.Platform.Platform = localPlat // update our data with md5 + e.Version.MakeConcrete(*localVer) + return + } + + // otherwise, use the new version from the server + e.Platform = platform // update our data with md5 + e.Version.MakeConcrete(serverVer) +} + +// Fetch ensures that the requested platform and version are on disk. +// You must call EnsureVersionIsSet before calling this method. +// +// If ForceDownload is set, we always download, otherwise we only download +// if we're missing the version on disk. 
+func (e *Env) Fetch(ctx context.Context) { + log := e.Log.WithName("fetch") + + // if we didn't just fetch it, grab the sum to verify + if e.VerifySum && e.Platform.MD5 == "" { + if err := e.Client.FetchSum(ctx, *e.Version.AsConcrete(), &e.Platform); err != nil { + ExitCause(2, err, "unable to fetch checksum for requested version") + } + } + if !e.VerifySum { + e.Platform.MD5 = "" // skip verification + } + + var packedPath string + + // cleanup on error (needs to be here so it will happen after the other defers) + defer e.cleanupOnError(func() { + if packedPath != "" { + e.Log.V(1).Info("cleaning up downloaded archive", "path", packedPath) + if err := e.FS.Remove(packedPath); err != nil && !errors.Is(err, fs.ErrNotExist) { + e.Log.Error(err, "unable to clean up archive path", "path", packedPath) + } + } + }) + + archiveOut, err := e.FS.TempFile("", "*-"+e.Platform.ArchiveName(*e.Version.AsConcrete())) + if err != nil { + ExitCause(2, err, "unable to open file to write downloaded archive to") + } + defer archiveOut.Close() + packedPath = archiveOut.Name() + log.V(1).Info("writing downloaded archive", "path", packedPath) + + if err := e.Client.GetVersion(ctx, *e.Version.AsConcrete(), e.Platform, archiveOut); err != nil { + ExitCause(2, err, "unable to download requested version") + } + log.V(1).Info("downloaded archive", "path", packedPath) + + if err := archiveOut.Sync(); err != nil { // sync before reading back + ExitCause(2, err, "unable to flush downloaded archive file") + } + if _, err := archiveOut.Seek(0, 0); err != nil { + ExitCause(2, err, "unable to jump back to beginning of archive file to unzip") + } + + if err := e.Store.Add(ctx, e.item(), archiveOut); err != nil { + ExitCause(2, err, "unable to store version to disk") + } + + log.V(1).Info("removing archive from disk", "path", packedPath) + if err := e.FS.Remove(packedPath); err != nil { + // don't bail, this isn't fatal + log.Error(err, "unable to remove downloaded archive", "path", packedPath) + } +} + +// cleanup on error cleans up if we hit an exitCode error. +// +// Use it in a defer. +func (e *Env) cleanupOnError(extraCleanup func()) { + cause := recover() + if cause == nil { + return + } + // don't panic in a panic handler + var exit *exitCode + if asExit(cause, &exit) && exit.code != 0 { + e.Log.Info("cleaning up due to error") + // we already log in the function, and don't want to panic, so + // ignore the error + extraCleanup() + } + panic(cause) // re-start the panic now that we're done +} + +// Remove removes the data for our version selector & platform from disk. +func (e *Env) Remove(ctx context.Context) { + items, err := e.Store.Remove(ctx, e.filter()) + for _, item := range items { + fmt.Fprintf(e.Out, "removed %s\n", item) + } + if err != nil { + ExitCause(2, err, "unable to remove all requested version(s)") + } +} + +// PrintInfo prints out information about a single, current version +// and platform, according to the given formatting info. 
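One subtlety in `Fetch` above: `cleanupOnError` is deferred *before* `archiveOut.Close()`, and deferred calls run last-in-first-out, so on a panic the temp file is already closed by the time the cleanup tries to remove it. A tiny self-contained sketch of that ordering (illustrative only, not part of this change):

```go
package main

import "fmt"

func work() {
	// registered first => runs last on the way out
	defer func() {
		if cause := recover(); cause != nil {
			fmt.Println("cleanup after error:", cause)
			panic(cause) // re-raise, as cleanupOnError does
		}
	}()
	// registered second => runs first, so resources are closed before cleanup
	defer fmt.Println("close temp file")

	panic("simulated download failure")
}

func main() {
	defer func() {
		if cause := recover(); cause != nil {
			fmt.Println("handled at top level:", cause)
		}
	}()
	work()
}
```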
+func (e *Env) PrintInfo(printFmt PrintFormat) { + // use the manual path if it's set, otherwise use the standard path + path := e.manualPath + if e.manualPath == "" { + item := e.item() + var err error + path, err = e.Store.Path(item) + if err != nil { + ExitCause(2, err, "unable to get path for version %s", item) + } + } + switch printFmt { + case PrintOverview: + fmt.Fprintf(e.Out, "Version: %s\n", e.Version) + fmt.Fprintf(e.Out, "OS/Arch: %s\n", e.Platform) + if e.Platform.MD5 != "" { + fmt.Fprintf(e.Out, "md5: %s\n", e.Platform.MD5) + } + fmt.Fprintf(e.Out, "Path: %s\n", path) + case PrintPath: + fmt.Fprint(e.Out, path) // NB(directxman12): no newline -- want the bare path here + case PrintEnv: + fmt.Fprintf(e.Out, "export KUBEBUILDER_ASSETS=%s\n", path) + default: + panic(fmt.Sprintf("unexpected print format %v", printFmt)) + } +} + +// EnsureBaseDirs ensures that the base packed and unpacked directories +// exist. +// +// This should be the first thing called after CheckCoherence. +func (e *Env) EnsureBaseDirs(ctx context.Context) { + if err := e.Store.Initialize(ctx); err != nil { + ExitCause(2, err, "unable to make sure store is initialized") + } +} + +// Sideload takes an input stream, and loads it as if it had been a downloaded .tar.gz file +// for the current *concrete* version and platform. +func (e *Env) Sideload(ctx context.Context, input io.Reader) { + log := e.Log.WithName("sideload") + if e.Version.AsConcrete() == nil || e.Platform.IsWildcard() { + Exit(2, "must specify a concrete version and platform to sideload. Make sure you've passed a version, like 'sideload 1.21.0'") + } + log.V(1).Info("sideloading from input stream to version", "version", e.Version, "platform", e.Platform) + if err := e.Store.Add(ctx, e.item(), input); err != nil { + ExitCause(2, err, "unable to sideload item to disk") + } +} + +var ( + // expectedExectuables are the executables that are checked in PathMatches + // for non-store paths. + expectedExecutables = []string{ + "kube-apiserver", + "etcd", + "kubectl", + } +) + +// PathMatches checks if the path (e.g. from the environment variable) +// matches this version & platform selector, and if so, returns true. 
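Since `PrintPath` above writes the bare directory with no trailing newline, the output is easy to consume programmatically as well as from a shell. A hypothetical sketch of a test harness doing exactly that (it assumes a `setup-envtest` binary is already on `PATH`; nothing in this change requires this pattern):

```go
package example_test

import (
	"os"
	"os/exec"
	"strings"
	"testing"
)

func TestMain(m *testing.M) {
	if os.Getenv("KUBEBUILDER_ASSETS") == "" {
		// "-p path" prints only the assets directory, nothing else
		out, err := exec.Command("setup-envtest", "use", "-p", "path", "1.19.x").Output()
		if err != nil {
			panic(err)
		}
		os.Setenv("KUBEBUILDER_ASSETS", strings.TrimSpace(string(out)))
	}
	os.Exit(m.Run())
}
```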
+func (e *Env) PathMatches(value string) bool { + e.Log.V(1).Info("checking if (env var) path represents our desired version", "path", value) + if value == "" { + // if we're unset, + return false + } + + if e.versionFromPathName(value) { + e.Log.V(1).Info("path appears to be in our store, using that info", "path", value) + return true + } + + e.Log.V(1).Info("path is not in our store, checking for binaries", "path", value) + for _, expected := range expectedExecutables { + _, err := e.FS.Stat(filepath.Join(value, expected)) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // one of our required binaries is missing, return false + e.Log.V(1).Info("missing required binary in (env var) path", "binary", expected, "path", value) + return false + } + ExitCause(2, err, "unable to check for existence of binary %s from existing (env var) path %s", value, expected) + } + } + + // success, all binaries present + e.Log.V(1).Info("all required binaries present in (env var) path, using that", "path", value) + + // don't bother checking the version, the user explicitly asked us to use this + // we don't know the version, so set it to wildcard + e.Version = versions.AnyVersion + e.Platform.OS = "*" + e.Platform.Arch = "*" + e.manualPath = value + return true +} + +// versionFromPathName checks if the given path's last component looks like one +// of our versions, and, if so, what version it represents. If succesfull, +// it'll set version and platform, and return true. Otherwise it returns +// false. +func (e *Env) versionFromPathName(value string) bool { + baseName := filepath.Base(value) + ver, pl := versions.ExtractWithPlatform(versions.VersionPlatformRE, baseName) + if ver == nil { + // not a version that we can tell + return false + } + + // yay we got a version! + e.Version.MakeConcrete(*ver) + e.Platform.Platform = pl + e.manualPath = value // might be outside our store, set this just in case + + return true +} diff --git a/tools/setup-envtest/env/exit.go b/tools/setup-envtest/env/exit.go new file mode 100644 index 0000000000..44da97dff1 --- /dev/null +++ b/tools/setup-envtest/env/exit.go @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package env + +import ( + "errors" + "fmt" + "os" +) + +// Exit exits with the given code and error message. +// +// Defer HandleExitWithCode in main to catch this and get the right behavior. +func Exit(code int, msg string, args ...interface{}) { + panic(&exitCode{ + code: code, + err: fmt.Errorf(msg, args...), + }) +} + +// ExitCause exits with the given code and error message, automatically +// wrapping the underlying error passed as well. +// +// Defer HandleExitWithCode in main to catch this and get the right behavior. +func ExitCause(code int, err error, msg string, args ...interface{}) { + args = append(args, err) + panic(&exitCode{ + code: code, + err: fmt.Errorf(msg+": %w", args...), + }) +} + +// exitCode is an error that indicates, on a panic, to exit with the given code +// and message. +type exitCode struct { + code int + err error +} + +func (c *exitCode) Error() string { + return fmt.Sprintf("%v (exit code %d)", c.err, c.code) +} +func (c *exitCode) Unwrap() error { + return c.err +} + +// asExit checks if the given (panic) value is an exitCode error, +// and if so stores it in the given pointer. It's roughly analogous +// to errors.As, except it works on recover() values. 
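The intended calling pattern for `Exit`/`ExitCause`: panic with an `*exitCode`, let it unwind past any cleanup defers, and convert it back into a real `os.Exit` at the top of `main` via `HandleExitWithCode` (defined just below). A minimal sketch with a made-up error path:

```go
package main

import (
	"os"

	envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env"
)

func main() {
	// must be the first defer so it runs last and sees the panic
	defer envp.HandleExitWithCode()

	if _, err := os.Stat("/nonexistent/config"); err != nil {
		// prints "unable to load config: ..." to stderr, then exits with code 2
		envp.ExitCause(2, err, "unable to load config")
	}
}
```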
+func asExit(val interface{}, exit **exitCode) bool { + if val == nil { + return false + } + err, isErr := val.(error) + if !isErr { + return false + } + if !errors.As(err, exit) { + return false + } + return true +} + +// HandleExitWithCode handles panics of type exitCode, +// printing the status message and existing with the given +// exit code, or re-raising if not an exitCode error. +// +// This should be the first defer in your main function. +func HandleExitWithCode() { + cause := recover() + if CheckRecover(cause, func(code int, err error) { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(code) + }) { + panic(cause) + } +} + +// CheckRecover checks the value of cause, calling the given callback +// if it's an exitCode error. It returns true if we should re-panic +// the cause. +// +// It's mainly useful for testing, normally you'd use HandleExitWithCode +func CheckRecover(cause interface{}, cb func(int, error)) bool { + if cause == nil { + return false + } + var exitErr *exitCode + if !asExit(cause, &exitErr) { + // re-raise if it's not an exit error + return true + } + + cb(exitErr.code, exitErr.err) + return false +} diff --git a/tools/setup-envtest/env/helpers.go b/tools/setup-envtest/env/helpers.go new file mode 100644 index 0000000000..439293ffb0 --- /dev/null +++ b/tools/setup-envtest/env/helpers.go @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package env + +import ( + "fmt" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// orderPlatforms orders platforms by OS then arch. +func orderPlatforms(first, second versions.Platform) bool { + // sort by OS, then arch + if first.OS != second.OS { + return first.OS < second.OS + } + return first.Arch < second.Arch +} + +// PrintFormat indicates how to print out fetch and switch results. +// It's a valid pflag.Value so it can be used as a flag directly. +type PrintFormat int + +const ( + // PrintOverview prints human-readable data, + // including path, version, arch, and checksum (when available). + PrintOverview PrintFormat = iota + // PrintPath prints *only* the path, with no decoration. + PrintPath + // PrintEnv prints the path with the corresponding env variable, so that + // you can source the output like + // `source $(fetch-envtest switch -p env 1.20.x)` + PrintEnv +) + +func (f PrintFormat) String() string { + switch f { + case PrintOverview: + return "overview" + case PrintPath: + return "path" + case PrintEnv: + return "env" + default: + panic(fmt.Sprintf("unexpected print format %d", int(f))) + } +} + +// Set sets the value of this as a flag. +func (f *PrintFormat) Set(val string) error { + switch val { + case "overview": + *f = PrintOverview + case "path": + *f = PrintPath + case "env": + *f = PrintEnv + default: + return fmt.Errorf("unknown print format %q, use one of overview|path|env", val) + } + return nil +} + +// Type is the type of this value as a flag. 
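`CheckRecover` is the testing-oriented counterpart to `HandleExitWithCode`; a sketch of how a test might use it to assert on the requested exit code without the test process actually exiting (the test body here is hypothetical):

```go
package example_test

import (
	"testing"

	envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env"
)

func TestExitsWithCodeTwo(t *testing.T) {
	defer func() {
		gotCode := -1
		rePanic := envp.CheckRecover(recover(), func(code int, err error) {
			gotCode = code
		})
		if rePanic {
			t.Fatal("panicked with something that wasn't an exit code")
		}
		if gotCode != 2 {
			t.Fatalf("expected exit code 2, got %d", gotCode)
		}
	}()

	// stands in for the code under test
	envp.Exit(2, "simulated failure")
}
```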
+func (PrintFormat) Type() string { + return "{overview|path|env}" +} diff --git a/tools/setup-envtest/go.mod b/tools/setup-envtest/go.mod new file mode 100644 index 0000000000..cdcee04e08 --- /dev/null +++ b/tools/setup-envtest/go.mod @@ -0,0 +1,13 @@ +module sigs.k8s.io/controller-runtime/tools/setup-envtest + +go 1.16 + +require ( + github.com/go-logr/logr v0.4.0 + github.com/go-logr/zapr v0.4.0 + github.com/onsi/ginkgo v1.16.1 + github.com/onsi/gomega v1.11.0 + github.com/spf13/afero v1.6.0 + github.com/spf13/pflag v1.0.5 + go.uber.org/zap v1.16.0 +) diff --git a/tools/setup-envtest/go.sum b/tools/setup-envtest/go.sum new file mode 100644 index 0000000000..0d9a66c095 --- /dev/null +++ b/tools/setup-envtest/go.sum @@ -0,0 +1,138 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= +github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0 
h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.1 h1:foqVmeWDD6yYpK+Yz3fHyNIxFYNxswxqNFjSKe+vI54= +github.com/onsi/ginkgo v1.16.1/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.11.0 h1:+CqWgvj0OZycCaqclBD1pxKHAU+tOkHmQIWvDHq2aug= +github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQLTUg= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/tools/setup-envtest/main.go b/tools/setup-envtest/main.go new file mode 100644 index 0000000000..b4030cb6d6 --- /dev/null +++ b/tools/setup-envtest/main.go @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package main + +import ( 
+ goflag "flag" + "fmt" + "os" + "runtime" + + "github.com/go-logr/logr" + "github.com/go-logr/zapr" + "github.com/spf13/afero" + flag "github.com/spf13/pflag" + "go.uber.org/zap" + + envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/remote" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/workflows" +) + +const ( + // envNoDownload is an env variable that can be set to always force + // the --installed-only, -i flag to be set. + envNoDownload = "ENVTEST_INSTALLED_ONLY" + // envUseEnv is an env variable that can be set to control the --use-env + // flag globally. + envUseEnv = "ENVTEST_USE_ENV" +) + +var ( + force = flag.Bool("force", false, "force re-downloading dependencies, even if they're already present and correct") + installedOnly = flag.BoolP("installed-only", "i", os.Getenv(envNoDownload) != "", + "only look at installed versions -- do not query the remote API server, "+ + "and error out if it would be necessary to") + verify = flag.Bool("verify", true, "verify dependencies while downloading") + useEnv = flag.Bool("use-env", os.Getenv(envUseEnv) != "", "whether to return the value of KUBEBUILDER_ASSETS if it's already set") + + targetOS = flag.String("os", runtime.GOOS, "os to download for (e.g. linux, darwin, for listing operations, use '*' to list all platforms)") + targetArch = flag.String("arch", runtime.GOARCH, "architecture to download for (e.g. amd64, for listing operations, use '*' to list all platforms)") + + // printFormat is the flag value for -p, --print + printFormat = envp.PrintOverview + // zapLvl is the flag value for logging verbosity + zapLvl = zap.WarnLevel + + binDir = flag.String("bin-dir", "", + "directory to store binary assets (default: $OS_SPECIFIC_DATA_DIR/envtest-binaries)") + remoteBucket = flag.String("remote-bucket", "kubebuilder-tools", "remote GCS bucket to download from") + remoteServer = flag.String("remote-server", "storage.googleapis.com", + "remote server to query from. You can override this if you want to run "+ + "an internal storage server instead, or for testing.") +) + +// TODO(directxman12): handle interrupts? + +// setupLogging configures a Zap logger +func setupLogging() logr.Logger { + logCfg := zap.NewDevelopmentConfig() + logCfg.Level = zap.NewAtomicLevelAt(zapLvl) + zapLog, err := logCfg.Build() + if err != nil { + envp.ExitCause(1, err, "who logs the logger errors?") + } + return zapr.NewLogger(zapLog) +} + +// setupEnv initializes the environment from flags. 
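The `-v` flag registration further down in `main` works because `zapcore.Level` (the type of `zapLvl`) already implements the standard `flag.Value` interface, so it can be handed straight to the flag machinery. A standalone sketch of that property (illustrative, not part of this change):

```go
package main

import (
	"flag"
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	lvl := zapcore.WarnLevel // same default as setup-envtest
	// *zapcore.Level has Set and String methods, so it satisfies flag.Value
	flag.Var(&lvl, "v", "logging level (debug, info, warn, error, ...)")
	flag.Parse() // e.g. `go run . -v=debug`
	fmt.Println("effective level:", lvl)
}
```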
+func setupEnv(globalLog logr.Logger, version string) *envp.Env { + log := globalLog.WithName("setup") + if *binDir == "" { + dataDir, err := store.DefaultStoreDir() + if err != nil { + envp.ExitCause(1, err, "unable to deterimine default binaries directory (use --bin-dir to manually override)") + } + + *binDir = dataDir + } + log.V(1).Info("using binaries directory", "dir", *binDir) + + env := &envp.Env{ + Log: globalLog, + Client: &remote.Client{ + Log: globalLog.WithName("storage-client"), + Bucket: *remoteBucket, + Server: *remoteServer, + }, + VerifySum: *verify, + ForceDownload: *force, + NoDownload: *installedOnly, + Platform: versions.PlatformItem{ + Platform: versions.Platform{ + OS: *targetOS, + Arch: *targetArch, + }, + }, + FS: afero.Afero{Fs: afero.NewOsFs()}, + Store: store.NewAt(*binDir), + Out: os.Stdout, + } + + switch version { + case "", "latest": + env.Version = versions.LatestVersion + case "latest-on-disk": + // we sort by version, latest first, so this'll give us the latest on + // disk (as per the contract from env.List & store.List) + env.Version = versions.AnyVersion + env.NoDownload = true + default: + var err error + env.Version, err = versions.FromExpr(version) + if err != nil { + envp.ExitCause(1, err, "version be a valid version, or simply 'latest' or 'latest-on-disk'") + } + } + + env.CheckCoherence() + + return env +} + +func main() { + // exit with appropriate error codes -- this should be the first defer so + // that it's the last one executed. + defer envp.HandleExitWithCode() + + // set up flags + flag.Usage = func() { + name := os.Args[0] + fmt.Fprintf(os.Stderr, "Usage: %s [FLAGS] use|list|cleanup|sideload [VERSION]\n", name) + flag.PrintDefaults() + fmt.Fprintf(os.Stderr, + ` +Note: this command is currently alpha, and the usage/behavior may change from release to release. + +Examples: + + # download the latest envtest, and print out info about it + %[1]s use + + # download the latest 1.19 envtest, and print out the path + %[1]s use -p path 1.19.x! + + # switch to the most recent 1.21 envtest on disk + source <(%[1]s use -i -p env 1.21.x) + + # list all available local versions for darwin/amd64 + %[1]s list -i --os darwin --arch amd64 + + # remove all versions older than 1.16 from disk + %[1]s cleanup <1.16 + + # use the value from $KUBEBUILDER_ASSETS if set, otherwise follow the normal + # logic for 'use' + %[1]s --use-env + + # use the value from $KUBEBUILDER_ASSETS if set, otherwise use the latest + # installed version + %[1]s use -i --use-env + + # sideload a pre-downloaded tarball as Kubernetes 1.16.2 into our store + %[1]s sideload 1.16.2 < downloaded-envtest.tar.gz + +Commands: + + use: + get information for the requested version, downloading it if necessary and allowed. + Needs a concrete platform (no wildcards), but wilcard versions are supported. + + list: + list installed *and* available versions matching the given version & platform. + May have wildcard versions *and* platforms. + If the -i flag is passed, only installed versions are listed. + + cleanup: + remove all versions matching the given version & platform selector. + May have wildcard versions *and* platforms. + + sideload: + reads a .tar.gz file from stdin and expand it into the store. + must have a concrete version and platform. + +Versions: + + Versions take the form of a small subset of semver selectors. + + Basic semver whole versions are accepted: X.Y.Z. + Z may also be '*' or 'x' to match a wildcard. + You may also just write X.Y, which means X.Y.*. 
+ + A version may be prefixed with '~' to match the the most recent Z release + in the given Y release ( [X.Y.Z, X.Y+1.0) ). + + Finally, you may suffix the version with '!' to force checking the + remote API server for the latest version. + + For example: + + 1.16.x / 1.16.* / 1.16 # any 1.16 version + ~1.19.3 # any 1.19 version that's at least 1.19.3 + <1.17 # any release 1.17.x or below + 1.22.x! # the latest one 1.22 release avaible remotely + +Output: + + The fetch & switch commands respect the --print, -p flag. + + overview: human readable information + path: print out the path, by itself + env: print out the path in a form that can be sourced to use that version with envtest + + Other command have human-readable output formats only. + +Environment Variables: + + KUBEBUILDER_ASSETS: + --use-env will check this, and '-p/--print env' will return this. + If --use-env is true and this is set, we won't check our store + for versions -- we'll just immediately return whatever's in + this env var. + + %[2]s: + will switch the default of -i/--installed to true if set to any value + + %[3]s: + will switch the default of --use-env to true if set to any value + +`, name, envNoDownload, envUseEnv) + } + flag.CommandLine.AddGoFlag(&goflag.Flag{Name: "v", Usage: "logging level", Value: &zapLvl}) + flag.VarP(&printFormat, "print", "p", "what info to print after fetch-style commands (overview, path, env)") + needHelp := flag.Bool("help", false, "print out this help text") // register help so that we don't get an error at the end + flag.Parse() + + if *needHelp { + flag.Usage() + envp.Exit(2, "") + } + + // check our argument count + if numArgs := flag.NArg(); numArgs < 1 || numArgs > 2 { + flag.Usage() + envp.Exit(2, "please specify a command to use, and optionally a version selector") + } + + // set up logging + globalLog := setupLogging() + + // set up the environment + var version string + if flag.NArg() > 1 { + version = flag.Arg(1) + } + env := setupEnv(globalLog, version) + + // perform our main set of actions + switch action := flag.Arg(0); action { + case "use": + workflows.Use{ + UseEnv: *useEnv, + PrintFormat: printFormat, + AssetsPath: os.Getenv("KUBEBUILDER_ASSETS"), + }.Do(env) + case "list": + workflows.List{}.Do(env) + case "cleanup": + workflows.Cleanup{}.Do(env) + case "sideload": + workflows.Sideload{ + Input: os.Stdin, + PrintFormat: printFormat, + }.Do(env) + default: + flag.Usage() + envp.Exit(2, "unknown action %q", action) + } +} diff --git a/tools/setup-envtest/remote/client.go b/tools/setup-envtest/remote/client.go new file mode 100644 index 0000000000..f910a2f153 --- /dev/null +++ b/tools/setup-envtest/remote/client.go @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package remote + +import ( + "context" + "crypto/md5" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path" + "sort" + + "github.com/go-logr/logr" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// objectList is the parts we need of the GCS "list-objects-in-bucket" endpoint. +type objectList struct { + Items []bucketObject `json:"items"` + NextPageToken string `json:"nextPageToken"` +} + +// bucketObject is the parts we need of the GCS object metadata. +type bucketObject struct { + Name string `json:"name"` + Hash string `json:"md5Hash"` +} + +// Client is a basic client for fetching versions of the envtest binary archives +// from GCS. +type Client struct { + // Bucket is the bucket to fetch from. 
+ Bucket string + + // Server is the GCS-like storage server + Server string + + // Log allows us to log. + Log logr.Logger + + // Insecure uses http for testing + Insecure bool +} + +func (c *Client) scheme() string { + if c.Insecure { + return "http" + } + return "https" +} + +// ListVersions lists all available tools versions in the given bucket, along +// with supported os/arch combos and the corresponding hash. +// +// The results are sorted with newer versions first. +func (c *Client) ListVersions(ctx context.Context) ([]versions.Set, error) { + loc := &url.URL{ + Scheme: c.scheme(), + Host: c.Server, + Path: path.Join("/storage/v1/b/", c.Bucket, "o"), + } + query := make(url.Values) + + knownVersions := map[versions.Concrete][]versions.PlatformItem{} + for cont := true; cont; { + c.Log.V(1).Info("listing bucket to get versions", "bucket", c.Bucket) + + loc.RawQuery = query.Encode() + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return nil, fmt.Errorf("unable to construct request to list bucket items: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("unable to perform request to list bucket items: %w", err) + } + + err = func() error { + defer resp.Body.Close() + if resp.StatusCode != 200 { + return fmt.Errorf("unable list bucket items -- got status %q from GCS", resp.Status) + } + + var list objectList + if err := json.NewDecoder(resp.Body).Decode(&list); err != nil { + return fmt.Errorf("unable unmarshal bucket items list: %w", err) + } + + // continue listing if needed + cont = list.NextPageToken != "" + query.Set("pageToken", list.NextPageToken) + + for _, item := range list.Items { + ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, item.Name) + if ver == nil { + c.Log.V(1).Info("skipping bucket object -- does not appear to be a versioned tools object", "name", item.Name) + continue + } + c.Log.V(1).Info("found version", "version", ver, "platform", details) + knownVersions[*ver] = append(knownVersions[*ver], versions.PlatformItem{ + Platform: details, + MD5: item.Hash, + }) + } + + return nil + }() + if err != nil { + return nil, err + } + } + + var res []versions.Set + for ver, details := range knownVersions { + res = append(res, versions.Set{Version: ver, Platforms: details}) + } + // sort in inverse order so that the newest one is first + sort.Slice(res, func(i, j int) bool { + first, second := res[i].Version, res[j].Version + return first.NewerThan(second) + }) + + return res, nil +} + +// GetVersion downloads the given concrete version for the given concrete platform, writing it to the out. 
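The client above (together with `GetVersion` and `FetchSum` below) only relies on the three GCS endpoints spelled out in the README, so an internal mirror can be as small as the following sketch. The bucket name, port, and object list are placeholders; note also that the CLI exposes no flag for `Insecure`, so a plain-HTTP mirror like this one is only reachable from code that sets that field directly:

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// object mirrors the subset of GCS object metadata the client decodes.
type object struct {
	Name string `json:"name"`
	Hash string `json:"md5Hash"`
}

func main() {
	objects := []object{
		// an empty md5Hash simply means no checksum verification happens
		{Name: "kubebuilder-tools-1.19.2-linux-amd64.tar.gz", Hash: ""},
	}

	// list endpoint: HOST/storage/v1/b/BUCKET/o
	http.HandleFunc("/storage/v1/b/kubebuilder-tools/o", func(w http.ResponseWriter, r *http.Request) {
		_ = json.NewEncoder(w).Encode(map[string]interface{}{"items": objects})
	})

	// object endpoint: metadata by default, raw tarball bytes with ?alt=media
	http.HandleFunc("/storage/v1/b/kubebuilder-tools/o/", func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("alt") == "media" {
			w.Header().Set("Content-Type", "application/gzip")
			// ... stream the matching tarball here ...
			return
		}
		_ = json.NewEncoder(w).Encode(objects[0])
	})

	log.Fatal(http.ListenAndServe(":8080", nil))
}
```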
+func (c *Client) GetVersion(ctx context.Context, version versions.Concrete, platform versions.PlatformItem, out io.Writer) error { + itemName := platform.ArchiveName(version) + loc := &url.URL{ + Scheme: c.scheme(), + Host: c.Server, + Path: path.Join("/storage/v1/b/", c.Bucket, "o", itemName), + RawQuery: "alt=media", + } + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return fmt.Errorf("unable to construct request to fetch %s: %w", itemName, err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("unable to fetch %s (%s): %w", itemName, req.URL, err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("unable fetch %s (%s) -- got status %q from GCS", itemName, req.URL, resp.Status) + } + + if platform.MD5 != "" { + // stream in chunks to do the checksum, don't load the whole thing into + // memory to avoid causing issues with big files. + buf := make([]byte, 32*1024) // 32KiB, same as io.Copy + checksum := md5.New() + for cont := true; cont; { + amt, err := resp.Body.Read(buf) + if err != nil && err != io.EOF { + return fmt.Errorf("unable read next chunk of %s: %w", itemName, err) + } + if amt > 0 { + // checksum never returns errors according to docs + checksum.Write(buf[:amt]) //nolint errcheck + if _, err := out.Write(buf[:amt]); err != nil { + return fmt.Errorf("unable write next chunk of %s: %w", itemName, err) + } + } + cont = amt > 0 && err != io.EOF + } + + sum := base64.StdEncoding.EncodeToString(checksum.Sum(nil)) + + if sum != platform.MD5 { + return fmt.Errorf("checksum mismatch for %s: %s (computed) != %s (reported from GCS)", itemName, sum, platform.MD5) + } + } else { + if _, err := io.Copy(out, resp.Body); err != nil { + return fmt.Errorf("unable to download %s: %w", itemName, err) + } + } + return nil +} + +// FetchSum fetches the checksum for the given concrete version & platform into +// the given platform item. +func (c *Client) FetchSum(ctx context.Context, ver versions.Concrete, pl *versions.PlatformItem) error { + itemName := pl.ArchiveName(ver) + loc := &url.URL{ + Scheme: c.scheme(), + Host: c.Server, + Path: path.Join("/storage/v1/b/", c.Bucket, "o", itemName), + } + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return fmt.Errorf("unable to construct request to fetch metadata for %s: %w", itemName, err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("unable to fetch metadata for %s: %w", itemName, err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("unable fetch metadata for %s -- got status %q from GCS", itemName, resp.Status) + } + + var item bucketObject + if err := json.NewDecoder(resp.Body).Decode(&item); err != nil { + return fmt.Errorf("unable to unmarshal metadata for %s: %w", itemName, err) + } + + pl.MD5 = item.Hash + return nil +} diff --git a/tools/setup-envtest/store/helpers.go b/tools/setup-envtest/store/helpers.go new file mode 100644 index 0000000000..30902187e9 --- /dev/null +++ b/tools/setup-envtest/store/helpers.go @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package store + +import ( + "errors" + "os" + "path/filepath" + "runtime" +) + +// DefaultStoreDir returns the default location for the store. 
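+// This is where setup-envtest unpacks and caches downloaded envtest binaries.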
+// It's dependent on operating system: +// +// - Windows: %LocalAppData%\kubebuilder-envtest +// - OSX: ~/Library/Application Support/io.kubebuilder.envtest +// - Others: ${XDG_DATA_HOME:-~/.local/share}/kubebuilder-envtest +// +// Otherwise, it errors out. Note that these paths must not be relied upon +// manually. +func DefaultStoreDir() (string, error) { + var baseDir string + + // find the base data directory + switch runtime.GOOS { + case "windows": + baseDir = os.Getenv("LocalAppData") + if baseDir == "" { + return "", errors.New("%LocalAppData% is not defined") + } + case "darwin", "ios": + homeDir := os.Getenv("HOME") + if homeDir == "" { + return "", errors.New("$HOME is not defined") + } + baseDir = filepath.Join(homeDir, "Library/Application Support") + default: + baseDir = os.Getenv("XDG_DATA_HOME") + if baseDir == "" { + homeDir := os.Getenv("HOME") + if homeDir == "" { + return "", errors.New("neither $XDG_DATA_HOME nor $HOME are defined") + } + baseDir = filepath.Join(homeDir, ".local/share") + } + } + + // append our program-specific dir to it (OSX has a slightly different + // convention so try to follow that). + switch runtime.GOOS { + case "darwin", "ios": + return filepath.Join(baseDir, "io.kubebuilder.envtest"), nil + default: + return filepath.Join(baseDir, "kubebuilder-envtest"), nil + } +} diff --git a/tools/setup-envtest/store/store.go b/tools/setup-envtest/store/store.go new file mode 100644 index 0000000000..6220dea70c --- /dev/null +++ b/tools/setup-envtest/store/store.go @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package store + +import ( + "archive/tar" + "compress/gzip" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sort" + + "github.com/go-logr/logr" + "github.com/spf13/afero" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// TODO(directxman12): error messages don't show full path, which is gonna make +// things hard to debug + +// Item is a version-platform pair. +type Item struct { + Version versions.Concrete + Platform versions.Platform +} + +// dirName returns the directory name in the store for this item. +func (i Item) dirName() string { + return i.Platform.BaseName(i.Version) +} +func (i Item) String() string { + return fmt.Sprintf("%s (%s)", i.Version, i.Platform) +} + +// Filter is a version spec & platform selector (i.e. platform +// potentially with wilcards) to filter store items. +type Filter struct { + Version versions.Spec + Platform versions.Platform +} + +// Matches checks if this filter matches the given item. +func (f Filter) Matches(item Item) bool { + return f.Version.Matches(item.Version) && f.Platform.Matches(item.Platform) +} + +// Store knows how to list, load, store, and delete envtest tools. +type Store struct { + // Root is the root FS that the store stores in. You'll probably + // want to use a BasePathFS to scope it down to a particular directory. + // + // Note that if for some reason there are nested BasePathFSes, and they're + // interrupted by a non-BasePathFS, Path won't work properly. + Root afero.Fs +} + +// NewAt creates a new store on disk at the given path. +func NewAt(path string) *Store { + return &Store{ + Root: afero.NewBasePathFs(afero.NewOsFs(), path), + } +} + +// Initialize ensures that the store is all set up on disk, etc. 
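+// Concretely, it currently just ensures that the versioned-binaries directory
+// (k8s/ under the store root) exists.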
+func (s *Store) Initialize(ctx context.Context) error { + logr.FromContext(ctx).V(1).Info("ensuring base binaries dir exists") + if err := s.unpackedBase().MkdirAll("", 0755); err != nil { + return fmt.Errorf("unable to make sure base binaries dir exists: %w", err) + } + return nil +} + +// Has checks if an item exists in the store. +func (s *Store) Has(item Item) (bool, error) { + path := s.unpackedPath(item.dirName()) + _, err := path.Stat("") + if err != nil && !errors.Is(err, afero.ErrFileNotFound) { + return false, fmt.Errorf("unable to check if version-platform dir exists: %w", err) + } + return err == nil, nil +} + +// List lists all items matching the given filter. +// +// Results are stored by version (newest first), and OS/arch (consistently, +// but no guaranteed ordering). +func (s *Store) List(ctx context.Context, matching Filter) ([]Item, error) { + var res []Item + if err := s.eachItem(ctx, matching, func(_ string, item Item) { + res = append(res, item) + }); err != nil { + return nil, fmt.Errorf("unable to list version-platform pairs in store: %w", err) + } + + sort.Slice(res, func(i, j int) bool { + if !res[i].Version.Matches(res[j].Version) { + return res[i].Version.NewerThan(res[j].Version) + } + return orderPlatforms(res[i].Platform, res[j].Platform) + }) + + return res, nil +} + +// Add adds this item to the store, with the given contents (a .tar.gz file). +func (s *Store) Add(ctx context.Context, item Item, contents io.Reader) (resErr error) { + log := logr.FromContext(ctx) + itemName := item.dirName() + log = log.WithValues("version-platform", itemName) + itemPath := s.unpackedPath(itemName) + + // make sure to clean up if we hit an error + defer func() { + if resErr != nil { + // intentially ignore this because we can't really do anything + err := s.removeItem(itemPath) + if err != nil { + log.Error(err, "unable to clean up partially added version-platform pair after error") + } + } + }() + + log.V(1).Info("ensuring version-platform binaries dir exists and is empty & writable") + _, err := itemPath.Stat("") + if err != nil && !errors.Is(err, afero.ErrFileNotFound) { + return fmt.Errorf("unable to ensure version-platform binaries dir %s exists", itemName) + } + if err == nil { // exists + log.V(1).Info("cleaning up old version-platform binaries dir") + if err := s.removeItem(itemPath); err != nil { + return fmt.Errorf("unable to clean up existing version-platform binaries dir %s", itemName) + } + } + if err := itemPath.MkdirAll("", 0755); err != nil { + return fmt.Errorf("unable to make sure entry dir %s exists", itemName) + } + + log.V(1).Info("extracting archive") + gzStream, err := gzip.NewReader(contents) + if err != nil { + return fmt.Errorf("unable to start un-gz-ing entry archive") + } + tarReader := tar.NewReader(gzStream) + + var header *tar.Header + for header, err = tarReader.Next(); err == nil; header, err = tarReader.Next() { + if header.Typeflag != tar.TypeReg { // TODO(directxman12): support symlinks, etc? + log.V(1).Info("skipping non-regular-file entry in archive", "entry", header.Name) + continue + } + // just dump all files to the main path, ignoring the prefixed directory + // paths -- they're redundant. We also ignore bits for the most part (except for X), + // preferfing our own scheme. 
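+		// Each entry is written as <item dir>/<base name of the archive entry>,
+		// with permissions capped at read+execute (0555 & the archive mode).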
+ targetPath := filepath.Base(header.Name) + log.V(1).Info("writing archive file to disk", "archive file", header.Name, "on-disk file", targetPath) + perms := 0555 & header.Mode // make sure we're at most r+x + binOut, err := itemPath.OpenFile(targetPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, os.FileMode(perms)) + if err != nil { + return fmt.Errorf("unable to create file %s from archive to disk for version-platform pair %s", targetPath, itemName) + } + if err := func() error { // IIFE to get the defer properly in a loop + defer binOut.Close() + if _, err := io.Copy(binOut, tarReader); err != nil { + return fmt.Errorf("unable to write file %s from archive to disk for version-platform pair %s", targetPath, itemName) + } + return nil + }(); err != nil { + return err + } + } + if err != nil && err != io.EOF { + return fmt.Errorf("unable to finish un-tar-ing the downloaded archive: %w", err) + } + log.V(1).Info("unpacked archive") + + log.V(1).Info("switching version-platform directory to read-only") + if err := itemPath.Chmod("", 0555); err != nil { + // don't bail, this isn't fatal + log.Error(err, "unable to make version-platform directory read-only") + } + return nil +} + +// Remove removes all items matching the given filter. +// +// It returns a list of the successfully removed items (even in the case +// of an error). +func (s *Store) Remove(ctx context.Context, matching Filter) ([]Item, error) { + log := logr.FromContext(ctx) + var removed []Item + var savedErr error + if err := s.eachItem(ctx, matching, func(name string, item Item) { + log.V(1).Info("Removing version-platform pair at path", "version-platform", item, "path", name) + + if err := s.removeItem(s.unpackedPath(name)); err != nil { + log.Error(err, "unable to make existing version-platform dir writable to clean it up", "path", name) + savedErr = fmt.Errorf("unable to remove version-platform pair %s (dir %s): %w", item, name, err) + return // don't mark this as removed in the report + } + removed = append(removed, item) + }); err != nil { + return removed, fmt.Errorf("unable to list version-platform pairs to figure out what to delete: %w", err) + } + if savedErr != nil { + return removed, savedErr + } + return removed, nil +} + +// Path returns an actual path that case be used to access this item. +func (s *Store) Path(item Item) (string, error) { + path := s.unpackedPath(item.dirName()) + // NB(directxman12): we need root's realpath because RealPath only + // looks at it's own path, and so thus doesn't prepend the underlying + // root's base path. + // + // Technically, if we're fed something that's double wrapped as root, + // this'll be wrong, but this is basically as much as we can do + return afero.FullBaseFsPath(path.(*afero.BasePathFs), ""), nil +} + +// unpackedBase returns the directory in which item dirs lives. +func (s *Store) unpackedBase() afero.Fs { + return afero.NewBasePathFs(s.Root, "k8s") +} + +// unpackedPath returns the item dir with this name. 
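+// (i.e. an FS rooted at <store root>/k8s/<name>).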
+func (s *Store) unpackedPath(name string) afero.Fs { + return afero.NewBasePathFs(s.unpackedBase(), name) +} + +// eachItem iterates through the on-disk versions that match our version & platform selector, +// calling the callback for each +func (s *Store) eachItem(ctx context.Context, filter Filter, cb func(name string, item Item)) error { + log := logr.FromContext(ctx) + entries, err := afero.ReadDir(s.unpackedBase(), "") + if err != nil { + return fmt.Errorf("unable to list folders in store's unpacked directory: %w", err) + } + + for _, entry := range entries { + if !entry.IsDir() { + log.V(1).Info("skipping dir entry, not a folder", "entry", entry.Name()) + continue + } + ver, pl := versions.ExtractWithPlatform(versions.VersionPlatformRE, entry.Name()) + if ver == nil { + log.V(1).Info("skipping dir entry, not a version", "entry", entry.Name()) + continue + } + item := Item{Version: *ver, Platform: pl} + + if !filter.Matches(item) { + log.V(1).Info("skipping on disk version, does not match version and platform selectors", "platform", pl, "version", ver, "entry", entry.Name()) + continue + } + + cb(entry.Name(), item) + } + + return nil +} + +// removeItem removes the given item directory from disk. +func (s *Store) removeItem(itemDir afero.Fs) error { + if err := itemDir.Chmod("", 0755); err != nil { + // no point in trying to remove if we can't fix the permissions, bail here + return fmt.Errorf("unable to make version-platform dir writable: %w", err) + } + if err := itemDir.RemoveAll(""); err != nil && !errors.Is(err, afero.ErrFileNotFound) { + return fmt.Errorf("unable to remove version-platform dir: %w", err) + } + return nil +} + +// orderPlatforms orders platforms by OS then arch. +func orderPlatforms(first, second versions.Platform) bool { + // sort by OS, then arch + if first.OS != second.OS { + return first.OS < second.OS + } + return first.Arch < second.Arch +} diff --git a/tools/setup-envtest/store/store_suite_test.go b/tools/setup-envtest/store/store_suite_test.go new file mode 100644 index 0000000000..95a1e58e52 --- /dev/null +++ b/tools/setup-envtest/store/store_suite_test.go @@ -0,0 +1,35 @@ +package store_test + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + "github.com/go-logr/zapr" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var testLog logr.Logger + +func zapLogger() logr.Logger { + testOut := zapcore.AddSync(GinkgoWriter) + enc := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()) + // bleh setting up logging to the ginkgo writer is annoying + zapLog := zap.New(zapcore.NewCore(enc, testOut, zap.DebugLevel), + zap.ErrorOutput(testOut), zap.Development(), zap.AddStacktrace(zap.WarnLevel)) + return zapr.NewLogger(zapLog) +} + +func logCtx() context.Context { + return logr.NewContext(context.Background(), testLog) +} + +func TestStore(t *testing.T) { + testLog = zapLogger() + RegisterFailHandler(Fail) + RunSpecs(t, "Store Suite") +} diff --git a/tools/setup-envtest/store/store_test.go b/tools/setup-envtest/store/store_test.go new file mode 100644 index 0000000000..9e87eee5be --- /dev/null +++ b/tools/setup-envtest/store/store_test.go @@ -0,0 +1,234 @@ +package store_test + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "io" + "io/fs" + "math/rand" + "path/filepath" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/spf13/afero" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +const ( + fakeStorePath = "/path/to/the/store" +) + +var _ = Describe("Store", func() { + var st *store.Store + BeforeEach(func() { + fs := afero.NewMemMapFs() + fakeStoreFiles(fs, fakeStorePath) + st = &store.Store{ + Root: afero.NewBasePathFs(fs, fakeStorePath), + } + }) + Describe("initialization", func() { + It("should ensure the repo root exists", func() { + // remove the old dir + Expect(st.Root.RemoveAll("")).To(Succeed(), "should be able to remove the store before trying to initialize") + + Expect(st.Initialize(logCtx())).To(Succeed(), "initialization should succeed") + Expect(st.Root.Stat("k8s")).NotTo(BeNil(), "store's binary dir should exist") + }) + + It("should be fine if the repo root already exists", func() { + Expect(st.Initialize(logCtx())).To(Succeed()) + }) + }) + Describe("listing items", func() { + It("should filter results by the given filter, sorted in version order (newest first)", func() { + sel, err := versions.FromExpr("<=1.16") + Expect(err).NotTo(HaveOccurred(), "should be able to construct <=1.16 selector") + Expect(st.List(logCtx(), store.Filter{ + Version: sel, + Platform: versions.Platform{OS: "*", Arch: "amd64"}, + })).To(Equal([]store.Item{ + {Version: ver(1, 16, 2), Platform: versions.Platform{OS: "ifonlysingularitywasstillathing", Arch: "amd64"}}, + {Version: ver(1, 16, 1), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 16, 0), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 14, 26), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + })) + }) + It("should skip non-folders in the store", func() { + Expect(afero.WriteFile(st.Root, "k8s/2.3.6-linux-amd128", []byte{0x01}, fs.ModePerm)).To(Succeed(), "should be able to create a non-store file in the store directory") + Expect(st.List(logCtx(), store.Filter{ + Version: versions.AnyVersion, Platform: versions.Platform{OS: "linux", Arch: "amd128"}, + })).To(BeEmpty()) + }) + + It("should skip non-matching names in the store", func() { + Expect(st.Root.Mkdir("k8s/somedir-2.3.6-linux-amd128", fs.ModePerm)).To(Succeed(), "should be able to create a non-store file in the store directory") + Expect(st.List(logCtx(), store.Filter{ + Version: versions.AnyVersion, Platform: versions.Platform{OS: "linux", Arch: "amd128"}, + })).To(BeEmpty()) + }) + }) + + Describe("removing items", func() { + var res []store.Item + BeforeEach(func() { + sel, err := versions.FromExpr("<=1.16") + Expect(err).NotTo(HaveOccurred(), "should be able to construct <=1.16 selector") + res, err = st.Remove(logCtx(), store.Filter{ + Version: sel, + Platform: versions.Platform{OS: "*", Arch: "amd64"}, + }) + Expect(err).NotTo(HaveOccurred(), "should be able to remove <=1.16 & */amd64") + }) + It("should return all items removed", func() { + Expect(res).To(ConsistOf( + store.Item{Version: ver(1, 16, 2), Platform: versions.Platform{OS: "ifonlysingularitywasstillathing", Arch: "amd64"}}, + store.Item{Version: ver(1, 16, 1), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + store.Item{Version: ver(1, 16, 0), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + store.Item{Version: ver(1, 14, 26), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + )) + }) + It("should remove all items matching the given filter from disk", func() { + Expect(afero.ReadDir(st.Root, 
"k8s")).NotTo(ContainElements( + WithTransform(fs.FileInfo.Name, Equal("1.16.2-ifonlysingularitywasstillathing-amd64")), + WithTransform(fs.FileInfo.Name, Equal("1.16.1-linux-amd64")), + WithTransform(fs.FileInfo.Name, Equal("1.16.0-linux-amd64")), + WithTransform(fs.FileInfo.Name, Equal("1.14.26-linux-amd64")), + )) + }) + + It("should leave items that don't match in place", func() { + Expect(afero.ReadDir(st.Root, "k8s")).To(ContainElements( + WithTransform(fs.FileInfo.Name, Equal("1.17.9-linux-amd64")), + WithTransform(fs.FileInfo.Name, Equal("1.16.2-linux-yourimagination")), + WithTransform(fs.FileInfo.Name, Equal("1.14.26-hyperwarp-pixiedust")), + )) + }) + }) + + Describe("adding items", func() { + It("should support .tar.gz input", func() { + Expect(st.Add(logCtx(), newItem, makeFakeArchive(newName))).To(Succeed()) + Expect(st.Has(newItem)).To(BeTrue(), "should have the item after adding it") + }) + + It("should extract binaries from the given archive to a directly to the item's directory, regardless of path", func() { + Expect(st.Add(logCtx(), newItem, makeFakeArchive(newName))).To(Succeed()) + + dirName := newItem.Platform.BaseName(newItem.Version) + Expect(afero.ReadFile(st.Root, filepath.Join("k8s", dirName, "some-file"))).To(HavePrefix(newName + "some-file")) + Expect(afero.ReadFile(st.Root, filepath.Join("k8s", dirName, "other-file"))).To(HavePrefix(newName + "other-file")) + }) + + It("should clean up any existing item directory before creating the new one", func() { + item := localVersions[0] + Expect(st.Add(logCtx(), item, makeFakeArchive(newName))).To(Succeed()) + Expect(st.Root.Stat(filepath.Join("k8s", item.Platform.BaseName(item.Version)))).NotTo(BeNil(), "new files should exist") + }) + It("should clean up if it errors before finishing", func() { + item := localVersions[0] + Expect(st.Add(logCtx(), item, new(bytes.Buffer))).NotTo(Succeed(), "should fail to extract") + _, err := st.Root.Stat(filepath.Join("k8s", item.Platform.BaseName(item.Version))) + Expect(err).To(HaveOccurred(), "the binaries dir for the item should be gone") + + }) + }) + + Describe("checking if items are present", func() { + It("should report that present directories are present", func() { + Expect(st.Has(localVersions[0])).To(BeTrue()) + }) + + It("should report that absent directories are absent", func() { + Expect(st.Has(newItem)).To(BeFalse()) + }) + }) + + Describe("getting the path", func() { + It("should return the absolute on-disk path of the given item", func() { + item := localVersions[0] + Expect(st.Path(item)).To(Equal(filepath.Join(fakeStorePath, "k8s", item.Platform.BaseName(item.Version)))) + }) + }) +}) + +var ( + // keep this sorted + localVersions = []store.Item{ + {Version: ver(1, 17, 9), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 16, 2), Platform: versions.Platform{OS: "linux", Arch: "yourimagination"}}, + {Version: ver(1, 16, 2), Platform: versions.Platform{OS: "ifonlysingularitywasstillathing", Arch: "amd64"}}, + {Version: ver(1, 16, 1), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 16, 0), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 14, 26), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 14, 26), Platform: versions.Platform{OS: "hyperwarp", Arch: "pixiedust"}}, + } + + newItem = store.Item{ + Version: ver(1, 16, 3), + Platform: versions.Platform{OS: "linux", Arch: "amd64"}, + } + + newName = "kubebuilder-tools-1.16.3-linux-amd64.tar.gz" +) + +func 
ver(major, minor, patch int) versions.Concrete { //nolint unparam + return versions.Concrete{ + Major: major, + Minor: minor, + Patch: patch, + } +} + +func makeFakeArchive(magic string) io.Reader { + out := new(bytes.Buffer) + gzipWriter := gzip.NewWriter(out) + tarWriter := tar.NewWriter(gzipWriter) + Expect(tarWriter.WriteHeader(&tar.Header{ + Typeflag: tar.TypeDir, + Name: "kubebuilder/bin/", // so we can ensure we skip non-files + Mode: 0777, + })).To(Succeed()) + for _, fileName := range []string{"some-file", "other-file"} { + // create fake file contents: magic+fileName+randomBytes() + var chunk [1024 * 48]byte // 1.5 times our chunk read size in GetVersion + copy(chunk[:], []byte(magic)) + copy(chunk[len(magic):], fileName) + start := len(magic) + len(fileName) + if _, err := rand.Read(chunk[start:]); err != nil { + panic(err) + } + + // write to kubebuilder/bin/fileName + err := tarWriter.WriteHeader(&tar.Header{ + Name: "kubebuilder/bin/" + fileName, + Size: int64(len(chunk[:])), + Mode: 0777, // so we can check that we fix this later + }) + if err != nil { + panic(err) + } + _, err = tarWriter.Write(chunk[:]) + if err != nil { + panic(err) + } + } + tarWriter.Close() + gzipWriter.Close() + + return out +} + +func fakeStoreFiles(fs afero.Fs, dir string) { + By("making the unpacked directory") + unpackedBase := filepath.Join(dir, "k8s") + Expect(fs.Mkdir(unpackedBase, 0755)).To(Succeed()) + + By("making some fake (empty) versions") + for _, item := range localVersions { + Expect(fs.Mkdir(filepath.Join(unpackedBase, item.Platform.BaseName(item.Version)), 0755)).To(Succeed()) + } +} diff --git a/tools/setup-envtest/versions/misc_test.go b/tools/setup-envtest/versions/misc_test.go new file mode 100644 index 0000000000..9f99005bf5 --- /dev/null +++ b/tools/setup-envtest/versions/misc_test.go @@ -0,0 +1,127 @@ +package versions_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . 
"sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +var _ = Describe("Concrete", func() { + It("should match the only same version", func() { + ver16 := Concrete{Major: 1, Minor: 16} + ver17 := Concrete{Major: 1, Minor: 17} + Expect(ver16.Matches(ver16)).To(BeTrue(), "should match the same version") + Expect(ver16.Matches(ver17)).To(BeFalse(), "should not match a different version") + }) + It("should serialize as X.Y.Z", func() { + Expect(Concrete{Major: 1, Minor: 16, Patch: 3}.String()).To(Equal("1.16.3")) + }) + Describe("when ordering relative to other versions", func() { + ver1163 := Concrete{Major: 1, Minor: 16, Patch: 3} + Specify("newer patch should be newer", func() { + Expect(ver1163.NewerThan(Concrete{Major: 1, Minor: 16})).To(BeTrue()) + }) + Specify("newer minor should be newer", func() { + Expect(ver1163.NewerThan(Concrete{Major: 1, Minor: 15, Patch: 3})).To(BeTrue()) + }) + Specify("newer major should be newer", func() { + Expect(ver1163.NewerThan(Concrete{Major: 0, Minor: 16, Patch: 3})).To(BeTrue()) + }) + }) +}) + +var _ = Describe("Platform", func() { + Specify("a concrete platform should match exactly itself", func() { + plat1 := Platform{OS: "linux", Arch: "amd64"} + plat2 := Platform{OS: "linux", Arch: "s390x"} + plat3 := Platform{OS: "windows", Arch: "amd64"} + Expect(plat1.Matches(plat1)).To(BeTrue(), "should match itself") + Expect(plat1.Matches(plat2)).To(BeFalse(), "should reject a different arch") + Expect(plat1.Matches(plat3)).To(BeFalse(), "should reject a different os") + }) + Specify("a wildcard arch should match any arch", func() { + sel := Platform{OS: "linux", Arch: "*"} + plat1 := Platform{OS: "linux", Arch: "amd64"} + plat2 := Platform{OS: "linux", Arch: "s390x"} + plat3 := Platform{OS: "windows", Arch: "amd64"} + Expect(sel.Matches(sel)).To(BeTrue(), "should match itself") + Expect(sel.Matches(plat1)).To(BeTrue(), "should match some arch with the same OS") + Expect(sel.Matches(plat2)).To(BeTrue(), "should match another arch with the same OS") + Expect(plat1.Matches(plat3)).To(BeFalse(), "should reject a different os") + }) + Specify("a wildcard os should match any os", func() { + sel := Platform{OS: "*", Arch: "amd64"} + plat1 := Platform{OS: "linux", Arch: "amd64"} + plat2 := Platform{OS: "windows", Arch: "amd64"} + plat3 := Platform{OS: "linux", Arch: "s390x"} + Expect(sel.Matches(sel)).To(BeTrue(), "should match itself") + Expect(sel.Matches(plat1)).To(BeTrue(), "should match some os with the same arch") + Expect(sel.Matches(plat2)).To(BeTrue(), "should match another os with the same arch") + Expect(plat1.Matches(plat3)).To(BeFalse(), "should reject a different arch") + }) + It("should report a wildcard OS as a wildcard platform", func() { + Expect(Platform{OS: "*", Arch: "amd64"}.IsWildcard()).To(BeTrue()) + }) + It("should report a wildcard arch as a wildcard platform", func() { + Expect(Platform{OS: "linux", Arch: "*"}.IsWildcard()).To(BeTrue()) + }) + It("should serialize as os/arch", func() { + Expect(Platform{OS: "linux", Arch: "amd64"}.String()).To(Equal("linux/amd64")) + }) + + Specify("knows how to produce a base store name", func() { + plat := Platform{OS: "linux", Arch: "amd64"} + ver := Concrete{Major: 1, Minor: 16, Patch: 3} + Expect(plat.BaseName(ver)).To(Equal("1.16.3-linux-amd64")) + }) + + Specify("knows how to produce an archive name", func() { + plat := Platform{OS: "linux", Arch: "amd64"} + ver := Concrete{Major: 1, Minor: 16, Patch: 3} + 
Expect(plat.ArchiveName(ver)).To(Equal("kubebuilder-tools-1.16.3-linux-amd64.tar.gz")) + }) + + Describe("parsing", func() { + Context("for version-platform names", func() { + It("should accept strings of the form x.y.z-os-arch", func() { + ver, plat := ExtractWithPlatform(VersionPlatformRE, "1.16.3-linux-amd64") + Expect(ver).To(Equal(&Concrete{Major: 1, Minor: 16, Patch: 3})) + Expect(plat).To(Equal(Platform{OS: "linux", Arch: "amd64"})) + }) + It("should reject nonsense strings", func() { + ver, _ := ExtractWithPlatform(VersionPlatformRE, "1.16-linux-amd64") + Expect(ver).To(BeNil()) + }) + }) + Context("for archive names", func() { + It("should accept strings of the form kubebuilder-tools-x.y.z-os-arch.tar.gz", func() { + ver, plat := ExtractWithPlatform(ArchiveRE, "kubebuilder-tools-1.16.3-linux-amd64.tar.gz") + Expect(ver).To(Equal(&Concrete{Major: 1, Minor: 16, Patch: 3})) + Expect(plat).To(Equal(Platform{OS: "linux", Arch: "amd64"})) + }) + It("should reject nonsense strings", func() { + ver, _ := ExtractWithPlatform(ArchiveRE, "kubebuilder-tools-1.16.3-linux-amd64.tar.sum") + Expect(ver).To(BeNil()) + }) + }) + }) +}) + +var _ = Describe("Spec helpers", func() { + Specify("can fill a spec with a concrete version", func() { + spec := Spec{Selector: AnySelector{}} // don't just use AnyVersion so we don't modify it + spec.MakeConcrete(Concrete{Major: 1, Minor: 16}) + Expect(spec.AsConcrete()).To(Equal(&Concrete{Major: 1, Minor: 16})) + }) + It("should serialize as the underlying selector with ! for check latest", func() { + spec, err := FromExpr("1.16.*!") + Expect(err).NotTo(HaveOccurred()) + Expect(spec.String()).To(Equal("1.16.*!")) + }) + It("should serialize as the underlying selector by itself if not check latest", func() { + spec, err := FromExpr("1.16.*") + Expect(err).NotTo(HaveOccurred()) + Expect(spec.String()).To(Equal("1.16.*")) + }) +}) diff --git a/tools/setup-envtest/versions/parse.go b/tools/setup-envtest/versions/parse.go new file mode 100644 index 0000000000..897fd9b024 --- /dev/null +++ b/tools/setup-envtest/versions/parse.go @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package versions + +import ( + "fmt" + "regexp" + "strconv" +) + +var ( + // baseVersionRE is a semver-ish version -- either X.Y.Z, X.Y, or X.Y.{*|x}. + baseVersionRE = `(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:\.(?P0|[1-9]\d*|x|\*))?` + // versionExprRe matches valid version input for FromExpr + versionExprRE = regexp.MustCompile(`^(?P<|~|<=)?` + baseVersionRE + `(?P!)?$`) + + // ConcreteVersionRE matches a concrete version anywhere in the string. + ConcreteVersionRE = regexp.MustCompile(`(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)`) + // OnlyConcreteVersionRE matches a string that's just a concrete version + OnlyConcreteVersionRE = regexp.MustCompile(`^` + ConcreteVersionRE.String() + `$`) +) + +// FromExpr extracts a version from a string in the form of a semver version, +// where X, Y, and Z may also be wildcards ('*', 'x'), +// and pre-release names & numbers may also be wildcards. The prerelease section is slightly +// restricted to match what k8s does. +// The the whole string is a version selector as follows: +// +// - X.Y.Z matches version X.Y.Z where x, y, and z are +// are ints >= 0, and Z may be '*' or 'x' +// - X.Y is equivalent to X.Y.* +// - ~X.Y.Z means >= X.Y.Z && < X.Y+1.0 +// - = comparisons, if we use + // wildcards with a selector we can just set them to zero. 
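+		// (e.g. ~1.19.x is treated as ~1.19.0, i.e. >=1.19.0 && <1.20.0)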
+ if verInfo.Patch == AnyPoint { + verInfo.Patch = PointVersion(0) + } + baseVer := *verInfo.AsConcrete() + spec.Selector = TildeSelector{Concrete: baseVer} + default: + panic("unreachable: mismatch between FromExpr and its RE in selector") + } + + return spec, nil +} + +// PointVersionFromValidString extracts a point version +// from the corresponding string representation, which may +// be a number >= 0, or x|* (AnyPoint). +// +// Anything else will cause a panic (use this on strings +// extracted from regexes). +func PointVersionFromValidString(str string) PointVersion { + switch str { + case "*", "x": + return AnyPoint + default: + ver, err := strconv.Atoi(str) + if err != nil { + panic(err) + } + return PointVersion(ver) + } +} + +// PatchSelectorFromMatch constructs a simple selector according to the +// ParseExpr rules out of pre-validated sections. +// +// re must include name captures for major, minor, patch, prenum, and prelabel +// +// Any bad input may cause a panic. Use with when you got the parts from an RE match. +func PatchSelectorFromMatch(match []string, re *regexp.Regexp) PatchSelector { + // already parsed via RE, should be fine to ignore errors unless it's a + // *huge* number + major, err := strconv.Atoi(match[re.SubexpIndex("major")]) + if err != nil { + panic("invalid input passed as patch selector (invalid state)") + } + minor, err := strconv.Atoi(match[re.SubexpIndex("minor")]) + if err != nil { + panic("invalid input passed as patch selector (invalid state)") + } + + // patch is optional, means wilcard if left off + patch := AnyPoint + if patchRaw := match[re.SubexpIndex("patch")]; patchRaw != "" { + patch = PointVersionFromValidString(patchRaw) + } + return PatchSelector{ + Major: major, + Minor: minor, + Patch: patch, + } +} diff --git a/tools/setup-envtest/versions/parse_test.go b/tools/setup-envtest/versions/parse_test.go new file mode 100644 index 0000000000..6c3d259063 --- /dev/null +++ b/tools/setup-envtest/versions/parse_test.go @@ -0,0 +1,80 @@ +package versions_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" + + . 
"sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +func patchSel(x, y int, z PointVersion) PatchSelector { //nolint unparam + return PatchSelector{Major: x, Minor: y, Patch: z} +} + +func patchSpec(x, y int, z PointVersion) Spec { //nolint unparam + return Spec{Selector: patchSel(x, y, z)} +} + +func tildeSel(x, y, z int) TildeSelector { //nolint unparam + return TildeSelector{ + Concrete: Concrete{ + Major: x, Minor: y, Patch: z, + }, + } +} + +func tildeSpec(x, y, z int) Spec { //nolint unparam + return Spec{Selector: tildeSel(x, y, z)} +} +func ltSpec(x, y int, z PointVersion) Spec { //nolint unparam + // this just keeps the table a bit shorter + return Spec{Selector: LessThanSelector{ + PatchSelector: patchSel(x, y, z), + }} +} +func lteSpec(x, y int, z PointVersion) Spec { //nolint unparam + // this just keeps the table a bit shorter + return Spec{Selector: LessThanSelector{ + PatchSelector: patchSel(x, y, z), + OrEquals: true, + }} +} + +var _ = Describe("Parse", func() { + DescribeTable("it should support", + func(spec string, expected Spec) { + Expect(FromExpr(spec)).To(Equal(expected)) + }, + Entry("X.Y versions", "1.16", patchSpec(1, 16, AnyPoint)), + Entry("X.Y.Z versions", "1.16.3", patchSpec(1, 16, PointVersion(3))), + Entry("X.Y.x wildcard", "1.16.x", patchSpec(1, 16, AnyPoint)), + Entry("X.Y.* wildcard", "1.16.*", patchSpec(1, 16, AnyPoint)), + + Entry("~X.Y selector", "~1.16", tildeSpec(1, 16, 0)), + Entry("~X.Y.Z selector", "~1.16.3", tildeSpec(1, 16, 3)), + Entry("~X.Y.x selector", "~1.16.x", tildeSpec(1, 16, 0)), + Entry("~X.Y.* selector", "~1.16.*", tildeSpec(1, 16, 0)), + + Entry("\w+)-(?P\w+)` + // VersionPlatformRE matches concrete version-platform strings. + VersionPlatformRE = regexp.MustCompile(`^` + versionPlatformREBase + `$`) + // ArchiveRE matches concrete version-platform.tar.gz strings. + ArchiveRE = regexp.MustCompile(`^kubebuilder-tools-` + versionPlatformREBase + `\.tar\.gz$`) +) diff --git a/tools/setup-envtest/versions/selectors_test.go b/tools/setup-envtest/versions/selectors_test.go new file mode 100644 index 0000000000..36a639f303 --- /dev/null +++ b/tools/setup-envtest/versions/selectors_test.go @@ -0,0 +1,200 @@ +package versions_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . 
"sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +var _ = Describe("Selectors", func() { + Describe("patch", func() { + var sel Selector + Context("with any patch", func() { + BeforeEach(func() { + var err error + sel, err = FromExpr("1.16.*") + Expect(err).NotTo(HaveOccurred()) + }) + + It("should match any patch version with the same major & minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 3})).To(BeTrue(), "should match 1.16.3") + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 0})).To(BeTrue(), "should match 1.16.0") + }) + + It("should reject a different major", func() { + Expect(sel.Matches(Concrete{Major: 2, Minor: 16, Patch: 3})).To(BeFalse(), "should reject 2.16.3") + + }) + + It("should reject a different minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 17, Patch: 3})).To(BeFalse(), "should reject 1.17.3") + }) + + It("should serialize as X.Y.*", func() { + Expect(sel.String()).To(Equal("1.16.*")) + }) + + It("should not be concrete", func() { + Expect(sel.AsConcrete()).To(BeNil()) + }) + }) + + Context("with a specific patch", func() { + BeforeEach(func() { + var err error + sel, err = FromExpr("1.16.3") + Expect(err).NotTo(HaveOccurred()) + }) + It("should match exactly the major/minor/patch", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 3})).To(BeTrue(), "should match 1.16.3") + }) + + It("should reject a different major", func() { + Expect(sel.Matches(Concrete{Major: 2, Minor: 16, Patch: 3})).To(BeFalse(), "should reject 2.16.3") + + }) + + It("should reject a different minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 17, Patch: 3})).To(BeFalse(), "should reject 1.17.3") + + }) + + It("should reject a different patch", func() { + + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 4})).To(BeFalse(), "should reject 1.16.4") + }) + It("should serialize as X.Y.Z", func() { + Expect(sel.String()).To(Equal("1.16.3")) + }) + It("may be concrete", func() { + Expect(sel.AsConcrete()).To(Equal(&Concrete{Major: 1, Minor: 16, Patch: 3})) + }) + }) + + }) + + Describe("tilde", func() { + var sel Selector + BeforeEach(func() { + var err error + sel, err = FromExpr("~1.16.3") + Expect(err).NotTo(HaveOccurred()) + }) + It("should match exactly the major/minor/patch", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 3})).To(BeTrue(), "should match 1.16.3") + }) + + It("should match a patch greater than the given one, with the same major/minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 4})).To(BeTrue(), "should match 1.16.4") + }) + + It("should reject a patch less than the given one, with the same major/minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 2})).To(BeFalse(), "should reject 1.16.2") + + }) + + It("should reject a different major", func() { + Expect(sel.Matches(Concrete{Major: 2, Minor: 16, Patch: 3})).To(BeFalse(), "should reject 2.16.3") + + }) + + It("should reject a different minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 17, Patch: 3})).To(BeFalse(), "should reject 1.17.3") + + }) + + It("should treat ~X.Y.* as ~X.Y.Z", func() { + sel, err := FromExpr("~1.16.*") + Expect(err).NotTo(HaveOccurred()) + + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 0})).To(BeTrue(), "should match 1.16.0") + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 3})).To(BeTrue(), "should match 1.16.3") + Expect(sel.Matches(Concrete{Major: 1, Minor: 17, Patch: 0})).To(BeFalse(), "should reject 
1.17.0") + }) + It("should serialize as ~X.Y.Z", func() { + Expect(sel.String()).To(Equal("~1.16.3")) + }) + It("should never be concrete", func() { + Expect(sel.AsConcrete()).To(BeNil()) + }) + }) + + Describe("less-than", func() { + var sel Selector + BeforeEach(func() { + var err error + sel, err = FromExpr("<1.16.3") + Expect(err).NotTo(HaveOccurred()) + }) + It("should reject the exact major/minor/patch", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 3})).To(BeFalse(), "should reject 1.16.3") + + }) + It("should reject greater patches", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 4})).To(BeFalse(), "should reject 1.16.4") + + }) + It("should reject greater majors", func() { + Expect(sel.Matches(Concrete{Major: 2, Minor: 16, Patch: 3})).To(BeFalse(), "should reject 2.16.3") + + }) + It("should reject greater minors", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 17, Patch: 3})).To(BeFalse(), "should reject 1.17.3") + + }) + + It("should accept lesser patches", func() { + + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 2})).To(BeTrue(), "should accept 1.16.2") + }) + It("should accept lesser majors", func() { + Expect(sel.Matches(Concrete{Major: 0, Minor: 16, Patch: 3})).To(BeTrue(), "should accept 0.16.3") + + }) + It("should accept lesser minors", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 15, Patch: 3})).To(BeTrue(), "should accept 1.15.3") + + }) + It("should serialize as other.Major + } + if c.Minor != other.Minor { + return c.Minor > other.Minor + } + return c.Patch > other.Patch +} + +// Matches checks if this version is equal to the other one. +func (c Concrete) Matches(other Concrete) bool { + return c == other +} + +func (c Concrete) String() string { + return fmt.Sprintf("%d.%d.%d", c.Major, c.Minor, c.Patch) +} + +// PatchSelector selects a set of versions where the patch is a wildcard. +type PatchSelector struct { + Major, Minor int + Patch PointVersion +} + +func (s PatchSelector) String() string { + return fmt.Sprintf("%d.%d.%s", s.Major, s.Minor, s.Patch) +} + +// Matches checks if the given version matches this selector. +func (s PatchSelector) Matches(ver Concrete) bool { + return s.Major == ver.Major && s.Minor == ver.Minor && s.Patch.Matches(ver.Patch) +} + +// AsConcrete returns nil if there are wildcards in this selector, +// and the concrete version that this selects otherwise. +func (s PatchSelector) AsConcrete() *Concrete { + if s.Patch == AnyPoint { + return nil + } + + return &Concrete{ + Major: s.Major, + Minor: s.Minor, + Patch: int(s.Patch), // safe to cast, we've just checked wilcards above + } +} + +// TildeSelector selects [X.Y.Z, X.Y+1.0). +type TildeSelector struct { + Concrete +} + +// Matches checks if the given version matches this selector. +func (s TildeSelector) Matches(ver Concrete) bool { + if s.Concrete.Matches(ver) { + // easy, "exact" match + return true + } + return ver.Major == s.Major && ver.Minor == s.Minor && ver.Patch >= s.Patch +} +func (s TildeSelector) String() string { + return "~" + s.Concrete.String() +} + +// AsConcrete returns nil (this is never a concrete version). +func (s TildeSelector) AsConcrete() *Concrete { + return nil +} + +// LessThanSelector selects versions older than the given one +// (mainly useful for cleaning up). +type LessThanSelector struct { + PatchSelector + OrEquals bool +} + +// Matches checks if the given version matches this selector. 
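+// A wildcard patch in the selector matches any patch, so for an exact
+// major/minor match the result is just OrEquals.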
+func (s LessThanSelector) Matches(ver Concrete) bool { + if s.Major != ver.Major { + return s.Major > ver.Major + } + if s.Minor != ver.Minor { + return s.Minor > ver.Minor + } + if !s.Patch.Matches(ver.Patch) { + // matches rules out a wildcard, so it's fine to compare as normal numbers + return int(s.Patch) > ver.Patch + } + return s.OrEquals +} +func (s LessThanSelector) String() string { + if s.OrEquals { + return "<=" + s.PatchSelector.String() + } + return "<" + s.PatchSelector.String() +} + +// AsConcrete returns nil (this is never a concrete version). +func (s LessThanSelector) AsConcrete() *Concrete { + return nil +} + +// AnySelector matches any version at all. +type AnySelector struct{} + +// Matches checks if the given version matches this selector. +func (AnySelector) Matches(_ Concrete) bool { return true } + +// AsConcrete returns nil (this is never a concrete version). +func (AnySelector) AsConcrete() *Concrete { return nil } +func (AnySelector) String() string { return "*" } + +// Selector selects some concrete version or range of versions. +type Selector interface { + // AsConcrete tries to return this selector as a concrete version. + // If the selector would only match a single version, it'll return + // that, otherwise it'll return nil. + AsConcrete() *Concrete + // Matches checks if this selector matches the given concrete version. + Matches(ver Concrete) bool + String() string +} + +// Spec matches some version or range of versions, and tells us how to deal with local and +// remote when selecting a version. +type Spec struct { + Selector + + // CheckLatest tells us to check the remote server for the latest + // version that matches our selector, instead of just relying on + // matching local versions. + CheckLatest bool +} + +// MakeConcrete replaces the contents of this spec with one that +// matches the given concrete version (without checking latest +// from the server). +func (s *Spec) MakeConcrete(ver Concrete) { + s.Selector = ver + s.CheckLatest = false +} + +// AsConcrete returns the underlying selector as a concrete version, if +// possible. +func (s Spec) AsConcrete() *Concrete { + return s.Selector.AsConcrete() +} + +// Matches checks if the underlying selector matches the given version. +func (s Spec) Matches(ver Concrete) bool { + return s.Selector.Matches(ver) +} + +func (s Spec) String() string { + res := s.Selector.String() + if s.CheckLatest { + res += "!" + } + return res +} + +// PointVersion represents a wildcard (patch) version +// or concrete numbre +type PointVersion int + +const ( + // AnyPoint matches any point version. + AnyPoint PointVersion = -1 +) + +// Matches checks if a point version is compatible +// with a concrete point version. +// Two point versions are compatible if they are +// a) both concrete +// b) one is a wildcard. +func (p PointVersion) Matches(other int) bool { + switch p { + case AnyPoint: + return true + default: + return int(p) == other + } +} +func (p PointVersion) String() string { + switch p { + case AnyPoint: + return "*" + default: + return strconv.Itoa(int(p)) + } +} + +var ( + // LatestVersion matches the most recent version on the remote server. + LatestVersion Spec = Spec{ + Selector: AnySelector{}, + CheckLatest: true, + } + // AnyVersion matches any local or remote version. 
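+	// It serializes as "*" and never forces a check against the remote server
+	// (CheckLatest is left false).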
+ AnyVersion Spec = Spec{ + Selector: AnySelector{}, + } +) diff --git a/tools/setup-envtest/versions/versions_suite_test.go b/tools/setup-envtest/versions/versions_suite_test.go new file mode 100644 index 0000000000..2e4e0b512b --- /dev/null +++ b/tools/setup-envtest/versions/versions_suite_test.go @@ -0,0 +1,13 @@ +package versions_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestVersions(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Versions Suite") +} diff --git a/tools/setup-envtest/workflows/workflows.go b/tools/setup-envtest/workflows/workflows.go new file mode 100644 index 0000000000..5c41670a27 --- /dev/null +++ b/tools/setup-envtest/workflows/workflows.go @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package workflows + +import ( + "context" + "io" + + "github.com/go-logr/logr" + + envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" +) + +// Use is a workflow that prints out information about stored +// version-platform pairs, downloading them if necessary & requested. +type Use struct { + UseEnv bool + AssetsPath string + PrintFormat envp.PrintFormat +} + +// Do executes this workflow. +func (f Use) Do(env *envp.Env) { + ctx := logr.NewContext(context.TODO(), env.Log.WithName("use")) + env.EnsureBaseDirs(ctx) + if f.UseEnv { + // the the env var unconditionally + if env.PathMatches(f.AssetsPath) { + env.PrintInfo(f.PrintFormat) + return + } + } + env.EnsureVersionIsSet(ctx) + if env.ExistsAndValid() { + env.PrintInfo(f.PrintFormat) + return + } + if env.NoDownload { + envp.Exit(2, "no such version (%s) exists on disk for this architecture (%s) -- try running `list -i` to see what's on disk", env.Version, env.Platform) + } + env.Fetch(ctx) + env.PrintInfo(f.PrintFormat) +} + +// List is a workflow that lists version-platform pairs in the store +// and on the remote server that match the given filter. +type List struct{} + +// Do executes this workflow. +func (List) Do(env *envp.Env) { + ctx := logr.NewContext(context.TODO(), env.Log.WithName("list")) + env.EnsureBaseDirs(ctx) + env.ListVersions(ctx) +} + +// Cleanup is a workflow that removes version-platform pairs from the store +// that match the given filter. +type Cleanup struct{} + +// Do executes this workflow. +func (Cleanup) Do(env *envp.Env) { + ctx := logr.NewContext(context.TODO(), env.Log.WithName("cleanup")) + + env.NoDownload = true + env.ForceDownload = false + + env.EnsureBaseDirs(ctx) + env.Remove(ctx) +} + +// Sideload is a workflow that adds or replaces a version-platform pair in the +// store, using the given archive as the files. +type Sideload struct { + Input io.Reader + PrintFormat envp.PrintFormat +} + +// Do executes this workflow. +func (f Sideload) Do(env *envp.Env) { + ctx := logr.NewContext(context.TODO(), env.Log.WithName("sideload")) + + env.EnsureBaseDirs(ctx) + env.NoDownload = true + env.Sideload(ctx, f.Input) + env.PrintInfo(f.PrintFormat) +} diff --git a/tools/setup-envtest/workflows/workflows_suite_test.go b/tools/setup-envtest/workflows/workflows_suite_test.go new file mode 100644 index 0000000000..911db57a7a --- /dev/null +++ b/tools/setup-envtest/workflows/workflows_suite_test.go @@ -0,0 +1,30 @@ +package workflows_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "github.com/go-logr/logr" + "github.com/go-logr/zapr" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var testLog logr.Logger + +func zapLogger() logr.Logger { + testOut := zapcore.AddSync(GinkgoWriter) + enc := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()) + // bleh setting up logging to the ginkgo writer is annoying + zapLog := zap.New(zapcore.NewCore(enc, testOut, zap.DebugLevel), + zap.ErrorOutput(testOut), zap.Development(), zap.AddStacktrace(zap.WarnLevel)) + return zapr.NewLogger(zapLog) +} + +func TestWorkflows(t *testing.T) { + testLog = zapLogger() + RegisterFailHandler(Fail) + RunSpecs(t, "Workflows Suite") +} diff --git a/tools/setup-envtest/workflows/workflows_test.go b/tools/setup-envtest/workflows/workflows_test.go new file mode 100644 index 0000000000..bd7b97f514 --- /dev/null +++ b/tools/setup-envtest/workflows/workflows_test.go @@ -0,0 +1,421 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package workflows_test + +import ( + "bytes" + "io/fs" + "path/filepath" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/ghttp" + "github.com/spf13/afero" + + envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/remote" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" + wf "sigs.k8s.io/controller-runtime/tools/setup-envtest/workflows" +) + +func ver(major, minor, patch int) versions.Concrete { + return versions.Concrete{ + Major: major, + Minor: minor, + Patch: patch, + } +} + +func shouldHaveError() { + var err error + var code int + cause := recover() + if envp.CheckRecover(cause, func(caughtCode int, caughtErr error) { + err = caughtErr + code = caughtCode + }) { + panic(cause) + } + Expect(err).To(HaveOccurred(), "should write an error") + Expect(code).NotTo(BeZero(), "should exit with a non-zero code") +} + +const ( + testStorePath = ".teststore" +) + +var _ = Describe("Workflows", func() { + var ( + env *envp.Env + out *bytes.Buffer + server *ghttp.Server + remoteItems []item + ) + BeforeEach(func() { + out = new(bytes.Buffer) + baseFs := afero.Afero{Fs: afero.NewMemMapFs()} + env = &envp.Env{ + Log: testLog, + VerifySum: true, // on by default + FS: baseFs, + Store: &store.Store{Root: afero.NewBasePathFs(baseFs, testStorePath)}, + Out: out, + Platform: versions.PlatformItem{ // default + Platform: versions.Platform{ + OS: "linux", + Arch: "amd64", + }, + }, + Client: &remote.Client{ + Log: testLog.WithName("remote-client"), + Bucket: "kubebuilder-tools-test", // test custom bucket functionality too + Server: "localhost:-1", + Insecure: true, // no https in httptest :-( + }, + } + server = ghttp.NewServer() + env.Client.Server = server.Addr() + + fakeStore(env.FS, testStorePath) + remoteItems = remoteVersions + }) + JustBeforeEach(func() { + handleRemoteVersions(server, remoteItems) + }) + AfterEach(func() { + server.Close() + server = nil + }) + + Describe("use", func() { + var flow wf.Use + BeforeEach(func() { + // some defaults for most tests + env.Version = versions.Spec{ + Selector: ver(1, 16, 0), + } + flow = wf.Use{ + PrintFormat: envp.PrintPath, + } + }) + + It("should initialize the store if it doesn't exist", func() { + Expect(env.FS.RemoveAll(testStorePath)).To(Succeed()) + // need to set this to a valid remote version cause our store is now empty + env.Version = versions.Spec{Selector: ver(1, 
16, 4)} + flow.Do(env) + Expect(env.FS.Stat(testStorePath)).NotTo(BeNil()) + }) + + Context("when use env is set", func() { + BeforeEach(func() { + flow.UseEnv = true + }) + It("should fall back to normal behavior when the env is not set", func() { + flow.Do(env) + Expect(out.String()).To(HaveSuffix("/1.16.0-linux-amd64"), "should fall back to a local version") + }) + It("should fall back to normal behavior if binaries are missing", func() { + flow.AssetsPath = ".teststore/missing-binaries" + flow.Do(env) + Expect(out.String()).To(HaveSuffix("/1.16.0-linux-amd64"), "should fall back to a local version") + }) + It("should use the value of the env if it contains the right binaries", func() { + flow.AssetsPath = ".teststore/good-version" + flow.Do(env) + Expect(out.String()).To(Equal(flow.AssetsPath)) + }) + It("should not try and check the version of the binaries", func() { + flow.AssetsPath = ".teststore/wrong-version" + flow.Do(env) + Expect(out.String()).To(Equal(flow.AssetsPath)) + }) + It("should not need to contact the network", func() { + server.Close() + flow.AssetsPath = ".teststore/good-version" + flow.Do(env) + // expect to not get a panic -- if we do, it'll cause the test to fail + }) + }) + + Context("when downloads are disabled", func() { + BeforeEach(func() { + env.NoDownload = true + server.Close() + }) + + // It("should not contact the network") is a gimme here, because we + // call server.Close() above. + + It("should error if no matches are found locally", func() { + defer shouldHaveError() + env.Version.Selector = versions.Concrete{Major: 9001} + flow.Do(env) + }) + It("should settle for the latest local match if latest is requested", func() { + env.Version = versions.Spec{ + CheckLatest: true, + Selector: versions.PatchSelector{ + Major: 1, + Minor: 16, + Patch: versions.AnyPoint, + }, + } + + flow.Do(env) + + // latest on "server" is 1.16.4, shouldn't use that + Expect(out.String()).To(HaveSuffix("/1.16.1-linux-amd64"), "should use the latest local version") + }) + }) + + Context("if latest is requested", func() { + It("should contact the network to see if there's anything newer", func() { + env.Version = versions.Spec{ + CheckLatest: true, + Selector: versions.PatchSelector{ + Major: 1, Minor: 16, Patch: versions.AnyPoint, + }, + } + flow.Do(env) + Expect(out.String()).To(HaveSuffix("/1.16.4-linux-amd64"), "should use the latest remote version") + }) + It("should still use the latest local if the network doesn't have anything newer", func() { + env.Version = versions.Spec{ + CheckLatest: true, + Selector: versions.PatchSelector{ + Major: 1, Minor: 14, Patch: versions.AnyPoint, + }, + } + + flow.Do(env) + + // latest on the server is 1.14.1, latest local is 1.14.26 + Expect(out.String()).To(HaveSuffix("/1.14.26-linux-amd64"), "should use the latest local version") + }) + }) + + It("should check local for a match first", func() { + server.Close() // confirm no network + env.Version = versions.Spec{ + Selector: versions.TildeSelector{Concrete: ver(1, 16, 0)}, + } + flow.Do(env) + // latest on the server is 1.16.4, latest local is 1.16.1 + Expect(out.String()).To(HaveSuffix("/1.16.1-linux-amd64"), "should use the latest local version") + }) + + It("should fall back to the network if no local matches are found", func() { + env.Version = versions.Spec{ + Selector: versions.TildeSelector{Concrete: ver(1, 19, 0)}, + } + flow.Do(env) + Expect(out.String()).To(HaveSuffix("/1.19.2-linux-amd64"), "should have a remote version") + }) + + It("should error out if no matches can be 
found anywhere", func() { + defer shouldHaveError() + env.Version = versions.Spec{ + Selector: versions.TildeSelector{Concrete: ver(0, 0, 1)}, + } + flow.Do(env) + }) + + It("should skip local versions matches with non-matching platforms", func() { + env.NoDownload = true // so we get an error + defer shouldHaveError() + env.Version = versions.Spec{ + // has non-matching local versions + Selector: ver(1, 13, 0), + } + + flow.Do(env) + }) + + It("should skip remote version matches with non-matching platforms", func() { + defer shouldHaveError() + env.Version = versions.Spec{ + // has a non-matching remote version + Selector: versions.TildeSelector{Concrete: ver(1, 11, 1)}, + } + flow.Do(env) + }) + + Describe("verifying the checksum", func() { + BeforeEach(func() { + remoteItems = append(remoteItems, item{ + meta: bucketObject{ + Name: "kubebuilder-tools-86.75.309-linux-amd64.tar.gz", + Hash: "nottherightone!", + }, + contents: remoteItems[0].contents, // need a valid tar.gz file to not error from that + }) + env.Version = versions.Spec{ + Selector: ver(86, 75, 309), + } + }) + Specify("when enabled, should fail if the downloaded md5 checksum doesn't match", func() { + defer shouldHaveError() + flow.Do(env) + }) + Specify("when disabled, shouldn't check the checksum at all", func() { + env.VerifySum = false + flow.Do(env) + }) + }) + }) + + Describe("list", func() { + // split by fields so we're not matching on whitespace + listFields := func() [][]string { + resLines := strings.Split(strings.TrimSpace(out.String()), "\n") + resFields := make([][]string, len(resLines)) + for i, line := range resLines { + resFields[i] = strings.Fields(line) + } + return resFields + } + + Context("when downloads are disabled", func() { + BeforeEach(func() { + server.Close() // ensure no network + env.NoDownload = true + }) + It("should include local contents sorted by version", func() { + env.Version = versions.AnyVersion + env.Platform.Platform = versions.Platform{OS: "*", Arch: "*"} + wf.List{}.Do(env) + + Expect(listFields()).To(Equal([][]string{ + {"(installed)", "v1.17.9", "linux/amd64"}, + {"(installed)", "v1.16.2", "ifonlysingularitywasstillathing/amd64"}, + {"(installed)", "v1.16.2", "linux/yourimagination"}, + {"(installed)", "v1.16.1", "linux/amd64"}, + {"(installed)", "v1.16.0", "linux/amd64"}, + {"(installed)", "v1.14.26", "hyperwarp/pixiedust"}, + {"(installed)", "v1.14.26", "linux/amd64"}, + })) + }) + It("should skip non-matching local contents", func() { + env.Version.Selector = versions.PatchSelector{ + Major: 1, Minor: 16, Patch: versions.AnyPoint, + } + env.Platform.Arch = "*" + wf.List{}.Do(env) + + Expect(listFields()).To(Equal([][]string{ + {"(installed)", "v1.16.2", "linux/yourimagination"}, + {"(installed)", "v1.16.1", "linux/amd64"}, + {"(installed)", "v1.16.0", "linux/amd64"}, + })) + + }) + }) + Context("when downloads are enabled", func() { + Context("when sorting", func() { + BeforeEach(func() { + // shorten the list a bit for expediency + remoteItems = remoteItems[:7] + }) + It("should sort local & remote by version", func() { + env.Version = versions.AnyVersion + env.Platform.Platform = versions.Platform{OS: "*", Arch: "*"} + wf.List{}.Do(env) + + Expect(listFields()).To(Equal([][]string{ + {"(installed)", "v1.17.9", "linux/amd64"}, + {"(installed)", "v1.16.2", "ifonlysingularitywasstillathing/amd64"}, + {"(installed)", "v1.16.2", "linux/yourimagination"}, + {"(installed)", "v1.16.1", "linux/amd64"}, + {"(installed)", "v1.16.0", "linux/amd64"}, + {"(installed)", "v1.14.26", 
"hyperwarp/pixiedust"}, + {"(installed)", "v1.14.26", "linux/amd64"}, + {"(available)", "v1.11.1", "potato/cherrypie"}, + {"(available)", "v1.11.0", "darwin/amd64"}, + {"(available)", "v1.11.0", "linux/amd64"}, + {"(available)", "v1.10.1", "darwin/amd64"}, + {"(available)", "v1.10.1", "linux/amd64"}, + })) + + }) + }) + It("should skip non-matching remote contents", func() { + env.Version.Selector = versions.PatchSelector{ + Major: 1, Minor: 16, Patch: versions.AnyPoint, + } + env.Platform.Arch = "*" + wf.List{}.Do(env) + + Expect(listFields()).To(Equal([][]string{ + {"(installed)", "v1.16.2", "linux/yourimagination"}, + {"(installed)", "v1.16.1", "linux/amd64"}, + {"(installed)", "v1.16.0", "linux/amd64"}, + {"(available)", "v1.16.4", "linux/amd64"}, + })) + + }) + }) + }) + + Describe("cleanup", func() { + BeforeEach(func() { + server.Close() // ensure no network + flow := wf.Cleanup{} + env.Version = versions.AnyVersion + env.Platform.Arch = "*" + flow.Do(env) + }) + + It("should remove matching versions from the store & keep non-matching ones", func() { + entries, err := env.FS.ReadDir(filepath.Join(".teststore/k8s")) + Expect(err).NotTo(HaveOccurred(), "should be able to read the store") + Expect(entries).To(ConsistOf( + WithTransform(fs.FileInfo.Name, Equal("1.16.2-ifonlysingularitywasstillathing-amd64")), + WithTransform(fs.FileInfo.Name, Equal("1.14.26-hyperwarp-pixiedust")), + )) + }) + }) + + Describe("sideload", func() { + var ( + flow wf.Sideload + // remote version fake contents are prefixed by the + // name for easier debugging, so we can use that here + expectedPrefix = remoteVersions[0].meta.Name + ) + BeforeEach(func() { + server.Close() // ensure no network + flow.Input = bytes.NewReader(remoteVersions[0].contents) + flow.PrintFormat = envp.PrintPath + }) + It("should initialize the store if it doesn't exist", func() { + env.Version.Selector = ver(1, 10, 0) + Expect(env.FS.RemoveAll(testStorePath)).To(Succeed()) + flow.Do(env) + Expect(env.FS.Stat(testStorePath)).NotTo(BeNil()) + }) + It("should fail if a non-concrete version is given", func() { + defer shouldHaveError() + env.Version = versions.LatestVersion + flow.Do(env) + }) + It("should fail if a non-concrete platform is given", func() { + defer shouldHaveError() + env.Version.Selector = ver(1, 10, 0) + env.Platform.Arch = "*" + flow.Do(env) + }) + It("should load the given gizipped tarball into our store as the given version", func() { + env.Version.Selector = ver(1, 10, 0) + flow.Do(env) + baseName := env.Platform.BaseName(*env.Version.AsConcrete()) + expectedPath := filepath.Join(".teststore/k8s", baseName, "some-file") + outContents, err := env.FS.ReadFile(expectedPath) + Expect(err).NotTo(HaveOccurred(), "should be able to load the unzipped file") + Expect(string(outContents)).To(HavePrefix(expectedPrefix), "should have the debugging prefix") + }) + }) +}) diff --git a/tools/setup-envtest/workflows/workflows_testutils_test.go b/tools/setup-envtest/workflows/workflows_testutils_test.go new file mode 100644 index 0000000000..05b13773f9 --- /dev/null +++ b/tools/setup-envtest/workflows/workflows_testutils_test.go @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package workflows_test + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/md5" + "encoding/base64" + "math/rand" + "net/http" + "path/filepath" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/ghttp" + "github.com/spf13/afero" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +var ( + remoteNames = []string{ + "kubebuilder-tools-1.10-darwin-amd64.tar.gz", + "kubebuilder-tools-1.10-linux-amd64.tar.gz", + "kubebuilder-tools-1.10.1-darwin-amd64.tar.gz", + "kubebuilder-tools-1.10.1-linux-amd64.tar.gz", + "kubebuilder-tools-1.11.0-darwin-amd64.tar.gz", + "kubebuilder-tools-1.11.0-linux-amd64.tar.gz", + "kubebuilder-tools-1.11.1-potato-cherrypie.tar.gz", + "kubebuilder-tools-1.12.3-darwin-amd64.tar.gz", + "kubebuilder-tools-1.12.3-linux-amd64.tar.gz", + "kubebuilder-tools-1.13.1-darwin-amd64.tar.gz", + "kubebuilder-tools-1.13.1-linux-amd64.tar.gz", + "kubebuilder-tools-1.14.1-darwin-amd64.tar.gz", + "kubebuilder-tools-1.14.1-linux-amd64.tar.gz", + "kubebuilder-tools-1.15.5-darwin-amd64.tar.gz", + "kubebuilder-tools-1.15.5-linux-amd64.tar.gz", + "kubebuilder-tools-1.16.4-darwin-amd64.tar.gz", + "kubebuilder-tools-1.16.4-linux-amd64.tar.gz", + "kubebuilder-tools-1.17.9-darwin-amd64.tar.gz", + "kubebuilder-tools-1.17.9-linux-amd64.tar.gz", + "kubebuilder-tools-1.19.0-darwin-amd64.tar.gz", + "kubebuilder-tools-1.19.0-linux-amd64.tar.gz", + "kubebuilder-tools-1.19.2-darwin-amd64.tar.gz", + "kubebuilder-tools-1.19.2-linux-amd64.tar.gz", + "kubebuilder-tools-1.19.2-linux-arm64.tar.gz", + "kubebuilder-tools-1.19.2-linux-ppc64le.tar.gz", + "kubebuilder-tools-1.20.2-darwin-amd64.tar.gz", + "kubebuilder-tools-1.20.2-linux-amd64.tar.gz", + "kubebuilder-tools-1.20.2-linux-arm64.tar.gz", + "kubebuilder-tools-1.20.2-linux-ppc64le.tar.gz", + "kubebuilder-tools-1.9-darwin-amd64.tar.gz", + "kubebuilder-tools-1.9-linux-amd64.tar.gz", + "kubebuilder-tools-v1.19.2-darwin-amd64.tar.gz", + "kubebuilder-tools-v1.19.2-linux-amd64.tar.gz", + "kubebuilder-tools-v1.19.2-linux-arm64.tar.gz", + "kubebuilder-tools-v1.19.2-linux-ppc64le.tar.gz", + } + + remoteVersions = makeContents(remoteNames) + + // keep this sorted + localVersions = []versions.Set{ + {Version: ver(1, 17, 9), Platforms: []versions.PlatformItem{ + {Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + }}, + {Version: ver(1, 16, 2), Platforms: []versions.PlatformItem{ + {Platform: versions.Platform{OS: "linux", Arch: "yourimagination"}}, + {Platform: versions.Platform{OS: "ifonlysingularitywasstillathing", Arch: "amd64"}}, + }}, + {Version: ver(1, 16, 1), Platforms: []versions.PlatformItem{ + {Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + }}, + {Version: ver(1, 16, 0), Platforms: []versions.PlatformItem{ + {Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + }}, + {Version: ver(1, 14, 26), Platforms: []versions.PlatformItem{ + {Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Platform: versions.Platform{OS: "hyperwarp", Arch: "pixiedust"}}, + }}, + } +) + +type platformVer struct { + versions.Platform + versions.Concrete +} + +func (p platformVer) String() string { + return p.Platform.BaseName(p.Concrete) +} + +type item struct { + meta bucketObject + contents []byte +} + +// objectList is the parts we need of the GCS "list-objects-in-bucket" endpoint. +type objectList struct { + Items []bucketObject `json:"items"` +} + +// bucketObject is the parts we need of the GCS object metadata. 
+type bucketObject struct { + Name string `json:"name"` + Hash string `json:"md5Hash"` +} + +func makeContents(names []string) []item { + res := make([]item, len(names)) + for i, name := range names { + var chunk [1024 * 48]byte // 1.5 times our chunk read size in GetVersion + copy(chunk[:], []byte(name)) + if _, err := rand.Read(chunk[len(name):]); err != nil { + panic(err) + } + res[i] = verWith(name, chunk[:]) + } + return res +} + +func verWith(name string, contents []byte) item { + out := new(bytes.Buffer) + gzipWriter := gzip.NewWriter(out) + tarWriter := tar.NewWriter(gzipWriter) + err := tarWriter.WriteHeader(&tar.Header{ + Name: "kubebuilder/bin/some-file", + Size: int64(len(contents)), + Mode: 0777, // so we can check that we fix this later + }) + if err != nil { + panic(err) + } + _, err = tarWriter.Write(contents) + if err != nil { + panic(err) + } + tarWriter.Close() + gzipWriter.Close() + res := item{ + meta: bucketObject{Name: name}, + contents: out.Bytes(), + } + hash := md5.Sum(res.contents) + res.meta.Hash = base64.StdEncoding.EncodeToString(hash[:]) + return res +} + +func handleRemoteVersions(server *ghttp.Server, versions []item) { + list := objectList{Items: make([]bucketObject, len(versions))} + for i, ver := range versions { + ver := ver // copy to avoid capturing the iteration variable + list.Items[i] = ver.meta + server.RouteToHandler("GET", "/storage/v1/b/kubebuilder-tools-test/o/"+ver.meta.Name, func(resp http.ResponseWriter, req *http.Request) { + if req.URL.Query().Get("alt") == "media" { + resp.WriteHeader(http.StatusOK) + Expect(resp.Write(ver.contents)).To(Equal(len(ver.contents))) + } else { + ghttp.RespondWithJSONEncoded( + http.StatusOK, + ver.meta, + )(resp, req) + } + }) + } + server.RouteToHandler("GET", "/storage/v1/b/kubebuilder-tools-test/o", ghttp.RespondWithJSONEncoded( + http.StatusOK, + list, + )) +} + +func fakeStore(fs afero.Afero, dir string) { + By("making the unpacked directory") + unpackedBase := filepath.Join(dir, "k8s") + Expect(fs.Mkdir(unpackedBase, 0755)).To(Succeed()) + + By("making some fake (empty) versions") + for _, set := range localVersions { + for _, plat := range set.Platforms { + Expect(fs.Mkdir(filepath.Join(unpackedBase, plat.BaseName(set.Version)), 0755)).To(Succeed()) + } + } + + By("making some fake non-store paths") + Expect(fs.Mkdir(filepath.Join(dir, "missing-binaries"), 0755)) + + Expect(fs.Mkdir(filepath.Join(dir, "wrong-version"), 0755)) + Expect(fs.WriteFile(filepath.Join(dir, "wrong-version", "kube-apiserver"), nil, 0755)).To(Succeed()) + Expect(fs.WriteFile(filepath.Join(dir, "wrong-version", "kubectl"), nil, 0755)).To(Succeed()) + Expect(fs.WriteFile(filepath.Join(dir, "wrong-version", "etcd"), nil, 0755)).To(Succeed()) + + Expect(fs.Mkdir(filepath.Join(dir, "good-version"), 0755)) + Expect(fs.WriteFile(filepath.Join(dir, "good-version", "kube-apiserver"), nil, 0755)).To(Succeed()) + Expect(fs.WriteFile(filepath.Join(dir, "good-version", "kubectl"), nil, 0755)).To(Succeed()) + Expect(fs.WriteFile(filepath.Join(dir, "good-version", "etcd"), nil, 0755)).To(Succeed()) + // TODO: put the right files +}
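
For reference, the fake routes registered by `handleRemoteVersions` mirror the two GCS calls the client makes: `GET /storage/v1/b/<bucket>/o` returns a JSON object listing, and `GET /storage/v1/b/<bucket>/o/<name>` returns either the object metadata or, with `?alt=media`, the gzipped tarball bytes. The standalone sketch below (not part of the diff) just prints the listing payload that `objectList`/`bucketObject` model; the structs are re-declared here because the test's types are unexported, and the object name and hash values are illustrative only.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Re-declared for illustration: only the fields the setup-envtest client
// reads from the GCS bucket listing (see the test utilities above).
type bucketObject struct {
	Name string `json:"name"`
	Hash string `json:"md5Hash"` // base64-encoded MD5, as produced by verWith
}

type objectList struct {
	Items []bucketObject `json:"items"`
}

func main() {
	// Roughly what the fake GET /storage/v1/b/kubebuilder-tools-test/o
	// route responds with (hash value here is made up).
	list := objectList{Items: []bucketObject{
		{Name: "kubebuilder-tools-1.16.4-linux-amd64.tar.gz", Hash: "XrY7u+Ae7tCTyyK7j1rNww=="},
	}}
	out, err := json.MarshalIndent(list, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// Requesting a listed object with ?alt=media would then return the
	// gzipped tarball contents rather than this metadata.
}
```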