Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Annotate resources with URNs to prevent accidental deletion #2999

Draft
wants to merge 4 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
30 changes: 27 additions & 3 deletions provider/pkg/await/await.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ import (
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"strings"

Expand Down Expand Up @@ -790,7 +791,7 @@ func Deletion(c DeleteConfig) error {
return nilIfGVKDeleted(err)
}

err = deleteResource(c.Context, c.Name, client)
err = deleteResource(c.Context, c.Name, c.URN, client)
if err != nil {
return nilIfGVKDeleted(err)
}
Expand Down Expand Up @@ -880,8 +881,31 @@ func Deletion(c DeleteConfig) error {
return nil
}

// deleteResource deletes the specified resource using foreground cascading delete.
func deleteResource(ctx context.Context, name string, client dynamic.ResourceInterface) error {
// deleteResource deletes the specified physical resource using foreground
// cascading delete. It should not be invoked on Patch resources; use
// ssa.Relinquish instead.
//
// If the resource does not exist in the cluster a StatusError{404} is
// returned. This is intended to short-circuit subsequent await logic.
//
// If the resource exists in the cluster but is annotated with a URN different
// from the URN being deleted we also return a StatusError{404}. This
// ameliorates situations like pulumi-kubernetes#2948 where resources could be
// unintentionally deleted after renaming.
func deleteResource(ctx context.Context, name string, urn resource.URN, client dynamic.ResourceInterface) error {
errNotFound := &apierrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusNotFound}}

done, live := checkIfResourceDeleted(ctx, name, client)
if done {
return errNotFound
}

annotatedURN := metadata.GetAnnotationValue(live, metadata.AnnotationURN)
if annotatedURN != "" && annotatedURN != string(urn) {
// TODO: Check project/stack.
return errNotFound
}

fg := metav1.DeletePropagationForeground
deleteOpts := metav1.DeleteOptions{
PropagationPolicy: &fg,
Expand Down
3 changes: 1 addition & 2 deletions provider/pkg/await/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,8 +63,7 @@ func watchAddedEvent(obj runtime.Object) watch.Event {
// --------------------------------------------------------------------------

// is404 reports whether err represents a Kubernetes API "not found" response.
// It delegates to apierrors.IsNotFound (imported here as errors), which
// recognizes any error carrying a NotFound status reason — not only a
// *errors.StatusError with a literal 404 code, as the previous hand-rolled
// type assertion did.
func is404(err error) bool {
	return errors.IsNotFound(err)
}

// --------------------------------------------------------------------------
Expand Down
1 change: 1 addition & 0 deletions provider/pkg/metadata/annotations.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ const (
AnnotationSkipAwait = AnnotationPrefix + "skipAwait"
AnnotationTimeoutSeconds = AnnotationPrefix + "timeoutSeconds"
AnnotationReplaceUnready = AnnotationPrefix + "replaceUnready"
AnnotationURN = AnnotationPrefix + "urn"

AnnotationPatchForce = AnnotationPrefix + "patchForce"
AnnotationPatchFieldManager = AnnotationPrefix + "patchFieldManager"
Expand Down
52 changes: 32 additions & 20 deletions provider/pkg/provider/provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -811,8 +811,8 @@ func (k *kubeProvider) Configure(_ context.Context, req *pulumirpc.ConfigureRequ

// Invoke dynamically executes a built-in function in the provider.
func (k *kubeProvider) Invoke(ctx context.Context,
req *pulumirpc.InvokeRequest) (*pulumirpc.InvokeResponse, error) {

req *pulumirpc.InvokeRequest,
) (*pulumirpc.InvokeResponse, error) {
// Important: Some invoke logic is intended to run during preview, and the Kubernetes provider
// inputs may not have resolved yet. Any invoke logic that depends on an active cluster must check
// k.clusterUnreachable and handle that condition appropriately.
Expand Down Expand Up @@ -920,8 +920,8 @@ func (k *kubeProvider) Invoke(ctx context.Context,
// StreamInvoke dynamically executes a built-in function in the provider. The result is streamed
// back as a series of messages.
func (k *kubeProvider) StreamInvoke(
req *pulumirpc.InvokeRequest, server pulumirpc.ResourceProvider_StreamInvokeServer) error {

req *pulumirpc.InvokeRequest, server pulumirpc.ResourceProvider_StreamInvokeServer,
) error {
// Important: Some invoke logic is intended to run during preview, and the Kubernetes provider
// inputs may not have resolved yet. Any invoke logic that depends on an active cluster must check
// k.clusterUnreachable and handle that condition appropriately.
Expand Down Expand Up @@ -1328,6 +1328,13 @@ func (k *kubeProvider) Check(ctx context.Context, req *pulumirpc.CheckRequest) (
return nil, err
}

// Annotate physical resources with their URN in order to avoid accidental
// deletions.
// TODO: Add provider config for opt-in.
if !kinds.IsPatchURN(urn) {
metadata.SetAnnotation(newInputs, metadata.AnnotationURN, string(urn))
}

if k.serverSideApplyMode && kinds.IsPatchURN(urn) {
if len(newInputs.GetName()) == 0 {
return nil, fmt.Errorf("patch resources require the `.metadata.name` field to be set")
Expand Down Expand Up @@ -1718,8 +1725,7 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p

// Delete before replacement if we are forced to replace the old object, and the new version of
// that object MUST have the same name.
deleteBeforeReplace :=
// 1. We know resource must be replaced.
deleteBeforeReplace := // 1. We know resource must be replaced.
len(replaces) > 0 &&
// 2. Object is named (i.e., not using metadata.generateName).
metadata.IsNamed(newInputs, newResInputs) &&
Expand Down Expand Up @@ -1800,7 +1806,7 @@ func (k *kubeProvider) Create(
}

initialAPIVersion := newInputs.GetAPIVersion()
fieldManager := k.fieldManagerName(nil, newResInputs, newInputs)
fieldManager := k.fieldManagerName(urn, newResInputs, newInputs)

if k.yamlRenderMode {
if newResInputs.ContainsSecrets() {
Expand Down Expand Up @@ -2057,7 +2063,7 @@ func (k *kubeProvider) Read(ctx context.Context, req *pulumirpc.ReadRequest) (*p
}

initialAPIVersion := initialAPIVersion(oldState, oldInputs)
fieldManager := k.fieldManagerName(nil, oldState, oldInputs)
fieldManager := k.fieldManagerName(urn, oldState, oldInputs)

if k.yamlRenderMode {
// Return a new "checkpoint object".
Expand Down Expand Up @@ -2297,8 +2303,8 @@ func (k *kubeProvider) Update(
oldLivePruned := pruneLiveState(oldLive, oldInputs)

initialAPIVersion := initialAPIVersion(oldState, oldInputs)
fieldManagerOld := k.fieldManagerName(nil, oldState, oldInputs)
fieldManager := k.fieldManagerName(nil, oldState, newInputs)
fieldManagerOld := k.fieldManagerName(urn, oldState, oldInputs)
fieldManager := k.fieldManagerName(urn, oldState, newInputs)

if k.yamlRenderMode {
if newResInputs.ContainsSecrets() {
Expand Down Expand Up @@ -2485,7 +2491,7 @@ func (k *kubeProvider) Delete(ctx context.Context, req *pulumirpc.DeleteRequest)
}

initialAPIVersion := initialAPIVersion(oldState, &unstructured.Unstructured{})
fieldManager := k.fieldManagerName(nil, oldState, oldInputs)
fieldManager := k.fieldManagerName(urn, oldState, oldInputs)
resources, err := k.getResources()
if err != nil {
return nil, pkgerrors.Wrapf(err, "Failed to fetch OpenAPI schema from the API server")
Expand Down Expand Up @@ -2671,7 +2677,7 @@ func (k *kubeProvider) isDryRunDisabledError(err error) bool {
// 2. Value from the Pulumi state
// 3. Randomly generated name
func (k *kubeProvider) fieldManagerName(
randomSeed []byte, state resource.PropertyMap, inputs *unstructured.Unstructured,
urn resource.URN, state resource.PropertyMap, inputs *unstructured.Unstructured,
) string {
// Always use the same fieldManager name for Client-Side Apply mode to avoid conflicts based on the name of the
// provider executable.
Expand All @@ -2686,6 +2692,15 @@ func (k *kubeProvider) fieldManagerName(
return v.StringValue()
}

// Seed our randomness with our project/stack so all physical resources in
// the same stack get the same manager. Patch resources still require
// random managers in order to correctly stack on top of each other.
randomSeed := []byte{}
if !kinds.IsPatchURN(urn) {
randomSeed = append(randomSeed, []byte(urn.Project())...)
randomSeed = append(randomSeed, []byte(urn.Stack())...)
}

prefix := "pulumi-kubernetes-"
// This function is called from other provider functions apart from Check and so doesn't have a randomSeed
// for those calls, but the field manager name should have already been filled in via Check so this case
Expand Down Expand Up @@ -2856,8 +2871,8 @@ func initialAPIVersion(state resource.PropertyMap, oldInputs *unstructured.Unstr
}

func checkpointObject(inputs, live *unstructured.Unstructured, fromInputs resource.PropertyMap,
initialAPIVersion, fieldManager string) resource.PropertyMap {

initialAPIVersion, fieldManager string,
) resource.PropertyMap {
object := resource.NewPropertyMapFromMap(live.Object)
inputsPM := resource.NewPropertyMapFromMap(inputs.Object)

Expand Down Expand Up @@ -2955,7 +2970,6 @@ var deleteResponse = &pulumirpc.ReadResponse{Id: "", Properties: nil}
func convertPatchToDiff(
patch, oldLiveState, newInputs, oldInputs map[string]any, forceNewFields ...string,
) (map[string]*pulumirpc.PropertyDiff, error) {

contract.Requiref(len(patch) != 0, "patch", "expected len() != 0")
contract.Requiref(oldLiveState != nil, "oldLiveState", "expected != nil")

Expand Down Expand Up @@ -3137,7 +3151,6 @@ func (pc *patchConverter) addPatchValueToDiff(
func (pc *patchConverter) addPatchMapToDiff(
path []any, m, old, newInput, oldInput map[string]any, inArray bool,
) error {

if newInput == nil {
newInput = map[string]any{}
}
Expand Down Expand Up @@ -3167,7 +3180,6 @@ func (pc *patchConverter) addPatchMapToDiff(
func (pc *patchConverter) addPatchArrayToDiff(
path []any, a, old, newInput, oldInput []any, inArray bool,
) error {

at := func(arr []any, i int) any {
if i < len(arr) {
return arr[i]
Expand Down Expand Up @@ -3263,20 +3275,20 @@ func renderYaml(resource *unstructured.Unstructured, yamlDirectory string) error
manifestDirectory := filepath.Join(yamlDirectory, "1-manifest")

if _, err := os.Stat(crdDirectory); os.IsNotExist(err) {
err = os.MkdirAll(crdDirectory, 0700)
err = os.MkdirAll(crdDirectory, 0o700)
if err != nil {
return pkgerrors.Wrapf(err, "failed to create directory for rendered YAML: %q", crdDirectory)
}
}
if _, err := os.Stat(manifestDirectory); os.IsNotExist(err) {
err = os.MkdirAll(manifestDirectory, 0700)
err = os.MkdirAll(manifestDirectory, 0o700)
if err != nil {
return pkgerrors.Wrapf(err, "failed to create directory for rendered YAML: %q", manifestDirectory)
}
}

path := renderPathForResource(resource, yamlDirectory)
err = os.WriteFile(path, yamlBytes, 0600)
err = os.WriteFile(path, yamlBytes, 0o600)
if err != nil {
return pkgerrors.Wrapf(err, "failed to write YAML file: %q", path)
}
Expand Down
37 changes: 37 additions & 0 deletions tests/provider/delete/patch/Pulumi.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
name: delete-patch-resource
runtime: yaml
description: |
  Deleting a logical patch resource should not delete the underlying physical
  resource.

outputs:
  # Surface the live managedFields so the test can assert both patch field
  # managers are present on the underlying ConfigMap.
  managedFields: ${patch2.metadata.managedFields}

resources:
  # The physical resource both patches target.
  configmap:
    type: kubernetes:core/v1:ConfigMap
    properties:
      metadata:
        name: patched-configmap

  # First patch layered onto the ConfigMap.
  patch1:
    type: kubernetes:core/v1:ConfigMapPatch
    properties:
      metadata:
        name: patched-configmap
      data:
        foo: bar
    options:
      dependsOn:
        - ${configmap}

  # Second patch, stacked on top of patch1.
  patch2:
    type: kubernetes:core/v1:ConfigMapPatch
    properties:
      metadata:
        name: patched-configmap
      data:
        boo: baz
    options:
      dependsOn:
        - ${patch1}
40 changes: 40 additions & 0 deletions tests/provider/delete/patch/step2/Pulumi.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
name: delete-patch-resource
runtime: yaml
description: |
  Deleting a logical patch resource should not delete the underlying physical
  resource.

outputs:
  # After patch2 is removed, patch1's field manager should still be present.
  managedFields: ${patch1.metadata.managedFields}

resources:
  # The physical resource the remaining patch targets.
  configmap:
    type: kubernetes:core/v1:ConfigMap
    properties:
      metadata:
        name: patched-configmap

  patch1:
    type: kubernetes:core/v1:ConfigMapPatch
    properties:
      metadata:
        name: patched-configmap
      data:
        foo: bar
    options:
      dependsOn:
        - ${configmap}

  # Delete patch2 - the underlying ConfigMap should not be deleted, and patch1
  # should still be applied.
  #
  # patch2:
  #   type: kubernetes:core/v1:ConfigMapPatch
  #   properties:
  #     metadata:
  #       name: patched-configmap
  #     data:
  #       boo: baz
  #   options:
  #     dependsOn:
  #       - ${patch1}
25 changes: 25 additions & 0 deletions tests/provider/delete/rename/Pulumi.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
name: delete-with-rename
runtime: yaml
description: |
  Changing a resource's name, but leaving .metadata untouched, should not
  result in a deletion from the cluster.

resources:
  ns:
    type: kubernetes:core/v1:Namespace
    properties:
      metadata:
        name: rename-namespace
        namespace: rename-namespace

  # A resource inside the namespace; it should survive the logical rename of
  # the namespace resource in step2.
  pod:
    type: kubernetes:core/v1:Pod
    properties:
      metadata:
        namespace: rename-namespace
      spec:
        containers:
          - image: nginx:1.14.2
            name: nginx
            ports:
              - containerPort: 80
27 changes: 27 additions & 0 deletions tests/provider/delete/rename/step2/Pulumi.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
name: delete-with-rename
runtime: yaml
description: |
  Changing a resource's name, but leaving .metadata untouched, should not
  result in a deletion from the cluster.

resources:
  # Change the resource's name from "ns" to "namespace" but leave everything
  # else the same.
  namespace:
    type: kubernetes:core/v1:Namespace
    properties:
      metadata:
        name: rename-namespace
        namespace: rename-namespace

  # Unchanged from step 1; must remain running after the rename.
  pod:
    type: kubernetes:core/v1:Pod
    properties:
      metadata:
        namespace: rename-namespace
      spec:
        containers:
          - image: nginx:1.14.2
            name: nginx
            ports:
              - containerPort: 80