From 26daec2c0d07ba5cdb2caa316aa200d151345bd6 Mon Sep 17 00:00:00 2001 From: Alexander Matyushentsev Date: Mon, 18 Dec 2023 08:40:23 -0800 Subject: [PATCH] feat: PostDelete hook support (#16595) * feat: PostDelete hooks support Signed-off-by: Alexander Matyushentsev --------- Signed-off-by: Alexander Matyushentsev Signed-off-by: penglongli --- controller/appcontroller.go | 150 +++++++++----- controller/appcontroller_test.go | 183 +++++++++++++++++- controller/hook.go | 158 +++++++++++++++ controller/state.go | 23 ++- controller/sync.go | 1 + docs/user-guide/helm.md | 4 +- docs/user-guide/resource_hooks.md | 2 + .../v1alpha1/application_defaults.go | 3 + pkg/apis/application/v1alpha1/types.go | 12 ++ test/e2e/hook_test.go | 20 ++ test/e2e/testdata/post-delete-hook/hook.yaml | 14 ++ 11 files changed, 501 insertions(+), 69 deletions(-) create mode 100644 controller/hook.go create mode 100644 test/e2e/testdata/post-delete-hook/hook.yaml diff --git a/controller/appcontroller.go b/controller/appcontroller.go index 964bcc1aa0dec..b98334b8fa567 100644 --- a/controller/appcontroller.go +++ b/controller/appcontroller.go @@ -15,7 +15,6 @@ import ( "sync" "time" - "github.com/argoproj/argo-cd/v2/pkg/ratelimiter" clustercache "github.com/argoproj/gitops-engine/pkg/cache" "github.com/argoproj/gitops-engine/pkg/diff" "github.com/argoproj/gitops-engine/pkg/health" @@ -59,6 +58,7 @@ import ( kubeerrors "k8s.io/apimachinery/pkg/api/errors" + "github.com/argoproj/argo-cd/v2/pkg/ratelimiter" appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate" "github.com/argoproj/argo-cd/v2/util/db" "github.com/argoproj/argo-cd/v2/util/errors" @@ -884,11 +884,10 @@ func (ctrl *ApplicationController) processAppOperationQueueItem() (processNext b if app.Operation != nil { ctrl.processRequestedAppOperation(app) - } else if app.DeletionTimestamp != nil && app.CascadedDeletion() { - _, err = ctrl.finalizeApplicationDeletion(app, func(project string) ([]*appv1.Cluster, error) { + } else if app.DeletionTimestamp != nil { + if err = ctrl.finalizeApplicationDeletion(app, func(project string) ([]*appv1.Cluster, error) { return ctrl.db.GetProjectClusters(context.Background(), project) - }) - if err != nil { + }); err != nil { ctrl.setAppCondition(app, appv1.ApplicationCondition{ Type: appv1.ApplicationConditionDeletionError, Message: err.Error(), @@ -1023,57 +1022,63 @@ func (ctrl *ApplicationController) getPermittedAppLiveObjects(app *appv1.Applica return objsMap, nil } -func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Application, projectClusters func(project string) ([]*appv1.Cluster, error)) ([]*unstructured.Unstructured, error) { +func (ctrl *ApplicationController) isValidDestination(app *appv1.Application) (bool, *argov1alpha.Cluster) { + // Validate the cluster using the Application destination's `name` field, if applicable, + // and set the Server field, if needed. 
+ if err := argo.ValidateDestination(context.Background(), &app.Spec.Destination, ctrl.db); err != nil { + log.Warnf("Unable to validate destination of the Application being deleted: %v", err) + return false, nil + } + + cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server) + if err != nil { + log.Warnf("Unable to locate cluster URL for Application being deleted: %v", err) + return false, nil + } + return true, cluster +} + +func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Application, projectClusters func(project string) ([]*appv1.Cluster, error)) error { logCtx := log.WithField("application", app.QualifiedName()) - logCtx.Infof("Deleting resources") // Get refreshed application info, since informer app copy might be stale app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(context.Background(), app.Name, metav1.GetOptions{}) if err != nil { if !apierr.IsNotFound(err) { logCtx.Errorf("Unable to get refreshed application info prior deleting resources: %v", err) } - return nil, nil + return nil } proj, err := ctrl.getAppProj(app) if err != nil { - return nil, err + return err } - // validDestination is true if the Application destination points to a cluster that is managed by Argo CD - // (and thus either a cluster secret exists for it, or it's local); validDestination is false otherwise. - validDestination := true - - // Validate the cluster using the Application destination's `name` field, if applicable, - // and set the Server field, if needed. - if err := argo.ValidateDestination(context.Background(), &app.Spec.Destination, ctrl.db); err != nil { - log.Warnf("Unable to validate destination of the Application being deleted: %v", err) - validDestination = false - } - - objs := make([]*unstructured.Unstructured, 0) - var cluster *appv1.Cluster - - // Attempt to validate the destination via its URL - if validDestination { - if cluster, err = ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server); err != nil { - log.Warnf("Unable to locate cluster URL for Application being deleted: %v", err) - validDestination = false + isValid, cluster := ctrl.isValidDestination(app) + if !isValid { + app.UnSetCascadedDeletion() + app.UnSetPostDeleteFinalizer() + if err := ctrl.updateFinalizers(app); err != nil { + return err } + logCtx.Infof("Resource entries removed from undefined cluster") + return nil } + config := metrics.AddMetricsTransportWrapper(ctrl.metricsServer, app, cluster.RESTConfig()) - if validDestination { + if app.CascadedDeletion() { + logCtx.Infof("Deleting resources") // ApplicationDestination points to a valid cluster, so we may clean up the live objects - + objs := make([]*unstructured.Unstructured, 0) objsMap, err := ctrl.getPermittedAppLiveObjects(app, proj, projectClusters) if err != nil { - return nil, err + return err } for k := range objsMap { // Wait for objects pending deletion to complete before proceeding with next sync wave if objsMap[k].GetDeletionTimestamp() != nil { logCtx.Infof("%d objects remaining for deletion", len(objsMap)) - return objs, nil + return nil } if ctrl.shouldBeDeleted(app, objsMap[k]) { @@ -1081,8 +1086,6 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic } } - config := metrics.AddMetricsTransportWrapper(ctrl.metricsServer, app, cluster.RESTConfig()) - filteredObjs := FilterObjectsForDeletion(objs) propagationPolicy := metav1.DeletePropagationForeground @@ -1096,12 +1099,12 @@ func (ctrl *ApplicationController) 
finalizeApplicationDeletion(app *appv1.Applic return ctrl.kubectl.DeleteResource(context.Background(), config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) }) if err != nil { - return objs, err + return err } objsMap, err = ctrl.getPermittedAppLiveObjects(app, proj, projectClusters) if err != nil { - return nil, err + return err } for k, obj := range objsMap { @@ -1111,38 +1114,67 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic } if len(objsMap) > 0 { logCtx.Infof("%d objects remaining for deletion", len(objsMap)) - return objs, nil + return nil } + logCtx.Infof("Successfully deleted %d resources", len(objs)) + app.UnSetCascadedDeletion() + return ctrl.updateFinalizers(app) } - if err := ctrl.cache.SetAppManagedResources(app.Name, nil); err != nil { - return objs, err - } + if app.HasPostDeleteFinalizer() { + objsMap, err := ctrl.getPermittedAppLiveObjects(app, proj, projectClusters) + if err != nil { + return err + } - if err := ctrl.cache.SetAppResourcesTree(app.Name, nil); err != nil { - return objs, err + done, err := ctrl.executePostDeleteHooks(app, proj, objsMap, config, logCtx) + if err != nil { + return err + } + if !done { + return nil + } + app.UnSetPostDeleteFinalizer() + return ctrl.updateFinalizers(app) } - if err := ctrl.removeCascadeFinalizer(app); err != nil { - return objs, err + if app.HasPostDeleteFinalizer("cleanup") { + objsMap, err := ctrl.getPermittedAppLiveObjects(app, proj, projectClusters) + if err != nil { + return err + } + + done, err := ctrl.cleanupPostDeleteHooks(objsMap, config, logCtx) + if err != nil { + return err + } + if !done { + return nil + } + app.UnSetPostDeleteFinalizer("cleanup") + return ctrl.updateFinalizers(app) } - if validDestination { - logCtx.Infof("Successfully deleted %d resources", len(objs)) - } else { - logCtx.Infof("Resource entries removed from undefined cluster") + if !app.CascadedDeletion() && !app.HasPostDeleteFinalizer() { + if err := ctrl.cache.SetAppManagedResources(app.Name, nil); err != nil { + return err + } + + if err := ctrl.cache.SetAppResourcesTree(app.Name, nil); err != nil { + return err + } + ctrl.projectRefreshQueue.Add(fmt.Sprintf("%s/%s", ctrl.namespace, app.Spec.GetProject())) } - ctrl.projectRefreshQueue.Add(fmt.Sprintf("%s/%s", ctrl.namespace, app.Spec.GetProject())) - return objs, nil + return nil } -func (ctrl *ApplicationController) removeCascadeFinalizer(app *appv1.Application) error { +func (ctrl *ApplicationController) updateFinalizers(app *appv1.Application) error { _, err := ctrl.getAppProj(app) if err != nil { return fmt.Errorf("error getting project: %w", err) } - app.UnSetCascadedDeletion() + var patch []byte patch, _ = json.Marshal(map[string]interface{}{ "metadata": map[string]interface{}{ @@ -1566,6 +1598,20 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo app.Status.SourceTypes = compareResult.appSourceTypes app.Status.ControllerNamespace = ctrl.namespace patchMs = ctrl.persistAppStatus(origApp, &app.Status) + if (compareResult.hasPostDeleteHooks != app.HasPostDeleteFinalizer() || compareResult.hasPostDeleteHooks != app.HasPostDeleteFinalizer("cleanup")) && + app.GetDeletionTimestamp() == nil { + if compareResult.hasPostDeleteHooks { + app.SetPostDeleteFinalizer() + app.SetPostDeleteFinalizer("cleanup") + } else { + app.UnSetPostDeleteFinalizer() + app.UnSetPostDeleteFinalizer("cleanup") + } + + if err := ctrl.updateFinalizers(app); err != nil { + 
logCtx.Errorf("Failed to update finalizers: %v", err) + } + } return } diff --git a/controller/appcontroller_test.go b/controller/appcontroller_test.go index 9c2933feeb6df..2efc7ac723bc7 100644 --- a/controller/appcontroller_test.go +++ b/controller/appcontroller_test.go @@ -7,8 +7,11 @@ import ( "testing" "time" + "github.com/argoproj/gitops-engine/pkg/utils/kube/kubetest" "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/client-go/rest" clustercache "github.com/argoproj/gitops-engine/pkg/cache" @@ -18,7 +21,6 @@ import ( "github.com/argoproj/gitops-engine/pkg/cache/mocks" synccommon "github.com/argoproj/gitops-engine/pkg/sync/common" "github.com/argoproj/gitops-engine/pkg/utils/kube" - "github.com/argoproj/gitops-engine/pkg/utils/kube/kubetest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" corev1 "k8s.io/api/core/v1" @@ -59,6 +61,23 @@ type fakeData struct { applicationNamespaces []string } +type MockKubectl struct { + kube.Kubectl + + DeletedResources []kube.ResourceKey + CreatedResources []*unstructured.Unstructured +} + +func (m *MockKubectl) CreateResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, obj *unstructured.Unstructured, createOptions metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) { + m.CreatedResources = append(m.CreatedResources, obj) + return m.Kubectl.CreateResource(ctx, config, gvk, name, namespace, obj, createOptions, subresources...) +} + +func (m *MockKubectl) DeleteResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, deleteOptions metav1.DeleteOptions) error { + m.DeletedResources = append(m.DeletedResources, kube.NewResourceKey(gvk.Group, gvk.Kind, namespace, name)) + return m.Kubectl.DeleteResource(ctx, config, gvk, name, namespace, deleteOptions) +} + func newFakeController(data *fakeData, repoErr error) *ApplicationController { var clust corev1.Secret err := yaml.Unmarshal([]byte(fakeCluster), &clust) @@ -109,7 +128,7 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController { } kubeClient := fake.NewSimpleClientset(&clust, &cm, &secret) settingsMgr := settings.NewSettingsManager(context.Background(), kubeClient, test.FakeArgoCDNamespace) - kubectl := &kubetest.MockKubectlCmd{} + kubectl := &MockKubectl{Kubectl: &kubetest.MockKubectlCmd{}} ctrl, err := NewApplicationController( test.FakeArgoCDNamespace, settingsMgr, @@ -337,6 +356,38 @@ metadata: data: ` +var fakePostDeleteHook = ` +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "post-delete-hook", + "namespace": "default", + "labels": { + "app.kubernetes.io/instance": "my-app" + }, + "annotations": { + "argocd.argoproj.io/hook": "PostDelete", + "argocd.argoproj.io/hook-delete-policy": "HookSucceeded" + } + }, + "spec": { + "containers": [ + { + "name": "post-delete-hook", + "image": "busybox", + "restartPolicy": "Never", + "command": [ + "/bin/sh", + "-c", + "sleep 5 && echo hello from the post-delete-hook pod" + ] + } + ] + } +} +` + func newFakeApp() *v1alpha1.Application { return createFakeApp(fakeApp) } @@ -371,6 +422,15 @@ func newFakeCM() map[string]interface{} { return cm } +func newFakePostDeleteHook() map[string]interface{} { + var cm map[string]interface{} + err := yaml.Unmarshal([]byte(fakePostDeleteHook), &cm) + if err != nil { + panic(err) + } + return cm +} + func TestAutoSync(t *testing.T) { app := newFakeApp() 
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil) @@ -619,6 +679,7 @@ func TestFinalizeAppDeletion(t *testing.T) { // Ensure app can be deleted cascading t.Run("CascadingDelete", func(t *testing.T) { app := newFakeApp() + app.SetCascadedDeletion(v1alpha1.ResourcesFinalizerName) app.Spec.Destination.Namespace = test.FakeArgoCDNamespace appObj := kube.MustToUnstructured(&app) ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{ @@ -636,7 +697,7 @@ func TestFinalizeAppDeletion(t *testing.T) { patched = true return true, &v1alpha1.Application{}, nil }) - _, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) { + err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) { return []*v1alpha1.Cluster{}, nil }) assert.NoError(t, err) @@ -662,6 +723,7 @@ func TestFinalizeAppDeletion(t *testing.T) { }, } app := newFakeApp() + app.SetCascadedDeletion(v1alpha1.ResourcesFinalizerName) app.Spec.Destination.Namespace = test.FakeArgoCDNamespace app.Spec.Project = "restricted" appObj := kube.MustToUnstructured(&app) @@ -686,7 +748,7 @@ func TestFinalizeAppDeletion(t *testing.T) { patched = true return true, &v1alpha1.Application{}, nil }) - objs, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) { + err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) { return []*v1alpha1.Cluster{}, nil }) assert.NoError(t, err) @@ -697,14 +759,16 @@ func TestFinalizeAppDeletion(t *testing.T) { } // Managed objects must be empty assert.Empty(t, objsMap) + // Loop through all deleted objects, ensure that test-cm is none of them - for _, o := range objs { - assert.NotEqual(t, "test-cm", o.GetName()) + for _, o := range ctrl.kubectl.(*MockKubectl).DeletedResources { + assert.NotEqual(t, "test-cm", o.Name) } }) t.Run("DeleteWithDestinationClusterName", func(t *testing.T) { app := newFakeAppWithDestName() + app.SetCascadedDeletion(v1alpha1.ResourcesFinalizerName) appObj := kube.MustToUnstructured(&app) ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{ kube.GetResourceKey(appObj): appObj, @@ -720,7 +784,7 @@ func TestFinalizeAppDeletion(t *testing.T) { patched = true return true, &v1alpha1.Application{}, nil }) - _, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) { + err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) { return []*v1alpha1.Cluster{}, nil }) assert.NoError(t, err) @@ -745,7 +809,7 @@ func TestFinalizeAppDeletion(t *testing.T) { fakeAppCs.AddReactor("get", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) { return defaultReactor.React(action) }) - _, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) { + err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) { return []*v1alpha1.Cluster{}, nil }) assert.NoError(t, err) @@ -766,6 +830,109 @@ func TestFinalizeAppDeletion(t *testing.T) { }) + t.Run("PostDelete_HookIsCreated", func(t *testing.T) { + app := newFakeApp() + app.SetPostDeleteFinalizer() + app.Spec.Destination.Namespace = test.FakeArgoCDNamespace + ctrl := newFakeController(&fakeData{ + manifestResponses: []*apiclient.ManifestResponse{{ 
+ Manifests: []string{fakePostDeleteHook}, + }}, + apps: []runtime.Object{app, &defaultProj}, + managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{}}, nil) + + patched := false + fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset) + defaultReactor := fakeAppCs.ReactionChain[0] + fakeAppCs.ReactionChain = nil + fakeAppCs.AddReactor("get", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) { + return defaultReactor.React(action) + }) + fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) { + patched = true + return true, &v1alpha1.Application{}, nil + }) + err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) { + return []*v1alpha1.Cluster{}, nil + }) + assert.NoError(t, err) + // finalizer is not deleted + assert.False(t, patched) + // post-delete hook is created + require.Len(t, ctrl.kubectl.(*MockKubectl).CreatedResources, 1) + require.Equal(t, "post-delete-hook", ctrl.kubectl.(*MockKubectl).CreatedResources[0].GetName()) + }) + + t.Run("PostDelete_HookIsExecuted", func(t *testing.T) { + app := newFakeApp() + app.SetPostDeleteFinalizer() + app.Spec.Destination.Namespace = test.FakeArgoCDNamespace + liveHook := &unstructured.Unstructured{Object: newFakePostDeleteHook()} + require.NoError(t, unstructured.SetNestedField(liveHook.Object, "Succeeded", "status", "phase")) + ctrl := newFakeController(&fakeData{ + manifestResponses: []*apiclient.ManifestResponse{{ + Manifests: []string{fakePostDeleteHook}, + }}, + apps: []runtime.Object{app, &defaultProj}, + managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{ + kube.GetResourceKey(liveHook): liveHook, + }}, nil) + + patched := false + fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset) + defaultReactor := fakeAppCs.ReactionChain[0] + fakeAppCs.ReactionChain = nil + fakeAppCs.AddReactor("get", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) { + return defaultReactor.React(action) + }) + fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) { + patched = true + return true, &v1alpha1.Application{}, nil + }) + err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) { + return []*v1alpha1.Cluster{}, nil + }) + assert.NoError(t, err) + // finalizer is removed + assert.True(t, patched) + }) + + t.Run("PostDelete_HookIsDeleted", func(t *testing.T) { + app := newFakeApp() + app.SetPostDeleteFinalizer("cleanup") + app.Spec.Destination.Namespace = test.FakeArgoCDNamespace + liveHook := &unstructured.Unstructured{Object: newFakePostDeleteHook()} + require.NoError(t, unstructured.SetNestedField(liveHook.Object, "Succeeded", "status", "phase")) + ctrl := newFakeController(&fakeData{ + manifestResponses: []*apiclient.ManifestResponse{{ + Manifests: []string{fakePostDeleteHook}, + }}, + apps: []runtime.Object{app, &defaultProj}, + managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{ + kube.GetResourceKey(liveHook): liveHook, + }}, nil) + + patched := false + fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset) + defaultReactor := fakeAppCs.ReactionChain[0] + fakeAppCs.ReactionChain = nil + fakeAppCs.AddReactor("get", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) { + return defaultReactor.React(action) + }) + fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled 
bool, ret runtime.Object, err error) { + patched = true + return true, &v1alpha1.Application{}, nil + }) + err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) { + return []*v1alpha1.Cluster{}, nil + }) + assert.NoError(t, err) + // post-delete hook is deleted + require.Len(t, ctrl.kubectl.(*MockKubectl).DeletedResources, 1) + require.Equal(t, "post-delete-hook", ctrl.kubectl.(*MockKubectl).DeletedResources[0].Name) + // finalizer is not removed + assert.False(t, patched) + }) } // TestNormalizeApplication verifies we normalize an application during reconciliation diff --git a/controller/hook.go b/controller/hook.go new file mode 100644 index 0000000000000..0c019ac6a1e08 --- /dev/null +++ b/controller/hook.go @@ -0,0 +1,158 @@ +package controller + +import ( + "context" + + "github.com/argoproj/gitops-engine/pkg/health" + "github.com/argoproj/gitops-engine/pkg/sync/common" + "github.com/argoproj/gitops-engine/pkg/sync/hook" + "github.com/argoproj/gitops-engine/pkg/utils/kube" + log "github.com/sirupsen/logrus" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/rest" + + "github.com/argoproj/argo-cd/v2/util/lua" + + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" +) + +var ( + postDeleteHook = "PostDelete" + postDeleteHooks = map[string]string{ + "argocd.argoproj.io/hook": postDeleteHook, + "helm.sh/hook": "post-delete", + } +) + +func isHook(obj *unstructured.Unstructured) bool { + return hook.IsHook(obj) || isPostDeleteHook(obj) +} + +func isPostDeleteHook(obj *unstructured.Unstructured) bool { + if obj == nil || obj.GetAnnotations() == nil { + return false + } + for k, v := range postDeleteHooks { + if val, ok := obj.GetAnnotations()[k]; ok && val == v { + return true + } + } + return false +} + +func (ctrl *ApplicationController) executePostDeleteHooks(app *v1alpha1.Application, proj *v1alpha1.AppProject, liveObjs map[kube.ResourceKey]*unstructured.Unstructured, config *rest.Config, logCtx *log.Entry) (bool, error) { + appLabelKey, err := ctrl.settingsMgr.GetAppInstanceLabelKey() + if err != nil { + return false, err + } + var revisions []string + for _, src := range app.Spec.GetSources() { + revisions = append(revisions, src.TargetRevision) + } + + targets, _, err := ctrl.appStateManager.GetRepoObjs(app, app.Spec.GetSources(), appLabelKey, revisions, false, false, false, proj) + if err != nil { + return false, err + } + runningHooks := map[kube.ResourceKey]*unstructured.Unstructured{} + for key, obj := range liveObjs { + if isPostDeleteHook(obj) { + runningHooks[key] = obj + } + } + + expectedHook := map[kube.ResourceKey]*unstructured.Unstructured{} + for _, obj := range targets { + if obj.GetNamespace() == "" { + obj.SetNamespace(app.Spec.Destination.Namespace) + } + if !isPostDeleteHook(obj) { + continue + } + if runningHook := runningHooks[kube.GetResourceKey(obj)]; runningHook == nil { + expectedHook[kube.GetResourceKey(obj)] = obj + } + } + createdCnt := 0 + for _, obj := range expectedHook { + _, err = ctrl.kubectl.CreateResource(context.Background(), config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), obj, v1.CreateOptions{}) + if err != nil { + return false, err + } + createdCnt++ + } + if createdCnt > 0 { + logCtx.Infof("Created %d post-delete hooks", createdCnt) + return false, nil + } + resourceOverrides, err := ctrl.settingsMgr.GetResourceOverrides() + if err != nil { + return false, err + } + healthOverrides := 
lua.ResourceHealthOverrides(resourceOverrides) + + progressingHooksCnt := 0 + for _, obj := range runningHooks { + hookHealth, err := health.GetResourceHealth(obj, healthOverrides) + if err != nil { + return false, err + } + if hookHealth.Status == health.HealthStatusProgressing { + progressingHooksCnt++ + } + } + if progressingHooksCnt > 0 { + logCtx.Infof("Waiting for %d post-delete hooks to complete", progressingHooksCnt) + return false, nil + } + + return true, nil +} + +func (ctrl *ApplicationController) cleanupPostDeleteHooks(liveObjs map[kube.ResourceKey]*unstructured.Unstructured, config *rest.Config, logCtx *log.Entry) (bool, error) { + resourceOverrides, err := ctrl.settingsMgr.GetResourceOverrides() + if err != nil { + return false, err + } + healthOverrides := lua.ResourceHealthOverrides(resourceOverrides) + + pendingDeletionCount := 0 + aggregatedHealth := health.HealthStatusHealthy + var hooks []*unstructured.Unstructured + for _, obj := range liveObjs { + if !isPostDeleteHook(obj) { + continue + } + hookHealth, err := health.GetResourceHealth(obj, healthOverrides) + if err != nil { + return false, err + } + if health.IsWorse(aggregatedHealth, hookHealth.Status) { + aggregatedHealth = hookHealth.Status + } + hooks = append(hooks, obj) + } + + for _, obj := range hooks { + for _, policy := range hook.DeletePolicies(obj) { + if policy == common.HookDeletePolicyHookFailed && aggregatedHealth == health.HealthStatusDegraded || policy == common.HookDeletePolicyHookSucceeded && aggregatedHealth == health.HealthStatusHealthy { + pendingDeletionCount++ + if obj.GetDeletionTimestamp() != nil { + continue + } + logCtx.Infof("Deleting post-delete hook %s/%s", obj.GetNamespace(), obj.GetName()) + err = ctrl.kubectl.DeleteResource(context.Background(), config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), v1.DeleteOptions{}) + if err != nil { + return false, err + } + } + } + + } + if pendingDeletionCount > 0 { + logCtx.Infof("Waiting for %d post-delete hooks to be deleted", pendingDeletionCount) + return false, nil + } + return true, nil +} diff --git a/controller/state.go b/controller/state.go index 59e7fa31248ae..ccd17bf532643 100644 --- a/controller/state.go +++ b/controller/state.go @@ -71,6 +71,7 @@ type managedResource struct { type AppStateManager interface { CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool) (*comparisonResult, error) SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState) + GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error) } // comparisonResult holds the state of an application after the reconciliation @@ -85,8 +86,9 @@ type comparisonResult struct { // appSourceTypes stores the SourceType for each application source under sources field appSourceTypes []v1alpha1.ApplicationSourceType // timings maps phases of comparison to the duration it took to complete (for statistical purposes) - timings map[string]time.Duration - diffResultList *diff.DiffResultList + timings map[string]time.Duration + diffResultList *diff.DiffResultList + hasPostDeleteHooks bool } func (res *comparisonResult) GetSyncStatus() *v1alpha1.SyncStatus { @@ -116,11 +118,11 @@ type appStateManager 
struct { repoErrorGracePeriod time.Duration } -// getRepoObjs will generate the manifests for the given application delegating the +// GetRepoObjs will generate the manifests for the given application delegating the // task to the repo-server. It returns the list of generated manifests as unstructured // objects. It also returns the full response from all calls to the repo server as the // second argument. -func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error) { +func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error) { ts := stats.NewTimingStats() helmRepos, err := m.db.ListHelmRepositories(context.Background()) if err != nil { @@ -236,7 +238,7 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, sources []v1alp logCtx = logCtx.WithField(k, v.Milliseconds()) } logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds()) - logCtx.Info("getRepoObjs stats") + logCtx.Info("GetRepoObjs stats") return targetObjs, manifestInfos, nil } @@ -415,7 +417,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1 } } - targetObjs, manifestInfos, err = m.getRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project) + targetObjs, manifestInfos, err = m.GetRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project) if err != nil { targetObjs = make([]*unstructured.Unstructured, 0) msg := fmt.Sprintf("Failed to load target state: %s", err.Error()) @@ -569,6 +571,12 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1 } } } + hasPostDeleteHooks := false + for _, obj := range targetObjs { + if isPostDeleteHook(obj) { + hasPostDeleteHooks = true + } + } reconciliation := sync.Reconcile(targetObjs, liveObjByKey, app.Spec.Destination.Namespace, infoProvider) ts.AddCheckpoint("live_ms") @@ -643,7 +651,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1 Kind: gvk.Kind, Version: gvk.Version, Group: gvk.Group, - Hook: hookutil.IsHook(obj), + Hook: isHook(obj), RequiresPruning: targetObj == nil && liveObj != nil && isSelfReferencedObj, } if targetObj != nil { @@ -776,6 +784,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1 reconciliationResult: reconciliation, diffConfig: diffConfig, diffResultList: diffResults, + hasPostDeleteHooks: hasPostDeleteHooks, } if hasMultipleSources { diff --git a/controller/sync.go b/controller/sync.go index 2b925b7782b9e..435f62e587c34 100644 --- a/controller/sync.go +++ b/controller/sync.go @@ -283,6 +283,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha sync.WithInitialState(state.Phase, state.Message, initialResourcesRes, state.StartedAt), sync.WithResourcesFilter(func(key kube.ResourceKey, target *unstructured.Unstructured, live *unstructured.Unstructured) bool { return (len(syncOp.Resources) == 0 || + isPostDeleteHook(target) || argo.ContainsSyncResource(key.Name, key.Namespace, schema.GroupVersionKind{Kind: key.Kind, Group: key.Group}, syncOp.Resources)) 
&& m.isSelfReferencedObj(live, target, app.GetName(), appLabelKey, trackingMethod)
 		}),
diff --git a/docs/user-guide/helm.md b/docs/user-guide/helm.md
index 76853480a6def..866f9c6d935aa 100644
--- a/docs/user-guide/helm.md
+++ b/docs/user-guide/helm.md
@@ -210,7 +210,7 @@ is any normal Kubernetes resource annotated with the `helm.sh/hook` annotation.
 Argo CD supports many (most?) Helm hooks by mapping the Helm annotations onto Argo CD's own hook annotations:
 
 | Helm Annotation                 | Notes                                                                                           |
-| ------------------------------- | --------------------------------------------------------------------------------------------- |
+| ------------------------------- |-----------------------------------------------------------------------------------------------|
 | `helm.sh/hook: crd-install`     | Supported as equivalent to `argocd.argoproj.io/hook: PreSync`.                                  |
 | `helm.sh/hook: pre-delete`      | Not supported. In Helm stable there are 3 cases used to clean up CRDs and 3 to clean-up jobs.   |
 | `helm.sh/hook: pre-rollback`    | Not supported. Never used in Helm stable.                                                       |
@@ -218,7 +218,7 @@ Argo CD supports many (most?) Helm hooks by mapping the Helm annotations onto Ar
 | `helm.sh/hook: pre-upgrade`     | Supported as equivalent to `argocd.argoproj.io/hook: PreSync`.                                  |
 | `helm.sh/hook: post-upgrade`    | Supported as equivalent to `argocd.argoproj.io/hook: PostSync`.                                 |
 | `helm.sh/hook: post-install`    | Supported as equivalent to `argocd.argoproj.io/hook: PostSync`.                                 |
-| `helm.sh/hook: post-delete`     | Not supported. Never used in Helm stable.                                                       |
+| `helm.sh/hook: post-delete`     | Supported as equivalent to `argocd.argoproj.io/hook: PostDelete`.                               |
 | `helm.sh/hook: post-rollback`   | Not supported. Never used in Helm stable.                                                       |
 | `helm.sh/hook: test-success`    | Not supported. No equivalent in Argo CD.                                                        |
 | `helm.sh/hook: test-failure`    | Not supported. No equivalent in Argo CD.                                                        |
diff --git a/docs/user-guide/resource_hooks.md b/docs/user-guide/resource_hooks.md
index d705f8d21423d..74098b1810011 100644
--- a/docs/user-guide/resource_hooks.md
+++ b/docs/user-guide/resource_hooks.md
@@ -9,6 +9,8 @@ and after a Sync operation. Hooks can also be run if a Sync operation fails at a
   Kubernetes rolling update strategy.
 * Using a `PostSync` hook to run integration and health checks after a deployment.
 * Using a `SyncFail` hook to run clean-up or finalizer logic if a Sync operation fails. _`SyncFail` hooks are only available starting in v1.2_
+* Using a `PostDelete` hook to run clean-up or finalizer logic after all Application resources are deleted. Please note that
+  `PostDelete` hooks are deleted only if the delete policy matches the aggregated status of the deletion hooks; they are not garbage collected after the application is deleted.
 
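To make the delete-policy rule in the new `PostDelete` bullet concrete, here is a minimal Go sketch (an editor's illustration, not code from this patch) of the check performed by `cleanupPostDeleteHooks` in `controller/hook.go`: a hook is removed only when one of its delete policies matches the aggregated health of all post-delete hooks (`HookSucceeded` pairs with `Healthy`, `HookFailed` with `Degraded`).

```go
package main

import (
	"fmt"

	"github.com/argoproj/gitops-engine/pkg/health"
	"github.com/argoproj/gitops-engine/pkg/sync/common"
)

// shouldDeleteHook condenses the cleanup rule: a post-delete hook is deleted
// only when one of its delete policies matches the aggregated health of all
// post-delete hooks. Otherwise the hook is left in place for inspection.
func shouldDeleteHook(policies []common.HookDeletePolicy, aggregated health.HealthStatusCode) bool {
	for _, policy := range policies {
		if policy == common.HookDeletePolicyHookSucceeded && aggregated == health.HealthStatusHealthy {
			return true
		}
		if policy == common.HookDeletePolicyHookFailed && aggregated == health.HealthStatusDegraded {
			return true
		}
	}
	return false
}

func main() {
	policies := []common.HookDeletePolicy{common.HookDeletePolicyHookSucceeded}
	fmt.Println(shouldDeleteHook(policies, health.HealthStatusHealthy))  // true: all hooks succeeded, so the hook is removed
	fmt.Println(shouldDeleteHook(policies, health.HealthStatusDegraded)) // false: a hook failed, so it is kept for debugging
}
```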
## Usage diff --git a/pkg/apis/application/v1alpha1/application_defaults.go b/pkg/apis/application/v1alpha1/application_defaults.go index 2bc9b1bd0d744..ad8112af8c88d 100644 --- a/pkg/apis/application/v1alpha1/application_defaults.go +++ b/pkg/apis/application/v1alpha1/application_defaults.go @@ -9,6 +9,9 @@ const ( // ResourcesFinalizerName is the finalizer value which we inject to finalize deletion of an application ResourcesFinalizerName string = "resources-finalizer.argocd.argoproj.io" + // PostDeleteFinalizerName is the finalizer that controls post-delete hooks execution + PostDeleteFinalizerName string = "post-delete-finalizer.argocd.argoproj.io" + // ForegroundPropagationPolicyFinalizer is the finalizer we inject to delete application with foreground propagation policy ForegroundPropagationPolicyFinalizer string = "resources-finalizer.argocd.argoproj.io/foreground" diff --git a/pkg/apis/application/v1alpha1/types.go b/pkg/apis/application/v1alpha1/types.go index dc6a73f05dee5..49be82a443bc4 100644 --- a/pkg/apis/application/v1alpha1/types.go +++ b/pkg/apis/application/v1alpha1/types.go @@ -2651,6 +2651,18 @@ func (app *Application) IsRefreshRequested() (RefreshType, bool) { return refreshType, true } +func (app *Application) HasPostDeleteFinalizer(stage ...string) bool { + return getFinalizerIndex(app.ObjectMeta, strings.Join(append([]string{PostDeleteFinalizerName}, stage...), "/")) > -1 +} + +func (app *Application) SetPostDeleteFinalizer(stage ...string) { + setFinalizer(&app.ObjectMeta, strings.Join(append([]string{PostDeleteFinalizerName}, stage...), "/"), true) +} + +func (app *Application) UnSetPostDeleteFinalizer(stage ...string) { + setFinalizer(&app.ObjectMeta, strings.Join(append([]string{PostDeleteFinalizerName}, stage...), "/"), false) +} + // SetCascadedDeletion will enable cascaded deletion by setting the propagation policy finalizer func (app *Application) SetCascadedDeletion(finalizer string) { setFinalizer(&app.ObjectMeta, finalizer, true) diff --git a/test/e2e/hook_test.go b/test/e2e/hook_test.go index f6bb1be872ac6..2db8ff87795ad 100644 --- a/test/e2e/hook_test.go +++ b/test/e2e/hook_test.go @@ -1,12 +1,14 @@ package e2e import ( + "context" "fmt" "testing" "time" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/argoproj/gitops-engine/pkg/health" . "github.com/argoproj/gitops-engine/pkg/sync/common" @@ -48,6 +50,24 @@ func testHookSuccessful(t *testing.T, hookType HookType) { Expect(ResourceResultIs(ResourceResult{Version: "v1", Kind: "Pod", Namespace: DeploymentNamespace(), Name: "hook", Message: "pod/hook created", HookType: hookType, HookPhase: OperationSucceeded, SyncPhase: SyncPhase(hookType)})) } +func TestPostDeleteHook(t *testing.T) { + Given(t). + Path("post-delete-hook"). + When(). + CreateApp(). + Refresh(RefreshTypeNormal). + Delete(true). + Then(). + Expect(DoesNotExist()). + AndAction(func() { + hooks, err := KubeClientset.CoreV1().Pods(DeploymentNamespace()).List(context.Background(), metav1.ListOptions{}) + CheckError(err) + assert.Len(t, hooks.Items, 1) + assert.Equal(t, "hook", hooks.Items[0].Name) + }) + +} + // make sure that that hooks do not appear in "argocd app diff" func TestHookDiff(t *testing.T) { Given(t). 
diff --git a/test/e2e/testdata/post-delete-hook/hook.yaml b/test/e2e/testdata/post-delete-hook/hook.yaml
new file mode 100644
index 0000000000000..5631db681f1d0
--- /dev/null
+++ b/test/e2e/testdata/post-delete-hook/hook.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  annotations:
+    argocd.argoproj.io/hook: PostDelete
+  name: hook
+spec:
+  containers:
+    - command:
+        - "true"
+      image: quay.io/argoprojlabs/argocd-e2e-container:0.1
+      imagePullPolicy: IfNotPresent
+      name: main
+  restartPolicy: Never
\ No newline at end of file
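As a closing illustration of how the two post-delete finalizer stages used throughout this patch are encoded, the sketch below (an editor's addition mirroring the string handling in `HasPostDeleteFinalizer`/`SetPostDeleteFinalizer` in `types.go`) shows the finalizer values the controller reacts to: the bare finalizer gates hook execution, and the `cleanup` stage gates hook deletion.

```go
package main

import (
	"fmt"
	"strings"
)

// postDeleteFinalizerName mirrors PostDeleteFinalizerName from
// pkg/apis/application/v1alpha1/application_defaults.go.
const postDeleteFinalizerName = "post-delete-finalizer.argocd.argoproj.io"

// postDeleteFinalizer builds the finalizer value the same way the new
// Application helpers do: an optional stage is appended as a "/" suffix, so
// hook execution and hook cleanup are tracked by distinct finalizers.
func postDeleteFinalizer(stage ...string) string {
	return strings.Join(append([]string{postDeleteFinalizerName}, stage...), "/")
}

func main() {
	fmt.Println(postDeleteFinalizer())          // post-delete-finalizer.argocd.argoproj.io
	fmt.Println(postDeleteFinalizer("cleanup")) // post-delete-finalizer.argocd.argoproj.io/cleanup
}
```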