Posted to commits@camel.apache.org by pc...@apache.org on 2024/01/09 14:21:56 UTC

(camel-k) 08/08: chore: synthetic Integration ownership

This is an automated email from the ASF dual-hosted git repository.

pcongiusti pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/camel-k.git

commit 2bbe92b6936a6bf13d0048feaeb56b5d4b71b7b9
Author: Pasquale Congiusti <pa...@gmail.com>
AuthorDate: Wed Jan 3 16:19:07 2024 +0100

    chore: synthetic Integration ownership
---
 docs/modules/ROOT/pages/running/import.adoc        |  6 +-
 e2e/commonwithcustominstall/synthetic_test.go      | 33 ++++++--
 pkg/apis/camel/v1/integration_types.go             |  6 --
 pkg/cmd/operator/operator.go                       |  2 +-
 .../integration/integration_controller.go          | 25 ++++--
 pkg/controller/integration/monitor.go              | 19 +----
 pkg/controller/integration/monitor_synthetic.go    | 15 +---
 .../integration/monitor_synthetic_test.go          | 18 +---
 pkg/controller/synthetic/synthetic.go              | 98 ++++++++++------------
 pkg/controller/synthetic/synthetic_test.go         | 36 +++++++-
 10 files changed, 136 insertions(+), 122 deletions(-)

diff --git a/docs/modules/ROOT/pages/running/import.adoc b/docs/modules/ROOT/pages/running/import.adoc
index c09552121..9fa8099b8 100644
--- a/docs/modules/ROOT/pages/running/import.adoc
+++ b/docs/modules/ROOT/pages/running/import.adoc
@@ -23,13 +23,13 @@ The operator immediately creates a synthetic Integration:
 ```
 $ kubectl get it
 NAMESPACE                                   NAME    PHASE   RUNTIME PROVIDER   RUNTIME VERSION   KIT   REPLICAS
-test-79c385c3-d58e-4c28-826d-b14b6245f908   my-it   Cannot Monitor Pods
+test-79c385c3-d58e-4c28-826d-b14b6245f908   my-it   Running
 ```
-You can see it will be in `Cannot Monitor Pods` status phase. This is expected because the way Camel K operator monitor Pods. It requires that the same label applied to the Deployment is inherited by the generated Pods. For this reason, beside labelling the Deployment, we need to add a label in the Deployment template.
+You can see it will be in the `Running` status phase. However, checking the conditions, you will see that the Integration cannot yet be fully monitored. This is expected because of the way the Camel K operator monitors Pods: it requires the label applied to the Deployment to be inherited by the generated Pods. For this reason, besides labelling the Deployment, we need to add the same label to the Deployment template.
 ```
 $ kubectl patch deployment my-camel-sb-svc --patch '{"spec": {"template": {"metadata": {"labels": {"camel.apache.org/integration": "my-it"}}}}}'
 ```
-Also this operator can be performed manually or automated in the deployment procedure. We can see now that the operator will be able to monitor accordingly the status of the Pods:
+This operation can be performed manually or automated as part of the deployment procedure. We can now see that the operator is able to monitor the status of the Pods accordingly (a full manifest sketch follows the output below):
 ```
 $ kubectl get it
 NAMESPACE                                   NAME    PHASE   RUNTIME PROVIDER   RUNTIME VERSION   KIT   REPLICAS
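
For reference, the imported Deployment ends up carrying the importing label in both places. A minimal sketch of the final manifest (the `app` selector labels and the container image are illustrative placeholders, not taken from the commit):
```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-camel-sb-svc
  labels:
    # importing label: triggers creation of the synthetic Integration
    camel.apache.org/integration: my-it
spec:
  selector:
    matchLabels:
      app: my-camel-sb-svc
  template:
    metadata:
      labels:
        app: my-camel-sb-svc
        # same label on the Pod template: lets the operator monitor the Pods
        camel.apache.org/integration: my-it
    spec:
      containers:
      - name: main
        image: example.org/my-camel-sb-svc:latest  # placeholder image
```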
diff --git a/e2e/commonwithcustominstall/synthetic_test.go b/e2e/commonwithcustominstall/synthetic_test.go
index a1b92f40e..2979d0b39 100644
--- a/e2e/commonwithcustominstall/synthetic_test.go
+++ b/e2e/commonwithcustominstall/synthetic_test.go
@@ -34,6 +34,26 @@ import (
 	corev1 "k8s.io/api/core/v1"
 )
 
+func TestSyntheticIntegrationOff(t *testing.T) {
+	RegisterTestingT(t)
+	WithNewTestNamespace(t, func(ns string) {
+		// Install Camel K without synthetic Integration feature variable (default)
+		operatorID := "camel-k-synthetic-env-off"
+		Expect(KamelInstallWithID(operatorID, ns).Execute()).To(Succeed())
+
+		// Run the external deployment
+		ExpectExecSucceed(t, Kubectl("apply", "-f", "files/deploy.yaml", "-n", ns))
+		Eventually(DeploymentCondition(ns, "my-camel-sb-svc", appsv1.DeploymentProgressing), TestTimeoutShort).
+			Should(MatchFields(IgnoreExtras, Fields{
+				"Status": Equal(corev1.ConditionTrue),
+				"Reason": Equal("NewReplicaSetAvailable"),
+			}))
+
+		// Label the deployment --> Verify the Integration is not created
+		ExpectExecSucceed(t, Kubectl("label", "deploy", "my-camel-sb-svc", "camel.apache.org/integration=my-it", "-n", ns))
+		Eventually(Integration(ns, "my-it"), TestTimeoutShort).Should(BeNil())
+	})
+}
 func TestSyntheticIntegrationFromDeployment(t *testing.T) {
 	RegisterTestingT(t)
 	WithNewTestNamespace(t, func(ns string) {
@@ -53,8 +73,10 @@ func TestSyntheticIntegrationFromDeployment(t *testing.T) {
 
 		// Label the deployment --> Verify the Integration is created (cannot still monitor)
 		ExpectExecSucceed(t, Kubectl("label", "deploy", "my-camel-sb-svc", "camel.apache.org/integration=my-it", "-n", ns))
-		Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseCannotMonitor))
-		Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionTrue))
+		Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseRunning))
+		Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionFalse))
+		Eventually(IntegrationCondition(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(
+			WithTransform(IntegrationConditionReason, Equal(v1.IntegrationConditionMonitoringPodsAvailableReason)))
 
 		// Label the deployment template --> Verify the Integration is monitored
 		ExpectExecSucceed(t, Kubectl("patch", "deployment", "my-camel-sb-svc", "--patch", `{"spec": {"template": {"metadata": {"labels": {"camel.apache.org/integration": "my-it"}}}}}`, "-n", ns))
@@ -63,12 +85,9 @@ func TestSyntheticIntegrationFromDeployment(t *testing.T) {
 		one := int32(1)
 		Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&one))
 
-		// Delete the deployment --> Verify the Integration is in missing status
+		// Delete the deployment --> Verify the Integration is eventually garbage collected
 		ExpectExecSucceed(t, Kubectl("delete", "deploy", "my-camel-sb-svc", "-n", ns))
-		Eventually(IntegrationPhase(ns, "my-it"), TestTimeoutShort).Should(Equal(v1.IntegrationPhaseImportMissing))
-		Eventually(IntegrationConditionStatus(ns, "my-it", v1.IntegrationConditionReady), TestTimeoutShort).Should(Equal(corev1.ConditionFalse))
-		zero := int32(0)
-		Eventually(IntegrationStatusReplicas(ns, "my-it"), TestTimeoutShort).Should(Equal(&zero))
+		Eventually(Integration(ns, "my-it"), TestTimeoutShort).Should(BeNil())
 
 		// Recreate the deployment and label --> Verify the Integration is monitored
 		ExpectExecSucceed(t, Kubectl("apply", "-f", "files/deploy.yaml", "-n", ns))
diff --git a/pkg/apis/camel/v1/integration_types.go b/pkg/apis/camel/v1/integration_types.go
index 9bcecaad2..9f293f6a6 100644
--- a/pkg/apis/camel/v1/integration_types.go
+++ b/pkg/apis/camel/v1/integration_types.go
@@ -155,10 +155,6 @@ const (
 	IntegrationPhaseRunning IntegrationPhase = "Running"
 	// IntegrationPhaseError --.
 	IntegrationPhaseError IntegrationPhase = "Error"
-	// IntegrationPhaseImportMissing used when the application from which the Integration is imported has been deleted.
-	IntegrationPhaseImportMissing IntegrationPhase = "Application Missing"
-	// IntegrationPhaseCannotMonitor used when the application from which the Integration has not enough information to monitor its pods.
-	IntegrationPhaseCannotMonitor IntegrationPhase = "Cannot Monitor Pods"
 
 	// IntegrationConditionReady --.
 	IntegrationConditionReady IntegrationConditionType = "Ready"
@@ -186,8 +182,6 @@ const (
 	IntegrationConditionProbesAvailable IntegrationConditionType = "ProbesAvailable"
 	// IntegrationConditionTraitInfo --.
 	IntegrationConditionTraitInfo IntegrationConditionType = "TraitInfo"
-	// IntegrationConditionMonitoringPodsAvailable used to specify that the Pods generated are available for monitoring.
-	IntegrationConditionMonitoringPodsAvailable IntegrationConditionType = "MonitoringPodsAvailable"
 
 	// IntegrationConditionKitAvailableReason --.
 	IntegrationConditionKitAvailableReason string = "IntegrationKitAvailable"
diff --git a/pkg/cmd/operator/operator.go b/pkg/cmd/operator/operator.go
index 97996f48c..f4bf99b82 100644
--- a/pkg/cmd/operator/operator.go
+++ b/pkg/cmd/operator/operator.go
@@ -235,7 +235,7 @@ func Run(healthPort, monitoringPort int32, leaderElection bool, leaderElectionID
 	synthEnvVal, synth := os.LookupEnv("CAMEL_K_SYNTHETIC_INTEGRATIONS")
 	if synth && synthEnvVal == "true" {
 		log.Info("Starting the synthetic Integration manager")
-		exitOnError(synthetic.ManageSyntheticIntegrations(ctx, ctrlClient, mgr.GetCache(), mgr.GetAPIReader()), "synthetic Integration manager error")
+		exitOnError(synthetic.ManageSyntheticIntegrations(ctx, ctrlClient, mgr.GetCache()), "synthetic Integration manager error")
 	} else {
 		log.Info("Synthetic Integration manager not configured, skipping")
 	}
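
The feature stays off unless the environment variable is set to exactly `true`. A minimal way to enable it on a running operator (assuming the operator Deployment is named `camel-k-operator` and lives in the `camel-k` namespace, which may differ in your installation):
```
$ kubectl set env deployment/camel-k-operator CAMEL_K_SYNTHETIC_INTEGRATIONS=true -n camel-k
```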
diff --git a/pkg/controller/integration/integration_controller.go b/pkg/controller/integration/integration_controller.go
index a16aa6967..5129f6ade 100644
--- a/pkg/controller/integration/integration_controller.go
+++ b/pkg/controller/integration/integration_controller.go
@@ -415,15 +415,28 @@ func watchCronJobResources(b *builder.Builder) {
 }
 
 func watchKnativeResources(ctx context.Context, c client.Client, b *builder.Builder) error {
-	// Check for permission to watch the Knative Service resource
-	checkCtx, cancel := context.WithTimeout(ctx, time.Minute)
-	defer cancel()
-	if ok, err := kubernetes.CheckPermission(checkCtx, c, serving.GroupName, "services", platform.GetOperatorWatchNamespace(), "", "watch"); err != nil {
+	// Watch for the owned Knative Services conditionally
+	if ok, err := kubernetes.IsAPIResourceInstalled(c, servingv1.SchemeGroupVersion.String(), reflect.TypeOf(servingv1.Service{}).Name()); err != nil {
 		return err
 	} else if ok {
-		// Watch for the owned Knative Services
-		b.Owns(&servingv1.Service{}, builder.WithPredicates(StatusChangedPredicate{}))
+		// Check for permission to watch the Knative Service resource
+		checkCtx, cancel := context.WithTimeout(ctx, time.Minute)
+		defer cancel()
+		if ok, err = kubernetes.CheckPermission(checkCtx, c, serving.GroupName, "services", platform.GetOperatorWatchNamespace(), "", "watch"); err != nil {
+			return err
+		} else if ok {
+			log.Info("KnativeService resources installed in the cluster. RBAC privileges assigned correctly; you can use Knative features.")
+			b.Owns(&servingv1.Service{}, builder.WithPredicates(StatusChangedPredicate{}))
+		} else {
+			log.Info(`KnativeService resources installed in the cluster. However, the Camel K operator does not have the required RBAC privileges, so you can't use Knative features.
+				Make sure to apply the required RBAC privileges and restart the Camel K Operator Pod to be able to watch for Camel K managed Knative Services.`)
+		}
+	} else {
+		log.Info(`KnativeService resources are not installed in the cluster. You can't use Knative features. If you install Knative Serving resources after the
+			Camel K operator, make sure to apply the required RBAC privileges and restart the Camel K Operator Pod to be able to watch for
+			Camel K managed Knative Services.`)
 	}
+
 	return nil
 }
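
The log messages above tell the user to apply the required RBAC privileges when Knative Serving is installed but the operator cannot watch its Services. A minimal sketch of such a rule (the ClusterRole name is hypothetical; bind it to the operator's ServiceAccount with a matching ClusterRoleBinding):
```
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: camel-k-operator-knative  # hypothetical name
rules:
- apiGroups: ["serving.knative.dev"]
  resources: ["services"]
  verbs: ["get", "list", "watch"]
```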
 
diff --git a/pkg/controller/integration/monitor.go b/pkg/controller/integration/monitor.go
index fb86ed41d..048136d91 100644
--- a/pkg/controller/integration/monitor.go
+++ b/pkg/controller/integration/monitor.go
@@ -59,8 +59,7 @@ func (action *monitorAction) Name() string {
 func (action *monitorAction) CanHandle(integration *v1.Integration) bool {
 	return integration.Status.Phase == v1.IntegrationPhaseDeploying ||
 		integration.Status.Phase == v1.IntegrationPhaseRunning ||
-		integration.Status.Phase == v1.IntegrationPhaseError ||
-		integration.Status.Phase == v1.IntegrationPhaseCannotMonitor
+		integration.Status.Phase == v1.IntegrationPhaseError
 }
 
 func (action *monitorAction) Handle(ctx context.Context, integration *v1.Integration) (*v1.Integration, error) {
@@ -142,10 +141,9 @@ func (action *monitorAction) monitorPods(ctx context.Context, environment *trait
 	if !controller.hasTemplateIntegrationLabel() {
 		// This is happening when the Deployment, CronJob, etc resources
 		// miss the Integration label, required to identify sibling Pods.
-		integration.Status.Phase = v1.IntegrationPhaseCannotMonitor
 		integration.Status.SetConditions(
 			v1.IntegrationCondition{
-				Type:   v1.IntegrationConditionMonitoringPodsAvailable,
+				Type:   v1.IntegrationConditionReady,
 				Status: corev1.ConditionFalse,
 				Reason: v1.IntegrationConditionMonitoringPodsAvailableReason,
 				Message: fmt.Sprintf(
@@ -158,13 +156,6 @@ func (action *monitorAction) monitorPods(ctx context.Context, environment *trait
 		return integration, nil
 	}
 
-	integration.Status.SetConditions(
-		v1.IntegrationCondition{
-			Type:   v1.IntegrationConditionMonitoringPodsAvailable,
-			Status: corev1.ConditionTrue,
-			Reason: v1.IntegrationConditionMonitoringPodsAvailableReason,
-		},
-	)
 	// Enforce the scale sub-resource label selector.
 	// It is used by the HPA that queries the scale sub-resource endpoint,
 	// to list the pods owned by the integration.
@@ -296,8 +287,6 @@ type controller interface {
 	checkReadyCondition(ctx context.Context) (bool, error)
 	getPodSpec() corev1.PodSpec
 	updateReadyCondition(readyPods int) bool
-	getSelector() metav1.LabelSelector
-	isEmptySelector() bool
 	hasTemplateIntegrationLabel() bool
 	getControllerName() string
 }
@@ -359,10 +348,6 @@ func (action *monitorAction) updateIntegrationPhaseAndReadyCondition(
 	ctx context.Context, controller controller, environment *trait.Environment, integration *v1.Integration,
 	pendingPods []corev1.Pod, runningPods []corev1.Pod,
 ) error {
-	controller, err := action.newController(environment, integration)
-	if err != nil {
-		return err
-	}
 	if done, err := controller.checkReadyCondition(ctx); done || err != nil {
 		// There may be pods that are not ready but still probable for getting error messages.
 		// Ignore returned error from probing as it's expected when the ctrl obj is not ready.
diff --git a/pkg/controller/integration/monitor_synthetic.go b/pkg/controller/integration/monitor_synthetic.go
index a51758814..beb736b1d 100644
--- a/pkg/controller/integration/monitor_synthetic.go
+++ b/pkg/controller/integration/monitor_synthetic.go
@@ -19,7 +19,6 @@ package integration
 
 import (
 	"context"
-	"fmt"
 
 	corev1 "k8s.io/api/core/v1"
 
@@ -46,18 +45,8 @@ func (action *monitorSyntheticAction) Handle(ctx context.Context, integration *v
 	if err != nil {
 		// Importing application no longer available
 		if k8serrors.IsNotFound(err) {
-			// It could be a normal condition, don't report as an error
-			integration.Status.Phase = v1.IntegrationPhaseImportMissing
-			message := fmt.Sprintf(
-				"import %s %s no longer available",
-				integration.Annotations[v1.IntegrationImportedKindLabel],
-				integration.Annotations[v1.IntegrationImportedNameLabel],
-			)
-			integration.SetReadyConditionError(message)
-			zero := int32(0)
-			integration.Status.Phase = v1.IntegrationPhaseImportMissing
-			integration.Status.Replicas = &zero
-			return integration, nil
+			// Application was deleted. The GC will take care of it.
+			return nil, nil
 		}
 		// other reasons, likely some error to report
 		integration.Status.Phase = v1.IntegrationPhaseError
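
Returning `nil` here is safe because deletion is now delegated to Kubernetes garbage collection: the synthetic Integration carries an owner reference to the imported resource (see `pkg/controller/synthetic/synthetic.go` below), so it disappears together with it. One way to inspect the linkage (a sketch; the Integration name follows the docs example):
```
$ kubectl get it my-it -o jsonpath='{.metadata.ownerReferences[0].kind}/{.metadata.ownerReferences[0].name}'
```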
diff --git a/pkg/controller/integration/monitor_synthetic_test.go b/pkg/controller/integration/monitor_synthetic_test.go
index aa1f9b232..b1cf8a66c 100644
--- a/pkg/controller/integration/monitor_synthetic_test.go
+++ b/pkg/controller/integration/monitor_synthetic_test.go
@@ -125,13 +125,10 @@ func TestMonitorSyntheticIntegrationCannotMonitorPods(t *testing.T) {
 	assert.True(t, a.CanHandle(importedIt))
 	handledIt, err := a.Handle(context.TODO(), importedIt)
 	assert.Nil(t, err)
-	assert.Equal(t, v1.IntegrationPhaseCannotMonitor, handledIt.Status.Phase)
-	// Ready condition should be still true
-	assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status)
+	assert.Equal(t, corev1.ConditionFalse, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status)
 	// Check monitoring pods condition
-	assert.Equal(t, corev1.ConditionFalse, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Status)
-	assert.Equal(t, v1.IntegrationConditionMonitoringPodsAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Reason)
-	assert.Equal(t, "Could not find `camel.apache.org/integration: my-imported-it` label in the Deployment/my-deploy template. Make sure to include this label in the template for Pod monitoring purposes.", handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Message)
+	assert.Equal(t, v1.IntegrationConditionMonitoringPodsAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Reason)
+	assert.Equal(t, "Could not find `camel.apache.org/integration: my-imported-it` label in the Deployment/my-deploy template. Make sure to include this label in the template for Pod monitoring purposes.", handledIt.Status.GetCondition(v1.IntegrationConditionReady).Message)
 }
 
 func TestMonitorSyntheticIntegrationDeployment(t *testing.T) {
@@ -246,9 +243,6 @@ func TestMonitorSyntheticIntegrationDeployment(t *testing.T) {
 	assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status)
 	assert.Equal(t, v1.IntegrationConditionDeploymentReadyReason, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Reason)
 	assert.Equal(t, "1/1 ready replicas", handledIt.Status.GetCondition(v1.IntegrationConditionReady).Message)
-	// Check monitoring pods condition
-	assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Status)
-	assert.Equal(t, v1.IntegrationConditionMonitoringPodsAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Reason)
 
 	// Remove label from deployment
 	deploy.Labels = nil
@@ -369,9 +363,6 @@ func TestMonitorSyntheticIntegrationCronJob(t *testing.T) {
 	assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status)
 	assert.Equal(t, v1.IntegrationConditionCronJobCreatedReason, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Reason)
 	assert.Equal(t, "cronjob created", handledIt.Status.GetCondition(v1.IntegrationConditionReady).Message)
-	// Check monitoring pods condition
-	assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Status)
-	assert.Equal(t, v1.IntegrationConditionMonitoringPodsAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Reason)
 }
 
 func TestMonitorSyntheticIntegrationKnativeService(t *testing.T) {
@@ -492,7 +483,4 @@ func TestMonitorSyntheticIntegrationKnativeService(t *testing.T) {
 	// Ready condition
 	assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Status)
 	assert.Equal(t, v1.IntegrationConditionKnativeServiceReadyReason, handledIt.Status.GetCondition(v1.IntegrationConditionReady).Reason)
-	// Check monitoring pods condition
-	assert.Equal(t, corev1.ConditionTrue, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Status)
-	assert.Equal(t, v1.IntegrationConditionMonitoringPodsAvailableReason, handledIt.Status.GetCondition(v1.IntegrationConditionMonitoringPodsAvailable).Reason)
 }
diff --git a/pkg/controller/synthetic/synthetic.go b/pkg/controller/synthetic/synthetic.go
index c5f7bbb34..974a2eb05 100644
--- a/pkg/controller/synthetic/synthetic.go
+++ b/pkg/controller/synthetic/synthetic.go
@@ -31,6 +31,7 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	batchv1 "k8s.io/api/batch/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientgocache "k8s.io/client-go/tools/cache"
 	"knative.dev/serving/pkg/apis/serving"
 	servingv1 "knative.dev/serving/pkg/apis/serving/v1"
@@ -38,12 +39,17 @@ import (
 	ctrl "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
+var (
+	controller         = true
+	blockOwnerDeletion = true
+)
+
 // ManageSyntheticIntegrations is the controller for synthetic Integrations. Consider that the lifecycle of the objects is driven
-// by the way we are monitoring them. Since we're filtering by `camel.apache.org/integration` label in the cached clinet,
+// by the way we are monitoring them. Since we're filtering by `camel.apache.org/integration` label in the cached client,
 // you must consider an add, update or delete accordingly:
 // i.e., when the user labels the resource it is considered an add; when the label is removed, it is considered a delete.
 // We must process only non-managed objects in order to avoid conflicting with the reconciliation loop of managed objects (owned by an Integration).
-func ManageSyntheticIntegrations(ctx context.Context, c client.Client, cache cache.Cache, reader ctrl.Reader) error {
+func ManageSyntheticIntegrations(ctx context.Context, c client.Client, cache cache.Cache) error {
 	informers, err := getInformers(ctx, c, cache)
 	if err != nil {
 		return err
@@ -73,15 +79,7 @@ func ManageSyntheticIntegrations(ctx context.Context, c client.Client, cache cac
 							log.Errorf(err, "Some error happened while loading a synthetic Integration %s", integrationName)
 						}
 					} else {
-						if it.Status.Phase == v1.IntegrationPhaseImportMissing {
-							// Update with proper phase (reconciliation will take care)
-							it.Status.Phase = v1.IntegrationPhaseNone
-							if err = updateSyntheticIntegration(ctx, c, it); err != nil {
-								log.Errorf(err, "Some error happened while updatinf a synthetic Integration %s", integrationName)
-							}
-						} else {
-							log.Infof("Synthetic Integration %s is in phase %s. Skipping.", integrationName, it.Status.Phase)
-						}
+						log.Infof("Synthetic Integration %s is in phase %s. Skipping.", integrationName, it.Status.Phase)
 					}
 				}
 			},
@@ -93,44 +91,11 @@ func ManageSyntheticIntegrations(ctx context.Context, c client.Client, cache cac
 				}
 				if !isManagedObject(ctrlObj) {
 					integrationName := ctrlObj.GetLabels()[v1.IntegrationLabel]
-					// We must use a non caching client to understand if the object has been deleted from the cluster or only deleted from
-					// the cache (ie, user removed the importing label)
-					err := reader.Get(ctx, ctrl.ObjectKeyFromObject(ctrlObj), ctrlObj)
-					if err != nil {
-						if k8serrors.IsNotFound(err) {
-							// Object removed from the cluster
-							it, err := getSyntheticIntegration(ctx, c, ctrlObj.GetNamespace(), integrationName)
-							if err != nil {
-								log.Errorf(err, "Some error happened while loading a synthetic Integration %s", it.Name)
-								return
-							}
-							// The resource from which we imported has been deleted, report in it status.
-							// It may be a temporary situation, for example, if the deployment from which the Integration is imported
-							// is being redeployed. For this reason we should keep the Integration instead of forcefully removing it.
-							message := fmt.Sprintf(
-								"import %s %s no longer available",
-								it.Annotations[v1.IntegrationImportedKindLabel],
-								it.Annotations[v1.IntegrationImportedNameLabel],
-							)
-							it.SetReadyConditionError(message)
-							zero := int32(0)
-							it.Status.Phase = v1.IntegrationPhaseImportMissing
-							it.Status.Replicas = &zero
-							if err = updateSyntheticIntegration(ctx, c, it); err != nil {
-								log.Errorf(err, "Some error happened while updating a synthetic Integration %s", it.Name)
-							}
-							log.Infof("Updated synthetic Integration %s with status %s", it.GetName(), it.Status.Phase)
-						} else {
-							log.Errorf(err, "Some error happened while loading object %s from the cluster", ctrlObj.GetName())
-							return
-						}
-					} else {
-						// Importing label removed
-						if err = deleteSyntheticIntegration(ctx, c, ctrlObj.GetNamespace(), integrationName); err != nil {
-							log.Errorf(err, "Some error happened while deleting a synthetic Integration %s", integrationName)
-						}
-						log.Infof("Deleted synthetic Integration %s", integrationName)
+					// Importing label removed
+					if err = deleteSyntheticIntegration(ctx, c, ctrlObj.GetNamespace(), integrationName); err != nil {
+						log.Errorf(err, "Some error happened while deleting a synthetic Integration %s", integrationName)
 					}
+					log.Infof("Deleted synthetic Integration %s", integrationName)
 				}
 			},
 		})
@@ -186,10 +151,6 @@ func deleteSyntheticIntegration(ctx context.Context, c client.Client, namespace,
 	return c.Delete(ctx, &it)
 }
 
-func updateSyntheticIntegration(ctx context.Context, c client.Client, it *v1.Integration) error {
-	return c.Status().Update(ctx, it, ctrl.FieldOwner("camel-k-operator"))
-}
-
 // isManagedObject returns true if the object is managed by an Integration.
 func isManagedObject(obj ctrl.Object) bool {
 	for _, mr := range obj.GetOwnerReferences() {
@@ -243,6 +204,17 @@ func (app *nonManagedCamelDeployment) Integration() *v1.Integration {
 			},
 		},
 	}
+	references := []metav1.OwnerReference{
+		{
+			APIVersion:         "apps/v1",
+			Kind:               "Deployment",
+			Name:               app.deploy.Name,
+			UID:                app.deploy.UID,
+			Controller:         &controller,
+			BlockOwnerDeletion: &blockOwnerDeletion,
+		},
+	}
+	it.SetOwnerReferences(references)
 	return &it
 }
 
@@ -277,6 +249,17 @@ func (app *NonManagedCamelCronjob) Integration() *v1.Integration {
 	it.Spec = v1.IntegrationSpec{
 		Traits: v1.Traits{},
 	}
+	references := []metav1.OwnerReference{
+		{
+			APIVersion:         "batch/v1",
+			Kind:               "CronJob",
+			Name:               app.cron.Name,
+			UID:                app.cron.UID,
+			Controller:         &controller,
+			BlockOwnerDeletion: &blockOwnerDeletion,
+		},
+	}
+	it.SetOwnerReferences(references)
 	return &it
 }
 
@@ -296,5 +279,16 @@ func (app *NonManagedCamelKnativeService) Integration() *v1.Integration {
 	it.Spec = v1.IntegrationSpec{
 		Traits: v1.Traits{},
 	}
+	references := []metav1.OwnerReference{
+		{
+			APIVersion:         servingv1.SchemeGroupVersion.String(),
+			Kind:               "Service",
+			Name:               app.ksvc.Name,
+			UID:                app.ksvc.UID,
+			Controller:         &controller,
+			BlockOwnerDeletion: &blockOwnerDeletion,
+		},
+	}
+	it.SetOwnerReferences(references)
 	return &it
 }
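
The three `Integration()` implementations build the controller owner reference by hand. For comparison, a sketch (not part of this commit) of the equivalent construction with the apimachinery helper, which sets both `Controller` and `BlockOwnerDeletion` to `true`:
```
package synthetic

import (
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1 "github.com/apache/camel-k/v2/pkg/apis/camel/v1"
)

// setDeploymentOwner is a hypothetical helper showing that
// metav1.NewControllerRef produces the same reference the code above
// assembles field by field.
func setDeploymentOwner(it *v1.Integration, deploy *appsv1.Deployment) {
	ref := metav1.NewControllerRef(deploy, appsv1.SchemeGroupVersion.WithKind("Deployment"))
	it.SetOwnerReferences([]metav1.OwnerReference{*ref})
}
```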
diff --git a/pkg/controller/synthetic/synthetic_test.go b/pkg/controller/synthetic/synthetic_test.go
index c600f6d3e..fcc15077a 100644
--- a/pkg/controller/synthetic/synthetic_test.go
+++ b/pkg/controller/synthetic/synthetic_test.go
@@ -115,6 +115,17 @@ func TestNonManagedDeployment(t *testing.T) {
 			},
 		},
 	}
+	references := []metav1.OwnerReference{
+		{
+			APIVersion:         "apps/v1",
+			Kind:               "Deployment",
+			Name:               deploy.Name,
+			UID:                deploy.UID,
+			Controller:         &controller,
+			BlockOwnerDeletion: &blockOwnerDeletion,
+		},
+	}
+	expectedIt.SetOwnerReferences(references)
 
 	deploymentAdapter, err := nonManagedCamelApplicationFactory(deploy)
 	assert.Nil(t, err)
@@ -164,7 +175,17 @@ func TestNonManagedCronJob(t *testing.T) {
 		v1.IntegrationImportedKindLabel: "CronJob",
 		v1.IntegrationSyntheticLabel:    "true",
 	})
-
+	references := []metav1.OwnerReference{
+		{
+			APIVersion:         "batch/v1",
+			Kind:               "CronJob",
+			Name:               cron.Name,
+			UID:                cron.UID,
+			Controller:         &controller,
+			BlockOwnerDeletion: &blockOwnerDeletion,
+		},
+	}
+	expectedIt.SetOwnerReferences(references)
 	cronJobAdapter, err := nonManagedCamelApplicationFactory(cron)
 	assert.Nil(t, err)
 	assert.NotNil(t, cronJobAdapter)
@@ -174,7 +195,7 @@ func TestNonManagedCronJob(t *testing.T) {
 func TestNonManagedKnativeService(t *testing.T) {
 	ksvc := &servingv1.Service{
 		TypeMeta: metav1.TypeMeta{
-			APIVersion: appsv1.SchemeGroupVersion.String(),
+			APIVersion: servingv1.SchemeGroupVersion.String(),
 			Kind:       "Service",
 		},
 		ObjectMeta: metav1.ObjectMeta{
@@ -213,6 +234,17 @@ func TestNonManagedKnativeService(t *testing.T) {
 		v1.IntegrationImportedKindLabel: "KnativeService",
 		v1.IntegrationSyntheticLabel:    "true",
 	})
+	references := []metav1.OwnerReference{
+		{
+			APIVersion:         servingv1.SchemeGroupVersion.String(),
+			Kind:               "Service",
+			Name:               ksvc.Name,
+			UID:                ksvc.UID,
+			Controller:         &controller,
+			BlockOwnerDeletion: &blockOwnerDeletion,
+		},
+	}
+	expectedIt.SetOwnerReferences(references)
 
 	knativeServiceAdapter, err := nonManagedCamelApplicationFactory(ksvc)
 	assert.Nil(t, err)