Posted to dev@camel.apache.org by GitBox <gi...@apache.org> on 2018/09/28 13:00:47 UTC

[GitHub] lburgazzoli closed pull request #138: Add support for plain Kubernetes

URL: https://github.com/apache/camel-k/pull/138

This is a pull request merged from a forked repository. Because GitHub hides the original diff once a fork-based pull request is merged, the diff is reproduced below for the sake of provenance:

diff --git a/README.adoc b/README.adoc
index fadd6d1..085ee5a 100644
--- a/README.adoc
+++ b/README.adoc
@@ -13,7 +13,7 @@ Apache Camel K (a.k.a. Kamel) is a lightweight integration framework built from
 Camel K allows to run integrations directly on a Kubernetes or OpenShift cluster.
 To use it, you need to be connected to a cloud environment or to a local cluster created for development purposes.
 
-If you need help on how to create a local development environment based on *Minishift* or *Minikube* (Minikube will be supported soon, stay tuned!), you can follow the link:/docs/cluster-setup.adoc[local cluster setup guide].
+If you need help on how to create a local development environment based on *Minishift* or *Minikube*, you can follow the link:/docs/cluster-setup.adoc[local cluster setup guide].
 
 [[installation]]
 === Installation
diff --git a/build/Makefile b/build/Makefile
index 3bc099c..6c0ddbb 100644
--- a/build/Makefile
+++ b/build/Makefile
@@ -56,6 +56,9 @@ install: install-minishift
 install-minishift:
 	./build/install_minishift.sh
 
+install-minikube:
+	./build/install_minikube.sh
+
 test: check
 check:
 	go test ./...
diff --git a/build/install_minikube.sh b/build/install_minikube.sh
new file mode 100755
index 0000000..cc6a159
--- /dev/null
+++ b/build/install_minikube.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+# Exit on error
+set -e
+
+# Compile and build images
+make
+eval $(minikube docker-env)
+make images
+
+# Perform installation
+./kamel install
+
+kubectl delete pod -l name=camel-k-operator || true
diff --git a/deploy/builder-pvc.yaml b/deploy/builder-pvc.yaml
new file mode 100644
index 0000000..8e6ca4b
--- /dev/null
+++ b/deploy/builder-pvc.yaml
@@ -0,0 +1,12 @@
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: camel-k-builder
+  labels:
+    app: "camel-k"
+spec:
+  accessModes:
+  - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
diff --git a/deploy/operator-deployment-kubernetes.yaml b/deploy/operator-deployment-kubernetes.yaml
new file mode 100644
index 0000000..c6e259f
--- /dev/null
+++ b/deploy/operator-deployment-kubernetes.yaml
@@ -0,0 +1,42 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: camel-k-operator
+  labels:
+    app: "camel-k"
+spec:
+  replicas: 1
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      name: camel-k-operator
+  template:
+    metadata:
+      labels:
+        name: camel-k-operator
+    spec:
+      serviceAccountName: camel-k-operator
+      containers:
+        - name: camel-k-operator
+          image: docker.io/apache/camel-k:0.0.3-SNAPSHOT
+          ports:
+          - containerPort: 60000
+            name: metrics
+          command:
+          - camel-k-operator
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: WATCH_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: OPERATOR_NAME
+              value: "camel-k-operator"
+          volumeMounts:
+          - mountPath: /workspace
+            name: camel-k-builder
+      volumes:
+      - name: camel-k-builder
+        persistentVolumeClaim:
+          claimName: camel-k-builder
diff --git a/deploy/operator-deployment.yaml b/deploy/operator-deployment-openshift.yaml
similarity index 100%
rename from deploy/operator-deployment.yaml
rename to deploy/operator-deployment-openshift.yaml
diff --git a/deploy/resources.go b/deploy/resources.go
index a3274cb..ea8fdb9 100644
--- a/deploy/resources.go
+++ b/deploy/resources.go
@@ -24,6 +24,22 @@ var Resources map[string]string
 func init() {
 	Resources = make(map[string]string)
 
+	Resources["builder-pvc.yaml"] =
+		`
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: camel-k-builder
+  labels:
+    app: "camel-k"
+spec:
+  accessModes:
+  - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
+
+`
 	Resources["camel-catalog.yaml"] =
 		`
 artifacts:
@@ -2190,7 +2206,53 @@ spec:
           .to('log:info?showHeaders=true')
     name: routes.groovy
 `
-	Resources["operator-deployment.yaml"] =
+	Resources["operator-deployment-kubernetes.yaml"] =
+		`
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: camel-k-operator
+  labels:
+    app: "camel-k"
+spec:
+  replicas: 1
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      name: camel-k-operator
+  template:
+    metadata:
+      labels:
+        name: camel-k-operator
+    spec:
+      serviceAccountName: camel-k-operator
+      containers:
+        - name: camel-k-operator
+          image: docker.io/apache/camel-k:0.0.3-SNAPSHOT
+          ports:
+          - containerPort: 60000
+            name: metrics
+          command:
+          - camel-k-operator
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: WATCH_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: OPERATOR_NAME
+              value: "camel-k-operator"
+          volumeMounts:
+          - mountPath: /workspace
+            name: camel-k-builder
+      volumes:
+      - name: camel-k-builder
+        persistentVolumeClaim:
+          claimName: camel-k-builder
+
+`
+	Resources["operator-deployment-openshift.yaml"] =
 		`
 apiVersion: apps/v1
 kind: Deployment
diff --git a/docs/cluster-setup.adoc b/docs/cluster-setup.adoc
index 7e52648..ef38dd6 100644
--- a/docs/cluster-setup.adoc
+++ b/docs/cluster-setup.adoc
@@ -24,7 +24,24 @@ Then you can start the cluster with:
 minishift start
 ```
 
+You can now proceed to link:/README.adoc[install Camel K].
+
 [[minikube]]
 == Minikube
 
-Minikube and Kubernetes are not yet supported (but support is coming soon).
+You can run Camel K integrations on plain Kubernetes using the Minikube cluster creation tool.
+Follow the instructions in the https://github.com/kubernetes/minikube#installation[official doc] for the installation.
+
+Start a new instance of Minikube using the command:
+
+```
+minikube start
+```
+
+After the startup process is completed, you need to **enable the `registry` addon**:
+
+```
+minikube addons enable registry
+```
+
+You can now proceed to link:/README.adoc[install Camel K].
diff --git a/docs/developers.adoc b/docs/developers.adoc
index 06bb10a..ee329bd 100644
--- a/docs/developers.adoc
+++ b/docs/developers.adoc
@@ -114,11 +114,21 @@ make test-integration
 
 If you want to install everything you have in your source code and see it running on Kubernetes, you need to run the following command:
 
+=== For Minishift
+
 * Run `make install-minishift` (or just `make install`): to build the project and install it in the current namespace on Minishift
 * You can specify a different namespace with `make install-minishift project=myawesomeproject`
 
 This command assumes you have an already running Minishift instance.
 
+=== For Minikube
+
+* Run `make install-minikube`: to build the project and install it in the current namespace on Minikube
+
+This command assumes you have an already running Minikube instance.
+
+=== Use
+
 Now you can play with Camel K:
 
 ```
@@ -162,3 +172,7 @@ When configuring the IDE task, make sure to add all required environment variabl
 * Set the `OPERATOR_NAME` environment variable to `camel-k-operator`.
 
 After you setup the IDE task, you can run and debug the operator process.
+
+NOTE: The operator can be fully debugged in Minishift, because it uses OpenShift S2I binary builds under the hood.
+The build phase cannot be (currently) debugged in Minikube because the Kaniko builder requires that the operator and the publisher pod
+share a common persistent volume.
diff --git a/pkg/build/build_manager.go b/pkg/build/build_manager.go
index 895575b..83c0f8a 100644
--- a/pkg/build/build_manager.go
+++ b/pkg/build/build_manager.go
@@ -28,14 +28,16 @@ type Manager struct {
 	ctx       context.Context
 	builds    sync.Map
 	assembler Assembler
+	packager  Packager
 	publisher Publisher
 }
 
-// NewManager creates an instance of the build manager using the given builder
-func NewManager(ctx context.Context, assembler Assembler, publisher Publisher) *Manager {
+// NewManager creates an instance of the build manager using the given assembler, packager and publisher
+func NewManager(ctx context.Context, assembler Assembler, packager Packager, publisher Publisher) *Manager {
 	return &Manager{
 		ctx:       ctx,
 		assembler: assembler,
+		packager:  packager,
 		publisher: publisher,
 	}
 }
@@ -68,7 +70,21 @@ func (m *Manager) Start(request Request) {
 			}
 		}
 
-		publishChannel := m.publisher.Publish(request, assembled)
+		packageChannel := m.packager.Package(request, assembled)
+		var packaged PackagedOutput
+		select {
+		case <-m.ctx.Done():
+			m.builds.Store(request.Identifier, canceledBuildInfo(request))
+			return
+		case packaged = <-packageChannel:
+			if packaged.Error != nil {
+				m.builds.Store(request.Identifier, failedPackageBuildInfo(request, packaged))
+				return
+			}
+		}
+		defer m.packager.Cleanup(packaged)
+
+		publishChannel := m.publisher.Publish(request, assembled, packaged)
 		var published PublishedOutput
 		select {
 		case <-m.ctx.Done():
@@ -114,6 +130,14 @@ func failedAssembleBuildInfo(request Request, output AssembledOutput) Result {
 	}
 }
 
+func failedPackageBuildInfo(request Request, output PackagedOutput) Result {
+	return Result{
+		Request: request,
+		Error:   output.Error,
+		Status:  StatusError,
+	}
+}
+
 func failedPublishBuildInfo(request Request, output PublishedOutput) Result {
 	return Result{
 		Request: request,
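
The manager now runs three asynchronous phases in sequence (assemble, package, publish) and re-checks its context between each of them, aborting the build on cancellation or on the first failed phase. Below is a minimal, self-contained sketch of that chaining pattern; the types and names are simplified stand-ins, not the project's actual Request/AssembledOutput/PackagedOutput structs.

```
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// phaseOutput is a simplified stand-in for the build package's
// AssembledOutput/PackagedOutput/PublishedOutput structs.
type phaseOutput struct {
	value string
	err   error
}

// runPhase mimics one asynchronous build phase: like Assemble, Package and
// Publish, it returns a channel that eventually delivers a single result.
func runPhase(name string, fail bool) <-chan phaseOutput {
	ch := make(chan phaseOutput, 1)
	go func() {
		time.Sleep(10 * time.Millisecond) // pretend to do some work
		if fail {
			ch <- phaseOutput{err: errors.New(name + " failed")}
			return
		}
		ch <- phaseOutput{value: name + " ok"}
	}()
	return ch
}

// chainPhases shows the select pattern used between phases: abort as soon as
// the context is cancelled, stop on the first phase that reports an error.
func chainPhases(ctx context.Context) error {
	for _, phase := range []string{"assemble", "package", "publish"} {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case out := <-runPhase(phase, false):
			if out.err != nil {
				return out.err
			}
			fmt.Println(out.value)
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := chainPhases(ctx); err != nil {
		fmt.Println("build aborted:", err)
	}
}
```
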
diff --git a/pkg/build/build_types.go b/pkg/build/build_types.go
index 22ff178..bf83245 100644
--- a/pkg/build/build_types.go
+++ b/pkg/build/build_types.go
@@ -63,6 +63,19 @@ type Assembler interface {
 	Assemble(Request) <-chan AssembledOutput
 }
 
+// PackagedOutput is the new image layer that needs to be pushed
+type PackagedOutput struct {
+	Error     error
+	BaseImage string
+	TarFile   string
+}
+
+// A Packager produces the image layer that needs to be pushed
+type Packager interface {
+	Package(Request, AssembledOutput) <-chan PackagedOutput
+	Cleanup(PackagedOutput)
+}
+
 // PublishedOutput is the output of the publish phase
 type PublishedOutput struct {
 	Error error
@@ -71,7 +84,7 @@ type PublishedOutput struct {
 
 // A Publisher publishes a docker image of a build request
 type Publisher interface {
-	Publish(Request, AssembledOutput) <-chan PublishedOutput
+	Publish(Request, AssembledOutput, PackagedOutput) <-chan PublishedOutput
 }
 
 // Status --
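
The new Packager interface follows the same contract as the existing Assembler and Publisher: each call returns a buffered, single-element channel so the worker goroutine can deliver its one result without blocking. A toy implementation of that contract, using simplified local types rather than the real build package:

```
package main

import "fmt"

// packagedOutput and packager are simplified, local stand-ins for the new
// PackagedOutput struct and Packager interface; the real ones carry the
// build.Request and build.AssembledOutput values shown above.
type packagedOutput struct {
	err       error
	baseImage string
	tarFile   string
}

type packager interface {
	doPackage(artifacts []string) <-chan packagedOutput
	cleanup(packagedOutput)
}

// noopPackager delivers its result on a buffered, single-element channel,
// so the producing goroutine never blocks even if the caller gives up.
type noopPackager struct{}

func (noopPackager) doPackage(artifacts []string) <-chan packagedOutput {
	out := make(chan packagedOutput, 1)
	go func() {
		out <- packagedOutput{baseImage: "base:latest", tarFile: "/tmp/layer.tar"}
	}()
	return out
}

func (noopPackager) cleanup(packagedOutput) {}

func main() {
	var p packager = noopPackager{}
	res := <-p.doPackage([]string{"camel-core"})
	fmt.Println(res.baseImage, res.tarFile)
	p.cleanup(res)
}
```
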
diff --git a/pkg/build/packager/base.go b/pkg/build/packager/base.go
new file mode 100644
index 0000000..a13da83
--- /dev/null
+++ b/pkg/build/packager/base.go
@@ -0,0 +1,177 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package packager
+
+import (
+	"context"
+	"io/ioutil"
+	"os"
+	"path"
+	"time"
+
+	"github.com/apache/camel-k/pkg/build"
+	"github.com/apache/camel-k/pkg/util/maven"
+	"github.com/apache/camel-k/pkg/util/tar"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	sharedDir         = "/workspace"
+	artifactDirPrefix = "layer-"
+)
+
+type commonPackager struct {
+	buffer chan packageOperation
+	uploadedArtifactsSelector
+}
+
+type packageOperation struct {
+	request   build.Request
+	assembled build.AssembledOutput
+	output    chan build.PackagedOutput
+}
+
+type uploadedArtifactsSelector func([]build.ClasspathEntry) (string, []build.ClasspathEntry, error)
+
+func newBasePackager(ctx context.Context, rootImage string) build.Packager {
+	identitySelector := func(entries []build.ClasspathEntry) (string, []build.ClasspathEntry, error) {
+		return rootImage, entries, nil
+	}
+	return newBasePackagerWithSelector(ctx, identitySelector)
+}
+
+func newBasePackagerWithSelector(ctx context.Context, uploadedArtifactsSelector uploadedArtifactsSelector) *commonPackager {
+	pack := commonPackager{
+		buffer: make(chan packageOperation, 100),
+		uploadedArtifactsSelector: uploadedArtifactsSelector,
+	}
+	go pack.packageCycle(ctx)
+	return &pack
+}
+
+func (b *commonPackager) Package(request build.Request, assembled build.AssembledOutput) <-chan build.PackagedOutput {
+	res := make(chan build.PackagedOutput, 1)
+	op := packageOperation{
+		request:   request,
+		assembled: assembled,
+		output:    res,
+	}
+	b.buffer <- op
+	return res
+}
+
+func (b *commonPackager) Cleanup(output build.PackagedOutput) {
+	parentDir, _ := path.Split(output.TarFile)
+	err := os.RemoveAll(parentDir)
+	if err != nil {
+		logrus.Warn("Could not remove temporary directory ", parentDir)
+	}
+}
+
+func (b *commonPackager) packageCycle(ctx context.Context) {
+	for {
+		select {
+		case <-ctx.Done():
+			b.buffer = nil
+			return
+		case op := <-b.buffer:
+			now := time.Now()
+			logrus.Info("Starting a new image packaging")
+			res := b.execute(op.request, op.assembled)
+			elapsed := time.Now().Sub(now)
+
+			if res.Error != nil {
+				logrus.Error("Error during packaging (total time ", elapsed.Seconds(), " seconds): ", res.Error)
+			} else {
+				logrus.Info("Packaging completed in ", elapsed.Seconds(), " seconds")
+			}
+
+			op.output <- res
+		}
+	}
+}
+
+func (b *commonPackager) execute(request build.Request, assembled build.AssembledOutput) build.PackagedOutput {
+	baseImageName, selectedArtifacts, err := b.uploadedArtifactsSelector(assembled.Classpath)
+	if err != nil {
+		return build.PackagedOutput{Error: err}
+	}
+
+	tarFile, err := b.createTar(assembled, selectedArtifacts)
+	if err != nil {
+		return build.PackagedOutput{Error: err}
+	}
+
+	return build.PackagedOutput{
+		BaseImage: baseImageName,
+		TarFile:   tarFile,
+	}
+}
+
+func (b *commonPackager) createTar(assembled build.AssembledOutput, selectedArtifacts []build.ClasspathEntry) (string, error) {
+	buildBaseDir := sharedDir
+	if _, err := os.Stat(buildBaseDir); os.IsNotExist(err) {
+		// use default OS temp dir if a shared dir is not present
+		buildBaseDir = ""
+	}
+	artifactDir, err := ioutil.TempDir(buildBaseDir, artifactDirPrefix)
+	if err != nil {
+		return "", errors.Wrap(err, "could not create temporary dir for packaged artifacts")
+	}
+
+	tarFileName := path.Join(artifactDir, "occi.tar")
+	tarAppender, err := tar.NewAppender(tarFileName)
+	if err != nil {
+		return "", err
+	}
+	defer tarAppender.Close()
+
+	tarDir := "dependencies/"
+	for _, entry := range selectedArtifacts {
+		gav, err := maven.ParseGAV(entry.ID)
+		if err != nil {
+			return "", nil
+		}
+
+		tarPath := path.Join(tarDir, gav.GroupID)
+		_, err = tarAppender.AddFile(entry.Location, tarPath)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	cp := ""
+	for _, entry := range assembled.Classpath {
+		gav, err := maven.ParseGAV(entry.ID)
+		if err != nil {
+			return "", nil
+		}
+		tarPath := path.Join(tarDir, gav.GroupID)
+		_, fileName := path.Split(entry.Location)
+		fileName = path.Join(tarPath, fileName)
+		cp += fileName + "\n"
+	}
+
+	err = tarAppender.AppendData([]byte(cp), "classpath")
+	if err != nil {
+		return "", err
+	}
+
+	return tarFileName, nil
+}
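
The layer produced by createTar keeps artifacts under a dependencies/<groupId>/ tree and adds a "classpath" file listing their relative paths, one per line. For readers who want to see that layout outside the project's tar utility, here is a standalone sketch using only the standard library's archive/tar; file names and contents are placeholders, and addBytes only approximates the tarAppender.AppendData helper used above.

```
package main

import (
	"archive/tar"
	"fmt"
	"os"
)

// addBytes writes an in-memory payload into the archive under the given
// name, similar in spirit to the tarAppender.AppendData helper.
func addBytes(tw *tar.Writer, name string, data []byte) error {
	hdr := &tar.Header{Name: name, Mode: 0o644, Size: int64(len(data))}
	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	_, err := tw.Write(data)
	return err
}

func main() {
	out, err := os.CreateTemp("", "layer-*.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	tw := tar.NewWriter(out)
	defer tw.Close()

	// Artifacts live under dependencies/<groupId>/ and the "classpath" file
	// records their relative paths, one per line.
	jars := []string{"dependencies/org.apache.camel/camel-core-2.22.1.jar"}
	cp := ""
	for _, name := range jars {
		if err := addBytes(tw, name, []byte("placeholder jar content")); err != nil {
			panic(err)
		}
		cp += name + "\n"
	}
	if err := addBytes(tw, "classpath", []byte(cp)); err != nil {
		panic(err)
	}
	fmt.Println("wrote", out.Name())
}
```
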
diff --git a/pkg/build/packager/doc.go b/pkg/build/packager/doc.go
new file mode 100644
index 0000000..cfa21c6
--- /dev/null
+++ b/pkg/build/packager/doc.go
@@ -0,0 +1,19 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package packager contains strategies for building layers of a Docker image
+package packager
diff --git a/pkg/build/packager/factory.go b/pkg/build/packager/factory.go
new file mode 100644
index 0000000..48ad665
--- /dev/null
+++ b/pkg/build/packager/factory.go
@@ -0,0 +1,49 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package packager
+
+import (
+	"context"
+	"github.com/apache/camel-k/pkg/build"
+)
+
+const (
+	s2iRootImage = "fabric8/s2i-java:2.3"
+	//javaRootImage = "fabric8/java-jboss-openjdk8-jdk:1.5.1"
+	javaRootImage = "fabric8/java-alpine-openjdk8-jdk:1.5.1"
+)
+
+// NewS2IStandardPackager creates a standard packager for S2I builds
+func NewS2IStandardPackager(ctx context.Context) build.Packager {
+	return newBasePackager(ctx, s2iRootImage)
+}
+
+// NewS2IIncrementalPackager creates a incremental packager for S2I builds
+func NewS2IIncrementalPackager(ctx context.Context, lister PublishedImagesLister) build.Packager {
+	return newIncrementalPackager(ctx, lister, s2iRootImage)
+}
+
+// NewJavaStandardPackager creates a standard packager for Java Docker builds
+func NewJavaStandardPackager(ctx context.Context) build.Packager {
+	return newBasePackager(ctx, javaRootImage)
+}
+
+// NewJavaIncrementalPackager creates a incremental packager for Java Docker builds
+func NewJavaIncrementalPackager(ctx context.Context, lister PublishedImagesLister) build.Packager {
+	return newIncrementalPackager(ctx, lister, javaRootImage)
+}
diff --git a/pkg/build/publish/s2i_incremental_publisher.go b/pkg/build/packager/incremental.go
similarity index 62%
rename from pkg/build/publish/s2i_incremental_publisher.go
rename to pkg/build/packager/incremental.go
index ae996e4..cb9334b 100644
--- a/pkg/build/publish/s2i_incremental_publisher.go
+++ b/pkg/build/packager/incremental.go
@@ -15,7 +15,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package publish
+package packager
 
 import (
 	"context"
@@ -23,36 +23,31 @@ import (
 	"github.com/apache/camel-k/pkg/build"
 )
 
-type s2iIncrementalPublisher struct {
-	s2iPublisher *s2iPublisher
-	lister       PublishedImagesLister
+type incrementalPackager struct {
+	commonPackager *commonPackager
+	lister         PublishedImagesLister
+	rootImage      string
 }
 
-// PublishedImage represent a base image that can be used as starting point
-type PublishedImage struct {
-	Image     string
-	Classpath []string
-}
-
-// PublishedImagesLister allows to list all images already published
-type PublishedImagesLister interface {
-	ListPublishedImages() ([]PublishedImage, error)
+// newIncrementalPackager creates a new packager that is able to create a layer on top of a existing image
+func newIncrementalPackager(ctx context.Context, lister PublishedImagesLister, rootImage string) build.Packager {
+	layeredPackager := incrementalPackager{
+		lister:    lister,
+		rootImage: rootImage,
+	}
+	layeredPackager.commonPackager = newBasePackagerWithSelector(ctx, layeredPackager.selectArtifactsToUpload)
+	return &layeredPackager
 }
 
-// NewS2IIncrementalPublisher creates a new publisher that is able to do a Openshift S2I binary builds on top of other builds
-func NewS2IIncrementalPublisher(ctx context.Context, namespace string, lister PublishedImagesLister) build.Publisher {
-	layeredPublisher := s2iIncrementalPublisher{
-		lister: lister,
-	}
-	layeredPublisher.s2iPublisher = newS2IPublisher(ctx, namespace, layeredPublisher.selectArtifactsToUpload)
-	return &layeredPublisher
+func (p *incrementalPackager) Package(req build.Request, assembled build.AssembledOutput) <-chan build.PackagedOutput {
+	return p.commonPackager.Package(req, assembled)
 }
 
-func (p *s2iIncrementalPublisher) Publish(req build.Request, assembled build.AssembledOutput) <-chan build.PublishedOutput {
-	return p.s2iPublisher.Publish(req, assembled)
+func (p *incrementalPackager) Cleanup(output build.PackagedOutput) {
+	p.commonPackager.Cleanup(output)
 }
 
-func (p *s2iIncrementalPublisher) selectArtifactsToUpload(entries []build.ClasspathEntry) (string, []build.ClasspathEntry, error) {
+func (p *incrementalPackager) selectArtifactsToUpload(entries []build.ClasspathEntry) (string, []build.ClasspathEntry, error) {
 	images, err := p.lister.ListPublishedImages()
 	if err != nil {
 		return "", nil, err
@@ -71,10 +66,10 @@ func (p *s2iIncrementalPublisher) selectArtifactsToUpload(entries []build.Classp
 	}
 
 	// return default selection
-	return baseImage, entries, nil
+	return p.rootImage, entries, nil
 }
 
-func (p *s2iIncrementalPublisher) findBestImage(images []PublishedImage, entries []build.ClasspathEntry) (*PublishedImage, map[string]bool) {
+func (p *incrementalPackager) findBestImage(images []PublishedImage, entries []build.ClasspathEntry) (*PublishedImage, map[string]bool) {
 	if len(images) == 0 {
 		return nil, nil
 	}
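
The incremental packager reuses the existing best-image selection (findBestImage itself is unchanged and not shown in this diff): pick a previously published context image and upload only the artifacts it is still missing. As a rough, self-contained illustration of that idea — not the project's actual selection logic — a sketch with simplified types:

```
package main

import "fmt"

// publishedImage mirrors the idea of packager.PublishedImage: an existing
// image plus the classpath it already provides (illustrative names only).
type publishedImage struct {
	image     string
	classpath []string
}

// bestBaseImage picks the image that already contains the most required
// artifacts and reports which artifacts still have to be layered on top.
func bestBaseImage(images []publishedImage, required []string) (string, []string) {
	bestImage, bestMissing, found := "", required, false
	for _, img := range images {
		present := map[string]bool{}
		for _, cp := range img.classpath {
			present[cp] = true
		}
		missing := []string{}
		for _, artifact := range required {
			if !present[artifact] {
				missing = append(missing, artifact)
			}
		}
		if !found || len(missing) < len(bestMissing) {
			bestImage, bestMissing, found = img.image, missing, true
		}
	}
	return bestImage, bestMissing
}

func main() {
	images := []publishedImage{
		{image: "registry/ctx-1", classpath: []string{"camel-core", "camel-groovy"}},
	}
	base, todo := bestBaseImage(images, []string{"camel-core", "camel-groovy", "camel-http"})
	fmt.Println(base, todo) // registry/ctx-1 [camel-http]
}
```
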
diff --git a/pkg/build/packager/types.go b/pkg/build/packager/types.go
new file mode 100644
index 0000000..4965e28
--- /dev/null
+++ b/pkg/build/packager/types.go
@@ -0,0 +1,29 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package packager
+
+// PublishedImage represent a base image that can be used as starting point
+type PublishedImage struct {
+	Image     string
+	Classpath []string
+}
+
+// PublishedImagesLister allows to list all images already published
+type PublishedImagesLister interface {
+	ListPublishedImages() ([]PublishedImage, error)
+}
\ No newline at end of file
diff --git a/pkg/build/publish/kaniko_publisher.go b/pkg/build/publish/kaniko_publisher.go
new file mode 100644
index 0000000..b36e867
--- /dev/null
+++ b/pkg/build/publish/kaniko_publisher.go
@@ -0,0 +1,225 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package publish
+
+import (
+	tarutils "archive/tar"
+	"context"
+	"github.com/apache/camel-k/pkg/util/kubernetes"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"time"
+
+	"github.com/apache/camel-k/pkg/build"
+	"github.com/operator-framework/operator-sdk/pkg/sdk"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+type kanikoPublisher struct {
+	buffer    chan kanikoPublishOperation
+	namespace string
+	registry  string
+}
+
+type kanikoPublishOperation struct {
+	request   build.Request
+	assembled build.AssembledOutput
+	packaged  build.PackagedOutput
+	output    chan build.PublishedOutput
+}
+
+// NewKanikoPublisher creates a new publisher doing a Kaniko image push
+func NewKanikoPublisher(ctx context.Context, namespace string, registry string) build.Publisher {
+	publisher := kanikoPublisher{
+		buffer:    make(chan kanikoPublishOperation, 100),
+		namespace: namespace,
+		registry:  registry,
+	}
+	go publisher.publishCycle(ctx)
+	return &publisher
+}
+
+func (b *kanikoPublisher) Publish(request build.Request, assembled build.AssembledOutput, packaged build.PackagedOutput) <-chan build.PublishedOutput {
+	res := make(chan build.PublishedOutput, 1)
+	op := kanikoPublishOperation{
+		request:   request,
+		assembled: assembled,
+		packaged:  packaged,
+		output:    res,
+	}
+	b.buffer <- op
+	return res
+}
+
+func (b *kanikoPublisher) publishCycle(ctx context.Context) {
+	for {
+		select {
+		case <-ctx.Done():
+			b.buffer = nil
+			return
+		case op := <-b.buffer:
+			now := time.Now()
+			logrus.Info("Starting a new image publication")
+			res := b.execute(op.request, op.assembled, op.packaged)
+			elapsed := time.Now().Sub(now)
+
+			if res.Error != nil {
+				logrus.Error("Error during publication (total time ", elapsed.Seconds(), " seconds): ", res.Error)
+			} else {
+				logrus.Info("Publication completed in ", elapsed.Seconds(), " seconds")
+			}
+
+			op.output <- res
+		}
+	}
+}
+
+func (b *kanikoPublisher) execute(request build.Request, assembled build.AssembledOutput, packaged build.PackagedOutput) build.PublishedOutput {
+	image, err := b.publish(packaged.TarFile, packaged.BaseImage, request)
+	if err != nil {
+		return build.PublishedOutput{Error: err}
+	}
+
+	return build.PublishedOutput{Image: image}
+}
+
+func (b *kanikoPublisher) publish(tarFile string, baseImageName string, source build.Request) (string, error) {
+	image := b.registry + "/" + b.namespace + "/camel-k-" + source.Identifier.Name + ":" + source.Identifier.Qualifier
+	contextDir, err := b.prepareContext(tarFile, baseImageName)
+	if err != nil {
+		return "", err
+	}
+	pod := v1.Pod{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: v1.SchemeGroupVersion.String(),
+			Kind:       "Pod",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: b.namespace,
+			Name:      "camel-k-" + source.Identifier.Name,
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name:  "kaniko",
+					Image: "gcr.io/kaniko-project/executor:latest",
+					Args: []string{
+						"--dockerfile=Dockerfile",
+						"--context=" + contextDir,
+						"--destination=" + image,
+						"--insecure",
+					},
+					VolumeMounts: []v1.VolumeMount{
+						{
+							Name:      "camel-k-builder",
+							MountPath: "/workspace",
+						},
+					},
+				},
+			},
+			RestartPolicy: v1.RestartPolicyNever,
+			Volumes: []v1.Volume{
+				{
+					Name: "camel-k-builder",
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "camel-k-builder",
+							ReadOnly:  true,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	sdk.Delete(&pod)
+	err = sdk.Create(&pod)
+	if err != nil {
+		return "", errors.Wrap(err, "cannot create kaniko builder pod")
+	}
+
+	err = kubernetes.WaitCondition(&pod, func(obj interface{}) (bool, error) {
+		if val, ok := obj.(*v1.Pod); ok {
+			if val.Status.Phase == v1.PodSucceeded {
+				return true, nil
+			} else if val.Status.Phase == v1.PodFailed {
+				return false, errors.New("build failed")
+			}
+		}
+		return false, nil
+	}, 10*time.Minute)
+
+	if err != nil {
+		return "", err
+	}
+
+	return image, nil
+}
+
+func (b *kanikoPublisher) prepareContext(tarName string, baseImage string) (string, error) {
+	baseDir, _ := path.Split(tarName)
+	contextDir := path.Join(baseDir, "context")
+	if err := b.unTar(tarName, contextDir); err != nil {
+		return "", err
+	}
+
+	dockerFileContent := []byte(`
+		FROM ` + baseImage + `
+		ADD . /deployments
+	`)
+	if err := ioutil.WriteFile(path.Join(contextDir, "Dockerfile"), dockerFileContent, 0777); err != nil {
+		return "", err
+	}
+	return contextDir, nil
+}
+
+func (b *kanikoPublisher) unTar(tarName string, dir string) error {
+	file, err := os.Open(tarName)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+	reader := tarutils.NewReader(file)
+	for {
+		header, err := reader.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		targetName := path.Join(dir, header.Name)
+		targetDir, _ := path.Split(targetName)
+		if err := os.MkdirAll(targetDir, 0777); err != nil {
+			return err
+		}
+		buffer, err := ioutil.ReadAll(reader)
+		if err != nil {
+			return err
+		}
+		if err := ioutil.WriteFile(targetName, buffer, os.FileMode(header.Mode)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
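
The publisher blocks on kubernetes.WaitCondition until the Kaniko pod reaches PodSucceeded or PodFailed. That helper is not part of this diff; the following self-contained sketch only illustrates the kind of poll-until-condition loop it stands for, with the real API-server watch replaced by a local callback.

```
package main

import (
	"context"
	"fmt"
	"time"
)

// waitFor polls check until it reports success, returns an error, or the
// timeout expires. It is a sketch of the helper the publisher relies on
// (kubernetes.WaitCondition), not its actual implementation.
func waitFor(ctx context.Context, interval, timeout time.Duration, check func() (bool, error)) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return fmt.Errorf("condition not met: %w", ctx.Err())
		case <-ticker.C:
			done, err := check()
			if err != nil {
				return err
			}
			if done {
				return nil
			}
		}
	}
}

func main() {
	start := time.Now()
	err := waitFor(context.Background(), 50*time.Millisecond, time.Second, func() (bool, error) {
		// Pretend the pod reaches PodSucceeded after roughly 200ms.
		return time.Since(start) > 200*time.Millisecond, nil
	})
	fmt.Println("wait result:", err)
}
```
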
diff --git a/pkg/build/publish/s2i_publisher.go b/pkg/build/publish/s2i_publisher.go
index 97147a7..09a0105 100644
--- a/pkg/build/publish/s2i_publisher.go
+++ b/pkg/build/publish/s2i_publisher.go
@@ -20,14 +20,11 @@ package publish
 import (
 	"context"
 	"io/ioutil"
-	"path"
 	"time"
 
 	"github.com/apache/camel-k/pkg/build"
 	"github.com/apache/camel-k/pkg/util/kubernetes"
 	"github.com/apache/camel-k/pkg/util/kubernetes/customclient"
-	"github.com/apache/camel-k/pkg/util/maven"
-	"github.com/apache/camel-k/pkg/util/tar"
 	buildv1 "github.com/openshift/api/build/v1"
 	imagev1 "github.com/openshift/api/image/v1"
 	"github.com/operator-framework/operator-sdk/pkg/sdk"
@@ -39,49 +36,34 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 )
 
-const (
-	artifactDirPrefix = "s2i-"
-	baseImage         = "fabric8/s2i-java:2.3"
-)
-
 type s2iPublisher struct {
-	buffer    chan publishOperation
+	buffer    chan s2iPublishOperation
 	namespace string
-	uploadedArtifactsSelector
 }
 
-type publishOperation struct {
+type s2iPublishOperation struct {
 	request   build.Request
 	assembled build.AssembledOutput
+	packaged  build.PackagedOutput
 	output    chan build.PublishedOutput
 }
 
-type uploadedArtifactsSelector func([]build.ClasspathEntry) (string, []build.ClasspathEntry, error)
-
 // NewS2IPublisher creates a new publisher doing a Openshift S2I binary build
 func NewS2IPublisher(ctx context.Context, namespace string) build.Publisher {
-	identitySelector := func(entries []build.ClasspathEntry) (string, []build.ClasspathEntry, error) {
-		return baseImage, entries, nil
-	}
-	return newS2IPublisher(ctx, namespace, identitySelector)
-}
-
-// NewS2IPublisher creates a new publisher doing a Openshift S2I binary build
-func newS2IPublisher(ctx context.Context, namespace string, uploadedArtifactsSelector uploadedArtifactsSelector) *s2iPublisher {
 	publisher := s2iPublisher{
-		buffer:                    make(chan publishOperation, 100),
-		namespace:                 namespace,
-		uploadedArtifactsSelector: uploadedArtifactsSelector,
+		buffer:    make(chan s2iPublishOperation, 100),
+		namespace: namespace,
 	}
 	go publisher.publishCycle(ctx)
 	return &publisher
 }
 
-func (b *s2iPublisher) Publish(request build.Request, assembled build.AssembledOutput) <-chan build.PublishedOutput {
+func (b *s2iPublisher) Publish(request build.Request, assembled build.AssembledOutput, packaged build.PackagedOutput) <-chan build.PublishedOutput {
 	res := make(chan build.PublishedOutput, 1)
-	op := publishOperation{
+	op := s2iPublishOperation{
 		request:   request,
 		assembled: assembled,
+		packaged:  packaged,
 		output:    res,
 	}
 	b.buffer <- op
@@ -97,7 +79,7 @@ func (b *s2iPublisher) publishCycle(ctx context.Context) {
 		case op := <-b.buffer:
 			now := time.Now()
 			logrus.Info("Starting a new image publication")
-			res := b.execute(op.request, op.assembled)
+			res := b.execute(op.request, op.assembled, op.packaged)
 			elapsed := time.Now().Sub(now)
 
 			if res.Error != nil {
@@ -111,18 +93,8 @@ func (b *s2iPublisher) publishCycle(ctx context.Context) {
 	}
 }
 
-func (b *s2iPublisher) execute(request build.Request, assembled build.AssembledOutput) build.PublishedOutput {
-	baseImageName, selectedArtifacts, err := b.uploadedArtifactsSelector(assembled.Classpath)
-	if err != nil {
-		return build.PublishedOutput{Error: err}
-	}
-
-	tarFile, err := b.createTar(assembled, selectedArtifacts)
-	if err != nil {
-		return build.PublishedOutput{Error: err}
-	}
-
-	image, err := b.publish(tarFile, baseImageName, request)
+func (b *s2iPublisher) execute(request build.Request, assembled build.AssembledOutput, packaged build.PackagedOutput) build.PublishedOutput {
+	image, err := b.publish(packaged.TarFile, packaged.BaseImage, request)
 	if err != nil {
 		return build.PublishedOutput{Error: err}
 	}
@@ -253,50 +225,3 @@ func (b *s2iPublisher) publish(tarFile string, imageName string, source build.Re
 	}
 	return is.Status.DockerImageRepository + ":" + source.Identifier.Qualifier, nil
 }
-
-func (b *s2iPublisher) createTar(assembled build.AssembledOutput, selectedArtifacts []build.ClasspathEntry) (string, error) {
-	artifactDir, err := ioutil.TempDir("", artifactDirPrefix)
-	if err != nil {
-		return "", errors.Wrap(err, "could not create temporary dir for s2i artifacts")
-	}
-
-	tarFileName := path.Join(artifactDir, "occi.tar")
-	tarAppender, err := tar.NewAppender(tarFileName)
-	if err != nil {
-		return "", err
-	}
-	defer tarAppender.Close()
-
-	tarDir := "dependencies/"
-	for _, entry := range selectedArtifacts {
-		gav, err := maven.ParseGAV(entry.ID)
-		if err != nil {
-			return "", nil
-		}
-
-		tarPath := path.Join(tarDir, gav.GroupID)
-		_, err = tarAppender.AddFile(entry.Location, tarPath)
-		if err != nil {
-			return "", err
-		}
-	}
-
-	cp := ""
-	for _, entry := range assembled.Classpath {
-		gav, err := maven.ParseGAV(entry.ID)
-		if err != nil {
-			return "", nil
-		}
-		tarPath := path.Join(tarDir, gav.GroupID)
-		_, fileName := path.Split(entry.Location)
-		fileName = path.Join(tarPath, fileName)
-		cp += fileName + "\n"
-	}
-
-	err = tarAppender.AppendData([]byte(cp), "classpath")
-	if err != nil {
-		return "", err
-	}
-
-	return tarFileName, nil
-}
diff --git a/pkg/client/cmd/install.go b/pkg/client/cmd/install.go
index 0d20021..abe61c5 100644
--- a/pkg/client/cmd/install.go
+++ b/pkg/client/cmd/install.go
@@ -41,6 +41,7 @@ func newCmdInstall(rootCmdOptions *RootCmdOptions) *cobra.Command {
 
 	cmd.Flags().BoolVar(&options.clusterSetupOnly, "cluster-setup", false, "Execute cluster-wide operations only (may require admin rights)")
 	cmd.Flags().BoolVar(&options.exampleSetup, "example", false, "Install example integration")
+	cmd.Flags().StringVar(&options.registry, "registry", "", "A Docker registry that can be used to publish images")
 	cmd.ParseFlags(os.Args)
 
 	return &cmd
@@ -50,6 +51,7 @@ type installCmdOptions struct {
 	*RootCmdOptions
 	clusterSetupOnly bool
 	exampleSetup     bool
+	registry         string
 }
 
 func (o *installCmdOptions) install(cmd *cobra.Command, args []string) error {
@@ -69,7 +71,7 @@ func (o *installCmdOptions) install(cmd *cobra.Command, args []string) error {
 			return err
 		}
 
-		err = install.Platform(namespace)
+		err = install.Platform(namespace, o.registry)
 		if err != nil {
 			return err
 		}
diff --git a/pkg/install/common.go b/pkg/install/common.go
index bef7f6f..6d7c61f 100644
--- a/pkg/install/common.go
+++ b/pkg/install/common.go
@@ -24,6 +24,7 @@ import (
 	"github.com/operator-framework/operator-sdk/pkg/sdk"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 )
 
 // Resources installs named resources from the project resource directory
@@ -43,11 +44,16 @@ func Resource(namespace string, name string) error {
 		return err
 	}
 
+	return RuntimeObject(namespace, obj)
+}
+
+// RuntimeObject installs a single runtime object
+func RuntimeObject(namespace string, obj runtime.Object) error {
 	if metaObject, ok := obj.(metav1.Object); ok {
 		metaObject.SetNamespace(namespace)
 	}
 
-	err = sdk.Create(obj)
+	err := sdk.Create(obj)
 	if err != nil && errors.IsAlreadyExists(err) {
 		// Don't recreate Service object
 		if obj.GetObjectKind().GroupVersionKind().Kind == "Service" {
@@ -60,6 +66,9 @@ func Resource(namespace string, name string) error {
 		if obj.GetObjectKind().GroupVersionKind().Kind == v1alpha1.IntegrationPlatformKind {
 			return nil
 		}
+		if obj.GetObjectKind().GroupVersionKind().Kind == "PersistentVolumeClaim" {
+			return nil
+		}
 		return sdk.Update(obj)
 	}
 	return err
diff --git a/pkg/install/operator.go b/pkg/install/operator.go
index eb3ac4c..59d5a13 100644
--- a/pkg/install/operator.go
+++ b/pkg/install/operator.go
@@ -17,20 +17,79 @@ limitations under the License.
 
 package install
 
+import (
+	"errors"
+	"github.com/apache/camel-k/deploy"
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/util/kubernetes"
+	"github.com/apache/camel-k/pkg/util/minishift"
+	"github.com/apache/camel-k/pkg/util/openshift"
+)
+
 // Operator --
 func Operator(namespace string) error {
+	isOpenshift, err := openshift.IsOpenShift()
+	if err != nil {
+		return err
+	}
+	if isOpenshift {
+		return installOpenshift(namespace)
+	}
+	return installKubernetes(namespace)
+}
+
+func installOpenshift(namespace string) error {
+	return Resources(namespace,
+		"operator-service-account.yaml",
+		"operator-role-openshift.yaml",
+		"operator-role-binding.yaml",
+		"operator-deployment-openshift.yaml",
+		"operator-service.yaml",
+	)
+}
+
+func installKubernetes(namespace string) error {
 	return Resources(namespace,
 		"operator-service-account.yaml",
-		"operator-role-openshift.yaml", // TODO distinguish between Openshift and Kubernetes
+		"operator-role-kubernetes.yaml",
 		"operator-role-binding.yaml",
-		"operator-deployment.yaml",
+		"builder-pvc.yaml",
+		"operator-deployment-kubernetes.yaml",
 		"operator-service.yaml",
 	)
 }
 
 // Platform installs the platform custom resource
-func Platform(namespace string) error {
-	return Resource(namespace, "platform-cr.yaml")
+func Platform(namespace string, registry string) error {
+	isOpenshift, err := openshift.IsOpenShift()
+	if err != nil {
+		return err
+	}
+	if isOpenshift {
+		return Resource(namespace, "platform-cr.yaml")
+	}
+	platform, err := kubernetes.LoadResourceFromYaml(deploy.Resources["platform-cr.yaml"])
+	if err != nil {
+		return err
+	}
+	if pl, ok := platform.(*v1alpha1.IntegrationPlatform); !ok {
+		panic("cannot find integration platform template")
+	} else {
+		if registry == "" {
+			// This operation should be done here in the installer
+			// because the operator is not allowed to look into the "kube-system" namespace
+			minishiftRegistry, err := minishift.FindRegistry()
+			if err != nil {
+				return err
+			}
+			if minishiftRegistry == nil {
+				return errors.New("cannot find automatically a registry where to push images")
+			}
+			registry = *minishiftRegistry
+		}
+		pl.Spec.Build.Registry = registry
+		return RuntimeObject(namespace, pl)
+	}
 }
 
 // Example --
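
The new Platform signature resolves the registry with a simple precedence: an explicit --registry value wins, otherwise the installer tries to auto-detect the Minikube registry addon, and installation fails if neither is available. A compact sketch of that precedence, with the detection call stubbed out (detect stands in for minishift.FindRegistry, and the address in main is illustrative):

```
package main

import (
	"errors"
	"fmt"
)

// resolveRegistry sketches the precedence used by Platform: flag value first,
// auto-detected registry second, error otherwise.
func resolveRegistry(flagValue string, detect func() (*string, error)) (string, error) {
	if flagValue != "" {
		return flagValue, nil
	}
	detected, err := detect()
	if err != nil {
		return "", err
	}
	if detected == nil {
		return "", errors.New("cannot find automatically a registry where to push images")
	}
	return *detected, nil
}

func main() {
	detect := func() (*string, error) {
		addr := "10.101.71.144" // e.g. the ClusterIP of the registry addon service
		return &addr, nil
	}
	registry, err := resolveRegistry("", detect)
	fmt.Println(registry, err)
}
```
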
diff --git a/pkg/platform/build.go b/pkg/platform/build.go
index 994ced6..6e75ad2 100644
--- a/pkg/platform/build.go
+++ b/pkg/platform/build.go
@@ -23,6 +23,7 @@ import (
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
 	"github.com/apache/camel-k/pkg/build"
 	"github.com/apache/camel-k/pkg/build/assemble"
+	"github.com/apache/camel-k/pkg/build/packager"
 	"github.com/apache/camel-k/pkg/build/publish"
 	"github.com/operator-framework/operator-sdk/pkg/sdk"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -44,8 +45,13 @@ func GetPlatformBuildManager(ctx context.Context, namespace string) (*build.Mana
 
 	assembler := assemble.NewMavenAssembler(ctx)
 	if pl.Spec.Build.PublishStrategy == v1alpha1.IntegrationPlatformBuildPublishStrategyS2I {
-		publisher := publish.NewS2IIncrementalPublisher(ctx, namespace, newContextLister(namespace))
-		buildManager = build.NewManager(ctx, assembler, publisher)
+		packaging := packager.NewS2IIncrementalPackager(ctx, newContextLister(namespace))
+		publisher := publish.NewS2IPublisher(ctx, namespace)
+		buildManager = build.NewManager(ctx, assembler, packaging, publisher)
+	} else if pl.Spec.Build.PublishStrategy == v1alpha1.IntegrationPlatformBuildPublishStrategyKaniko && pl.Spec.Build.Registry != "" {
+		packaging := packager.NewJavaStandardPackager(ctx)
+		publisher := publish.NewKanikoPublisher(ctx, namespace, pl.Spec.Build.Registry)
+		buildManager = build.NewManager(ctx, assembler, packaging, publisher)
 	}
 
 	if buildManager == nil {
@@ -66,14 +72,14 @@ func newContextLister(namespace string) contextLister {
 	}
 }
 
-func (l contextLister) ListPublishedImages() ([]publish.PublishedImage, error) {
+func (l contextLister) ListPublishedImages() ([]packager.PublishedImage, error) {
 	list := v1alpha1.NewIntegrationContextList()
 
 	err := sdk.List(l.namespace, &list, sdk.WithListOptions(&metav1.ListOptions{}))
 	if err != nil {
 		return nil, err
 	}
-	images := make([]publish.PublishedImage, 0)
+	images := make([]packager.PublishedImage, 0)
 	for _, ctx := range list.Items {
 		if ctx.Status.Phase != v1alpha1.IntegrationContextPhaseReady || ctx.Labels == nil {
 			continue
@@ -82,7 +88,7 @@ func (l contextLister) ListPublishedImages() ([]publish.PublishedImage, error) {
 			continue
 		}
 
-		images = append(images, publish.PublishedImage{
+		images = append(images, packager.PublishedImage{
 			Image:     ctx.Status.Image,
 			Classpath: ctx.Status.Classpath,
 		})
diff --git a/pkg/stub/action/platform/initialize.go b/pkg/stub/action/platform/initialize.go
index 4215daa..07ea31f 100644
--- a/pkg/stub/action/platform/initialize.go
+++ b/pkg/stub/action/platform/initialize.go
@@ -18,12 +18,12 @@ limitations under the License.
 package platform
 
 import (
+	"errors"
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
 	platformutils "github.com/apache/camel-k/pkg/platform"
-	"github.com/operator-framework/operator-sdk/pkg/k8sclient"
+	"github.com/apache/camel-k/pkg/util/openshift"
 	"github.com/operator-framework/operator-sdk/pkg/sdk"
 	"github.com/sirupsen/logrus"
-	"k8s.io/apimachinery/pkg/api/errors"
 )
 
 // NewInitializeAction returns a action that initializes the platform configuration when not provided by the user
@@ -63,9 +63,9 @@ func (action *initializeAction) Handle(platform *v1alpha1.IntegrationPlatform) e
 	// update missing fields in the resource
 	if target.Spec.Cluster == "" {
 		// determine the kind of cluster the platform in installed into
-		if openshift, err := action.isOpenshift(); err != nil {
+		if isOpenshift, err := openshift.IsOpenShift(); err != nil {
 			return err
-		} else if openshift {
+		} else if isOpenshift {
 			target.Spec.Cluster = v1alpha1.IntegrationPlatformClusterOpenShift
 		} else {
 			target.Spec.Cluster = v1alpha1.IntegrationPlatformClusterKubernetes
@@ -77,26 +77,19 @@ func (action *initializeAction) Handle(platform *v1alpha1.IntegrationPlatform) e
 			target.Spec.Build.PublishStrategy = v1alpha1.IntegrationPlatformBuildPublishStrategyS2I
 		} else {
 			target.Spec.Build.PublishStrategy = v1alpha1.IntegrationPlatformBuildPublishStrategyKaniko
-			// TODO discover registry location
 		}
 	}
 
+	if target.Spec.Build.PublishStrategy == v1alpha1.IntegrationPlatformBuildPublishStrategyKaniko && target.Spec.Build.Registry == "" {
+		return errors.New("no registry specified for publishing images")
+	}
+
 	// next status
 	logrus.Info("Platform ", target.Name, " transitioning to state ", v1alpha1.IntegrationPlatformPhaseCreating)
 	target.Status.Phase = v1alpha1.IntegrationPlatformPhaseCreating
 	return sdk.Update(target)
 }
 
-func (action *initializeAction) isOpenshift() (bool, error) {
-	_, err := k8sclient.GetKubeClient().Discovery().ServerResourcesForGroupVersion("image.openshift.io/v1")
-	if err != nil && errors.IsNotFound(err) {
-		return false, nil
-	} else if err != nil {
-		return false, err
-	}
-	return true, nil
-}
-
 func (action *initializeAction) isDuplicate(thisPlatform *v1alpha1.IntegrationPlatform) (bool, error) {
 	platforms, err := platformutils.ListPlatforms(thisPlatform.Namespace)
 	if err != nil {
diff --git a/pkg/util/minishift/minishift.go b/pkg/util/minishift/minishift.go
new file mode 100644
index 0000000..9e5465c
--- /dev/null
+++ b/pkg/util/minishift/minishift.go
@@ -0,0 +1,60 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package minishift contains utilities for Minishift deployments
+package minishift
+
+import (
+	"github.com/operator-framework/operator-sdk/pkg/sdk"
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"strconv"
+)
+
+const (
+	registryNamespace = "kube-system"
+)
+
+// FindRegistry returns the Minishift registry location if any
+func FindRegistry() (*string, error) {
+	svcs := v1.ServiceList{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: v1.SchemeGroupVersion.String(),
+			Kind:       "Service",
+		},
+	}
+	options := metav1.ListOptions{
+		LabelSelector: "kubernetes.io/minikube-addons=registry",
+	}
+	if err := sdk.List(registryNamespace, &svcs, sdk.WithListOptions(&options)); err != nil {
+		return nil, err
+	}
+	if len(svcs.Items) == 0 {
+		return nil, nil
+	}
+	svc := svcs.Items[0]
+	ip := svc.Spec.ClusterIP
+	portStr := ""
+	if len(svc.Spec.Ports) > 0 {
+		port := svc.Spec.Ports[0].Port
+		if port > 0 && port != 80 {
+			portStr = ":" + strconv.FormatInt(int64(port), 10)
+		}
+	}
+	registry := ip + portStr
+	return &registry, nil
+}
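
Condensed to its essence, the address returned by FindRegistry is the registry service's cluster IP, with the port appended only when it is set and different from 80. A tiny standalone illustration of that formatting rule (the IPs are made up):

```
package main

import (
	"fmt"
	"strconv"
)

// registryAddress mirrors the formatting rule used by FindRegistry.
func registryAddress(ip string, port int32) string {
	if port > 0 && port != 80 {
		return ip + ":" + strconv.FormatInt(int64(port), 10)
	}
	return ip
}

func main() {
	fmt.Println(registryAddress("10.101.71.144", 80))   // 10.101.71.144
	fmt.Println(registryAddress("10.101.71.144", 5000)) // 10.101.71.144:5000
}
```
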
diff --git a/pkg/util/openshift/openshift.go b/pkg/util/openshift/openshift.go
new file mode 100644
index 0000000..f5b69b0
--- /dev/null
+++ b/pkg/util/openshift/openshift.go
@@ -0,0 +1,34 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package openshift
+
+import (
+	"github.com/operator-framework/operator-sdk/pkg/k8sclient"
+	"k8s.io/apimachinery/pkg/api/errors"
+)
+
+// IsOpenShift returns true if we are connected to a OpenShift cluster
+func IsOpenShift() (bool, error) {
+	_, err := k8sclient.GetKubeClient().Discovery().ServerResourcesForGroupVersion("image.openshift.io/v1")
+	if err != nil && errors.IsNotFound(err) {
+		return false, nil
+	} else if err != nil {
+		return false, err
+	}
+	return true, nil
+}
\ No newline at end of file


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services