Posted to dev@camel.apache.org by GitBox <gi...@apache.org> on 2019/01/07 14:25:36 UTC

[GitHub] nicolaferraro closed pull request #315: Migrate to operator-sdk 0.3.0

nicolaferraro closed pull request #315: Migrate to operator-sdk 0.3.0
URL: https://github.com/apache/camel-k/pull/315

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

diff --git a/.gitignore b/.gitignore
index 871a20b8..e43f8299 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,9 +16,9 @@
 .gopath
 
 # Temporary Build Files
-tmp/_output
-tmp/_test
-tmp/_maven_output
+build/_output
+build/_test
+build/_maven_output
 
 # eclipse / vscode
 .settings
diff --git a/.golangci.yml b/.golangci.yml
index d4ece648..a67fb8dc 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,6 +1,6 @@
 linters-settings:
   lll:
-    line-length: 150
+    line-length: 170
 linters:
   enable-all: true
   disable:
diff --git a/.travis.yml b/.travis.yml
index 9539e08f..572e97f5 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -14,4 +14,4 @@ services:
   - docker
 
 script:
-  - ./build/travis_build.sh
+  - ./script/travis_build.sh
diff --git a/Gopkg.lock b/Gopkg.lock
index a097acd2..f9ea74a5 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -2,18 +2,18 @@
 
 
 [[projects]]
-  digest = "1:5c3894b2aa4d6bead0ceeea6831b305d62879c871780e7b76296ded1b004bc57"
+  digest = "1:fd1a7ca82682444a45424f6af37b1e0373f632e5a303441b111558ae8656a9b7"
   name = "cloud.google.com/go"
   packages = ["compute/metadata"]
-  pruneopts = "NUT"
-  revision = "debcad1964693daf8ef4bc06292d7e828e075130"
-  version = "v0.31.0"
+  pruneopts = "NT"
+  revision = "0ebda48a7f143b1cce9eb37a8c1106ac762a3430"
+  version = "v0.34.0"
 
 [[projects]]
   digest = "1:d8ebbd207f3d3266d4423ce4860c9f3794956306ded6c7ba312ecc69cdfbf04c"
   name = "github.com/PuerkitoBio/purell"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4"
   version = "v1.1.0"
 
@@ -22,41 +22,41 @@
   digest = "1:8098cd40cd09879efbf12e33bcd51ead4a66006ac802cd563a66c4f3373b9727"
   name = "github.com/PuerkitoBio/urlesc"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "de5bf2ad457846296e2031421a34e2568e304e35"
 
 [[projects]]
   branch = "master"
-  digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd"
+  digest = "1:c819830f4f5ef85874a90ac3cbcc96cd322c715f5c96fbe4722eacd3dafbaa07"
   name = "github.com/beorn7/perks"
   packages = ["quantile"]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
 
 [[projects]]
-  digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
+  digest = "1:4b8b5811da6970495e04d1f4e98bb89518cc3cfc3b3f456bdb876ed7b6c74049"
   name = "github.com/davecgh/go-spew"
   packages = ["spew"]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
   version = "v1.1.1"
 
 [[projects]]
-  digest = "1:3537d33c077a9666720dc987fddfecb07270606ac0a58f67abd08e3b252c0a45"
+  digest = "1:e6f888d4be8ec0f05c50e2aba83da4948b58045dee54d03be81fa74ea673302c"
   name = "github.com/emicklei/go-restful"
   packages = [
     ".",
     "log",
   ]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "3eb9738c1697594ea6e71a7156a9bb32ed216cf0"
   version = "v2.8.0"
 
 [[projects]]
-  digest = "1:32598368f409bbee79deb9d43569fcd92b9fb27f39155f5e166b3371217f051f"
+  digest = "1:820227d03dc661d34f837f3704626d2837dbfbf9f0ec8fdf1f58e683dc5f56fc"
   name = "github.com/evanphx/json-patch"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5"
   version = "v4.1.0"
 
@@ -64,7 +64,7 @@
   digest = "1:aa3ed0a71c4e66e4ae6486bf97a3f4cab28edc78df2e50c5ad01dc7d91604b88"
   name = "github.com/fatih/structs"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "4966fc68f5b7593aafa6cbbba2d65ec6e1416047"
   version = "v1.1.0"
 
@@ -72,71 +72,95 @@
   digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756"
   name = "github.com/ghodss/yaml"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
   version = "v1.0.0"
 
+[[projects]]
+  branch = "master"
+  digest = "1:d421af4c4fe51d399667d573982d663fe1fa67020a88d3ae43466ebfe8e2b5c9"
+  name = "github.com/go-logr/logr"
+  packages = ["."]
+  pruneopts = "NT"
+  revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e"
+
+[[projects]]
+  digest = "1:340497a512995aa69c0add901d79a2096b3449d35a44a6f1f1115091a9f8c687"
+  name = "github.com/go-logr/zapr"
+  packages = ["."]
+  pruneopts = "NT"
+  revision = "7536572e8d55209135cd5e7ccf7fce43dca217ab"
+  version = "v0.1.0"
+
 [[projects]]
   digest = "1:260f7ebefc63024c8dfe2c9f1a2935a89fa4213637a1f522f592f80c001cc441"
   name = "github.com/go-openapi/jsonpointer"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004"
-  version = "v0.17.2"
+  version = "v0.18.0"
 
 [[projects]]
   digest = "1:98abd61947ff5c7c6fcfec5473d02a4821ed3a2dd99a4fbfdb7925b0dd745546"
   name = "github.com/go-openapi/jsonreference"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "8483a886a90412cd6858df4ea3483dce9c8e35a3"
-  version = "v0.17.2"
+  version = "v0.18.0"
 
 [[projects]]
-  digest = "1:dfab391de021809e0041f0ab5648da6b74dd16a685472a1b8c3dc06b3dca1ee2"
+  digest = "1:4da4ea0a664ba528965683d350f602d0f11464e6bb2e17aad0914723bc25d163"
   name = "github.com/go-openapi/spec"
   packages = ["."]
-  pruneopts = "NUT"
-  revision = "5bae59e25b21498baea7f9d46e9c147ec106a42e"
-  version = "v0.17.2"
+  pruneopts = "NT"
+  revision = "5b6cdde3200976e3ecceb2868706ee39b6aff3e4"
+  version = "v0.18.0"
 
 [[projects]]
-  digest = "1:983f95b2fae6fe8fdd361738325ed6090f4f3bd15ce4db745e899fb5b0fdfc46"
+  digest = "1:dc0f590770e5a6c70ea086232324f7b7dc4857c60eca63ab8ff78e0a5cfcdbf3"
   name = "github.com/go-openapi/swag"
   packages = ["."]
-  pruneopts = "NUT"
-  revision = "5899d5c5e619fda5fa86e14795a835f473ca284c"
-  version = "v0.17.2"
+  pruneopts = "NT"
+  revision = "1d29f06aebd59ccdf11ae04aa0334ded96e2d909"
+  version = "v0.18.0"
 
 [[projects]]
-  digest = "1:ea634ab47d5c1f363bac9f8e0ff0ff0a29bcf291c5e470bd7489f05f099f9c56"
+  digest = "1:4fb6ac9e2e67130ed8c5db4154684b390c1c0ce213ba3f4532b7edc614f78999"
   name = "github.com/gobuffalo/envy"
   packages = ["."]
-  pruneopts = "NUT"
-  revision = "910ef88c9d32c6e779231577dfcf6ed8959bea2f"
-  version = "v1.6.8"
+  pruneopts = "NT"
+  revision = "801d7253ade1f895f74596b9a96147ed2d3b087e"
+  version = "v1.6.11"
 
 [[projects]]
-  digest = "1:8679b8a64f3613e9749c5640c3535c83399b8e69f67ce54d91dc73f6d77373af"
+  digest = "1:932970e69f16e127aa0653b8263ae588cd127fa53273e19ba44332902c9826f2"
   name = "github.com/gogo/protobuf"
   packages = [
     "proto",
     "sortkeys",
   ]
-  pruneopts = "NUT"
-  revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
-  version = "v1.1.1"
+  pruneopts = "NT"
+  revision = "4cbf7e384e768b4e01799441fdf2a706a5635ae7"
+  version = "v1.2.0"
 
 [[projects]]
   branch = "master"
   digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a"
   name = "github.com/golang/glog"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
 
 [[projects]]
-  digest = "1:63ccdfbd20f7ccd2399d0647a7d100b122f79c13bb83da9660b1598396fd9f62"
+  branch = "master"
+  digest = "1:aaedc94233e56ed57cdb04e3abfacc85c90c14082b62e3cdbe8ea72fc06ee035"
+  name = "github.com/golang/groupcache"
+  packages = ["lru"]
+  pruneopts = "NT"
+  revision = "c65c006176ff7ff98bb916961c7abbc6b0afc0aa"
+
+[[projects]]
+  digest = "1:d7cb4458ea8782e6efacd8f4940796ec559c90833509c436f40c4085b98156dd"
   name = "github.com/golang/protobuf"
   packages = [
     "proto",
@@ -145,7 +169,7 @@
     "ptypes/duration",
     "ptypes/timestamp",
   ]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
   version = "v1.2.0"
 
@@ -154,11 +178,11 @@
   digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107"
   name = "github.com/google/btree"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
 
 [[projects]]
-  digest = "1:d2754cafcab0d22c13541618a8029a70a8959eb3525ff201fe971637e2274cd0"
+  digest = "1:2ddc4fb22e0f7c7f6ce615f8b453aeb1c7d0eec746c1a5a62244363e1a836462"
   name = "github.com/google/go-cmp"
   packages = [
     "cmp",
@@ -167,7 +191,7 @@
     "cmp/internal/function",
     "cmp/internal/value",
   ]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
   version = "v0.2.0"
 
@@ -176,31 +200,39 @@
   digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"
   name = "github.com/google/gofuzz"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
 
 [[projects]]
-  digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e"
+  digest = "1:56a1f3949ebb7fa22fa6b4e4ac0fe0f77cc4faee5b57413e6fa9199a8458faf1"
+  name = "github.com/google/uuid"
+  packages = ["."]
+  pruneopts = "NT"
+  revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8"
+  version = "v1.1.0"
+
+[[projects]]
+  digest = "1:289332c13b80edfefc88397cce5266c16845dcf204fa2f6ac7e464ee4c7f6e96"
   name = "github.com/googleapis/gnostic"
   packages = [
     "OpenAPIv2",
     "compiler",
     "extensions",
   ]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
   version = "v0.2.0"
 
 [[projects]]
   branch = "master"
-  digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621"
+  digest = "1:97972f03fbf34ec4247ddc78ddb681389c468c020492aa32b109744a54fc0c14"
   name = "github.com/gregjones/httpcache"
   packages = [
     ".",
     "diskcache",
   ]
-  pruneopts = "NUT"
-  revision = "9cad4c3443a7200dd6400aef47183728de563a38"
+  pruneopts = "NT"
+  revision = "c63ab54fda8f77302f8d414e19933f2b6026a089"
 
 [[projects]]
   digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6"
@@ -209,7 +241,7 @@
     ".",
     "simplelru",
   ]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768"
   version = "v0.5.0"
 
@@ -217,7 +249,7 @@
   digest = "1:9a52adf44086cead3b384e5d0dbf7a1c1cce65e67552ee3383a8561c42a18cd3"
   name = "github.com/imdario/mergo"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4"
   version = "v0.3.6"
 
@@ -225,67 +257,68 @@
   digest = "1:406338ad39ab2e37b7f4452906442a3dbf0eb3379dd1f06aafb5c07e769a5fbb"
   name = "github.com/inconshreveable/mousetrap"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
   version = "v1.0"
 
 [[projects]]
-  digest = "1:da62aa6632d04e080b8a8b85a59ed9ed1550842a0099a55f3ae3a20d02a3745a"
+  digest = "1:f5b9328966ccea0970b1d15075698eff0ddb3e75889560aad2e9f76b289b536a"
   name = "github.com/joho/godotenv"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "23d116af351c84513e1946b527c88823e476be13"
   version = "v1.3.0"
 
 [[projects]]
-  digest = "1:8e36686e8b139f8fe240c1d5cf3a145bc675c22ff8e707857cdd3ae17b00d728"
+  digest = "1:1d39c063244ad17c4b18e8da1551163b6ffb52bd1640a49a8ec5c3b7bf4dbd5d"
   name = "github.com/json-iterator/go"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "1624edc4454b8682399def8740d46db5e4362ba4"
   version = "v1.1.5"
 
 [[projects]]
-  digest = "1:345bbba667abadd6263391c915251ede8d9fa6f6852839c60bb6738b6122b89c"
+  digest = "1:8faaaff026dfb90c01a725dbee08c6c2551d3d323144f1a3f163a927820a6e27"
   name = "github.com/knative/build"
   packages = [
     "pkg/apis/build",
     "pkg/apis/build/v1alpha1",
   ]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "94859753e2c6724df2be86f6a254f810895fa3eb"
   version = "v0.2.0"
 
 [[projects]]
-  branch = "master"
-  digest = "1:66120bf2cc6bd35213b3bb29513b3ce5e28b12ef647de2dbd3623ebaea9fe741"
+  digest = "1:ddabfb7fbdf06445655fe4fb8a7891215c792fa2f3e340f4a28aca0d82443980"
   name = "github.com/knative/eventing"
   packages = [
     "pkg/apis/duck/v1alpha1",
     "pkg/apis/eventing",
     "pkg/apis/eventing/v1alpha1",
   ]
-  pruneopts = "NUT"
-  revision = "8fcf54f7147d51b0825fc1d14befd88e1355fec5"
+  pruneopts = "NT"
+  revision = "90852711c747bbafbf61baa7c5786f0066b20d4e"
+  version = "v0.2.1"
 
 [[projects]]
   branch = "master"
-  digest = "1:d9cf052ed0cbd1fd616fc1c0a88877afb48ea58d7ca9aa13c0d308534c44d3ab"
+  digest = "1:9c58fccfc91968a8b5c8bd712898b6e97da32f950fea1d9979de38efa6e3b3ff"
   name = "github.com/knative/pkg"
   packages = [
     "apis",
     "apis/duck",
     "apis/duck/v1alpha1",
+    "changeset",
     "kmeta",
     "logging",
     "logging/logkey",
     "webhook",
   ]
-  pruneopts = "NUT"
-  revision = "af2c4bc84ed90694967a799bde0f6a29cb713d4c"
+  pruneopts = "NT"
+  revision = "fff36e41c69f31f4f2522c47f808e9bef7156d3c"
 
 [[projects]]
-  digest = "1:39d60b103c12246d5c192a7ba0805079f3206e57f9f5f3af6ccafa980f6d0ebd"
+  digest = "1:2d75b12bd0e0c623236a648348406182cb0e245c7e73c61725b5710b5ae1816d"
   name = "github.com/knative/serving"
   packages = [
     "pkg/apis/autoscaling",
@@ -294,35 +327,35 @@
     "pkg/apis/serving",
     "pkg/apis/serving/v1alpha1",
   ]
-  pruneopts = "NUT"
-  revision = "5ec3b89b9ac9313dee514683229aefc3c8056577"
-  version = "v0.2.0"
+  pruneopts = "NT"
+  revision = "5cbee406446031df3105ca2ac90ce4e9369207dd"
+  version = "v0.2.3"
 
 [[projects]]
   digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed"
   name = "github.com/konsorten/go-windows-terminal-sequences"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242"
   version = "v1.0.1"
 
 [[projects]]
   branch = "master"
-  digest = "1:84a5a2b67486d5d67060ac393aa255d05d24ed5ee41daecd5635ec22657b6492"
+  digest = "1:7d9fcac7f1228470c4ea0ee31cdfb662a758c44df691e39b3e76c11d3e12ba8f"
   name = "github.com/mailru/easyjson"
   packages = [
     "buffer",
     "jlexer",
     "jwriter",
   ]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "60711f1a8329503b04e1c88535f419d0bb440bff"
 
 [[projects]]
   digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde"
   name = "github.com/markbates/inflect"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "24b83195037b3bc61fcda2d28b7b0518bce293b6"
   version = "v1.0.4"
 
@@ -331,14 +364,14 @@
   digest = "1:0e9bfc47ab9941ecc3344e580baca5deb4091177e84dd9773b48b38ec26b93d5"
   name = "github.com/mattbaird/jsonpatch"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "81af80346b1a01caae0cbc27fd3c1ba5b11e189f"
 
 [[projects]]
-  digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
+  digest = "1:ea1db000388d88b31db7531c83016bef0d6db0d908a07794bfc36aca16fbf935"
   name = "github.com/matttproud/golang_protobuf_extensions"
   packages = ["pbutil"]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
   version = "v1.0.1"
 
@@ -346,7 +379,7 @@
   digest = "1:a45ae66dea4c899d79fceb116accfa1892105c251f0dcd9a217ddc276b42ec68"
   name = "github.com/mitchellh/mapstructure"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
   version = "v1.1.2"
 
@@ -354,7 +387,7 @@
   digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
   name = "github.com/modern-go/concurrent"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
   version = "1.0.3"
 
@@ -362,12 +395,12 @@
   digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6"
   name = "github.com/modern-go/reflect2"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
   version = "1.0.1"
 
 [[projects]]
-  digest = "1:acc97a63f734a3a5898c3c6478f21085e681fb5d610fc20beae36cb8f12fa512"
+  digest = "1:8bc8f43d2332d035857c92581df240f247c287629d5f60c751df609f9b6215c5"
   name = "github.com/openshift/api"
   packages = [
     "apps/v1",
@@ -380,38 +413,44 @@
     "route/v1",
     "template/v1",
   ]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "0d921e363e951d89f583292c60d013c318df64dc"
   version = "v3.9.0"
 
 [[projects]]
-  branch = "v0.0.7-custom"
-  digest = "1:8f0bbe43f11ba62deaae454383b93f9b8b58a1e4b68f9b39ee93f4a9633ec52a"
+  digest = "1:25c007d9329a7240b7011d3f733dbfb5ec797af09c21a89a159439b0497e8e21"
   name = "github.com/operator-framework/operator-sdk"
   packages = [
-    "pkg/k8sclient",
-    "pkg/sdk",
-    "pkg/sdk/internal/metrics",
-    "pkg/util/k8sutil",
+    "pkg/k8sutil",
+    "pkg/leader",
+    "pkg/ready",
     "version",
   ]
-  pruneopts = "NUT"
-  revision = "450f0742059bb08f2f0c041154435e340a7829a2"
-  source = "https://github.com/nicolaferraro/operator-sdk.git"
+  pruneopts = "NT"
+  revision = "15244d60ef7e328531a4f150a05b1fdabf9cc1ae"
+  version = "v0.3.0"
+
+[[projects]]
+  digest = "1:93b1d84c5fa6d1ea52f4114c37714cddd84d5b78f151b62bb101128dd51399bf"
+  name = "github.com/pborman/uuid"
+  packages = ["."]
+  pruneopts = "NT"
+  revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1"
+  version = "v1.2"
 
 [[projects]]
   branch = "master"
-  digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2"
+  digest = "1:bf2ac97824a7221eb16b096aecc1c390d4c8a4e49524386aaa2e2dd215cbfb31"
   name = "github.com/petar/GoLLRB"
   packages = ["llrb"]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
 
 [[projects]]
-  digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6"
+  digest = "1:e4e9e026b8e4c5630205cd0208efb491b40ad40552e57f7a646bb8a46896077b"
   name = "github.com/peterbourgon/diskv"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
   version = "v2.0.1"
 
@@ -419,7 +458,7 @@
   digest = "1:5cf3f025cbee5951a4ee961de067c8a89fc95a5adabead774f82822efabab121"
   name = "github.com/pkg/errors"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
   version = "v0.8.0"
 
@@ -427,45 +466,45 @@
   digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
   name = "github.com/pmezard/go-difflib"
   packages = ["difflib"]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "792786c7400a136282c1664665ae0a8db921c6c2"
   version = "v1.0.0"
 
 [[projects]]
-  digest = "1:aa2da1df3327c3a338bb42f978407c07de74cd0a5bef35e9411881dffd444214"
+  digest = "1:ec2a29e3bd141038ae5c3d3a4f57db0c341fcc1d98055a607aedd683aed124ee"
   name = "github.com/prometheus/client_golang"
   packages = [
     "prometheus",
     "prometheus/internal",
     "prometheus/promhttp",
   ]
-  pruneopts = "NUT"
-  revision = "1cafe34db7fdec6022e17e00e1c1ea501022f3e4"
-  version = "v0.9.0"
+  pruneopts = "NT"
+  revision = "505eaef017263e299324067d40ca2c48f6a2cf50"
+  version = "v0.9.2"
 
 [[projects]]
   branch = "master"
-  digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
+  digest = "1:c2cc5049e927e2749c0d5163c9f8d924880d83e84befa732b9aad0b6be227bed"
   name = "github.com/prometheus/client_model"
   packages = ["go"]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
 
 [[projects]]
   branch = "master"
-  digest = "1:06375f3b602de9c99fa99b8484f0e949fd5273e6e9c6592b5a0dd4cd9085f3ea"
+  digest = "1:aff0fac3bf0847ca241ebba899b74f614dee3c74d376f2be6ade2b1b22dd8e7c"
   name = "github.com/prometheus/common"
   packages = [
     "expfmt",
     "internal/bitbucket.org/ww/goautoneg",
     "model",
   ]
-  pruneopts = "NUT"
-  revision = "7e9e6cabbd393fc208072eedef99188d0ce788b6"
+  pruneopts = "NT"
+  revision = "67670fe90761d7ff18ec1d640135e53b9198328f"
 
 [[projects]]
   branch = "master"
-  digest = "1:102dea0c03a915acfc634b7c67f2662012b5483b56d9025e33f5188e112759b6"
+  digest = "1:2e3c31c847e848782a4925eb7b01fab0b8b4624a4249939405adc1f7b831f9da"
   name = "github.com/prometheus/procfs"
   packages = [
     ".",
@@ -473,46 +512,58 @@
     "nfs",
     "xfs",
   ]
-  pruneopts = "NUT"
-  revision = "185b4288413d2a0dd0806f78c90dde719829e5ae"
+  pruneopts = "NT"
+  revision = "14fa7590c24d4615893b68e22fce3b3489689f65"
 
 [[projects]]
-  digest = "1:669828a2363f1ecad15fff9f008dd1d07d449fb25c9060998b15f83fec896458"
+  digest = "1:32e563d4e6ed18cd2327875400f2b3aa46928ac5942c429fec8274830bb0ff56"
   name = "github.com/radovskyb/watcher"
   packages = ["."]
-  pruneopts = "NUT"
-  revision = "6145e1439b9de93806925353403f91d2abbad8a5"
-  version = "v1.0.2"
+  pruneopts = "NT"
+  revision = "3818ec23ec59ea15084fe26bfb114b3bb58aa132"
+  version = "1.0.5"
+
+[[projects]]
+  digest = "1:4e63570205b765959739e2ef37add1d229cab7dbf70d80341a0608816120493b"
+  name = "github.com/rogpeppe/go-internal"
+  packages = [
+    "modfile",
+    "module",
+    "semver",
+  ]
+  pruneopts = "NT"
+  revision = "d87f08a7d80821c797ffc8eb8f4e01675f378736"
+  version = "v1.0.0"
 
 [[projects]]
   digest = "1:0975c74a2cd70df6c2ae353c6283a25ce759dda7e1e706e5c07458baf3faca22"
   name = "github.com/rs/xid"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "15d26544def341f036c5f8dca987a4cbe575032c"
   version = "v1.2.1"
 
 [[projects]]
-  digest = "1:6f3ce746342be7b14a2d1ca33a4a11fd6cb0300e5d34c766f01e19e936fb10af"
+  digest = "1:57abe5c41d4509b53bd76107b65a7ac76763f3d5b68cad9765e3776712388488"
   name = "github.com/scylladb/go-set"
   packages = ["strset"]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "e560bb8f49bb7f34d4f59b7e771f6e1307c329da"
   version = "v1.0.2"
 
 [[projects]]
-  digest = "1:ecf78eacf406c42f07f66d6b79fda24d2b92dc711bfd0760d0c931678f9621fe"
+  digest = "1:cd2f2cba5b7ffafd0412fb647ff4bcff170292de57270f05fbbf391e3eb9566b"
   name = "github.com/sirupsen/logrus"
   packages = ["."]
-  pruneopts = "NUT"
-  revision = "ad15b42461921f1fb3529b058c6786c6a45d5162"
-  version = "v1.1.1"
+  pruneopts = "NT"
+  revision = "bcd833dfe83d3cebad139e4a29ed79cb2318bf95"
+  version = "v1.2.0"
 
 [[projects]]
-  digest = "1:343d44e06621142ab09ae0c76c1799104cdfddd3ffb445d78b1adf8dc3ffaf3d"
+  digest = "1:234b95cdbb31612ff4f97e0ac69abdede0c60f5f84e5d3f40123859f77d8bc2c"
   name = "github.com/spf13/cobra"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
   version = "v0.0.3"
 
@@ -520,23 +571,23 @@
   digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
   name = "github.com/spf13/pflag"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
   version = "v1.0.3"
 
 [[projects]]
-  digest = "1:65c01b68bc4f813c18cdd48c98c38867dd0681b9584138c0403a0865b9bfdfb2"
+  digest = "1:232ab5b495f4faf58aea3d1c25042d6a557f4267d340b880d196864181a1d339"
   name = "github.com/stoewer/go-strcase"
   packages = ["."]
-  pruneopts = "NUT"
-  revision = "c8136b55823dc6af966d084a06056c5575f6400f"
-  version = "v1.0.1"
+  pruneopts = "NT"
+  revision = "9f4628dc69009a239427f342caccb0047c7aea01"
+  version = "v1.0.2"
 
 [[projects]]
-  digest = "1:bacb8b590716ab7c33f2277240972c9582d389593ee8d66fc10074e0508b8126"
+  digest = "1:4af061277c04a7660e082acc2020f4c66d2c21dfc62e0242ffa1d2120cdfb4ec"
   name = "github.com/stretchr/testify"
   packages = ["assert"]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
   version = "v1.2.2"
 
@@ -544,7 +595,7 @@
   digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7"
   name = "go.uber.org/atomic"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289"
   version = "v1.3.2"
 
@@ -552,12 +603,12 @@
   digest = "1:58ca93bdf81bac106ded02226b5395a0595d5346cdc4caa8d9c1f3a5f8f9976e"
   name = "go.uber.org/multierr"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
   version = "v1.1.0"
 
 [[projects]]
-  digest = "1:85674ac609b704fd4e9f463553b6ffc3a3527a993ae0ba550eb56beaabdfe094"
+  digest = "1:572fa4496563920f3e3107a2294cf2621d6cc4ffd03403fb6397b1bab9fa082a"
   name = "go.uber.org/zap"
   packages = [
     ".",
@@ -567,21 +618,21 @@
     "internal/exit",
     "zapcore",
   ]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982"
   version = "v1.9.1"
 
 [[projects]]
   branch = "master"
-  digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8"
+  digest = "1:d6d3b59b8c4ceb6a7db2f20169719e57a8dcfa2c055b4418feb3fcc7bbd1a936"
   name = "golang.org/x/crypto"
   packages = ["ssh/terminal"]
-  pruneopts = "NUT"
-  revision = "e84da0312774c21d64ee2317962ef669b27ffb41"
+  pruneopts = "NT"
+  revision = "505ab145d0a99da450461ae2c1a9f6cd10d1f447"
 
 [[projects]]
   branch = "master"
-  digest = "1:2fa379f32ae3b5110f7f4393c98200804c0934d8b24087e433cda464eb5d7859"
+  digest = "1:9300b9f62f41c3dded875b33117e09e9269790eb4372f5e96621f1e375ed5307"
   name = "golang.org/x/net"
   packages = [
     "context",
@@ -591,12 +642,12 @@
     "http2/hpack",
     "idna",
   ]
-  pruneopts = "NUT"
-  revision = "9b4f9f5ad5197c79fd623a3638e70d8b26cef344"
+  pruneopts = "NT"
+  revision = "927f97764cc334a6575f4b7a1584a147864d5723"
 
 [[projects]]
   branch = "master"
-  digest = "1:b0fef33b00740f7eeb5198f67ee1642d8d2560e9b428df7fb5f69fb140f5c4d0"
+  digest = "1:bdb664c89389d18d2aa69fb3b61fe5e2effc09e55b333a56e3cb071026418e33"
   name = "golang.org/x/oauth2"
   packages = [
     ".",
@@ -605,22 +656,22 @@
     "jws",
     "jwt",
   ]
-  pruneopts = "NUT"
-  revision = "9dcd33a902f40452422c2367fefcb95b54f9f8f8"
+  pruneopts = "NT"
+  revision = "d668ce993890a79bda886613ee587a69dd5da7a6"
 
 [[projects]]
   branch = "master"
-  digest = "1:f9eb1fd707210fde0db4076dd3bfdb401d3770f929b8dbd49171ef3e9cece2a6"
+  digest = "1:1ca795ad9ef9bc9f0c93757893a59c80904588b453873d35cb393f667570a71a"
   name = "golang.org/x/sys"
   packages = [
     "unix",
     "windows",
   ]
-  pruneopts = "NUT"
-  revision = "731415f00dce967a133e841b3079eda31c996761"
+  pruneopts = "NT"
+  revision = "82a175fd1598e8a172e58ebdf5ed262bb29129e5"
 
 [[projects]]
-  digest = "1:e33513a825fcd765e97b5de639a2f7547542d1a8245df0cef18e1fd390b778a9"
+  digest = "1:8c74f97396ed63cc2ef04ebb5fc37bb032871b8fd890a25991ed40974b00cd2a"
   name = "golang.org/x/text"
   packages = [
     "collate",
@@ -639,33 +690,40 @@
     "unicode/rangetable",
     "width",
   ]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
   version = "v0.3.0"
 
 [[projects]]
   branch = "master"
-  digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4"
+  digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90"
   name = "golang.org/x/time"
   packages = ["rate"]
-  pruneopts = "NUT"
-  revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
+  pruneopts = "NT"
+  revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd"
 
 [[projects]]
   branch = "master"
-  digest = "1:3a04778e417b28bba5d30f0af919206b4869f57a1d5e152c4c2f29bf18889dce"
+  digest = "1:28a8b8275930d678cd5a2d4084f7ae8427a1e40c48085b6ae1340a4c00f79103"
   name = "golang.org/x/tools"
   packages = [
     "go/ast/astutil",
+    "go/gcexportdata",
+    "go/internal/cgo",
+    "go/internal/gcimporter",
+    "go/internal/packagesdriver",
+    "go/packages",
+    "go/types/typeutil",
     "imports",
     "internal/fastwalk",
     "internal/gopathwalk",
+    "internal/semver",
   ]
-  pruneopts = "NUT"
-  revision = "a2dc47679d30b6c496245bafc6a166b46c5fe318"
+  pruneopts = "NT"
+  revision = "d00ac6d27372a4273825635281f2dc360d4be563"
 
 [[projects]]
-  digest = "1:e2da54c7866453ac5831c61c7ec5d887f39328cac088c806553303bff4048e6f"
+  digest = "1:902ffa11f1d8c19c12b05cabffe69e1a16608ad03a8899ebcb9c6bde295660ae"
   name = "google.golang.org/appengine"
   packages = [
     ".",
@@ -679,28 +737,28 @@
     "internal/urlfetch",
     "urlfetch",
   ]
-  pruneopts = "NUT"
-  revision = "ae0ab99deb4dc413a2b4bd6c8bdd0eb67f1e4d06"
-  version = "v1.2.0"
+  pruneopts = "NT"
+  revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1"
+  version = "v1.4.0"
 
 [[projects]]
   digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
   name = "gopkg.in/inf.v0"
   packages = ["."]
-  pruneopts = "NUT"
+  pruneopts = "NT"
   revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
   version = "v0.9.1"
 
 [[projects]]
-  digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082"
+  digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f"
   name = "gopkg.in/yaml.v2"
   packages = ["."]
-  pruneopts = "NUT"
-  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
-  version = "v2.2.1"
+  pruneopts = "NT"
+  revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
+  version = "v2.2.2"
 
 [[projects]]
-  digest = "1:ba83efacee6fc544aaebcc643ec8ad8d345bfa7f6cd5afb1492fb19cd994cc4a"
+  digest = "1:b3f8152a68d73095a40fdcf329a93fc42e8eadb3305171df23fdb6b4e41a6417"
   name = "k8s.io/api"
   packages = [
     "admission/v1beta1",
@@ -715,10 +773,12 @@
     "authorization/v1beta1",
     "autoscaling/v1",
     "autoscaling/v2beta1",
+    "autoscaling/v2beta2",
     "batch/v1",
     "batch/v1beta1",
     "batch/v2alpha1",
     "certificates/v1beta1",
+    "coordination/v1beta1",
     "core/v1",
     "events/v1beta1",
     "extensions/v1beta1",
@@ -734,12 +794,11 @@
     "storage/v1alpha1",
     "storage/v1beta1",
   ]
-  pruneopts = "NUT"
-  revision = "2d6f90ab1293a1fb871cf149423ebb72aa7423aa"
-  version = "kubernetes-1.11.2"
+  pruneopts = "NT"
+  revision = "b503174bad5991eb66f18247f52e41c3258f6348"
 
 [[projects]]
-  digest = "1:a26c8e664af7122535f2b13fd4977a0d25894a545e7ce679502e49372812f6e6"
+  digest = "1:868de7cbaa0ecde6dc231c1529a10ae01bb05916095c0c992186e2a5cac57e79"
   name = "k8s.io/apimachinery"
   packages = [
     "pkg/api/equality",
@@ -771,27 +830,30 @@
     "pkg/util/framer",
     "pkg/util/intstr",
     "pkg/util/json",
+    "pkg/util/mergepatch",
+    "pkg/util/naming",
     "pkg/util/net",
     "pkg/util/runtime",
     "pkg/util/sets",
+    "pkg/util/strategicpatch",
+    "pkg/util/uuid",
     "pkg/util/validation",
     "pkg/util/validation/field",
     "pkg/util/wait",
     "pkg/util/yaml",
     "pkg/version",
     "pkg/watch",
+    "third_party/forked/golang/json",
     "third_party/forked/golang/reflect",
   ]
-  pruneopts = "NUT"
-  revision = "103fd098999dc9c0c88536f5c9ad2e5da39373ae"
-  version = "kubernetes-1.11.2"
+  pruneopts = "NT"
+  revision = "eddba98df674a16931d2d4ba75edc3a389bf633a"
 
 [[projects]]
-  digest = "1:6cca3c9f626aeb165dad88de9db9b71585a92514134f466268dd83fd449df9a7"
+  digest = "1:00089f60de414edb1a51e63efde2480ce87c95d2cb3536ea240afe483905d736"
   name = "k8s.io/client-go"
   packages = [
     "discovery",
-    "discovery/cached",
     "dynamic",
     "kubernetes",
     "kubernetes/scheme",
@@ -806,10 +868,12 @@
     "kubernetes/typed/authorization/v1beta1",
     "kubernetes/typed/autoscaling/v1",
     "kubernetes/typed/autoscaling/v2beta1",
+    "kubernetes/typed/autoscaling/v2beta2",
     "kubernetes/typed/batch/v1",
     "kubernetes/typed/batch/v1beta1",
     "kubernetes/typed/batch/v2alpha1",
     "kubernetes/typed/certificates/v1beta1",
+    "kubernetes/typed/coordination/v1beta1",
     "kubernetes/typed/core/v1",
     "kubernetes/typed/events/v1beta1",
     "kubernetes/typed/extensions/v1beta1",
@@ -840,8 +904,11 @@
     "tools/clientcmd/api",
     "tools/clientcmd/api/latest",
     "tools/clientcmd/api/v1",
+    "tools/leaderelection",
+    "tools/leaderelection/resourcelock",
     "tools/metrics",
     "tools/pager",
+    "tools/record",
     "tools/reference",
     "transport",
     "util/buffer",
@@ -854,12 +921,11 @@
     "util/retry",
     "util/workqueue",
   ]
-  pruneopts = "NUT"
-  revision = "1f13a808da65775f22cbf47862c4e5898d8f4ca1"
-  version = "kubernetes-1.11.2"
+  pruneopts = "NT"
+  revision = "d082d5923d3cc0bfbb066ee5fbdea3d0ca79acf8"
 
 [[projects]]
-  digest = "1:8ab487a323486c8bbbaa3b689850487fdccc6cbea8690620e083b2d230a4447e"
+  digest = "1:4e2addcdbe0330f43800c1fcb905fc7a21b86415dfcca619e5c606c87257af1b"
   name = "k8s.io/code-generator"
   packages = [
     "cmd/client-gen",
@@ -888,12 +954,11 @@
     "pkg/util",
   ]
   pruneopts = "T"
-  revision = "6702109cc68eb6fe6350b83e14407c8d7309fd1a"
-  version = "kubernetes-1.11.2"
+  revision = "3dcf91f64f638563e5106f21f50c31fa361c918d"
 
 [[projects]]
   branch = "master"
-  digest = "1:5249c83f0fb9e277b2d28c19eca814feac7ef05dc762e4deaf0a2e4b1a7c5df3"
+  digest = "1:5edbd655d7ee65178fd5750bda9a3d3cd7fb96291937926f4969e6b2dfbc5743"
   name = "k8s.io/gengo"
   packages = [
     "args",
@@ -905,22 +970,68 @@
     "parser",
     "types",
   ]
-  pruneopts = "NUT"
-  revision = "7338e4bfd6915369a1375890db1bbda0158c9863"
+  pruneopts = "NT"
+  revision = "fd15ee9cc2f77baa4f31e59e6acbf21146455073"
+
+[[projects]]
+  digest = "1:f3b42f307c7f49a1a7276c48d4b910db76e003220e88797f7acd41e3a9277ddf"
+  name = "k8s.io/klog"
+  packages = ["."]
+  pruneopts = "NT"
+  revision = "a5bc97fbc634d635061f3146511332c7e313a55a"
+  version = "v0.1.0"
 
 [[projects]]
   branch = "master"
-  digest = "1:e8451187fe9d2b9bf86a44495959c391e831355fb835a63e117ff49b69bc70f9"
+  digest = "1:9ac2fdede4a8304e3b00ea3b36526536339f306d0306e320fc74f6cefeead18e"
   name = "k8s.io/kube-openapi"
   packages = [
     "cmd/openapi-gen/args",
     "pkg/common",
     "pkg/generators",
     "pkg/generators/rules",
+    "pkg/util/proto",
     "pkg/util/sets",
   ]
-  pruneopts = "NUT"
-  revision = "3a9b63ab1e397dc12a9764df998f99bc59dfd9ae"
+  pruneopts = "NT"
+  revision = "0317810137be915b9cf888946c6e115c1bfac693"
+
+[[projects]]
+  digest = "1:e03ddaf9f31bccbbb8c33eabad2c85025a95ca98905649fd744e0a54c630a064"
+  name = "sigs.k8s.io/controller-runtime"
+  packages = [
+    "pkg/cache",
+    "pkg/cache/internal",
+    "pkg/client",
+    "pkg/client/apiutil",
+    "pkg/client/config",
+    "pkg/controller",
+    "pkg/controller/controllerutil",
+    "pkg/event",
+    "pkg/handler",
+    "pkg/internal/controller",
+    "pkg/internal/controller/metrics",
+    "pkg/internal/recorder",
+    "pkg/leaderelection",
+    "pkg/manager",
+    "pkg/metrics",
+    "pkg/patch",
+    "pkg/predicate",
+    "pkg/reconcile",
+    "pkg/recorder",
+    "pkg/runtime/inject",
+    "pkg/runtime/log",
+    "pkg/runtime/scheme",
+    "pkg/runtime/signals",
+    "pkg/source",
+    "pkg/source/internal",
+    "pkg/webhook/admission",
+    "pkg/webhook/admission/types",
+    "pkg/webhook/types",
+  ]
+  pruneopts = "NT"
+  revision = "c63ebda0bf4be5f0a8abd4003e4ea546032545ba"
+  version = "v0.1.8"
 
 [solve-meta]
   analyzer-name = "dep"
@@ -936,9 +1047,9 @@
     "github.com/openshift/api/image/v1",
     "github.com/openshift/api/route/v1",
     "github.com/openshift/api/template/v1",
-    "github.com/operator-framework/operator-sdk/pkg/k8sclient",
-    "github.com/operator-framework/operator-sdk/pkg/sdk",
-    "github.com/operator-framework/operator-sdk/pkg/util/k8sutil",
+    "github.com/operator-framework/operator-sdk/pkg/k8sutil",
+    "github.com/operator-framework/operator-sdk/pkg/leader",
+    "github.com/operator-framework/operator-sdk/pkg/ready",
     "github.com/operator-framework/operator-sdk/version",
     "github.com/pkg/errors",
     "github.com/radovskyb/watcher",
@@ -960,6 +1071,7 @@
     "k8s.io/apimachinery/pkg/runtime/schema",
     "k8s.io/apimachinery/pkg/runtime/serializer/json",
     "k8s.io/apimachinery/pkg/runtime/serializer/versioning",
+    "k8s.io/apimachinery/pkg/types",
     "k8s.io/apimachinery/pkg/util/intstr",
     "k8s.io/apimachinery/pkg/util/yaml",
     "k8s.io/apimachinery/pkg/watch",
@@ -976,6 +1088,17 @@
     "k8s.io/code-generator/cmd/lister-gen",
     "k8s.io/code-generator/cmd/openapi-gen",
     "k8s.io/gengo/args",
+    "sigs.k8s.io/controller-runtime/pkg/client",
+    "sigs.k8s.io/controller-runtime/pkg/client/config",
+    "sigs.k8s.io/controller-runtime/pkg/controller",
+    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil",
+    "sigs.k8s.io/controller-runtime/pkg/handler",
+    "sigs.k8s.io/controller-runtime/pkg/manager",
+    "sigs.k8s.io/controller-runtime/pkg/reconcile",
+    "sigs.k8s.io/controller-runtime/pkg/runtime/log",
+    "sigs.k8s.io/controller-runtime/pkg/runtime/scheme",
+    "sigs.k8s.io/controller-runtime/pkg/runtime/signals",
+    "sigs.k8s.io/controller-runtime/pkg/source",
   ]
   solver-name = "gps-cdcl"
   solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
index c6275137..7fad7ee8 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -1,6 +1,4 @@
-
 # Force dep to vendor the code generators, which aren't imported just used at dev time.
-# Picking a subpackage with Go code won't be necessary once https://github.com/golang/dep/pull/1545 is merged.
 required = [
   "k8s.io/code-generator/cmd/defaulter-gen",
   "k8s.io/code-generator/cmd/deepcopy-gen",
@@ -14,43 +12,43 @@ required = [
 
 [[override]]
   name = "k8s.io/code-generator"
-  version = "kubernetes-1.11.2"
+  # revision for tag "kubernetes-1.12.3"
+  revision = "3dcf91f64f638563e5106f21f50c31fa361c918d"
 
 [[override]]
   name = "k8s.io/api"
-  version = "kubernetes-1.11.2"
+  # revision for tag "kubernetes-1.12.3"
+  revision = "b503174bad5991eb66f18247f52e41c3258f6348"
 
 [[override]]
   name = "k8s.io/apiextensions-apiserver"
-  version = "kubernetes-1.11.2"
+  # revision for tag "kubernetes-1.12.3"
+  revision = "0cd23ebeb6882bd1cdc2cb15fc7b2d72e8a86a5b"
 
 [[override]]
   name = "k8s.io/apimachinery"
-  version = "kubernetes-1.11.2"
+  # revision for tag "kubernetes-1.12.3"
+  revision = "eddba98df674a16931d2d4ba75edc3a389bf633a"
 
 [[override]]
   name = "k8s.io/client-go"
-  version = "kubernetes-1.11.2"
+  # revision for tag "kubernetes-1.12.3"
+  revision = "d082d5923d3cc0bfbb066ee5fbdea3d0ca79acf8"
 
 [[override]]
   name = "sigs.k8s.io/controller-runtime"
-  version = "v0.1.4"
+  version = "=v0.1.8"
+
+[[constraint]]
+  name = "github.com/operator-framework/operator-sdk"
+  # The version rule is used for a specific release and the master branch for in between releases.
+  # branch = "master" #osdk_branch_annotation
+  version = "=v0.3.0" #osdk_version_annotation
 
 [prune]
   go-tests = true
   non-go = true
-  unused-packages = true
 
   [[prune.project]]
     name = "k8s.io/code-generator"
     non-go = false
-    unused-packages = false
-
-[[constraint]]
-  name = "github.com/operator-framework/operator-sdk"
-  # Using fork to customize the Kubernetes rest config
-  source = "https://github.com/nicolaferraro/operator-sdk.git"
-  branch = "v0.0.7-custom"
-  ## The version rule is used for a specific release and the master branch for in between releases.
-  #branch = "master"
-  #version = "=v0.0.7"
diff --git a/Makefile b/Makefile
index 2206d42c..770e545f 120000
--- a/Makefile
+++ b/Makefile
@@ -1 +1 @@
-./build/Makefile
\ No newline at end of file
+./script/Makefile
\ No newline at end of file
diff --git a/README.adoc b/README.adoc
index ce552170..28236c70 100644
--- a/README.adoc
+++ b/README.adoc
@@ -31,7 +31,7 @@ Other cluster types (such as OpenShift clusters) should not need prior configura
 To start using Camel K you need the **"kamel"** binary, that can be used to both configure the cluster and run integrations.
 Look into the https://github.com/apache/camel-k/releases[release page] for latest version of the `kamel` tool.
 
-If you want to contribute, you can also **build it from source!** Refer to the link:/docs/developers.adoc[developer's guide]
+If you want to contribute, you can also **build it from source!** Refer to the link:/contributing.adoc[contributing guide]
 for information on how to do it.
 
 Once you have the "kamel" binary, log into your cluster using the standard "oc" (OpenShift) or "kubectl" (Kubernetes) client tool and execute the following command to install Camel K:
@@ -244,7 +244,7 @@ kamel get
 
 We love contributions and we want to make Camel K great!
 
-Contributing is easy, just take a look at our link:/docs/developers.adoc[developer's guide].
+Contributing is easy, just take a look at our link:/contributing.adoc[developer's guide].
 
 [[uninstalling]]
 == Uninstalling
diff --git a/build/Dockerfile b/build/Dockerfile
new file mode 100644
index 00000000..39c3b26f
--- /dev/null
+++ b/build/Dockerfile
@@ -0,0 +1,11 @@
+FROM fabric8/s2i-java:2.3
+
+ADD build/_maven_output /tmp/artifacts/m2
+
+USER 0
+RUN chgrp -R 0 /tmp/artifacts/m2 \
+ && chmod -R g=u /tmp/artifacts/m2
+
+USER 1000
+
+ADD build/_output/bin/camel-k /usr/local/bin/camel-k
diff --git a/build/Makefile b/build/Makefile
deleted file mode 100644
index 0cdf73c3..00000000
--- a/build/Makefile
+++ /dev/null
@@ -1,81 +0,0 @@
-build: build-runtime build-operator build-kamel build-compile-integration-tests test
-
-build-go: build-embed-resources build-operator build-kamel
-
-build-operator: build-embed-resources
-	go build -o camel-k ./cmd/camel-k/*.go
-
-build-kamel:
-	go build -o kamel ./cmd/kamel/*.go
-
-build-embed-resources:
-	./build/embed_resources.sh deploy
-
-build-compile-integration-tests:
-	go test -c -tags=integration ./test/*.go
-
-build-runtime:
-	./mvnw clean install -f ./runtime/pom.xml
-
-release: clean prepare-release build images-build images-push cross-compile package-examples git-tag
-
-prepare-release:
-	./build/prepare_release.sh
-
-new-version: increment-snapshot build images-build images-push
-
-increment-snapshot:
-	./build/next_snapshot.sh
-
-cross-compile:
-	./build/cross_compile.sh
-
-package-examples:
-	./build/package_examples.sh
-
-git-tag:
-	./build/git_tag.sh
-
-dep:
-	dep ensure -v
-
-generate:
-	operator-sdk generate k8s
-
-clean:
-	./mvnw clean -f ./runtime/pom.xml
-	go clean
-	rm -f camel-k
-	rm -f kamel
-	rm -rf tmp/_maven_output
-
-codegen:
-	./tmp/codegen/update-generated.sh
-
-images: images-build
-
-images-build:
-	./build/images_build.sh
-
-images-push:
-	./build/images_push.sh
-
-install: install-minishift
-install-minishift:
-	./build/install_minishift.sh
-
-install-minikube:
-	./build/install_minikube.sh
-
-test: check
-check:
-	go test ./...
-
-test-integration: check-integration
-check-integration:
-	go test ./... -tags=integration
-
-lint:
-	golangci-lint run
-
-.PHONY: build build-operator build-kamel build-embed-resources build-runtime dep codegen images images-build images-push test check test-integration check-integration clean release prepare-release cross-compile package-examples new-version git-tag increment-snapshot install-minishift
diff --git a/build/embed_resources.sh b/build/embed_resources.sh
deleted file mode 100755
index c48e2228..00000000
--- a/build/embed_resources.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/sh
-
-if [ $# -ne 1 ]; then
-    echo "Error invoking embed_resources.sh: directory argument required"
-    exit 1
-fi
-
-location=$(dirname $0)
-destdir=$location/../$1
-destfile=$location/../$1/resources.go
-
-cat > $destfile << EOM
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by build/embed_resources.sh. DO NOT EDIT.
-
-package deploy
-
-var Resources map[string]string
-
-func init() {
-	Resources = make(map[string]string)
-
-EOM
-
-for f in $(ls $destdir | grep ".yaml" | grep -v -e "^operator.yaml$"); do
-	printf "\tResources[\"$f\"] =\n\t\t\`\n" >> $destfile
-	cat $destdir/$f >> $destfile
-	printf "\n\`\n" >> $destfile
-done
-
-printf "\n}\n" >> $destfile
\ No newline at end of file
diff --git a/build/maven/settings.xml b/build/maven/settings.xml
new file mode 100644
index 00000000..6c6c5c7d
--- /dev/null
+++ b/build/maven/settings.xml
@@ -0,0 +1,6 @@
+<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+          xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
+                          https://maven.apache.org/xsd/settings-1.0.0.xsd">
+    <localRepository>build/_maven_output</localRepository>
+</settings>
\ No newline at end of file
diff --git a/build/package_maven_artifacts.sh b/build/package_maven_artifacts.sh
deleted file mode 100755
index a7502c08..00000000
--- a/build/package_maven_artifacts.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh
-
-location=$(dirname $0)
-cd $location/../
-./mvnw clean install -DskipTests -f runtime/pom.xml -s tmp/maven/settings.xml
diff --git a/build/travis_build.sh b/build/travis_build.sh
deleted file mode 100755
index e7897b4e..00000000
--- a/build/travis_build.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/sh
-
-set -e
-
-# Find the JAVA_HOME and set the KOTLIN_JDK_HOME
-echo "Java home: $JAVA_HOME"
-export KOTLIN_JDK_HOME=$JAVA_HOME
-
-# First build the whole project
-make
-
-# set docker0 to promiscuous mode
-sudo ip link set docker0 promisc on
-
-# Download and install the oc binary
-sudo mount --make-shared /
-sudo service docker stop
-sudo sed -i 's/DOCKER_OPTS=\"/DOCKER_OPTS=\"--insecure-registry 172.30.0.0\/16 /' /etc/default/docker
-sudo service docker start
-wget https://github.com/openshift/origin/releases/download/v$OPENSHIFT_VERSION/openshift-origin-client-tools-v$OPENSHIFT_VERSION-$OPENSHIFT_COMMIT-linux-64bit.tar.gz
-tar xvzOf openshift-origin-client-tools-v$OPENSHIFT_VERSION-$OPENSHIFT_COMMIT-linux-64bit.tar.gz > oc.bin
-sudo mv oc.bin /usr/local/bin/oc
-sudo chmod 755 /usr/local/bin/oc
-
-# Figure out this host's IP address
-IP_ADDR="$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)"
-
-# Start OpenShift
-oc cluster up --public-hostname=$IP_ADDR
-
-oc login -u system:admin
-
-# Wait until we have a ready node in openshift
-TIMEOUT=0
-TIMEOUT_COUNT=60
-until [ $TIMEOUT -eq $TIMEOUT_COUNT ]; do
-  if [ -n "$(oc get nodes | grep Ready)" ]; then
-    break
-  fi
-
-  echo "openshift is not up yet"
-  let TIMEOUT=TIMEOUT+1
-  sleep 5
-done
-
-if [ $TIMEOUT -eq $TIMEOUT_COUNT ]; then
-  echo "Failed to start openshift"
-  exit 1
-fi
-
-echo "openshift is deployed and reachable"
-oc describe nodes
-
-echo "Adding maven artifacts to the image context"
-./build/package_maven_artifacts.sh
-
-echo "Copying binary file to docker dir"
-mkdir -p ./tmp/_output/bin
-cp ./camel-k ./tmp/_output/bin/
-
-echo "Building the images"
-export IMAGE=docker.io/apache/camel-k:$(./build/get_version.sh)
-./tmp/build/docker_build.sh
-
-echo "installing camel k cluster resources"
-./kamel install --cluster-setup
-
-oc login -u developer
-
-
-# Then run integration tests
-make test-integration
diff --git a/cmd/camel-k/main.go b/cmd/camel-k/main.go
deleted file mode 100644
index 67da2fe2..00000000
--- a/cmd/camel-k/main.go
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-import (
-	"context"
-	"math/rand"
-	"runtime"
-	"time"
-
-	"github.com/apache/camel-k/pkg/stub"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/operator-framework/operator-sdk/pkg/util/k8sutil"
-	sdkVersion "github.com/operator-framework/operator-sdk/version"
-
-	_ "github.com/apache/camel-k/pkg/apis/camel/v1alpha1/knative"
-	_ "github.com/apache/camel-k/pkg/util/openshift"
-
-	"github.com/sirupsen/logrus"
-	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
-)
-
-const resyncPeriod = time.Duration(5) * time.Second
-
-func printVersion() {
-	logrus.Infof("Go Version: %s", runtime.Version())
-	logrus.Infof("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)
-	logrus.Infof("operator-sdk Version: %v", sdkVersion.Version)
-}
-
-func watch(resource string, kind string, namespace string, resyncPeriod time.Duration) {
-	logrus.Infof("Watching %s, %s, %s, %d", resource, kind, namespace, resyncPeriod)
-	sdk.Watch(resource, kind, namespace, resyncPeriod)
-}
-
-func main() {
-	rand.Seed(time.Now().UTC().UnixNano())
-
-	printVersion()
-
-	sdk.ExposeMetricsPort()
-
-	resource := "camel.apache.org/v1alpha1"
-	namespace, err := k8sutil.GetWatchNamespace()
-	if err != nil {
-		logrus.Fatalf("failed to get watch namespace: %v", err)
-	}
-
-	ctx := context.TODO()
-
-	watch(resource, "Integration", namespace, resyncPeriod)
-	watch(resource, "IntegrationContext", namespace, resyncPeriod)
-	watch(resource, "IntegrationPlatform", namespace, resyncPeriod)
-
-	sdk.Handle(stub.NewHandler(ctx, namespace))
-	sdk.Run(ctx)
-}
diff --git a/cmd/kamel/main.go b/cmd/kamel/main.go
index 18822eab..72ac1c29 100644
--- a/cmd/kamel/main.go
+++ b/cmd/kamel/main.go
@@ -24,10 +24,8 @@ import (
 	"os"
 	"time"
 
-	"github.com/apache/camel-k/pkg/client/cmd"
+	"github.com/apache/camel-k/pkg/cmd"
 
-	_ "github.com/apache/camel-k/pkg/apis/camel/v1alpha1/knative"
-	_ "github.com/apache/camel-k/pkg/util/openshift"
 	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
 )
 
diff --git a/cmd/manager/main.go b/cmd/manager/main.go
new file mode 100644
index 00000000..19aed31f
--- /dev/null
+++ b/cmd/manager/main.go
@@ -0,0 +1,116 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"math/rand"
+	"os"
+	"runtime"
+	"time"
+
+	"github.com/apache/camel-k/pkg/apis"
+	"github.com/apache/camel-k/pkg/controller"
+	"github.com/operator-framework/operator-sdk/pkg/k8sutil"
+	"github.com/operator-framework/operator-sdk/pkg/leader"
+	"github.com/operator-framework/operator-sdk/pkg/ready"
+	sdkVersion "github.com/operator-framework/operator-sdk/version"
+	"sigs.k8s.io/controller-runtime/pkg/client/config"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+	"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
+
+	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+)
+
+var log = logf.Log.WithName("cmd")
+
+func printVersion() {
+	log.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
+	log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
+	log.Info(fmt.Sprintf("operator-sdk Version: %v", sdkVersion.Version))
+}
+
+func main() {
+	rand.Seed(time.Now().UTC().UnixNano())
+
+	flag.Parse()
+
+	// The logger instantiated here can be changed to any logger
+	// implementing the logr.Logger interface. This logger will
+	// be propagated through the whole operator, generating
+	// uniform and structured logs.
+	logf.SetLogger(logf.ZapLogger(false))
+
+	printVersion()
+
+	namespace, err := k8sutil.GetWatchNamespace()
+	if err != nil {
+		log.Error(err, "failed to get watch namespace")
+		os.Exit(1)
+	}
+
+	// Get a config to talk to the apiserver
+	cfg, err := config.GetConfig()
+	if err != nil {
+		log.Error(err, "")
+		os.Exit(1)
+	}
+
+	// Become the leader before proceeding
+	leader.Become(context.TODO(), "camel-k-lock") // nolint: errcheck
+
+	r := ready.NewFileReady()
+	err = r.Set()
+	if err != nil {
+		log.Error(err, "")
+		os.Exit(1)
+	}
+	defer r.Unset() // nolint: errcheck
+
+	// Create a new Cmd to provide shared dependencies and start components
+	mgr, err := manager.New(cfg, manager.Options{Namespace: namespace})
+	if err != nil {
+		log.Error(err, "")
+		os.Exit(1)
+	}
+
+	log.Info("Registering Components.")
+
+	// Setup Scheme for all resources
+	if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
+		log.Error(err, "")
+		os.Exit(1)
+	}
+
+	// Setup all Controllers
+	if err := controller.AddToManager(mgr); err != nil {
+		log.Error(err, "")
+		os.Exit(1)
+	}
+
+	log.Info("Starting the Cmd.")
+
+	// Start the Cmd
+	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
+		log.Error(err, "manager exited non-zero")
+		os.Exit(1)
+	}
+}
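
The main function above no longer calls `sdk.Watch`/`sdk.Handle`: instead, `controller.AddToManager(mgr)` registers every controller with the controller-runtime manager. A hedged, simplified sketch of what one such registration looks like in this style (the real controllers live under `pkg/controller` and carry more logic):

```
package integration

import (
	camelv1alpha1 "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// Add creates a new Integration controller and registers it with the manager.
func Add(mgr manager.Manager) error {
	c, err := controller.New("integration-controller", mgr, controller.Options{
		Reconciler: &reconcileIntegration{client: mgr.GetClient()},
	})
	if err != nil {
		return err
	}
	// Reconcile whenever an Integration custom resource changes.
	return c.Watch(&source.Kind{Type: &camelv1alpha1.Integration{}}, &handler.EnqueueRequestForObject{})
}

type reconcileIntegration struct {
	client client.Client
}

// Reconcile drives an Integration towards its desired state; a real
// implementation would fetch the instance with r.client.Get and act on it.
func (r *reconcileIntegration) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	return reconcile.Result{}, nil
}
```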
diff --git a/config/config.yaml b/config/config.yaml
deleted file mode 100644
index 45de9133..00000000
--- a/config/config.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-apiVersion: camel.apache.org/v1alpha1
-kind: Integration
-projectName: camel-k
diff --git a/contributing.adoc b/contributing.adoc
new file mode 100644
index 00000000..032ea6a5
--- /dev/null
+++ b/contributing.adoc
@@ -0,0 +1,185 @@
+[[contributing]]
+= Contributing to Camel K
+
+We love contributions!
+
+The project is written in https://golang.org/[go] and contains some parts written in Java for the link:/runtime[integration runtime].
+Camel K is built on top of Kubernetes through *Custom Resource Definitions*. The https://github.com/operator-framework/operator-sdk[Operator SDK] is used
+to manage the lifecycle of those custom resources.
+
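+As a rough, hedged illustration (names simplified here; the real definitions live under link:/pkg[/pkg]), a custom resource is ultimately a Go struct that the operator watches and reconciles:
+
+```
+// Hypothetical, simplified custom resource type; the real ones live
+// in pkg/apis/camel/v1alpha1 and carry many more fields.
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Integration describes a Camel integration to run on the cluster.
+type Integration struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   IntegrationSpec   `json:"spec,omitempty"`
+	Status IntegrationStatus `json:"status,omitempty"`
+}
+
+// IntegrationSpec holds the desired state, e.g. the source code to run.
+type IntegrationSpec struct {
+	Source string `json:"source,omitempty"`
+}
+
+// IntegrationStatus holds the state observed and updated by the operator.
+type IntegrationStatus struct {
+	Phase string `json:"phase,omitempty"`
+}
+```
+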
+[[requirements]]
+== Requirements
+
+In order to build the project, you need to comply with the following requirements:
+
+* **Go version 1.10+**: needed to compile and test the project. Refer to the https://golang.org/[Go website] for the installation.
+* **Dep version 0.5.0**: for managing dependencies. You can find installation instructions in the https://github.com/golang/dep[dep GitHub repository].
+* **Operator SDK v0.3.0+**: used to build the operator and the Docker images. Instructions in the https://github.com/operator-framework/operator-sdk[Operator SDK website] (binary downloads available in the release page).
+* **GNU Make**: used to define composite build actions. This should already be installed or available as a package if you have a good OS (https://www.gnu.org/software/make/).
+
+[[checks]]
+== Running checks
+Checks rely on `golangci-lint` being installed; to install it, look at the https://github.com/golangci/golangci-lint#local-installation[Local Installation] instructions.
+
+You can run checks via `make lint`, or you can install a Git pre-commit hook and have the checks run via https://pre-commit.com[pre-commit]. In that case, after installing pre-commit, install the hooks by running
+
+ $ pre-commit install
+
+[[checking-out]]
+== Checking Out the Sources
+
+You can create a fork of this project on GitHub, then clone your fork with the `git` command line tool.
+
+You need to put the project in your $GOPATH (refer to https://golang.org/doc/install[Go documentation] for information).
+So, make sure that the **root** of the github repo is in the path:
+
+```
+$GOPATH/src/github.com/apache/camel-k/
+```
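+
+For example, assuming your GitHub user is `<your-user>` (a placeholder to replace), checking out your fork into the right location could look like this:
+
+```
+mkdir -p $GOPATH/src/github.com/apache/
+cd $GOPATH/src/github.com/apache/
+git clone https://github.com/<your-user>/camel-k.git
+cd camel-k
+```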
+
+[[structure]]
+== Structure
+
+This is a high level overview of the project structure:
+
+.Structure
+[options="header"]
+|=======================
+| Path						| Content
+| link:/cmd[/cmd]			| Contains the entry points (the *main* functions) for the **camel-k** binary and the **kamel** client tool.
+| link:/script[/script]	| Contains scripts used during make operations for building the project.
+| link:/deploy[/deploy]		| Contains Kubernetes resource files that are used by the **kamel** client during installation. The `/deploy/resources.go` file is kept in sync with the content of the directory (`make build-embed-resources`), so that resources can be used from within the Go code.
+| link:/docs[/docs]			| Contains the documentation website based on https://antora.org/[Antora].
+| link:/pkg[/pkg]			| This is where the code resides. The code is divided in multiple subpackages.
+| link:/runtime[/runtime]	| The Java runtime code that is used inside the integration Docker containers.
+| link:/test[/test]			| Includes integration tests that ensure the software interacts correctly with Kubernetes and OpenShift.
+| link:/build[/build]		| Docker configuration files and build output used by the operator-sdk.
+| /vendor					| Project dependencies (not staged in git).
+| link:/version[/version]	| Contains the global version of the project.
+|=======================
+
+
+[[building]]
+== Building
+
+Go dependencies in the *vendor* directory are not included when you clone the project.
+
+Before compiling the source code, you need to sync your local *vendor* directory with the project dependencies, using the following command:
+
+```
+make dep
+```
+
+The `make dep` command runs `dep ensure -v` under the hood, so make sure that `dep` is properly installed.
+
+To build the whole project you now need to run:
+
+```
+make
+```
+
+This executes a full build of both the Java and Go code. If you need to build the components separately, you can execute:
+
+* `make build-operator`: to build the operator binary only.
+* `make build-kamel`: to build the `kamel` client tool only.
+* `make build-runtime`: to build the Java-based runtime code only.
+
+After a successful build, if you're connected to a Docker daemon, you can build the operator Docker image by running:
+
+```
+make images
+```
+
+[[testing]]
+== Testing
+
+Unit tests are executed automatically as part of the build. They use the standard Go testing framework.
+
+Integration tests (aimed at ensuring that the code integrates correctly with Kubernetes and OpenShift) need special care.
+
+The **convention** used in this repo is to name unit tests `xxx_test.go`, and name integration tests `yyy_integration_test.go`.
+Integration tests are all in the link:/test[/test] dir.
+
+Since both names end with `_test.go`, both would be executed by Go during the build, so you need to add a special **build tag** to mark
+integration tests. An integration test should start with the following line:
+
+```
+// +build integration
+```
+
+Look into the link:/test[/test] directory for examples of integration tests.
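+
+As a sketch (the file name, package, and test body below are hypothetical and not taken from the repository), an integration test file follows this shape:
+
+```
+// +build integration
+
+package test
+
+import "testing"
+
+// TestClusterConnectivity is a placeholder example: it only illustrates
+// the build tag and the `yyy_integration_test.go` naming convention.
+func TestClusterConnectivity(t *testing.T) {
+	t.Log("this would exercise the code against the connected cluster")
+}
+```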
+
+Before running an integration test, you need to be connected to a Kubernetes/OpenShift namespace.
+After you log in to your cluster, you can run the following command to execute **all** integration tests:
+
+```
+make test-integration
+```
+
+[[running]]
+== Running
+
+If you want to install everything you have in your source code and see it running on Kubernetes, run one of the following commands, depending on the cluster you use:
+
+=== For Minishift
+
+* Run `make install-minishift` (or just `make install`): to build the project and install it in the current namespace on Minishift
+* You can specify a different namespace with `make install-minishift project=myawesomeproject`
+
+This command assumes you have an already running Minishift instance.
+
+=== For Minikube
+
+* Run `make install-minikube`: to build the project and install it in the current namespace on Minikube
+
+This command assumes you have an already running Minikube instance.
+
+=== Use
+
+Now you can play with Camel K:
+
+```
+./kamel run examples/Sample.java
+```
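+
+As an illustration of what such a file contains (a sketch, not necessarily the exact content of `examples/Sample.java`), a minimal route in Java looks like this:
+
+```
+import org.apache.camel.builder.RouteBuilder;
+
+public class Sample extends RouteBuilder {
+    @Override
+    public void configure() throws Exception {
+        // fire a message every 3 seconds and log it
+        from("timer:tick?period=3000")
+            .setBody().constant("Hello Camel K!")
+            .to("log:info");
+    }
+}
+```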
+
+To add additional dependencies to your routes:
+
+```
+./kamel run -d camel:dns examples/dns.js
+```
+
+[[debugging]]
+== Debugging and Running from IDE
+
+Sometimes it's useful to debug the code from the IDE when troubleshooting.
+
+.**Debugging the `kamel` binary**
+
+It should be straightforward: just execute the link:/cmd/kamel/main.go[/cmd/kamel/main.go] file from the IDE (e.g. Goland) in debug mode.
+
+.**Debugging the operator**
+
+It is a bit more complex (but not so much).
+
+You are going to run the operator code **outside** OpenShift in your IDE, so first of all you need to **stop the operator running inside**:
+
+```
+# use kubectl in plain Kubernetes
+oc scale deployment/camel-k-operator --replicas 0
+```
+
+You can scale it back to 1 when you're done and you have updated the operator image.
+
+You can set up the IDE (e.g. Goland) to execute the link:/cmd/camel-k/main.go[/cmd/camel-k/main.go] file in debug mode.
+
+When configuring the IDE task, make sure to add all required environment variables in the *IDE task configuration screen* (an equivalent terminal setup is sketched after this list):
+
+* Set the `KUBERNETES_CONFIG` environment variable to point to your Kubernetes configuration file (usually `<homedir>/.kube/config`).
+* Set the `WATCH_NAMESPACE` environment variable to a Kubernetes namespace you have access to.
+* Set the `OPERATOR_NAME` environment variable to `camel-k`.
+
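+If you prefer a plain terminal to an IDE, a possible equivalent setup is to export the same variables in the shell before launching the operator (the namespace below is a placeholder, and `go run` assumes the vendored dependencies are in place):
+
+```
+export KUBERNETES_CONFIG=$HOME/.kube/config
+export WATCH_NAMESPACE=my-namespace
+export OPERATOR_NAME=camel-k
+go run ./cmd/camel-k/main.go
+```
+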
+After you set up the IDE task, you can run and debug the operator process.
+
+NOTE: The operator can be fully debugged in Minishift, because it uses OpenShift S2I binary builds under the hood.
+The build phase cannot be (currently) debugged in Minikube because the Kaniko builder requires that the operator and the publisher pod
+share a common persistent volume.
\ No newline at end of file
diff --git a/deploy/resources.go b/deploy/resources.go
index 7f6a7684..f2561982 100644
--- a/deploy/resources.go
+++ b/deploy/resources.go
@@ -15,7 +15,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Code generated by build/embed_resources.sh. DO NOT EDIT.
+// Code generated by script/embed_resources.sh. DO NOT EDIT.
 
 package deploy
 
diff --git a/docs/developers.adoc b/docs/developers.adoc
deleted file mode 100644
index 9589be3c..00000000
--- a/docs/developers.adoc
+++ /dev/null
@@ -1,186 +0,0 @@
-[[developers]]
-Developer's Guide
-=================
-
-We love contributions!
-
-The project is written in https://golang.org/[go] and contains some parts written in Java for the link:/runtime[integration runtime]
-Camel K is built on top of Kubernetes through *Custom Resource Definitions*. The https://github.com/operator-framework/operator-sdk[Operator SDK] is used
-to manage the lifecycle of those custom resources.
-
-[[requirements]]
-== Requirements
-
-In order to build the project, you need to comply with the following requirements:
-
-* **Go version 1.10+**: needed to compile and test the project. Refer to the https://golang.org/[Go website] for the installation.
-* **Dep version 0.5.0**: for managing dependencies. You can find installation instructions in the https://github.com/golang/dep[dep GitHub repository].
-* **Operator SDK v0.0.7+**: used to build the operator and the Docker images. Instructions in the https://github.com/operator-framework/operator-sdk[Operator SDK website] (binary downloads available in the release page).
-* **GNU Make**: used to define composite build actions. This should be already installed or available as package if you have a good OS (https://www.gnu.org/software/make/).
-
-[[checks]]
-== Running checks
-Checks rely on `golangci-lint` being installed, to install it look at the https://github.com/golangci/golangci-lint#local-installation[Local Installation] instructions.
-
-You can run checks via `make lint` or you can install a GIT pre-commit hook and have the checks run via https://pre-commit.com[pre-commit]; then make sure to install the pre-commit hooks after installing pre-commit by running
-
- $ pre-commit install
-
-[[checking-out]]
-== Checking Out the Sources
-
-You can create a fork of this project from Github, then clone your fork with the `git` command line tool.
-
-You need to put the project in your $GOPATH (refer to https://golang.org/doc/install[Go documentation] for information).
-So, make sure that the **root** of the github repo is in the path:
-
-```
-$GOPATH/src/github.com/apache/camel-k/
-```
-
-[[structure]]
-== Structure
-
-This is a high level overview of the project structure:
-
-.Structure
-[options="header"]
-|=======================
-| Path						| Content
-| link:/cmd[/cmd]			| Contains the entry points (the *main* functions) for the **camel-k** binary and the **kamel** client tool.
-| link:/build[/build]		| Contains scripts used during make operations for building the project.
-| link:/deploy[/deploy]		| Contains Kubernetes resource files that are used by the **kamel** client during installation. The `/deploy/resources.go` file is kept in sync with the content of the directory (`make build-embed-resources`), so that resources can be used from within the go code.
-| link:/docs[/docs]			| Contains this documentation.
-| link:/pkg[/pkg]			| This is where the code resides. The code is divided in multiple subpackages.
-| link:/runtime[/runtime]	| The Java runtime code that is used inside the integration Docker containers.
-| link:/test[/test]			| Include integration tests to ensure that the software interacts correctly with Kubernetes and OpenShift.
-| link:/tmp[/tmp]			| Scripts and Docker configuration files used by the operator-sdk.
-| /vendor					| Project dependencies (not staged in git).
-| link:/version[/version]	| Contains the global version of the project.
-|=======================
-
-
-[[building]]
-== Building
-
-Go dependencies in the *vendor* directory are not included when you clone the project.
-
-Before compiling the source code, you need to sync your local *vendor* directory with the project dependencies, using the following command:
-
-```
-make dep
-```
-
-The `make dep` command runs `dep ensure -v` under the hood, so make sure that `dep` is properly installed.
-
-To build the whole project you now need to run:
-
-```
-make
-```
-
-This execute a full build of both the Java and Go code. If you need to build the components separately you can execute:
-
-* `make build-operator`: to build the operator binary only.
-* `make build-kamel`: to build the `kamel` client tool only.
-* `make build-runtime`: to build the Java-based runtime code only.
-
-After a successful build, if you're connected to a Docker daemon, you can build the operator Docker image by running:
-
-```
-make images
-```
-
-[[testing]]
-== Testing
-
-Unit tests are executed automatically as part of the build. They use the standard go testing framework.
-
-Integration tests (aimed at ensuring that the code integrates correctly with Kubernetes and OpenShift), need special care.
-
-The **convention** used in this repo is to name unit tests `xxx_test.go`, and name integration tests `yyy_integration_test.go`.
-Integration tests are all in the link:/test[/test] dir.
-
-Since both names end with `_test.go`, both would be executed by go during build, so you need to put a special **build tag** to mark
-integration tests. A integration test should start with the following line:
-
-```
-// +build integration
-```
-
-Look into the link:/test[/test] directory for examples of integration tests.
-
-Before running a integration test, you need to be connected to a Kubernetes/OpenShift namespace.
-After you log in into your cluster, you can run the following command to execute **all** integration tests:
-
-```
-make test-integration
-```
-
-[running]
-== Running
-
-If you want to install everything you have in your source code and see it running on Kubernetes, you need to run the following command:
-
-=== For Minishift
-
-* Run `make install-minishift` (or just `make install`): to build the project and install it in the current namespace on Minishift
-* You can specify a different namespace with `make install-minishift project=myawesomeproject`
-
-This command assumes you have an already running Minishift instance.
-
-=== For Minikube
-
-* Run `make install-minikube`: to build the project and install it in the current namespace on Minikube
-
-This command assumes you have an already running Minikube instance.
-
-=== Use
-
-Now you can play with Camel K:
-
-```
-./kamel run examples/Sample.java
-```
-
-To add additional dependencies to your routes:
-
-```
-./kamel run -d camel:dns examples/dns.js
-```
-
-[[debugging]]
-== Debugging and Running from IDE
-
-Sometimes it's useful to debug the code from the IDE when troubleshooting.
-
-.**Debugging the `kamel` binary**
-
-It should be straightforward: just execute the link:/cmd/kamel/main.go[/cmd/kamel/main.go] file from the IDE (e.g. Goland) in debug mode.
-
-.**Debugging the operator**
-
-It is a bit more complex (but not so much).
-
-You are going to run the operator code **outside** OpenShift in your IDE so, first of all, you need to **stop the operator running inside**:
-
-```
-// use kubectl in plain Kubernetes
-oc scale deployment/camel-k-operator --replicas 0
-```
-
-You can scale it back to 1 when you're done and you have updated the operator image.
-
-You can setup the IDE (e.g. Goland) to execute the link:/cmd/camel-k/main.go[/cmd/camel-k/main.go] file in debug mode.
-
-When configuring the IDE task, make sure to add all required environment variables in the *IDE task configuration screen*:
-
-* Set the `KUBERNETES_CONFIG` environment variable to point to your Kubernetes configuration file (usually `<homedir>/.kube/config`).
-* Set the `WATCH_NAMESPACE` environment variable to a Kubernetes namespace you have access to.
-* Set the `OPERATOR_NAME` environment variable to `camel-k`.
-
-After you setup the IDE task, you can run and debug the operator process.
-
-NOTE: The operator can be fully debugged in Minishift, because it uses OpenShift S2I binary builds under the hood.
-The build phase cannot be (currently) debugged in Minikube because the Kaniko builder requires that the operator and the publisher pod
-share a common persistent volume.
diff --git a/pkg/apis/addtoscheme_camel_v1alpha1.go b/pkg/apis/addtoscheme_camel_v1alpha1.go
new file mode 100644
index 00000000..4c3045bd
--- /dev/null
+++ b/pkg/apis/addtoscheme_camel_v1alpha1.go
@@ -0,0 +1,10 @@
+package apis
+
+import (
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+)
+
+func init() {
+	// Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
+	AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme)
+}
diff --git a/pkg/apis/addtoscheme_knative_eventing_v1alpha1.go b/pkg/apis/addtoscheme_knative_eventing_v1alpha1.go
new file mode 100644
index 00000000..9aa5d287
--- /dev/null
+++ b/pkg/apis/addtoscheme_knative_eventing_v1alpha1.go
@@ -0,0 +1,10 @@
+package apis
+
+import (
+	eventing "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+)
+
+func init() {
+	// Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
+	AddToSchemes = append(AddToSchemes, eventing.AddToScheme)
+}
diff --git a/pkg/apis/addtoscheme_knative_serving_v1alpha1.go b/pkg/apis/addtoscheme_knative_serving_v1alpha1.go
new file mode 100644
index 00000000..800bed17
--- /dev/null
+++ b/pkg/apis/addtoscheme_knative_serving_v1alpha1.go
@@ -0,0 +1,10 @@
+package apis
+
+import (
+	serving "github.com/knative/serving/pkg/apis/serving/v1alpha1"
+)
+
+func init() {
+	// Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
+	AddToSchemes = append(AddToSchemes, serving.AddToScheme)
+}
diff --git a/pkg/apis/addtoscheme_openshift.go b/pkg/apis/addtoscheme_openshift.go
new file mode 100644
index 00000000..515a7720
--- /dev/null
+++ b/pkg/apis/addtoscheme_openshift.go
@@ -0,0 +1,8 @@
+package apis
+
+import "github.com/apache/camel-k/pkg/util/openshift"
+
+func init() {
+	// Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
+	AddToSchemes = append(AddToSchemes, openshift.AddToScheme)
+}
diff --git a/pkg/apis/apis.go b/pkg/apis/apis.go
new file mode 100644
index 00000000..07dc9616
--- /dev/null
+++ b/pkg/apis/apis.go
@@ -0,0 +1,13 @@
+package apis
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// AddToSchemes may be used to add all resources defined in the project to a Scheme
+var AddToSchemes runtime.SchemeBuilder
+
+// AddToScheme adds all Resources to the Scheme
+func AddToScheme(s *runtime.Scheme) error {
+	return AddToSchemes.AddToScheme(s)
+}
diff --git a/pkg/apis/camel/v1alpha1/common_types.go b/pkg/apis/camel/v1alpha1/common_types.go
new file mode 100644
index 00000000..2d0aac79
--- /dev/null
+++ b/pkg/apis/camel/v1alpha1/common_types.go
@@ -0,0 +1,45 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+// ConfigurationSpec --
+type ConfigurationSpec struct {
+	Type  string `json:"type"`
+	Value string `json:"value"`
+}
+
+// Artifact --
+type Artifact struct {
+	ID       string `json:"id" yaml:"id"`
+	Location string `json:"location,omitempty" yaml:"location,omitempty"`
+	Target   string `json:"target,omitempty" yaml:"target,omitempty"`
+}
+
+// Flow --
+type Flow struct {
+	Steps []Step `json:"steps"`
+}
+
+// Flows are collections of Flow
+type Flows []Flow
+
+// Step --
+type Step struct {
+	Kind string `json:"kind"`
+	URI  string `json:"uri"`
+}
diff --git a/pkg/apis/camel/v1alpha1/common_types_support.go b/pkg/apis/camel/v1alpha1/common_types_support.go
new file mode 100644
index 00000000..84ff4f0d
--- /dev/null
+++ b/pkg/apis/camel/v1alpha1/common_types_support.go
@@ -0,0 +1,41 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"fmt"
+
+	"gopkg.in/yaml.v2"
+)
+
+func (in *Artifact) String() string {
+	return in.ID
+}
+
+func (spec ConfigurationSpec) String() string {
+	return fmt.Sprintf("%s=%s", spec.Type, spec.Value)
+}
+
+// Serialize serializes a Flow
+func (flows Flows) Serialize() (string, error) {
+	res, err := yaml.Marshal(flows)
+	if err != nil {
+		return "", err
+	}
+	return string(res), nil
+}
diff --git a/pkg/apis/camel/v1alpha1/doc.go b/pkg/apis/camel/v1alpha1/doc.go
index c9375365..102912e1 100644
--- a/pkg/apis/camel/v1alpha1/doc.go
+++ b/pkg/apis/camel/v1alpha1/doc.go
@@ -1,21 +1,4 @@
-// +k8s:deepcopy-gen=package
+// Package v1alpha1 contains API Schema definitions for the camel v1alpha1 API group
+// +k8s:deepcopy-gen=package,register
 // +groupName=camel.apache.org
-
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
 package v1alpha1
diff --git a/pkg/apis/camel/v1alpha1/integration_types.go b/pkg/apis/camel/v1alpha1/integration_types.go
new file mode 100644
index 00000000..fd761504
--- /dev/null
+++ b/pkg/apis/camel/v1alpha1/integration_types.go
@@ -0,0 +1,127 @@
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// NOTE: json tags are required.  Any new fields you add must have json tags for the fields to be serialized.
+
+// IntegrationSpec defines the desired state of Integration
+type IntegrationSpec struct {
+	Replicas      *int32                          `json:"replicas,omitempty"`
+	Sources       []SourceSpec                    `json:"sources,omitempty"`
+	Resources     []ResourceSpec                  `json:"resources,omitempty"`
+	Context       string                          `json:"context,omitempty"`
+	Dependencies  []string                        `json:"dependencies,omitempty"`
+	Profile       TraitProfile                    `json:"profile,omitempty"`
+	Traits        map[string]IntegrationTraitSpec `json:"traits,omitempty"`
+	Configuration []ConfigurationSpec             `json:"configuration,omitempty"`
+	Repositories  []string                        `json:"repositories,omitempty"`
+}
+
+// IntegrationStatus defines the observed state of Integration
+type IntegrationStatus struct {
+	Phase        IntegrationPhase `json:"phase,omitempty"`
+	Digest       string           `json:"digest,omitempty"`
+	Image        string           `json:"image,omitempty"`
+	Dependencies []string         `json:"dependencies,omitempty"`
+	Context      string           `json:"context,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Integration is the Schema for the integrations API
+// +k8s:openapi-gen=true
+type Integration struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   IntegrationSpec   `json:"spec,omitempty"`
+	Status IntegrationStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IntegrationList contains a list of Integration
+type IntegrationList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []Integration `json:"items"`
+}
+
+// DataSpec --
+type DataSpec struct {
+	Name        string `json:"name,omitempty"`
+	Content     string `json:"content,omitempty"`
+	Compression bool   `json:"compression,omitempty"`
+}
+
+// ResourceSpec --
+type ResourceSpec struct {
+	DataSpec
+}
+
+// SourceSpec --
+type SourceSpec struct {
+	DataSpec
+	Language Language `json:"language,omitempty"`
+}
+
+// Language --
+type Language string
+
+const (
+	// LanguageJavaSource --
+	LanguageJavaSource Language = "java"
+	// LanguageJavaClass --
+	LanguageJavaClass Language = "class"
+	// LanguageGroovy --
+	LanguageGroovy Language = "groovy"
+	// LanguageJavaScript --
+	LanguageJavaScript Language = "js"
+	// LanguageXML --
+	LanguageXML Language = "xml"
+	// LanguageKotlin --
+	LanguageKotlin Language = "kts"
+	// LanguageYamlFlow --
+	LanguageYamlFlow Language = "flow"
+)
+
+// Languages is the list of all supported languages
+var Languages = []Language{
+	LanguageJavaSource,
+	LanguageJavaClass,
+	LanguageJavaScript,
+	LanguageGroovy,
+	LanguageXML,
+	LanguageKotlin,
+	LanguageYamlFlow,
+}
+
+// An IntegrationTraitSpec contains the configuration of a trait
+type IntegrationTraitSpec struct {
+	Configuration map[string]string `json:"configuration,omitempty"`
+}
+
+// IntegrationPhase --
+type IntegrationPhase string
+
+const (
+	// IntegrationKind --
+	IntegrationKind string = "Integration"
+
+	// IntegrationPhaseBuildingContext --
+	IntegrationPhaseBuildingContext IntegrationPhase = "Building Context"
+	// IntegrationPhaseBuildingImage --
+	IntegrationPhaseBuildingImage IntegrationPhase = "Building Image"
+	// IntegrationPhaseDeploying --
+	IntegrationPhaseDeploying IntegrationPhase = "Deploying"
+	// IntegrationPhaseRunning --
+	IntegrationPhaseRunning IntegrationPhase = "Running"
+	// IntegrationPhaseError --
+	IntegrationPhaseError IntegrationPhase = "Error"
+)
+
+func init() {
+	SchemeBuilder.Register(&Integration{}, &IntegrationList{})
+}
diff --git a/pkg/apis/camel/v1alpha1/integration_types_support.go b/pkg/apis/camel/v1alpha1/integration_types_support.go
new file mode 100644
index 00000000..bdade9f8
--- /dev/null
+++ b/pkg/apis/camel/v1alpha1/integration_types_support.go
@@ -0,0 +1,125 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"strings"
+
+	"github.com/apache/camel-k/pkg/util"
+	"github.com/mitchellh/mapstructure"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// NewIntegrationList --
+func NewIntegrationList() IntegrationList {
+	return IntegrationList{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: SchemeGroupVersion.String(),
+			Kind:       IntegrationKind,
+		},
+	}
+}
+
+// AddSource --
+func (is *IntegrationSpec) AddSource(name string, content string, language Language) {
+	is.Sources = append(is.Sources, NewSourceSpec(name, content, language))
+}
+
+// AddSources --
+func (is *IntegrationSpec) AddSources(sources ...SourceSpec) {
+	is.Sources = append(is.Sources, sources...)
+}
+
+// AddResources --
+func (is *IntegrationSpec) AddResources(resources ...ResourceSpec) {
+	is.Resources = append(is.Resources, resources...)
+}
+
+// AddConfiguration --
+func (is *IntegrationSpec) AddConfiguration(confType string, confValue string) {
+	is.Configuration = append(is.Configuration, ConfigurationSpec{
+		Type:  confType,
+		Value: confValue,
+	})
+}
+
+// AddDependency --
+func (is *IntegrationSpec) AddDependency(dependency string) {
+	switch {
+	case strings.HasPrefix(dependency, "mvn:"):
+		util.StringSliceUniqueAdd(&is.Dependencies, dependency)
+	case strings.HasPrefix(dependency, "file:"):
+		util.StringSliceUniqueAdd(&is.Dependencies, dependency)
+	case strings.HasPrefix(dependency, "camel-"):
+		util.StringSliceUniqueAdd(&is.Dependencies, "camel:"+strings.TrimPrefix(dependency, "camel-"))
+	}
+}
+
+// Decode the trait configuration to a type safe struct
+func (in *IntegrationTraitSpec) Decode(target interface{}) error {
+	md := mapstructure.Metadata{}
+
+	decoder, err := mapstructure.NewDecoder(
+		&mapstructure.DecoderConfig{
+			Metadata:         &md,
+			WeaklyTypedInput: true,
+			TagName:          "property",
+			Result:           &target,
+		},
+	)
+
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(in.Configuration)
+}
+
+// NewSourceSpec --
+func NewSourceSpec(name string, content string, language Language) SourceSpec {
+	return SourceSpec{
+		DataSpec: DataSpec{
+			Name:    name,
+			Content: content,
+		},
+		Language: language,
+	}
+}
+
+// NewResourceSpec --
+func NewResourceSpec(name string, content string, destination string) ResourceSpec {
+	return ResourceSpec{
+		DataSpec: DataSpec{
+			Name:    name,
+			Content: content,
+		},
+	}
+}
+
+// InferLanguage returns the language of the source, or discovers it from the file extension if not set
+func (s SourceSpec) InferLanguage() Language {
+	if s.Language != "" {
+		return s.Language
+	}
+	for _, l := range Languages {
+		if strings.HasSuffix(s.Name, "."+string(l)) {
+			return l
+		}
+	}
+	return ""
+}
diff --git a/pkg/apis/camel/v1alpha1/types_support_test.go b/pkg/apis/camel/v1alpha1/integration_types_support_test.go
similarity index 100%
rename from pkg/apis/camel/v1alpha1/types_support_test.go
rename to pkg/apis/camel/v1alpha1/integration_types_support_test.go
diff --git a/pkg/apis/camel/v1alpha1/integrationcontext_types.go b/pkg/apis/camel/v1alpha1/integrationcontext_types.go
new file mode 100644
index 00000000..b85aa053
--- /dev/null
+++ b/pkg/apis/camel/v1alpha1/integrationcontext_types.go
@@ -0,0 +1,68 @@
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// NOTE: json tags are required.  Any new fields you add must have json tags for the fields to be serialized.
+
+// IntegrationContextSpec defines the desired state of IntegrationContext
+type IntegrationContextSpec struct {
+	Dependencies  []string                        `json:"dependencies,omitempty"`
+	Profile       TraitProfile                    `json:"profile,omitempty"`
+	Traits        map[string]IntegrationTraitSpec `json:"traits,omitempty"`
+	Configuration []ConfigurationSpec             `json:"configuration,omitempty"`
+	Repositories  []string                        `json:"repositories,omitempty"`
+}
+
+// IntegrationContextStatus defines the observed state of IntegrationContext
+type IntegrationContextStatus struct {
+	Phase       IntegrationContextPhase `json:"phase,omitempty"`
+	Image       string                  `json:"image,omitempty"`
+	PublicImage string                  `json:"publicImage,omitempty"`
+	Digest      string                  `json:"digest,omitempty"`
+	Artifacts   []Artifact              `json:"artifacts,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IntegrationContext is the Schema for the integrationcontexts API
+// +k8s:openapi-gen=true
+type IntegrationContext struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   IntegrationContextSpec   `json:"spec,omitempty"`
+	Status IntegrationContextStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IntegrationContextList contains a list of IntegrationContext
+type IntegrationContextList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []IntegrationContext `json:"items"`
+}
+
+// IntegrationContextPhase --
+type IntegrationContextPhase string
+
+const (
+	// IntegrationContextKind --
+	IntegrationContextKind string = "IntegrationContext"
+
+	// IntegrationContextTypePlatform --
+	IntegrationContextTypePlatform = "platform"
+
+	// IntegrationContextPhaseBuilding --
+	IntegrationContextPhaseBuilding IntegrationContextPhase = "Building"
+	// IntegrationContextPhaseReady --
+	IntegrationContextPhaseReady IntegrationContextPhase = "Ready"
+	// IntegrationContextPhaseError --
+	IntegrationContextPhaseError IntegrationContextPhase = "Error"
+)
+
+func init() {
+	SchemeBuilder.Register(&IntegrationContext{}, &IntegrationContextList{})
+}
diff --git a/pkg/apis/camel/v1alpha1/integrationcontext_types_support.go b/pkg/apis/camel/v1alpha1/integrationcontext_types_support.go
new file mode 100644
index 00000000..32000b54
--- /dev/null
+++ b/pkg/apis/camel/v1alpha1/integrationcontext_types_support.go
@@ -0,0 +1,52 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// NewIntegrationContext --
+func NewIntegrationContext(namespace string, name string) IntegrationContext {
+	return IntegrationContext{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: SchemeGroupVersion.String(),
+			Kind:       IntegrationContextKind,
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace,
+			Name:      name,
+		},
+	}
+}
+
+// NewIntegrationContextList --
+func NewIntegrationContextList() IntegrationContextList {
+	return IntegrationContextList{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: SchemeGroupVersion.String(),
+			Kind:       IntegrationContextKind,
+		},
+	}
+}
+
+// ImageForIntegration returns the image to use when running an integration
+func (c IntegrationContext) ImageForIntegration() string {
+	if c.Status.PublicImage != "" {
+		return c.Status.PublicImage
+	}
+	return c.Status.Image
+}
diff --git a/pkg/apis/camel/v1alpha1/integrationplatform_types.go b/pkg/apis/camel/v1alpha1/integrationplatform_types.go
new file mode 100644
index 00000000..d13ea637
--- /dev/null
+++ b/pkg/apis/camel/v1alpha1/integrationplatform_types.go
@@ -0,0 +1,109 @@
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// NOTE: json tags are required.  Any new fields you add must have json tags for the fields to be serialized.
+
+// IntegrationPlatformSpec defines the desired state of IntegrationPlatform
+type IntegrationPlatformSpec struct {
+	Cluster IntegrationPlatformCluster   `json:"cluster,omitempty"`
+	Profile TraitProfile                 `json:"profile,omitempty"`
+	Build   IntegrationPlatformBuildSpec `json:"build,omitempty"`
+}
+
+// IntegrationPlatformStatus defines the observed state of IntegrationPlatform
+type IntegrationPlatformStatus struct {
+	Phase IntegrationPlatformPhase `json:"phase,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IntegrationPlatform is the Schema for the integrationplatforms API
+// +k8s:openapi-gen=true
+type IntegrationPlatform struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   IntegrationPlatformSpec   `json:"spec,omitempty"`
+	Status IntegrationPlatformStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IntegrationPlatformList contains a list of IntegrationPlatform
+type IntegrationPlatformList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []IntegrationPlatform `json:"items"`
+}
+
+// IntegrationPlatformCluster is the kind of orchestration cluster the platform is installed into
+type IntegrationPlatformCluster string
+
+const (
+	// IntegrationPlatformClusterOpenShift is used when targeting an OpenShift cluster
+	IntegrationPlatformClusterOpenShift = "OpenShift"
+	// IntegrationPlatformClusterKubernetes is used when targeting a Kubernetes cluster
+	IntegrationPlatformClusterKubernetes = "Kubernetes"
+)
+
+// TraitProfile represents lists of traits that are enabled for the specific installation/integration
+type TraitProfile string
+
+const (
+	// TraitProfileOpenShift is used by default on OpenShift clusters
+	TraitProfileOpenShift = "OpenShift"
+	// TraitProfileKubernetes is used by default on Kubernetes clusters
+	TraitProfileKubernetes = "Kubernetes"
+	// TraitProfileKnative is used by default on OpenShift/Kubernetes clusters powered by Knative
+	TraitProfileKnative = "Knative"
+)
+
+var allTraitProfiles = []TraitProfile{TraitProfileOpenShift, TraitProfileKubernetes, TraitProfileKnative}
+
+// IntegrationPlatformBuildSpec contains platform related build information
+type IntegrationPlatformBuildSpec struct {
+	PublishStrategy IntegrationPlatformBuildPublishStrategy `json:"publishStrategy,omitempty"`
+	Registry        string                                  `json:"registry,omitempty"`
+	Organization    string                                  `json:"organization,omitempty"`
+	PushSecret      string                                  `json:"pushSecret,omitempty"`
+	CamelVersion    string                                  `json:"camelVersion,omitempty"`
+	Properties      map[string]string                       `json:"properties,omitempty"`
+	Repositories    []string                                `json:"repositories,omitempty"`
+}
+
+// IntegrationPlatformBuildPublishStrategy enumerates all implemented build strategies
+type IntegrationPlatformBuildPublishStrategy string
+
+const (
+	// IntegrationPlatformBuildPublishStrategyS2I performs an OpenShift binary S2I build
+	IntegrationPlatformBuildPublishStrategyS2I = "S2I"
+
+	// IntegrationPlatformBuildPublishStrategyKaniko performs an in-cluster image build using Kaniko
+	IntegrationPlatformBuildPublishStrategyKaniko = "Kaniko"
+)
+
+// IntegrationPlatformPhase --
+type IntegrationPlatformPhase string
+
+const (
+	// IntegrationPlatformKind --
+	IntegrationPlatformKind string = "IntegrationPlatform"
+
+	// IntegrationPlatformPhaseCreating --
+	IntegrationPlatformPhaseCreating IntegrationPlatformPhase = "Creating"
+	// IntegrationPlatformPhaseStarting --
+	IntegrationPlatformPhaseStarting IntegrationPlatformPhase = "Starting"
+	// IntegrationPlatformPhaseReady --
+	IntegrationPlatformPhaseReady IntegrationPlatformPhase = "Ready"
+	// IntegrationPlatformPhaseError --
+	IntegrationPlatformPhaseError IntegrationPlatformPhase = "Error"
+	// IntegrationPlatformPhaseDuplicate --
+	IntegrationPlatformPhaseDuplicate IntegrationPlatformPhase = "Duplicate"
+)
+
+func init() {
+	SchemeBuilder.Register(&IntegrationPlatform{}, &IntegrationPlatformList{})
+}
diff --git a/pkg/apis/camel/v1alpha1/integrationplatform_types_support.go b/pkg/apis/camel/v1alpha1/integrationplatform_types_support.go
new file mode 100644
index 00000000..11800d37
--- /dev/null
+++ b/pkg/apis/camel/v1alpha1/integrationplatform_types_support.go
@@ -0,0 +1,58 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"strings"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// NewIntegrationPlatformList --
+func NewIntegrationPlatformList() IntegrationPlatformList {
+	return IntegrationPlatformList{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: SchemeGroupVersion.String(),
+			Kind:       IntegrationPlatformKind,
+		},
+	}
+}
+
+// NewIntegrationPlatform --
+func NewIntegrationPlatform(namespace string, name string) IntegrationPlatform {
+	return IntegrationPlatform{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: SchemeGroupVersion.String(),
+			Kind:       IntegrationPlatformKind,
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace,
+			Name:      name,
+		},
+	}
+}
+
+// TraitProfileByName returns the trait profile corresponding to the given name (case insensitive)
+func TraitProfileByName(name string) TraitProfile {
+	for _, p := range allTraitProfiles {
+		if strings.EqualFold(name, string(p)) {
+			return p
+		}
+	}
+	return ""
+}
diff --git a/pkg/apis/camel/v1alpha1/knative/register.go b/pkg/apis/camel/v1alpha1/knative/register.go
deleted file mode 100644
index 2e90b5da..00000000
--- a/pkg/apis/camel/v1alpha1/knative/register.go
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package knative
-
-import (
-	knative "github.com/knative/serving/pkg/apis/serving/v1alpha1"
-	"github.com/operator-framework/operator-sdk/pkg/util/k8sutil"
-)
-
-// Register all Knative types that we want to manage.
-func init() {
-	k8sutil.AddToSDKScheme(knative.AddToScheme)
-}
diff --git a/pkg/apis/camel/v1alpha1/register.go b/pkg/apis/camel/v1alpha1/register.go
index 7ef62129..044a8eff 100644
--- a/pkg/apis/camel/v1alpha1/register.go
+++ b/pkg/apis/camel/v1alpha1/register.go
@@ -1,57 +1,19 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// NOTE: Boilerplate only.  Ignore this file.
 
+// Package v1alpha1 contains API Schema definitions for the camel v1alpha1 API group
+// +k8s:deepcopy-gen=package,register
+// +groupName=camel.apache.org
 package v1alpha1
 
 import (
-	sdkK8sutil "github.com/operator-framework/operator-sdk/pkg/util/k8sutil"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-const (
-	version   = "v1alpha1"
-	groupName = "camel.apache.org"
+	"sigs.k8s.io/controller-runtime/pkg/runtime/scheme"
 )
 
 var (
-	// SchemeGroupVersion is the group version used to register these objects.
-	SchemeGroupVersion = schema.GroupVersion{Group: groupName, Version: version}
-)
-
-func init() {
-	schemeBuilder := runtime.NewSchemeBuilder(addKnownTypes)
-	addToScheme := schemeBuilder.AddToScheme
+	// SchemeGroupVersion is group version used to register these objects
+	SchemeGroupVersion = schema.GroupVersion{Group: "camel.apache.org", Version: "v1alpha1"}
 
-	sdkK8sutil.AddToSDKScheme(addToScheme)
-}
-
-// addKnownTypes adds the set of types defined in this package to the supplied scheme.
-func addKnownTypes(scheme *runtime.Scheme) error {
-	scheme.AddKnownTypes(SchemeGroupVersion,
-		&Integration{},
-		&IntegrationList{},
-		&IntegrationContext{},
-		&IntegrationContextList{},
-		&IntegrationPlatform{},
-		&IntegrationPlatformList{},
-	)
-	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
-	return nil
-}
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+)
diff --git a/pkg/apis/camel/v1alpha1/types.go b/pkg/apis/camel/v1alpha1/types.go
deleted file mode 100644
index 09b46fa3..00000000
--- a/pkg/apis/camel/v1alpha1/types.go
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-	"github.com/apache/camel-k/pkg/util"
-	"github.com/mitchellh/mapstructure"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	"strings"
-)
-
-// ConfigurationSpec --
-type ConfigurationSpec struct {
-	Type  string `json:"type"`
-	Value string `json:"value"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// IntegrationList --
-type IntegrationList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata"`
-	Items           []Integration `json:"items"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Integration --
-type Integration struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata"`
-	Spec              IntegrationSpec   `json:"spec"`
-	Status            IntegrationStatus `json:"status,omitempty"`
-}
-
-// IntegrationSpec --
-type IntegrationSpec struct {
-	Replicas      *int32                          `json:"replicas,omitempty"`
-	Sources       []SourceSpec                    `json:"sources,omitempty"`
-	Resources     []ResourceSpec                  `json:"resources,omitempty"`
-	Context       string                          `json:"context,omitempty"`
-	Dependencies  []string                        `json:"dependencies,omitempty"`
-	Profile       TraitProfile                    `json:"profile,omitempty"`
-	Traits        map[string]IntegrationTraitSpec `json:"traits,omitempty"`
-	Configuration []ConfigurationSpec             `json:"configuration,omitempty"`
-	Repositories  []string                        `json:"repositories,omitempty"`
-}
-
-// AddSource --
-func (is *IntegrationSpec) AddSource(name string, content string, language Language) {
-	is.Sources = append(is.Sources, NewSourceSpec(name, content, language))
-}
-
-// AddSources --
-func (is *IntegrationSpec) AddSources(sources ...SourceSpec) {
-	is.Sources = append(is.Sources, sources...)
-}
-
-// AddResources --
-func (is *IntegrationSpec) AddResources(resources ...ResourceSpec) {
-	is.Resources = append(is.Resources, resources...)
-}
-
-// AddConfiguration --
-func (is *IntegrationSpec) AddConfiguration(confType string, confValue string) {
-	is.Configuration = append(is.Configuration, ConfigurationSpec{
-		Type:  confType,
-		Value: confValue,
-	})
-}
-
-// AddDependency --
-func (is *IntegrationSpec) AddDependency(dependency string) {
-	switch {
-	case strings.HasPrefix(dependency, "mvn:"):
-		util.StringSliceUniqueAdd(&is.Dependencies, dependency)
-	case strings.HasPrefix(dependency, "file:"):
-		util.StringSliceUniqueAdd(&is.Dependencies, dependency)
-	case strings.HasPrefix(dependency, "camel-"):
-		util.StringSliceUniqueAdd(&is.Dependencies, "camel:"+strings.TrimPrefix(dependency, "camel-"))
-	}
-}
-
-// DataSpec --
-type DataSpec struct {
-	Name        string `json:"name,omitempty"`
-	Content     string `json:"content,omitempty"`
-	Compression bool   `json:"compression,omitempty"`
-}
-
-// ResourceSpec --
-type ResourceSpec struct {
-	DataSpec
-}
-
-// SourceSpec --
-type SourceSpec struct {
-	DataSpec
-	Language Language `json:"language,omitempty"`
-}
-
-// Language --
-type Language string
-
-const (
-	// LanguageJavaSource --
-	LanguageJavaSource Language = "java"
-	// LanguageJavaClass --
-	LanguageJavaClass Language = "class"
-	// LanguageGroovy --
-	LanguageGroovy Language = "groovy"
-	// LanguageJavaScript --
-	LanguageJavaScript Language = "js"
-	// LanguageXML --
-	LanguageXML Language = "xml"
-	// LanguageKotlin --
-	LanguageKotlin Language = "kts"
-	// LanguageYamlFlow --
-	LanguageYamlFlow Language = "flow"
-)
-
-// Languages is the list of all supported languages
-var Languages = []Language{
-	LanguageJavaSource,
-	LanguageJavaClass,
-	LanguageJavaScript,
-	LanguageGroovy,
-	LanguageJavaScript,
-	LanguageKotlin,
-	LanguageYamlFlow,
-}
-
-// A IntegrationTraitSpec contains the configuration of a trait
-type IntegrationTraitSpec struct {
-	Configuration map[string]string `json:"configuration,omitempty"`
-}
-
-// Decode the trait configuration to a type safe struct
-func (in *IntegrationTraitSpec) Decode(target interface{}) error {
-	md := mapstructure.Metadata{}
-
-	decoder, err := mapstructure.NewDecoder(
-		&mapstructure.DecoderConfig{
-			Metadata:         &md,
-			WeaklyTypedInput: true,
-			TagName:          "property",
-			Result:           &target,
-		},
-	)
-
-	if err != nil {
-		return err
-	}
-
-	return decoder.Decode(in.Configuration)
-}
-
-// IntegrationStatus --
-type IntegrationStatus struct {
-	Phase        IntegrationPhase `json:"phase,omitempty"`
-	Digest       string           `json:"digest,omitempty"`
-	Image        string           `json:"image,omitempty"`
-	Dependencies []string         `json:"dependencies,omitempty"`
-	Context      string           `json:"context,omitempty"`
-}
-
-// IntegrationPhase --
-type IntegrationPhase string
-
-const (
-	// IntegrationKind --
-	IntegrationKind string = "Integration"
-
-	// IntegrationPhaseBuildingContext --
-	IntegrationPhaseBuildingContext IntegrationPhase = "Building Context"
-	// IntegrationPhaseBuildingImage --
-	IntegrationPhaseBuildingImage IntegrationPhase = "Building Image"
-	// IntegrationPhaseDeploying --
-	IntegrationPhaseDeploying IntegrationPhase = "Deploying"
-	// IntegrationPhaseRunning --
-	IntegrationPhaseRunning IntegrationPhase = "Running"
-	// IntegrationPhaseError --
-	IntegrationPhaseError IntegrationPhase = "Error"
-)
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// IntegrationContextList --
-type IntegrationContextList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata"`
-	Items           []IntegrationContext `json:"items"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// IntegrationContext --
-type IntegrationContext struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata"`
-	Spec              IntegrationContextSpec   `json:"spec"`
-	Status            IntegrationContextStatus `json:"status,omitempty"`
-}
-
-// IntegrationContextSpec --
-type IntegrationContextSpec struct {
-	Dependencies  []string                        `json:"dependencies,omitempty"`
-	Profile       TraitProfile                    `json:"profile,omitempty"`
-	Traits        map[string]IntegrationTraitSpec `json:"traits,omitempty"`
-	Configuration []ConfigurationSpec             `json:"configuration,omitempty"`
-	Repositories  []string                        `json:"repositories,omitempty"`
-}
-
-// IntegrationContextStatus --
-type IntegrationContextStatus struct {
-	Phase       IntegrationContextPhase `json:"phase,omitempty"`
-	Image       string                  `json:"image,omitempty"`
-	PublicImage string                  `json:"publicImage,omitempty"`
-	Digest      string                  `json:"digest,omitempty"`
-	Artifacts   []Artifact              `json:"artifacts,omitempty"`
-}
-
-// ImageForIntegration returns the image to use when using it for running an integration
-func (c IntegrationContext) ImageForIntegration() string {
-	if c.Status.PublicImage != "" {
-		return c.Status.PublicImage
-	}
-	return c.Status.Image
-}
-
-// IntegrationContextPhase --
-type IntegrationContextPhase string
-
-const (
-	// IntegrationContextKind --
-	IntegrationContextKind string = "IntegrationContext"
-
-	// IntegrationContextTypePlatform --
-	IntegrationContextTypePlatform = "platform"
-
-	// IntegrationContextPhaseBuilding --
-	IntegrationContextPhaseBuilding IntegrationContextPhase = "Building"
-	// IntegrationContextPhaseReady --
-	IntegrationContextPhaseReady IntegrationContextPhase = "Ready"
-	// IntegrationContextPhaseError --
-	IntegrationContextPhaseError IntegrationContextPhase = "Error"
-)
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// IntegrationPlatformList --
-type IntegrationPlatformList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata"`
-	Items           []IntegrationPlatform `json:"items"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// IntegrationPlatform --
-type IntegrationPlatform struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata"`
-	Spec              IntegrationPlatformSpec   `json:"spec"`
-	Status            IntegrationPlatformStatus `json:"status,omitempty"`
-}
-
-// IntegrationPlatformSpec --
-type IntegrationPlatformSpec struct {
-	Cluster IntegrationPlatformCluster   `json:"cluster,omitempty"`
-	Profile TraitProfile                 `json:"profile,omitempty"`
-	Build   IntegrationPlatformBuildSpec `json:"build,omitempty"`
-}
-
-// IntegrationPlatformCluster is the kind of orchestration cluster the platform is installed into
-type IntegrationPlatformCluster string
-
-const (
-	// IntegrationPlatformClusterOpenShift is used when targeting a OpenShift cluster
-	IntegrationPlatformClusterOpenShift = "OpenShift"
-	// IntegrationPlatformClusterKubernetes is used when targeting a Kubernetes cluster
-	IntegrationPlatformClusterKubernetes = "Kubernetes"
-)
-
-// TraitProfile represents lists of traits that are enabled for the specific installation/integration
-type TraitProfile string
-
-const (
-	// TraitProfileOpenShift is used by default on OpenShift clusters
-	TraitProfileOpenShift = "OpenShift"
-	// TraitProfileKubernetes is used by default on Kubernetes clusters
-	TraitProfileKubernetes = "Kubernetes"
-	// TraitProfileKnative is used by default on OpenShift/Kubernetes clusters powered by Knative
-	TraitProfileKnative = "Knative"
-)
-
-var allTraitProfiles = []TraitProfile{TraitProfileOpenShift, TraitProfileKubernetes, TraitProfileKnative}
-
-// IntegrationPlatformBuildSpec contains platform related build information
-type IntegrationPlatformBuildSpec struct {
-	PublishStrategy IntegrationPlatformBuildPublishStrategy `json:"publishStrategy,omitempty"`
-	Registry        string                                  `json:"registry,omitempty"`
-	Organization    string                                  `json:"organization,omitempty"`
-	PushSecret      string                                  `json:"pushSecret,omitempty"`
-	CamelVersion    string                                  `json:"camelVersion,omitempty"`
-	Properties      map[string]string                       `json:"properties,omitempty"`
-	Repositories    []string                                `json:"repositories,omitempty"`
-}
-
-// IntegrationPlatformBuildPublishStrategy enumerates all implemented build strategies
-type IntegrationPlatformBuildPublishStrategy string
-
-const (
-	// IntegrationPlatformBuildPublishStrategyS2I performs a OpenShift binary S2I build
-	IntegrationPlatformBuildPublishStrategyS2I = "S2I"
-
-	// IntegrationPlatformBuildPublishStrategyKaniko performs
-	IntegrationPlatformBuildPublishStrategyKaniko = "Kaniko"
-)
-
-// IntegrationPlatformStatus --
-type IntegrationPlatformStatus struct {
-	Phase IntegrationPlatformPhase `json:"phase,omitempty"`
-}
-
-// IntegrationPlatformPhase --
-type IntegrationPlatformPhase string
-
-const (
-	// IntegrationPlatformKind --
-	IntegrationPlatformKind string = "IntegrationPlatform"
-
-	// IntegrationPlatformPhaseCreating --
-	IntegrationPlatformPhaseCreating IntegrationPlatformPhase = "Creating"
-	// IntegrationPlatformPhaseStarting --
-	IntegrationPlatformPhaseStarting IntegrationPlatformPhase = "Starting"
-	// IntegrationPlatformPhaseReady --
-	IntegrationPlatformPhaseReady IntegrationPlatformPhase = "Ready"
-	// IntegrationPlatformPhaseError --
-	IntegrationPlatformPhaseError IntegrationPlatformPhase = "Error"
-	// IntegrationPlatformPhaseDuplicate --
-	IntegrationPlatformPhaseDuplicate IntegrationPlatformPhase = "Duplicate"
-)
-
-// Artifact --
-type Artifact struct {
-	ID       string `json:"id" yaml:"id"`
-	Location string `json:"location,omitempty" yaml:"location,omitempty"`
-	Target   string `json:"target,omitempty" yaml:"target,omitempty"`
-}
-
-func (in *Artifact) String() string {
-	return in.ID
-}
-
-// Flow --
-type Flow struct {
-	Steps []Step `json:"steps"`
-}
-
-// Flows are collections of Flow
-type Flows []Flow
-
-// Step --
-type Step struct {
-	Kind string `json:"kind"`
-	URI  string `json:"uri"`
-}
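
The Flow and Step types removed above are plain data carriers; the matching Serialize helper (removed from types_support.go just below) is a thin wrapper around yaml.Marshal. A minimal standalone sketch of the same serialization, re-declaring the types locally and using the gopkg.in/yaml.v2 dependency already present in Gopkg.lock; the URI values are invented for illustration:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    // Local re-declarations of the removed API types.
    type Step struct {
        Kind string `json:"kind"`
        URI  string `json:"uri"`
    }

    type Flow struct {
        Steps []Step `json:"steps"`
    }

    type Flows []Flow

    func main() {
        flows := Flows{{Steps: []Step{
            {Kind: "endpoint", URI: "timer:tick?period=3s"}, // example values, not from the PR
            {Kind: "endpoint", URI: "log:info"},
        }}}
        out, err := yaml.Marshal(flows) // what Flows.Serialize does under the hood
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
    }
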
diff --git a/pkg/apis/camel/v1alpha1/types_support.go b/pkg/apis/camel/v1alpha1/types_support.go
deleted file mode 100644
index f73414c3..00000000
--- a/pkg/apis/camel/v1alpha1/types_support.go
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-	"fmt"
-	"strings"
-
-	"gopkg.in/yaml.v2"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// **********************************
-//
-// Methods
-//
-// **********************************
-
-func (spec ConfigurationSpec) String() string {
-	return fmt.Sprintf("%s=%s", spec.Type, spec.Value)
-}
-
-// **********************************
-//
-// Helpers
-//
-// **********************************
-
-// NewSourceSpec --
-func NewSourceSpec(name string, content string, language Language) SourceSpec {
-	return SourceSpec{
-		DataSpec: DataSpec{
-			Name:    name,
-			Content: content,
-		},
-		Language: language,
-	}
-}
-
-// NewResourceSpec --
-func NewResourceSpec(name string, content string, destination string) ResourceSpec {
-	return ResourceSpec{
-		DataSpec: DataSpec{
-			Name:    name,
-			Content: content,
-		},
-	}
-}
-
-// NewIntegrationPlatformList --
-func NewIntegrationPlatformList() IntegrationPlatformList {
-	return IntegrationPlatformList{
-		TypeMeta: metav1.TypeMeta{
-			APIVersion: SchemeGroupVersion.String(),
-			Kind:       IntegrationPlatformKind,
-		},
-	}
-}
-
-// NewIntegrationPlatform --
-func NewIntegrationPlatform(namespace string, name string) IntegrationPlatform {
-	return IntegrationPlatform{
-		TypeMeta: metav1.TypeMeta{
-			APIVersion: SchemeGroupVersion.String(),
-			Kind:       IntegrationPlatformKind,
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: namespace,
-			Name:      name,
-		},
-	}
-}
-
-// NewIntegrationList --
-func NewIntegrationList() IntegrationList {
-	return IntegrationList{
-		TypeMeta: metav1.TypeMeta{
-			APIVersion: SchemeGroupVersion.String(),
-			Kind:       IntegrationKind,
-		},
-	}
-}
-
-// NewIntegrationContext --
-func NewIntegrationContext(namespace string, name string) IntegrationContext {
-	return IntegrationContext{
-		TypeMeta: metav1.TypeMeta{
-			APIVersion: SchemeGroupVersion.String(),
-			Kind:       IntegrationContextKind,
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: namespace,
-			Name:      name,
-		},
-	}
-}
-
-// NewIntegrationContextList --
-func NewIntegrationContextList() IntegrationContextList {
-	return IntegrationContextList{
-		TypeMeta: metav1.TypeMeta{
-			APIVersion: SchemeGroupVersion.String(),
-			Kind:       IntegrationContextKind,
-		},
-	}
-}
-
-// TraitProfileByName returns the trait profile corresponding to the given name (case insensitive)
-func TraitProfileByName(name string) TraitProfile {
-	for _, p := range allTraitProfiles {
-		if strings.EqualFold(name, string(p)) {
-			return p
-		}
-	}
-	return ""
-}
-
-// Serialize serializes a Flow
-func (flows Flows) Serialize() (string, error) {
-	res, err := yaml.Marshal(flows)
-	if err != nil {
-		return "", err
-	}
-	return string(res), nil
-}
-
-// InferLanguage returns the language of the source or discovers it from file extension if not set
-func (s SourceSpec) InferLanguage() Language {
-	if s.Language != "" {
-		return s.Language
-	}
-	for _, l := range Languages {
-		if strings.HasSuffix(s.Name, "."+string(l)) {
-			return l
-		}
-	}
-	return ""
-}
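
SourceSpec.InferLanguage, removed here, resolves a source's language by preferring an explicit setting and falling back to the file extension. A self-contained sketch of the same lookup; the languages list is an assumption standing in for the package-level Languages slice:

    package main

    import (
        "fmt"
        "strings"
    )

    // Language mirrors the API's typed string.
    type Language string

    // Assumed contents; the real list lives in the v1alpha1 package.
    var languages = []Language{"java", "groovy", "kotlin", "js", "xml"}

    // inferLanguage mimics SourceSpec.InferLanguage: an explicit language
    // wins, otherwise the file extension decides.
    func inferLanguage(name string, explicit Language) Language {
        if explicit != "" {
            return explicit
        }
        for _, l := range languages {
            if strings.HasSuffix(name, "."+string(l)) {
                return l
            }
        }
        return ""
    }

    func main() {
        fmt.Println(inferLanguage("routes.groovy", ""))     // groovy
        fmt.Println(inferLanguage("routes.txt", "java"))    // java
    }
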
diff --git a/pkg/apis/camel/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/camel/v1alpha1/zz_generated.deepcopy.go
index 8df47697..f26f1086 100644
--- a/pkg/apis/camel/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/camel/v1alpha1/zz_generated.deepcopy.go
@@ -1,14 +1,13 @@
 // +build !ignore_autogenerated
 
 /*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/pkg/builder/builder.go b/pkg/builder/builder.go
index 58d814e5..9becccbc 100644
--- a/pkg/builder/builder.go
+++ b/pkg/builder/builder.go
@@ -28,7 +28,7 @@ import (
 	"time"
 
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-
+	"github.com/apache/camel-k/pkg/client"
 	"github.com/sirupsen/logrus"
 )
 
@@ -41,6 +41,7 @@ import (
 type defaultBuilder struct {
 	log       *logrus.Entry
 	ctx       context.Context
+	client    client.Client
 	requests  chan Request
 	interrupt chan bool
 	request   sync.Map
@@ -49,10 +50,11 @@ type defaultBuilder struct {
 }
 
 // New --
-func New(ctx context.Context, namespace string) Builder {
+func New(ctx context.Context, c client.Client, namespace string) Builder {
 	m := defaultBuilder{
 		log:       logrus.WithField("logger", "builder"),
 		ctx:       ctx,
+		client:    c,
 		requests:  make(chan Request),
 		interrupt: make(chan bool, 1),
 		running:   0,
@@ -144,6 +146,7 @@ func (b *defaultBuilder) submit(request Request) {
 
 	c := Context{
 		C:         b.ctx,
+		Client:    b.client,
 		Path:      builderPath,
 		Namespace: b.namespace,
 		Request:   request,
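
The substantive change in builder.go: the builder no longer reaches for a process-global SDK client; a client.Client is handed to New and copied into every build Context. A toy sketch of the constructor-injection pattern, with a hypothetical narrow interface in place of the real client:

    package main

    import "fmt"

    // getter is a hypothetical, narrowed stand-in for the real client.Client.
    type getter interface {
        Get(key string) (string, error)
    }

    // builder mirrors defaultBuilder: the client arrives via the constructor
    // and is stored for later use by build contexts.
    type builder struct {
        client getter
    }

    func New(c getter) *builder {
        return &builder{client: c}
    }

    // fakeClient lets the sketch run without a cluster.
    type fakeClient map[string]string

    func (f fakeClient) Get(key string) (string, error) {
        v, ok := f[key]
        if !ok {
            return "", fmt.Errorf("not found: %s", key)
        }
        return v, nil
    }

    func main() {
        b := New(fakeClient{"ns/camel-k-ctx": "10.0.0.1:5000/ns/camel-k-ctx:1"})
        img, err := b.client.Get("ns/camel-k-ctx")
        if err != nil {
            panic(err)
        }
        fmt.Println(img)
    }
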
diff --git a/pkg/builder/builder_steps.go b/pkg/builder/builder_steps.go
index ffe71372..79d98d8d 100644
--- a/pkg/builder/builder_steps.go
+++ b/pkg/builder/builder_steps.go
@@ -24,13 +24,13 @@ import (
 	"path"
 	"strings"
 
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+
 	"github.com/scylladb/go-set/strset"
 
 	"github.com/rs/xid"
 
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-
 	"github.com/apache/camel-k/pkg/util/tar"
 
 	"gopkg.in/yaml.v2"
@@ -245,7 +245,7 @@ func packager(ctx *Context, selector ArtifactsSelector) error {
 func ListPublishedImages(context *Context) ([]PublishedImage, error) {
 	list := v1alpha1.NewIntegrationContextList()
 
-	err := sdk.List(context.Namespace, &list, sdk.WithListOptions(&metav1.ListOptions{}))
+	err := context.Client.List(context.C, &k8sclient.ListOptions{Namespace: context.Namespace}, &list)
 	if err != nil {
 		return nil, err
 	}
@@ -350,8 +350,12 @@ func NotifyIntegrationContext(ctx *Context) error {
 			Name:      ctx.Request.Meta.Name,
 		},
 	}
+	key := k8sclient.ObjectKey{
+		Namespace: ctx.Namespace,
+		Name:      ctx.Request.Meta.Name,
+	}
 
-	if err := sdk.Get(&target); err != nil {
+	if err := ctx.Client.Get(ctx.C, key, &target); err != nil {
 		return err
 	}
 
@@ -363,7 +367,7 @@ func NotifyIntegrationContext(ctx *Context) error {
 	// Add a random ID to trigger update
 	t.Annotations["camel.apache.org/build.id"] = xid.New().String()
 
-	if err := sdk.Update(t); err != nil {
+	if err := ctx.Client.Update(ctx.C, t); err != nil {
 		return err
 	}
 
@@ -382,8 +386,12 @@ func NotifyIntegration(ctx *Context) error {
 			Name:      ctx.Request.Meta.Name,
 		},
 	}
+	key := k8sclient.ObjectKey{
+		Namespace: ctx.Namespace,
+		Name:      ctx.Request.Meta.Name,
+	}
 
-	if err := sdk.Get(&target); err != nil {
+	if err := ctx.Client.Get(ctx.C, key, &target); err != nil {
 		return err
 	}
 
@@ -395,7 +403,7 @@ func NotifyIntegration(ctx *Context) error {
 	// Add a random ID to trigger update
 	t.Annotations["camel.apache.org/build.id"] = xid.New().String()
 
-	if err := sdk.Update(t); err != nil {
+	if err := ctx.Client.Update(ctx.C, t); err != nil {
 		return err
 	}
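
Each hunk in builder_steps.go follows the same mechanical rewrite: the global sdk.Get/sdk.Update calls become methods on the injected client, taking an explicit context plus a namespace/name ObjectKey. A compilable sketch that mirrors the call shapes with local stand-in types, not the real controller-runtime API:

    package main

    import (
        "context"
        "fmt"
    )

    // objectKey mirrors the namespace/name key introduced in these hunks.
    type objectKey struct {
        Namespace, Name string
    }

    // integration is a local stand-in for the v1alpha1.Integration target.
    type integration struct {
        Annotations map[string]string
    }

    // fakeClient mirrors the Get/Update call shapes used above.
    type fakeClient struct {
        store map[objectKey]*integration
    }

    func (c *fakeClient) Get(ctx context.Context, key objectKey, obj *integration) error {
        stored, ok := c.store[key]
        if !ok {
            return fmt.Errorf("%s/%s not found", key.Namespace, key.Name)
        }
        *obj = *stored
        return nil
    }

    func (c *fakeClient) Update(ctx context.Context, obj *integration) error {
        return nil // a real client would PUT the object back to the API server
    }

    func main() {
        ctx := context.Background()
        c := &fakeClient{store: map[objectKey]*integration{
            {Namespace: "ns", Name: "it"}: {Annotations: map[string]string{}},
        }}

        key := objectKey{Namespace: "ns", Name: "it"}
        target := integration{}
        if err := c.Get(ctx, key, &target); err != nil {
            panic(err)
        }
        // Touch an annotation to trigger an update event, as NotifyIntegration does.
        target.Annotations["camel.apache.org/build.id"] = "example-id"
        if err := c.Update(ctx, &target); err != nil {
            panic(err)
        }
        fmt.Println("updated", key.Namespace+"/"+key.Name)
    }
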
 
diff --git a/pkg/builder/builder_types.go b/pkg/builder/builder_types.go
index d0e13b4a..6666bdac 100644
--- a/pkg/builder/builder_types.go
+++ b/pkg/builder/builder_types.go
@@ -23,11 +23,10 @@ import (
 	"math"
 	"time"
 
-	"k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	"github.com/apache/camel-k/pkg/util/maven"
-
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/client"
+	"github.com/apache/camel-k/pkg/util/maven"
+	"k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 const (
@@ -136,7 +135,8 @@ type Result struct {
 
 // Context --
 type Context struct {
-	C                 context.Context
+	C context.Context
+	client.Client
 	Request           Request
 	Image             string
 	PublicImage       string
diff --git a/pkg/builder/kaniko/publisher.go b/pkg/builder/kaniko/publisher.go
index d9533d0a..4710b4bd 100644
--- a/pkg/builder/kaniko/publisher.go
+++ b/pkg/builder/kaniko/publisher.go
@@ -26,7 +26,6 @@ import (
 	"github.com/apache/camel-k/pkg/util/tar"
 
 	"github.com/apache/camel-k/pkg/util/kubernetes"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
 	"github.com/pkg/errors"
 	"k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -139,17 +138,17 @@ func Publisher(ctx *builder.Context) error {
 		},
 	}
 
-	err = sdk.Delete(&pod)
+	err = ctx.Client.Delete(ctx.C, &pod)
 	if err != nil && !apierrors.IsNotFound(err) {
 		return errors.Wrap(err, "cannot delete kaniko builder pod")
 	}
 
-	err = sdk.Create(&pod)
+	err = ctx.Client.Create(ctx.C, &pod)
 	if err != nil {
 		return errors.Wrap(err, "cannot create kaniko builder pod")
 	}
 
-	err = kubernetes.WaitCondition(&pod, func(obj interface{}) (bool, error) {
+	err = kubernetes.WaitCondition(ctx.C, ctx.Client, &pod, func(obj interface{}) (bool, error) {
 		if val, ok := obj.(*v1.Pod); ok {
 			if val.Status.Phase == v1.PodSucceeded {
 				return true, nil
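
kubernetes.WaitCondition now receives the context and client explicitly, but its job is unchanged: re-check an object until a predicate reports done or time runs out. A simplified, dependency-free sketch of that loop; the interval and timeout values are arbitrary:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    // waitCondition is a stand-in for kubernetes.WaitCondition: poll the
    // predicate until it succeeds or the context expires.
    func waitCondition(ctx context.Context, interval time.Duration, check func() (bool, error)) error {
        ticker := time.NewTicker(interval)
        defer ticker.Stop()
        for {
            done, err := check()
            if err != nil {
                return err
            }
            if done {
                return nil
            }
            select {
            case <-ctx.Done():
                return errors.New("timed out waiting for condition")
            case <-ticker.C:
            }
        }
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()

        attempts := 0
        err := waitCondition(ctx, 50*time.Millisecond, func() (bool, error) {
            attempts++
            return attempts >= 3, nil // e.g. the pod phase finally reports Succeeded
        })
        fmt.Println("attempts:", attempts, "err:", err)
    }
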
diff --git a/pkg/builder/s2i/publisher.go b/pkg/builder/s2i/publisher.go
index 437c8707..b02a7ef1 100644
--- a/pkg/builder/s2i/publisher.go
+++ b/pkg/builder/s2i/publisher.go
@@ -21,17 +21,16 @@ import (
 	"io/ioutil"
 	"time"
 
+	"k8s.io/apimachinery/pkg/util/json"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+
 	"github.com/apache/camel-k/pkg/builder"
 
 	"github.com/apache/camel-k/pkg/util/kubernetes"
 	"github.com/apache/camel-k/pkg/util/kubernetes/customclient"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/operator-framework/operator-sdk/pkg/util/k8sutil"
-	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-
 	buildv1 "github.com/openshift/api/build/v1"
 	imagev1 "github.com/openshift/api/image/v1"
+	"k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
@@ -72,12 +71,12 @@ func Publisher(ctx *builder.Context) error {
 		},
 	}
 
-	err := sdk.Delete(&bc)
+	err := ctx.Client.Delete(ctx.C, &bc)
 	if err != nil && !apierrors.IsNotFound(err) {
 		return errors.Wrap(err, "cannot delete build config")
 	}
 
-	err = sdk.Create(&bc)
+	err = ctx.Client.Create(ctx.C, &bc)
 	if err != nil {
 		return errors.Wrap(err, "cannot create build config")
 	}
@@ -98,12 +97,12 @@ func Publisher(ctx *builder.Context) error {
 		},
 	}
 
-	err = sdk.Delete(&is)
+	err = ctx.Client.Delete(ctx.C, &is)
 	if err != nil && !apierrors.IsNotFound(err) {
 		return errors.Wrap(err, "cannot delete image stream")
 	}
 
-	err = sdk.Create(&is)
+	err = ctx.Client.Create(ctx.C, &is)
 	if err != nil {
 		return errors.Wrap(err, "cannot create image stream")
 	}
@@ -113,7 +112,7 @@ func Publisher(ctx *builder.Context) error {
 		return errors.Wrap(err, "cannot fully read tar file "+ctx.Archive)
 	}
 
-	restClient, err := customclient.GetClientFor("build.openshift.io", "v1")
+	restClient, err := customclient.GetClientFor(ctx.Client, "build.openshift.io", "v1")
 	if err != nil {
 		return err
 	}
@@ -135,18 +134,13 @@ func Publisher(ctx *builder.Context) error {
 		return errors.Wrap(err, "no raw data retrieved")
 	}
 
-	u := unstructured.Unstructured{}
-	err = u.UnmarshalJSON(data)
+	ocbuild := buildv1.Build{}
+	err = json.Unmarshal(data, &ocbuild)
 	if err != nil {
-		return errors.Wrap(err, "cannot unmarshal instantiate binary response")
+		return errors.Wrap(err, "cannot unmarshal instantiated binary response")
 	}
 
-	ocbuild, err := k8sutil.RuntimeObjectFromUnstructured(&u)
-	if err != nil {
-		return err
-	}
-
-	err = kubernetes.WaitCondition(ocbuild, func(obj interface{}) (bool, error) {
+	err = kubernetes.WaitCondition(ctx.C, ctx.Client, &ocbuild, func(obj interface{}) (bool, error) {
 		if val, ok := obj.(*buildv1.Build); ok {
 			if val.Status.Phase == buildv1.BuildPhaseComplete {
 				return true, nil
@@ -162,7 +156,11 @@ func Publisher(ctx *builder.Context) error {
 		return err
 	}
 
-	err = sdk.Get(&is)
+	key, err := k8sclient.ObjectKeyFromObject(&is)
+	if err != nil {
+		return err
+	}
+	err = ctx.Client.Get(ctx.C, key, &is)
 	if err != nil {
 		return err
 	}
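
The S2I publisher used to decode the instantiatebinary response into an unstructured object and convert it afterwards; with the typed OpenShift API on the scheme it can json.Unmarshal straight into buildv1.Build. The stdlib mechanics, shown on a stand-in type with an invented payload:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // build is a tiny stand-in for buildv1.Build.
    type build struct {
        Status struct {
            Phase string `json:"phase"`
        } `json:"status"`
    }

    func main() {
        data := []byte(`{"status":{"phase":"Complete"}}`)

        // Before: decode into a generic map and fish values out of it.
        var generic map[string]interface{}
        _ = json.Unmarshal(data, &generic)

        // After: decode straight into the typed struct.
        var b build
        if err := json.Unmarshal(data, &b); err != nil {
            panic(err)
        }
        fmt.Println(b.Status.Phase) // Complete
    }
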
diff --git a/pkg/client/client.go b/pkg/client/client.go
new file mode 100644
index 00000000..cade334e
--- /dev/null
+++ b/pkg/client/client.go
@@ -0,0 +1,163 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+	"io/ioutil"
+	"os"
+	"os/user"
+	"path/filepath"
+
+	"github.com/apache/camel-k/pkg/apis"
+	"github.com/operator-framework/operator-sdk/pkg/k8sutil"
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/kubernetes"
+	clientscheme "k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/tools/clientcmd"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+	clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
+	controller "sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/config"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+)
+
+// Client is an abstraction for a k8s client
+type Client interface {
+	controller.Client
+	kubernetes.Interface
+	GetScheme() *runtime.Scheme
+}
+
+// Injectable identifies objects that can receive a Client
+type Injectable interface {
+	InjectClient(Client)
+}
+
+// Provider is used to provide a new instance of the Client each time it's required
+type Provider struct {
+	Get func() (Client, error)
+}
+
+type defaultClient struct {
+	controller.Client
+	kubernetes.Interface
+	scheme *runtime.Scheme
+}
+
+func (c *defaultClient) GetScheme() *runtime.Scheme {
+	return c.scheme
+}
+
+// NewOutOfClusterClient creates a new k8s client that can be used from outside the cluster
+func NewOutOfClusterClient(kubeconfig string) (Client, error) {
+	initialize(kubeconfig)
+	// Get a config to talk to the apiserver
+	cfg, err := config.GetConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	scheme := clientscheme.Scheme
+
+	// Setup Scheme for all resources
+	if err := apis.AddToScheme(scheme); err != nil {
+		return nil, err
+	}
+
+	var clientset kubernetes.Interface
+	if clientset, err = kubernetes.NewForConfig(cfg); err != nil {
+		return nil, err
+	}
+
+	// Create a new client to avoid using cache (enabled by default on operator-sdk client)
+	clientOptions := controller.Options{
+		Scheme: scheme,
+	}
+	dynClient, err := controller.New(cfg, clientOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	return &defaultClient{
+		Client:    dynClient,
+		Interface: clientset,
+		scheme:    clientOptions.Scheme,
+	}, nil
+}
+
+// FromManager creates a new k8s client from a manager object
+func FromManager(manager manager.Manager) (Client, error) {
+	var err error
+	var clientset kubernetes.Interface
+	if clientset, err = kubernetes.NewForConfig(manager.GetConfig()); err != nil {
+		return nil, err
+	}
+	return &defaultClient{
+		Client:    manager.GetClient(),
+		Interface: clientset,
+		scheme:    manager.GetScheme(),
+	}, nil
+}
+
+// initialize configures the k8s client for usage outside the cluster
+func initialize(kubeconfig string) {
+	if kubeconfig == "" {
+		kubeconfig = getDefaultKubeConfigFile()
+	}
+	os.Setenv(k8sutil.KubeConfigEnvVar, kubeconfig)
+}
+
+func getDefaultKubeConfigFile() string {
+	usr, err := user.Current()
+	if err != nil {
+		panic(err) // TODO handle error
+	}
+	return filepath.Join(usr.HomeDir, ".kube", "config")
+}
+
+// GetCurrentNamespace --
+func GetCurrentNamespace(kubeconfig string) (string, error) {
+	if kubeconfig == "" {
+		kubeconfig = getDefaultKubeConfigFile()
+	}
+	if kubeconfig == "" {
+		return "default", nil
+	}
+
+	data, err := ioutil.ReadFile(kubeconfig)
+	if err != nil {
+		return "", err
+	}
+	conf := clientcmdapi.NewConfig()
+	if len(data) == 0 {
+		return "", errors.New("kubernetes config file is empty")
+	}
+
+	decoded, _, err := clientcmdlatest.Codec.Decode(data, &schema.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"}, conf)
+	if err != nil {
+		return "", err
+	}
+
+	clientcmdconfig := decoded.(*clientcmdapi.Config)
+
+	cc := clientcmd.NewDefaultClientConfig(*clientcmdconfig, &clientcmd.ConfigOverrides{})
+	ns, _, err := cc.Namespace()
+	return ns, err
+}
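
The new Client interface glues the controller-runtime client and the client-go clientset behind one type via interface embedding, and defaultClient satisfies it by embedding concrete implementations of each part. A minimal sketch of the idiom, with invented reader/writer interfaces in place of the real API surfaces:

    package main

    import "fmt"

    // reader and writer stand in for controller.Client and kubernetes.Interface.
    type reader interface{ Read() string }
    type writer interface{ Write(s string) }

    // combined mirrors Client: embed both interfaces and add what is
    // missing (GetScheme in the real code).
    type combined interface {
        reader
        writer
        Name() string
    }

    // impl satisfies combined the way defaultClient does; here one struct
    // plays both roles for brevity.
    type impl struct{ data string }

    func (i *impl) Read() string   { return i.data }
    func (i *impl) Write(s string) { i.data = s }
    func (i *impl) Name() string   { return "default" }

    func main() {
        var c combined = &impl{}
        c.Write("hello")
        fmt.Println(c.Name(), c.Read())
    }
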
diff --git a/pkg/client/cmd/completion_bash.go b/pkg/client/cmd/completion_bash.go
deleted file mode 100644
index ad3ccbff..00000000
--- a/pkg/client/cmd/completion_bash.go
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cmd
-
-import (
-	"fmt"
-	"os"
-	"strings"
-
-	"github.com/apache/camel-k/pkg/trait"
-
-	"github.com/apache/camel-k/pkg/util/camel"
-	"github.com/spf13/cobra"
-)
-
-// ******************************
-//
-//
-//
-// ******************************
-
-const bashCompletionCmdLongDescription = `
-To load completion run
-
-. <(kamel completion bash)
-
-To configure your bash shell to load completions for each session add to your bashrc
-
-# ~/.bashrc or ~/.profile
-. <(kamel completion bash)
-`
-
-var bashCompletionFunction = `
-__kamel_dependency_type() {
-    case ${cur} in
-    c*)
-        local type_list="` + computeCamelDependencies() + `"
-        COMPREPLY=( $( compgen -W "${type_list}" -- "$cur") )
-        ;;
-    m*)
-        local type_list="mvn:"
-        COMPREPLY=( $( compgen -W "${type_list}" -- "$cur") )
-		compopt -o nospace
-        ;;
-    f*)
-        local type_list="file:"
-        COMPREPLY=( $( compgen -W "${type_list}" -- "$cur") )
-		compopt -o nospace
-        ;;
-    *)
-        local type_list="camel mvn: file:"
-        COMPREPLY=( $( compgen -W "${type_list}" -- "$cur") )
-	    compopt -o nospace
-    esac
-}
-
-__kamel_traits() {
-    local type_list="` + strings.Join(trait.NewCatalog().ComputeTraitsProperties(), " ") + `"
-    COMPREPLY=( $( compgen -W "${type_list}" -- "$cur") )
-    compopt -o nospace
-}
-
-__kamel_languages() {
-    local type_list="js groovy kotlin java xml"
-    COMPREPLY=( $( compgen -W "${type_list}" -- "$cur") )
-}
-
-__kamel_runtimes() {
-    local type_list="jvm groovy kotlin"
-    COMPREPLY=( $( compgen -W "${type_list}" -- "$cur") )
-}
-
-__kamel_kubectl_get_configmap() {
-    local template
-    local kubectl_out
-
-    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
-
-    if kubectl_out=$(kubectl get -o template --template="${template}" configmap 2>/dev/null); then
-        COMPREPLY=( $( compgen -W "${kubectl_out}" -- "$cur" ) )
-    fi
-}
-
-__kamel_kubectl_get_secret() {
-    local template
-    local kubectl_out
-
-    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
-
-    if kubectl_out=$(kubectl get -o template --template="${template}" secret 2>/dev/null); then
-        COMPREPLY=( $( compgen -W "${kubectl_out}" -- "$cur" ) )
-    fi
-}
-
-__kamel_kubectl_get_integrations() {
-    local template
-    local kubectl_out
-
-    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
-
-    if kubectl_out=$(kubectl get -o template --template="${template}" integrations 2>/dev/null); then
-        COMPREPLY=( $( compgen -W "${kubectl_out}" -- "$cur" ) )
-    fi
-}
-
-__kamel_kubectl_get_integrationcontexts() {
-    local template
-    local kubectl_out
-
-    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
-
-    if kubectl_out=$(kubectl get -o template --template="${template}" integrationcontexts 2>/dev/null); then
-        COMPREPLY=( $( compgen -W "${kubectl_out}" -- "$cur" ) )
-    fi
-}
-
-__kamel_kubectl_get_user_integrationcontexts() {
-    local template
-    local kubectl_out
-
-    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
-
-    if kubectl_out=$(kubectl get -l camel.apache.org/context.type=user -o template --template="${template}" integrationcontexts 2>/dev/null); then
-        COMPREPLY=( $( compgen -W "${kubectl_out}" -- "$cur" ) )
-    fi
-}
-
-__custom_func() {
-    case ${last_command} in
-        kamel_delete)
-            __kamel_kubectl_get_integrations
-            return
-            ;;
-        kamel_log)
-            __kamel_kubectl_get_integrations
-            return
-            ;;
-        kamel_context_delete)
-            __kamel_kubectl_get_user_integrationcontexts
-            return
-            ;;
-        *)
-            ;;
-    esac
-}
-`
-
-// ******************************
-//
-// COMMAND
-//
-// ******************************
-
-func newCmdCompletionBash(root *cobra.Command) *cobra.Command {
-	return &cobra.Command{
-		Use:   "bash",
-		Short: "Generates bash completion scripts",
-		Long:  bashCompletionCmdLongDescription,
-		Run: func(cmd *cobra.Command, args []string) {
-			err := root.GenBashCompletion(os.Stdout)
-			if err != nil {
-				fmt.Print(err.Error())
-			}
-		},
-	}
-}
-
-func configureKnownBashCompletions(command *cobra.Command) {
-	configureBashAnnotationForFlag(
-		command,
-		"dependency",
-		map[string][]string{
-			cobra.BashCompCustom: {"__kamel_dependency_type"},
-		},
-	)
-	configureBashAnnotationForFlag(
-		command,
-		"configmap",
-		map[string][]string{
-			cobra.BashCompCustom: {"__kamel_kubectl_get_configmap"},
-		},
-	)
-	configureBashAnnotationForFlag(
-		command,
-		"secret",
-		map[string][]string{
-			cobra.BashCompCustom: {"__kamel_kubectl_get_secret"},
-		},
-	)
-	configureBashAnnotationForFlag(
-		command,
-		"context",
-		map[string][]string{
-			cobra.BashCompCustom: {"__kamel_kubectl_get_user_integrationcontexts"},
-		},
-	)
-	configureBashAnnotationForFlag(
-		command,
-		"language",
-		map[string][]string{
-			cobra.BashCompCustom: {"__kamel_languages"},
-		},
-	)
-	configureBashAnnotationForFlag(
-		command,
-		"runtime",
-		map[string][]string{
-			cobra.BashCompCustom: {"__kamel_runtimes"},
-		},
-	)
-	configureBashAnnotationForFlag(
-		command,
-		"trait",
-		map[string][]string{
-			cobra.BashCompCustom: {"__kamel_traits"},
-		},
-	)
-}
-
-func configureBashAnnotationForFlag(command *cobra.Command, flagName string, annotations map[string][]string) {
-	flag := command.Flag(flagName)
-	if flag != nil {
-		flag.Annotations = annotations
-	}
-}
-
-func computeCamelDependencies() string {
-	results := make([]string, 0, len(camel.Runtime.Artifacts))
-
-	for k := range camel.Runtime.Artifacts {
-		results = append(results, k)
-	}
-
-	return strings.Join(results, " ")
-}
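
The deleted completion command is mostly generated bash; the Go side is a thin wrapper around cobra's generator. A minimal sketch under the assumption that GenBashCompletion behaves as in the cobra version vendored here; the command name is a placeholder:

    package main

    import (
        "fmt"
        "os"

        "github.com/spf13/cobra"
    )

    func main() {
        root := &cobra.Command{Use: "kamel-sketch"}
        root.AddCommand(&cobra.Command{
            Use:   "bash",
            Short: "Generates bash completion scripts",
            Run: func(cmd *cobra.Command, args []string) {
                // Write the generated script to stdout, as the deleted
                // command did; users then source it: . <(kamel-sketch bash)
                if err := root.GenBashCompletion(os.Stdout); err != nil {
                    fmt.Fprintln(os.Stderr, err)
                }
            },
        })
        if err := root.Execute(); err != nil {
            os.Exit(1)
        }
    }
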
diff --git a/pkg/client/cmd/context_create.go b/pkg/client/cmd/context_create.go
deleted file mode 100644
index 0bc8e97e..00000000
--- a/pkg/client/cmd/context_create.go
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cmd
-
-import (
-	"errors"
-	"fmt"
-	"strconv"
-	"strings"
-
-	"github.com/apache/camel-k/pkg/util"
-
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/apache/camel-k/pkg/util/kubernetes"
-
-	"github.com/spf13/cobra"
-	k8serrors "k8s.io/apimachinery/pkg/api/errors"
-)
-
-// NewCmdContext --
-func newContextCreateCmd(rootCmdOptions *RootCmdOptions) *cobra.Command {
-	impl := &contextCreateCommand{
-		RootCmdOptions: rootCmdOptions,
-	}
-
-	cmd := cobra.Command{
-		Use:   "create",
-		Short: "Create an Integration Context",
-		Long:  `Create an Integration Context.`,
-		Args:  impl.validateArgs,
-		RunE:  impl.run,
-	}
-
-	cmd.Flags().StringVarP(&impl.runtime, "runtime", "r", "jvm", "Runtime provided by the context")
-	cmd.Flags().StringSliceVarP(&impl.dependencies, "dependency", "d", nil, "Add a dependency")
-	cmd.Flags().StringSliceVarP(&impl.properties, "property", "p", nil, "Add a camel property")
-	cmd.Flags().StringSliceVar(&impl.configmaps, "configmap", nil, "Add a ConfigMap")
-	cmd.Flags().StringSliceVar(&impl.secrets, "secret", nil, "Add a Secret")
-	cmd.Flags().StringSliceVar(&impl.Repositories, "repository", nil, "Add a maven repository")
-
-	// completion support
-	configureKnownCompletions(&cmd)
-
-	return &cmd
-}
-
-type contextCreateCommand struct {
-	*RootCmdOptions
-
-	runtime      string
-	dependencies []string
-	properties   []string
-	configmaps   []string
-	secrets      []string
-	Repositories []string
-}
-
-func (command *contextCreateCommand) validateArgs(cmd *cobra.Command, args []string) error {
-	if len(args) != 1 {
-		return errors.New("accepts 1 arg, received " + strconv.Itoa(len(args)))
-	}
-
-	return nil
-}
-
-func (command *contextCreateCommand) run(cmd *cobra.Command, args []string) error {
-	ctx := v1alpha1.NewIntegrationContext(command.Namespace, args[0])
-	if err := sdk.Get(&ctx); err == nil {
-		// the integration context already exists, let's check that it is
-		// not a platform one which is supposed to be "read only"
-
-		if ctx.Labels["camel.apache.org/context.type"] == v1alpha1.IntegrationContextTypePlatform {
-			fmt.Printf("integration context \"%s\" is not editable\n", ctx.Name)
-			return nil
-		}
-	}
-
-	ctx = v1alpha1.NewIntegrationContext(command.Namespace, kubernetes.SanitizeName(args[0]))
-	ctx.Labels = map[string]string{
-		"camel.apache.org/context.type": "user",
-	}
-	ctx.Spec = v1alpha1.IntegrationContextSpec{
-		Dependencies:  make([]string, 0, len(command.dependencies)),
-		Configuration: make([]v1alpha1.ConfigurationSpec, 0),
-		Repositories:  command.Repositories,
-	}
-
-	for _, item := range command.dependencies {
-		switch {
-		case strings.HasPrefix(item, "mvn:"):
-			ctx.Spec.Dependencies = append(ctx.Spec.Dependencies, item)
-		case strings.HasPrefix(item, "file:"):
-			ctx.Spec.Dependencies = append(ctx.Spec.Dependencies, item)
-		case strings.HasPrefix(item, "camel-"):
-			ctx.Spec.Dependencies = append(ctx.Spec.Dependencies, "camel:"+strings.TrimPrefix(item, "camel-"))
-		}
-	}
-
-	// jvm runtime required by default
-	util.StringSliceUniqueAdd(&ctx.Spec.Dependencies, "runtime:jvm")
-
-	if command.runtime != "" {
-		util.StringSliceUniqueAdd(&ctx.Spec.Dependencies, "runtime:"+command.runtime)
-	}
-
-	for _, item := range command.properties {
-		ctx.Spec.Configuration = append(ctx.Spec.Configuration, v1alpha1.ConfigurationSpec{
-			Type:  "property",
-			Value: item,
-		})
-	}
-	for _, item := range command.configmaps {
-		ctx.Spec.Configuration = append(ctx.Spec.Configuration, v1alpha1.ConfigurationSpec{
-			Type:  "configmap",
-			Value: item,
-		})
-	}
-	for _, item := range command.secrets {
-		ctx.Spec.Configuration = append(ctx.Spec.Configuration, v1alpha1.ConfigurationSpec{
-			Type:  "secret",
-			Value: item,
-		})
-	}
-
-	existed := false
-	err := sdk.Create(&ctx)
-	if err != nil && k8serrors.IsAlreadyExists(err) {
-		existed = true
-		clone := ctx.DeepCopy()
-		err = sdk.Get(clone)
-		if err != nil {
-			fmt.Print(err.Error())
-			return nil
-		}
-		ctx.ResourceVersion = clone.ResourceVersion
-		err = sdk.Update(&ctx)
-	}
-
-	if err != nil {
-		fmt.Print(err.Error())
-		return nil
-	}
-
-	if !existed {
-		fmt.Printf("integration context \"%s\" created\n", ctx.Name)
-	} else {
-		fmt.Printf("integration context \"%s\" updated\n", ctx.Name)
-	}
-
-	return nil
-}
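
Buried in the deleted create command is the dependency-shorthand normalization: mvn: and file: coordinates pass through, camel-* artifact names are rewritten to the camel: scheme, and anything else is dropped. Extracted as a standalone sketch:

    package main

    import (
        "fmt"
        "strings"
    )

    // normalizeDependency mirrors the switch in the deleted run(): mvn: and
    // file: pass through, camel-* is rewritten to the camel: scheme.
    func normalizeDependency(item string) (string, bool) {
        switch {
        case strings.HasPrefix(item, "mvn:"), strings.HasPrefix(item, "file:"):
            return item, true
        case strings.HasPrefix(item, "camel-"):
            return "camel:" + strings.TrimPrefix(item, "camel-"), true
        default:
            return "", false // silently ignored, as in the original code
        }
    }

    func main() {
        for _, d := range []string{"camel-netty4", "mvn:org.foo:bar:1.0", "unknown"} {
            if dep, ok := normalizeDependency(d); ok {
                fmt.Println(dep)
            }
        }
    }
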
diff --git a/pkg/client/cmd/context_delete.go b/pkg/client/cmd/context_delete.go
deleted file mode 100644
index cf64eee9..00000000
--- a/pkg/client/cmd/context_delete.go
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cmd
-
-import (
-	"errors"
-	"fmt"
-
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/spf13/cobra"
-
-	k8errors "k8s.io/apimachinery/pkg/api/errors"
-)
-
-func newContextDeleteCmd(rootCmdOptions *RootCmdOptions) *cobra.Command {
-	impl := contextDeleteCommand{
-		RootCmdOptions: rootCmdOptions,
-	}
-
-	cmd := cobra.Command{
-		Use:   "delete",
-		Short: "Delete an Integration Context",
-		Long:  `Delete an Integration Context.`,
-		RunE: func(cmd *cobra.Command, args []string) error {
-			if err := impl.validate(args); err != nil {
-				return err
-			}
-			if err := impl.run(args); err != nil {
-				fmt.Println(err.Error())
-			}
-
-			return nil
-		},
-	}
-
-	cmd.Flags().BoolVar(&impl.all, "all", false, "Delete all integration contexts")
-
-	return &cmd
-}
-
-type contextDeleteCommand struct {
-	*RootCmdOptions
-	all bool
-}
-
-func (command *contextDeleteCommand) validate(args []string) error {
-	if command.all && len(args) > 0 {
-		return errors.New("invalid combination: both all flag and named contexts are set")
-	}
-	if !command.all && len(args) == 0 {
-		return errors.New("invalid combination: neither all flag nor named contexts are set")
-	}
-
-	return nil
-}
-
-func (command *contextDeleteCommand) run(args []string) error {
-	names := args
-
-	if command.all {
-		ctxList := v1alpha1.NewIntegrationContextList()
-		if err := sdk.List(command.Namespace, &ctxList); err != nil {
-			return err
-		}
-
-		names = make([]string, 0, len(ctxList.Items))
-		for _, item := range ctxList.Items {
-			// only include non platform contexts
-			if item.Labels["camel.apache.org/context.type"] != v1alpha1.IntegrationContextTypePlatform {
-				names = append(names, item.Name)
-			}
-		}
-	}
-
-	for _, name := range names {
-		if err := command.delete(name); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (command *contextDeleteCommand) delete(name string) error {
-	ctx := v1alpha1.NewIntegrationContext(command.Namespace, name)
-
-	err := sdk.Get(&ctx)
-
-	// return a meaningful error if the context is not found
-	if err != nil && k8errors.IsNotFound(err) {
-		return fmt.Errorf("no integration context found with name \"%s\"", ctx.Name)
-	}
-
-	// fail otherwise
-	if err != nil {
-		return err
-	}
-
-	// check that it is not a platform one which is supposed to be "read only"
-	// thus not managed by the end user
-	if ctx.Labels["camel.apache.org/context.type"] == v1alpha1.IntegrationContextTypePlatform {
-		// skip platform contexts while deleting all contexts
-		if command.all {
-			return nil
-		}
-
-		return fmt.Errorf("integration context \"%s\" is not editable", ctx.Name)
-	}
-
-	err = sdk.Delete(&ctx)
-
-	if err != nil && !k8errors.IsNotFound(err) {
-		return fmt.Errorf("error deleting integration context \"%s\", %s", ctx.Name, err)
-	}
-	if err != nil && k8errors.IsNotFound(err) {
-		return fmt.Errorf("no integration context found with name \"%s\"", ctx.Name)
-	}
-
-	fmt.Printf("integration context \"%s\" has been deleted\n", ctx.Name)
-
-	return err
-}
diff --git a/pkg/client/cmd/context_get.go b/pkg/client/cmd/context_get.go
deleted file mode 100644
index f264b916..00000000
--- a/pkg/client/cmd/context_get.go
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cmd
-
-import (
-	"fmt"
-	"os"
-	"text/tabwriter"
-
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/spf13/cobra"
-)
-
-func newContextGetCmd(rootCmdOptions *RootCmdOptions) *cobra.Command {
-	impl := contextGetCommand{
-		RootCmdOptions: rootCmdOptions,
-	}
-
-	cmd := cobra.Command{
-		Use:   "get",
-		Short: "Get defined Integration Context",
-		Long:  `Get defined Integration Context.`,
-		RunE: func(cmd *cobra.Command, args []string) error {
-			if err := impl.validate(cmd, args); err != nil {
-				return err
-			}
-			if err := impl.run(); err != nil {
-				fmt.Println(err.Error())
-			}
-
-			return nil
-		},
-	}
-
-	cmd.Flags().BoolVar(&impl.user, "user", true, "Includes user contexts")
-	cmd.Flags().BoolVar(&impl.platform, v1alpha1.IntegrationContextTypePlatform, true, "Includes platform contexts")
-
-	return &cmd
-}
-
-type contextGetCommand struct {
-	*RootCmdOptions
-	user     bool
-	platform bool
-}
-
-func (command *contextGetCommand) validate(cmd *cobra.Command, args []string) error {
-	return nil
-
-}
-
-func (command *contextGetCommand) run() error {
-	ctxList := v1alpha1.NewIntegrationContextList()
-	if err := sdk.List(command.Namespace, &ctxList); err != nil {
-		return err
-	}
-
-	w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
-	fmt.Fprintln(w, "NAME\tTYPE\tSTATUS")
-	for _, ctx := range ctxList.Items {
-		t := ctx.Labels["camel.apache.org/context.type"]
-		u := command.user && t == "user"
-		p := command.platform && t == v1alpha1.IntegrationContextTypePlatform
-
-		if u || p {
-			fmt.Fprintf(w, "%s\t%s\t%s\n", ctx.Name, t, string(ctx.Status.Phase))
-		}
-	}
-	w.Flush()
-
-	return nil
-}
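
The table output in the deleted get commands is plain text/tabwriter from the standard library. A runnable sketch with invented rows:

    package main

    import (
        "fmt"
        "os"
        "text/tabwriter"
    )

    func main() {
        // Same writer parameters as the deleted command.
        w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
        fmt.Fprintln(w, "NAME\tTYPE\tSTATUS")
        fmt.Fprintf(w, "%s\t%s\t%s\n", "my-context", "user", "Ready")
        fmt.Fprintf(w, "%s\t%s\t%s\n", "platform-ctx", "platform", "Ready")
        w.Flush() // columns are only aligned once flushed
    }
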
diff --git a/pkg/client/cmd/delete.go b/pkg/client/cmd/delete.go
deleted file mode 100644
index 5c3f470d..00000000
--- a/pkg/client/cmd/delete.go
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cmd
-
-import (
-	"errors"
-	"fmt"
-	"strconv"
-
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/spf13/cobra"
-	k8errors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// newCmdDelete --
-func newCmdDelete(rootCmdOptions *RootCmdOptions) *cobra.Command {
-	impl := deleteCmdOptions{
-		RootCmdOptions: rootCmdOptions,
-	}
-	cmd := cobra.Command{
-		Use:   "delete [integration1] [integration2] ...",
-		Short: "Delete integrations deployed on Kubernetes",
-		RunE: func(cmd *cobra.Command, args []string) error {
-			if err := impl.validate(args); err != nil {
-				return err
-			}
-			if err := impl.run(args); err != nil {
-				fmt.Println(err.Error())
-			}
-
-			return nil
-		},
-	}
-
-	cmd.Flags().BoolVar(&impl.deleteAll, "all", false, "Delete all integrations")
-
-	return &cmd
-}
-
-type deleteCmdOptions struct {
-	*RootCmdOptions
-	deleteAll bool
-}
-
-func (command *deleteCmdOptions) validate(args []string) error {
-	if command.deleteAll && len(args) > 0 {
-		return errors.New("invalid combination: both all flag and named integrations are set")
-	}
-	if !command.deleteAll && len(args) == 0 {
-		return errors.New("invalid combination: neither all flag nor named integrations are set")
-	}
-
-	return nil
-}
-
-func (command *deleteCmdOptions) run(args []string) error {
-
-	if len(args) != 0 && !command.deleteAll {
-		for _, arg := range args {
-
-			err := DeleteIntegration(arg, command.Namespace)
-			if err != nil {
-				if k8errors.IsNotFound(err) {
-					fmt.Println("Integration " + arg + " not found. Skipped.")
-				} else {
-					return err
-				}
-			} else {
-				fmt.Println("Integration " + arg + " deleted")
-			}
-		}
-	} else if command.deleteAll {
-		integrationList := v1alpha1.IntegrationList{
-			TypeMeta: metav1.TypeMeta{
-				APIVersion: v1alpha1.SchemeGroupVersion.String(),
-				Kind:       v1alpha1.IntegrationKind,
-			},
-		}
-
-		//Looks like Operator SDK doesn't support deletion of all objects with one command
-		err := sdk.List(command.Namespace, &integrationList)
-		if err != nil {
-			return err
-		}
-		for _, integration := range integrationList.Items {
-			integration := integration // pin
-			err := sdk.Delete(&integration)
-			if err != nil {
-				return err
-			}
-		}
-		if len(integrationList.Items) == 0 {
-			fmt.Println("Nothing to delete")
-		} else {
-			fmt.Println(strconv.Itoa(len(integrationList.Items)) + " integration(s) deleted")
-		}
-	}
-
-	return nil
-}
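
The "integration := integration // pin" line in the deleted delete command is the classic loop-variable gotcha of the Go versions of this era (pre-1.22 semantics): taking the address of the range variable without a copy makes every pointer alias the same variable. A sketch of why the pin matters:

    package main

    import "fmt"

    type integration struct{ Name string }

    func main() {
        items := []integration{{"a"}, {"b"}, {"c"}}

        ptrs := make([]*integration, 0, len(items))
        for _, it := range items {
            it := it // pin: without this copy, every pointer would alias one loop variable
            ptrs = append(ptrs, &it)
        }
        for _, p := range ptrs {
            fmt.Println(p.Name) // a, b, c; without the pin (pre-1.22): c, c, c
        }
    }
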
diff --git a/pkg/client/cmd/get.go b/pkg/client/cmd/get.go
deleted file mode 100644
index d9e48510..00000000
--- a/pkg/client/cmd/get.go
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cmd
-
-import (
-	"fmt"
-	"os"
-	"text/tabwriter"
-
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/spf13/cobra"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-type getCmdOptions struct {
-	*RootCmdOptions
-}
-
-func newCmdGet(rootCmdOptions *RootCmdOptions) *cobra.Command {
-	options := getCmdOptions{
-		RootCmdOptions: rootCmdOptions,
-	}
-	cmd := cobra.Command{
-		Use:   "get",
-		Short: "Get all integrations deployed on Kubernetes",
-		Long:  `Get the status of all integrations deployed on Kubernetes.`,
-		RunE:  options.run,
-	}
-
-	return &cmd
-}
-
-func (o *getCmdOptions) run(cmd *cobra.Command, args []string) error {
-	integrationList := v1alpha1.IntegrationList{
-		TypeMeta: metav1.TypeMeta{
-			APIVersion: v1alpha1.SchemeGroupVersion.String(),
-			Kind:       "Integration",
-		},
-	}
-
-	namespace := o.Namespace
-
-	err := sdk.List(namespace, &integrationList)
-	if err != nil {
-		return err
-	}
-
-	w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
-	fmt.Fprintln(w, "NAME\tCONTEXT\tSTATUS")
-	for _, integration := range integrationList.Items {
-		fmt.Fprintf(w, "%s\t%s\t%s\n", integration.Name, integration.Status.Context, string(integration.Status.Phase))
-	}
-	w.Flush()
-
-	return nil
-}
diff --git a/pkg/client/cmd/install.go b/pkg/client/cmd/install.go
deleted file mode 100644
index 6abe4bcf..00000000
--- a/pkg/client/cmd/install.go
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cmd
-
-import (
-	"fmt"
-
-	"strings"
-	"time"
-
-	"github.com/apache/camel-k/pkg/install"
-	"github.com/apache/camel-k/pkg/util/kubernetes"
-	"github.com/operator-framework/operator-sdk/pkg/k8sclient"
-	"github.com/pkg/errors"
-	"github.com/spf13/cobra"
-	k8serrors "k8s.io/apimachinery/pkg/api/errors"
-)
-
-func newCmdInstall(rootCmdOptions *RootCmdOptions) *cobra.Command {
-	options := installCmdOptions{
-		RootCmdOptions: rootCmdOptions,
-	}
-	cmd := cobra.Command{
-		Use:   "install",
-		Short: "Install Camel K on a Kubernetes cluster",
-		Long:  `Installs Camel K on a Kubernetes or OpenShift cluster.`,
-		RunE:  options.install,
-	}
-
-	cmd.Flags().BoolVar(&options.clusterSetupOnly, "cluster-setup", false, "Execute cluster-wide operations only (may require admin rights)")
-	cmd.Flags().BoolVar(&options.skipClusterSetup, "skip-cluster-setup", false, "Skip the cluster-setup phase")
-	cmd.Flags().BoolVar(&options.exampleSetup, "example", false, "Install example integration")
-	cmd.Flags().StringVar(&options.registry, "registry", "", "A Docker registry that can be used to publish images")
-	cmd.Flags().StringVarP(&options.outputFormat, "output", "o", "", "Output format. One of: json|yaml")
-	cmd.Flags().StringVar(&options.organization, "organization", "", "An organization on the Docker registry that can be used to publish images")
-	cmd.Flags().StringVar(&options.pushSecret, "push-secret", "", "A secret used to push images to the Docker registry")
-	cmd.Flags().StringSliceVar(&options.repositories, "repository", nil, "Add a maven repository")
-	cmd.Flags().StringSliceVarP(&options.properties, "property", "p", nil, "Add a camel property")
-	cmd.Flags().StringVar(&options.camelVersion, "camel-version", "", "Set the camel version")
-
-	return &cmd
-}
-
-type installCmdOptions struct {
-	*RootCmdOptions
-	clusterSetupOnly bool
-	skipClusterSetup bool
-	exampleSetup     bool
-	registry         string
-	outputFormat     string
-	organization     string
-	pushSecret       string
-	camelVersion     string
-	repositories     []string
-	properties       []string
-}
-
-func (o *installCmdOptions) install(cmd *cobra.Command, args []string) error {
-	// Let's use a fast refresh period when running with the CLI
-	k8sclient.ResetCacheEvery(8 * time.Second)
-
-	var collection *kubernetes.Collection
-	if o.outputFormat != "" {
-		collection = kubernetes.NewCollection()
-	}
-
-	if !o.skipClusterSetup {
-		err := install.SetupClusterwideResourcesOrCollect(collection)
-		if err != nil && k8serrors.IsForbidden(err) {
-			fmt.Println("Current user is not authorized to create cluster-wide objects like custom resource definitions or cluster roles: ", err)
-
-			meg := `please login as cluster-admin and execute "kamel install --cluster-setup" to install cluster-wide resources (one-time operation)`
-			return errors.New(meg)
-		} else if err != nil {
-			return err
-		}
-	}
-
-	if o.clusterSetupOnly {
-		if collection == nil {
-			fmt.Println("Camel K cluster setup completed successfully")
-		}
-	} else {
-		namespace := o.Namespace
-
-		err := install.OperatorOrCollect(namespace, collection)
-		if err != nil {
-			return err
-		}
-
-		platform, err := install.PlatformOrCollect(namespace, o.registry, o.organization, o.pushSecret, collection)
-		if err != nil {
-			return err
-		}
-
-		if len(o.properties) > 0 {
-			platform.Spec.Build.Properties = make(map[string]string)
-
-			for _, property := range o.properties {
-				kv := strings.Split(property, "=")
-
-				if len(kv) == 2 {
-					platform.Spec.Build.Properties[kv[0]] = kv[1]
-				}
-			}
-		}
-		if len(o.repositories) > 0 {
-			platform.Spec.Build.Repositories = o.repositories
-		}
-		if o.camelVersion != "" {
-			platform.Spec.Build.CamelVersion = o.camelVersion
-		}
-
-		err = install.RuntimeObjectOrCollect(namespace, collection, platform)
-		if err != nil {
-			return err
-		}
-
-		if o.exampleSetup {
-			err = install.ExampleOrCollect(namespace, collection)
-			if err != nil {
-				return err
-			}
-		}
-
-		if collection == nil {
-			fmt.Println("Camel K installed in namespace", namespace)
-		}
-	}
-
-	if collection != nil {
-		return o.printOutput(collection)
-	}
-
-	return nil
-}
-
-func (o *installCmdOptions) printOutput(collection *kubernetes.Collection) error {
-	lst := collection.AsKubernetesList()
-	switch o.outputFormat {
-	case "yaml":
-		data, err := kubernetes.ToYAML(lst)
-		if err != nil {
-			return err
-		}
-		fmt.Print(string(data))
-	case "json":
-		data, err := kubernetes.ToJSON(lst)
-		if err != nil {
-			return err
-		}
-		fmt.Print(string(data))
-	default:
-		return errors.New("unknown output format: " + o.outputFormat)
-	}
-	return nil
-}
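
The deleted install command parses -p key=value pairs with strings.Split and keeps only exact two-element results, so a value that itself contains '=' is silently dropped. strings.SplitN bounds the split and preserves such values; a sketch of the more forgiving variant:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        props := []string{"camel.component.foo=bar", "query=a=b"}

        parsed := map[string]string{}
        for _, p := range props {
            kv := strings.SplitN(p, "=", 2) // split on the first '=' only
            if len(kv) == 2 {
                parsed[kv[0]] = kv[1]
            }
        }
        fmt.Println(parsed) // map[camel.component.foo:bar query:a=b]
    }
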
diff --git a/pkg/client/cmd/log.go b/pkg/client/cmd/log.go
deleted file mode 100644
index d7277186..00000000
--- a/pkg/client/cmd/log.go
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cmd
-
-import (
-	"fmt"
-
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/apache/camel-k/pkg/util/log"
-	"github.com/spf13/cobra"
-)
-
-func newCmdLog(rootCmdOptions *RootCmdOptions) *cobra.Command {
-	options := logCmdOptions{
-		RootCmdOptions: rootCmdOptions,
-	}
-
-	cmd := cobra.Command{
-		Use:   "log integration",
-		Short: "Print the logs of an integration",
-		Long:  `Print the logs of an integration.`,
-		Args:  options.validate,
-		RunE:  options.run,
-	}
-
-	// completion support
-	configureKnownCompletions(&cmd)
-
-	return &cmd
-}
-
-type logCmdOptions struct {
-	*RootCmdOptions
-}
-
-func (o *logCmdOptions) validate(cmd *cobra.Command, args []string) error {
-	if len(args) != 1 {
-		return fmt.Errorf("accepts 1 arg, received %d", len(args))
-	}
-
-	return nil
-}
-
-func (o *logCmdOptions) run(cmd *cobra.Command, args []string) error {
-	integration := v1alpha1.Integration{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       v1alpha1.IntegrationKind,
-			APIVersion: v1alpha1.SchemeGroupVersion.String(),
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: o.Namespace,
-			Name:      args[0],
-		},
-	}
-
-	if err := sdk.Get(&integration); err != nil {
-		return err
-	}
-	if err := log.Print(o.Context, &integration); err != nil {
-		return err
-	}
-
-	// Let's add a wait point, otherwise the script terminates
-	<-o.Context.Done()
-
-	return nil
-}
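
The deleted log command blocks on the root context after wiring up the log stream; otherwise the process would exit immediately. A dependency-free sketch of that wait, where a timed cancel stands in for the user's Ctrl+C:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    func main() {
        // Stand-in for the CLI's root context; the real one is cancelled
        // by signal handling at the top of the program.
        ctx, cancel := context.WithCancel(context.Background())
        go func() {
            time.Sleep(200 * time.Millisecond)
            cancel() // e.g. the user hit Ctrl+C
        }()

        fmt.Println("streaming logs...")
        <-ctx.Done() // block, like the deleted log command does
        fmt.Println("done:", ctx.Err())
    }
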
diff --git a/pkg/client/cmd/reset.go b/pkg/client/cmd/reset.go
deleted file mode 100644
index 6a456b7f..00000000
--- a/pkg/client/cmd/reset.go
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cmd
-
-import (
-	"fmt"
-
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/pkg/errors"
-	"github.com/spf13/cobra"
-)
-
-func newCmdReset(rootCmdOptions *RootCmdOptions) *cobra.Command {
-	options := resetCmdOptions{
-		RootCmdOptions: rootCmdOptions,
-	}
-	cmd := cobra.Command{
-		Use:   "reset",
-		Short: "Reset the Camel K installation",
-		Long:  `Reset the Camel K installation by deleting everything except current platform configuration.`,
-		RunE:  options.reset,
-	}
-
-	return &cmd
-}
-
-type resetCmdOptions struct {
-	*RootCmdOptions
-}
-
-func (o *resetCmdOptions) reset(cmd *cobra.Command, args []string) (err error) {
-	var n int
-	if n, err = o.deleteAllIntegrations(); err != nil {
-		return err
-	}
-	fmt.Printf("%d integrations deleted from namespace %s\n", n, o.Namespace)
-
-	if n, err = o.deleteAllIntegrationContexts(); err != nil {
-		return err
-	}
-	fmt.Printf("%d integration contexts deleted from namespace %s\n", n, o.Namespace)
-
-	if err = o.resetIntegrationPlatform(); err != nil {
-		return err
-	}
-	fmt.Println("Camel K platform has been reset successfully!")
-	return err
-}
-
-func (o *resetCmdOptions) deleteAllIntegrations() (int, error) {
-	list := v1alpha1.NewIntegrationList()
-	if err := sdk.List(o.Namespace, &list); err != nil {
-		return 0, errors.Wrap(err, fmt.Sprintf("could not retrieve integrations from namespace %s", o.Namespace))
-	}
-	for _, i := range list.Items {
-		it := i
-		if err := sdk.Delete(&it); err != nil {
-			return 0, errors.Wrap(err, fmt.Sprintf("could not delete integration %s from namespace %s", it.Name, it.Namespace))
-		}
-	}
-	return len(list.Items), nil
-}
-
-func (o *resetCmdOptions) deleteAllIntegrationContexts() (int, error) {
-	list := v1alpha1.NewIntegrationContextList()
-	if err := sdk.List(o.Namespace, &list); err != nil {
-		return 0, errors.Wrap(err, fmt.Sprintf("could not retrieve integration contexts from namespace %s", o.Namespace))
-	}
-	for _, i := range list.Items {
-		ictx := i
-		if err := sdk.Delete(&ictx); err != nil {
-			return 0, errors.Wrap(err, fmt.Sprintf("could not delete integration context %s from namespace %s", ictx.Name, ictx.Namespace))
-		}
-	}
-	return len(list.Items), nil
-}
-
-func (o *resetCmdOptions) resetIntegrationPlatform() error {
-	list := v1alpha1.NewIntegrationPlatformList()
-	if err := sdk.List(o.Namespace, &list); err != nil {
-		return errors.Wrap(err, fmt.Sprintf("could not retrieve integration platform from namespace %s", o.Namespace))
-	}
-	if len(list.Items) > 1 {
-		return errors.New(fmt.Sprintf("expected 1 integration platform in the namespace, found: %d", len(list.Items)))
-	} else if len(list.Items) == 0 {
-		return errors.New("no integration platforms found in the namespace: run \"kamel install\" to install the platform")
-	}
-	platform := list.Items[0]
-	// Let's reset the status
-	platform.Status = v1alpha1.IntegrationPlatformStatus{}
-	return sdk.Update(&platform)
-}
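
A small nit in the deleted reset command: errors.New(fmt.Sprintf(...)) is the long way to write fmt.Errorf, which reads better and also permits %w wrapping in later Go versions. Side by side:

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        n := 2
        // Equivalent results; the second form is the idiomatic one.
        err1 := errors.New(fmt.Sprintf("expected 1 integration platform in the namespace, found: %d", n))
        err2 := fmt.Errorf("expected 1 integration platform in the namespace, found: %d", n)
        fmt.Println(err1)
        fmt.Println(err2)
    }
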
diff --git a/pkg/client/cmd/root.go b/pkg/client/cmd/root.go
deleted file mode 100644
index 92a83e8c..00000000
--- a/pkg/client/cmd/root.go
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cmd
-
-import (
-	"context"
-
-	"github.com/apache/camel-k/pkg/util/kubernetes"
-	"github.com/pkg/errors"
-	"github.com/spf13/cobra"
-)
-
-const kamelCommandLongDescription = `
-Long:  "Apache Camel K (a.k.a. Kamel) is a lightweight integration framework
-built from Apache Camel that runs natively on Kubernetes and is
-specifically designed for serverless and microservice architectures.",,	
-`
-
-// RootCmdOptions --
-type RootCmdOptions struct {
-	Context    context.Context
-	KubeConfig string
-	Namespace  string
-}
-
-// NewKamelCommand --
-func NewKamelCommand(ctx context.Context) (*cobra.Command, error) {
-	options := RootCmdOptions{
-		Context: ctx,
-	}
-	var cmd = cobra.Command{
-		Use:                    "kamel",
-		Short:                  "Kamel is a awesome client tool for running Apache Camel integrations natively on Kubernetes",
-		Long:                   kamelCommandLongDescription,
-		BashCompletionFunction: bashCompletionFunction,
-		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
-			if options.Namespace == "" {
-				current, err := kubernetes.GetClientCurrentNamespace(options.KubeConfig)
-				if err != nil {
-					return errors.Wrap(err, "cannot get current namespace")
-				}
-				err = cmd.Flag("namespace").Value.Set(current)
-				if err != nil {
-					return err
-				}
-			}
-
-			// Initialize the Kubernetes client to allow using the operator-sdk
-			return kubernetes.InitKubeClient(options.KubeConfig)
-		},
-	}
-
-	cmd.PersistentFlags().StringVar(&options.KubeConfig, "config", "", "Path to the config file to use for CLI requests")
-	cmd.PersistentFlags().StringVarP(&options.Namespace, "namespace", "n", "", "Namespace to use for all operations")
-
-	cmd.AddCommand(newCmdCompletion(&cmd))
-	cmd.AddCommand(newCmdVersion())
-	cmd.AddCommand(newCmdRun(&options))
-	cmd.AddCommand(newCmdGet(&options))
-	cmd.AddCommand(newCmdDelete(&options))
-	cmd.AddCommand(newCmdInstall(&options))
-	cmd.AddCommand(newCmdLog(&options))
-	cmd.AddCommand(newCmdContext(&options))
-	cmd.AddCommand(newCmdReset(&options))
-
-	return &cmd, nil
-}
diff --git a/pkg/client/cmd/run.go b/pkg/client/cmd/run.go
deleted file mode 100644
index 376b91a6..00000000
--- a/pkg/client/cmd/run.go
+++ /dev/null
@@ -1,451 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cmd
-
-import (
-	"bytes"
-	"encoding/base64"
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"os"
-	"os/signal"
-	"path"
-	"regexp"
-	"strconv"
-	"strings"
-	"syscall"
-
-	"github.com/apache/camel-k/pkg/gzip"
-
-	"github.com/apache/camel-k/pkg/trait"
-	"github.com/apache/camel-k/pkg/util"
-
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/apache/camel-k/pkg/util/kubernetes"
-	"github.com/apache/camel-k/pkg/util/log"
-	"github.com/apache/camel-k/pkg/util/sync"
-	"github.com/apache/camel-k/pkg/util/watch"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/spf13/cobra"
-	k8serrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-var (
-	traitConfigRegexp = regexp.MustCompile(`^([a-z-]+)((?:\.[a-z-]+)+)=(.*)$`)
-)
-
-func newCmdRun(rootCmdOptions *RootCmdOptions) *cobra.Command {
-	options := runCmdOptions{
-		RootCmdOptions: rootCmdOptions,
-	}
-
-	cmd := cobra.Command{
-		Use:   "run [file to run]",
-		Short: "Run a integration on Kubernetes",
-		Long:  `Deploys and execute a integration pod on Kubernetes.`,
-		Args:  options.validateArgs,
-		RunE:  options.run,
-	}
-
-	cmd.Flags().StringVarP(&options.Runtime, "runtime", "r", "", "Runtime used by the integration")
-	cmd.Flags().StringVar(&options.IntegrationName, "name", "", "The integration name")
-	cmd.Flags().StringSliceVarP(&options.Dependencies, "dependency", "d", nil, "The integration dependency")
-	cmd.Flags().BoolVarP(&options.Wait, "wait", "w", false, "Waits for the integration to be running")
-	cmd.Flags().StringVarP(&options.IntegrationContext, "context", "x", "", "The contex used to run the integration")
-	cmd.Flags().StringSliceVarP(&options.Properties, "property", "p", nil, "Add a camel property")
-	cmd.Flags().StringSliceVar(&options.ConfigMaps, "configmap", nil, "Add a ConfigMap")
-	cmd.Flags().StringSliceVar(&options.Secrets, "secret", nil, "Add a Secret")
-	cmd.Flags().StringSliceVar(&options.Repositories, "repository", nil, "Add a maven repository")
-	cmd.Flags().BoolVar(&options.Logs, "logs", false, "Print integration logs")
-	cmd.Flags().BoolVar(&options.Sync, "sync", false, "Synchronize the local source file with the cluster, republishing at each change")
-	cmd.Flags().BoolVar(&options.Dev, "dev", false, "Enable Dev mode (equivalent to \"-w --logs --sync\")")
-	cmd.Flags().StringVar(&options.Profile, "profile", "", "Trait profile used for deployment")
-	cmd.Flags().StringSliceVarP(&options.Traits, "trait", "t", nil, "Configure a trait. E.g. \"-t service.enabled=false\"")
-	cmd.Flags().StringSliceVar(&options.LoggingLevels, "logging-level", nil, "Configure the logging level. "+
-		"E.g. \"--logging-level org.apache.camel=DEBUG\"")
-	cmd.Flags().StringVarP(&options.OutputFormat, "output", "o", "", "Output format. One of: json|yaml")
-	cmd.Flags().BoolVar(&options.Compression, "compression", false, "Enable store source as a compressed binary blob")
-	cmd.Flags().StringSliceVar(&options.Resources, "resource", nil, "Add a resource")
-
-	// completion support
-	configureKnownCompletions(&cmd)
-
-	return &cmd
-}
-
-type runCmdOptions struct {
-	*RootCmdOptions
-	Compression        bool
-	Wait               bool
-	Logs               bool
-	Sync               bool
-	Dev                bool
-	IntegrationContext string
-	Runtime            string
-	IntegrationName    string
-	Profile            string
-	OutputFormat       string
-	Resources          []string
-	Dependencies       []string
-	Properties         []string
-	ConfigMaps         []string
-	Secrets            []string
-	Repositories       []string
-	Traits             []string
-	LoggingLevels      []string
-}
-
-func (o *runCmdOptions) validateArgs(cmd *cobra.Command, args []string) error {
-	if len(args) < 1 {
-		return errors.New("accepts at least 1 arg, received 0")
-	}
-	if len(args) > 1 && o.IntegrationName == "" {
-		return errors.New("integration name is mandatory when using multiple sources")
-	}
-
-	for _, fileName := range args {
-		if !strings.HasPrefix(fileName, "http://") && !strings.HasPrefix(fileName, "https://") {
-			if _, err := os.Stat(fileName); err != nil && os.IsNotExist(err) {
-				return errors.Wrap(err, "file "+fileName+" does not exist")
-			} else if err != nil {
-				return errors.Wrap(err, "error while accessing file "+fileName)
-			}
-		} else {
-			resp, err := http.Get(fileName)
-			if err != nil {
-				return errors.Wrap(err, "The URL provided is not reachable")
-			} else if resp.StatusCode != 200 {
-				return errors.New("The URL provided is not reachable " + fileName + " The error code returned is " + strconv.Itoa(resp.StatusCode))
-			}
-		}
-	}
-
-	return nil
-}
-
-func (o *runCmdOptions) run(cmd *cobra.Command, args []string) error {
-	catalog := trait.NewCatalog()
-	tp := catalog.ComputeTraitsProperties()
-	for _, t := range o.Traits {
-		kv := strings.SplitN(t, "=", 2)
-
-		if !util.StringSliceExists(tp, kv[0]) {
-			fmt.Printf("Error: %s is not a valid trait property\n", t)
-			return nil
-		}
-	}
-
-	integration, err := o.createIntegration(args)
-	if err != nil {
-		return err
-	}
-
-	if o.Dev {
-		c := make(chan os.Signal)
-		signal.Notify(c, os.Interrupt, syscall.SIGTERM)
-		go func() {
-			<-c
-			fmt.Printf("Run integration terminating\n")
-			err := DeleteIntegration(integration.Name, integration.Namespace)
-			if err != nil {
-				fmt.Println(err)
-			}
-			os.Exit(1)
-		}()
-	}
-
-	if o.Sync || o.Dev {
-		err = o.syncIntegration(args)
-		if err != nil {
-			return err
-		}
-	}
-	if o.Wait || o.Dev {
-		err = o.waitForIntegrationReady(integration)
-		if err != nil {
-			return err
-		}
-	}
-	if o.Logs || o.Dev {
-		err = log.Print(o.Context, integration)
-		if err != nil {
-			return err
-		}
-	}
-
-	if o.Sync && !o.Logs && !o.Dev {
-		// Let's add a wait point, otherwise the script terminates
-		<-o.Context.Done()
-	}
-	return nil
-}
-
-func (o *runCmdOptions) waitForIntegrationReady(integration *v1alpha1.Integration) error {
-	handler := func(i *v1alpha1.Integration) bool {
-		//
-		// TODO when we add health checks, we should wait until they are passed
-		//
-		if i.Status.Phase != "" {
-			fmt.Println("integration \""+integration.Name+"\" in phase", i.Status.Phase)
-
-			if i.Status.Phase == v1alpha1.IntegrationPhaseRunning {
-				// TODO display some error info when available in the status
-				return false
-			}
-
-			if i.Status.Phase == v1alpha1.IntegrationPhaseError {
-				fmt.Println("integration deployment failed")
-				return false
-			}
-		}
-
-		return true
-	}
-
-	return watch.HandleStateChanges(o.Context, integration, handler)
-}
-
-func (o *runCmdOptions) syncIntegration(sources []string) error {
-	for _, s := range sources {
-		changes, err := sync.File(o.Context, s)
-		if err != nil {
-			return err
-		}
-		go func() {
-			for {
-				select {
-				case <-o.Context.Done():
-					return
-				case <-changes:
-					_, err := o.updateIntegrationCode(sources)
-					if err != nil {
-						logrus.Error("Unable to sync integration: ", err)
-					}
-				}
-			}
-		}()
-	}
-
-	return nil
-}
-
-func (o *runCmdOptions) createIntegration(sources []string) (*v1alpha1.Integration, error) {
-	return o.updateIntegrationCode(sources)
-}
-
-func (o *runCmdOptions) updateIntegrationCode(sources []string) (*v1alpha1.Integration, error) {
-	namespace := o.Namespace
-
-	name := ""
-	if o.IntegrationName != "" {
-		name = o.IntegrationName
-		name = kubernetes.SanitizeName(name)
-	} else if len(sources) == 1 {
-		name = kubernetes.SanitizeName(sources[0])
-	}
-
-	if name == "" {
-		return nil, errors.New("unable to determine integration name")
-	}
-
-	integration := v1alpha1.Integration{
-		TypeMeta: v1.TypeMeta{
-			Kind:       v1alpha1.IntegrationKind,
-			APIVersion: v1alpha1.SchemeGroupVersion.String(),
-		},
-		ObjectMeta: v1.ObjectMeta{
-			Namespace: namespace,
-			Name:      name,
-		},
-		Spec: v1alpha1.IntegrationSpec{
-			Dependencies:  make([]string, 0, len(o.Dependencies)),
-			Context:       o.IntegrationContext,
-			Configuration: make([]v1alpha1.ConfigurationSpec, 0),
-			Repositories:  o.Repositories,
-			Profile:       v1alpha1.TraitProfileByName(o.Profile),
-		},
-	}
-
-	for _, source := range sources {
-		data, err := o.loadData(source, o.Compression)
-		if err != nil {
-			return nil, err
-		}
-
-		integration.Spec.AddSources(v1alpha1.SourceSpec{
-			DataSpec: v1alpha1.DataSpec{
-				Name:        path.Base(source),
-				Content:     data,
-				Compression: o.Compression,
-			},
-		})
-	}
-
-	for _, resource := range o.Resources {
-		data, err := o.loadData(resource, o.Compression)
-		if err != nil {
-			return nil, err
-		}
-
-		integration.Spec.AddResources(v1alpha1.ResourceSpec{
-			DataSpec: v1alpha1.DataSpec{
-				Name:        path.Base(resource),
-				Content:     data,
-				Compression: o.Compression,
-			},
-		})
-	}
-
-	if o.Runtime != "" {
-		integration.Spec.AddDependency("runtime:" + o.Runtime)
-	}
-
-	for _, item := range o.Dependencies {
-		integration.Spec.AddDependency(item)
-	}
-	for _, item := range o.Properties {
-		integration.Spec.AddConfiguration("property", item)
-	}
-	for _, item := range o.LoggingLevels {
-		integration.Spec.AddConfiguration("property", "logging.level."+item)
-	}
-	for _, item := range o.ConfigMaps {
-		integration.Spec.AddConfiguration("configmap", item)
-	}
-	for _, item := range o.Secrets {
-		integration.Spec.AddConfiguration("secret", item)
-	}
-
-	for _, traitConf := range o.Traits {
-		if err := o.configureTrait(&integration, traitConf); err != nil {
-			return nil, err
-		}
-	}
-
-	switch o.OutputFormat {
-	case "":
-		// continue..
-	case "yaml":
-		data, err := kubernetes.ToYAML(&integration)
-		if err != nil {
-			return nil, err
-		}
-		fmt.Print(string(data))
-		return nil, nil
-
-	case "json":
-		data, err := kubernetes.ToJSON(&integration)
-		if err != nil {
-			return nil, err
-		}
-		fmt.Print(string(data))
-		return nil, nil
-
-	default:
-		return nil, fmt.Errorf("invalid output format option '%s', should be one of: yaml|json", o.OutputFormat)
-	}
-
-	existed := false
-	err := sdk.Create(&integration)
-	if err != nil && k8serrors.IsAlreadyExists(err) {
-		existed = true
-		clone := integration.DeepCopy()
-		err = sdk.Get(clone)
-		if err != nil {
-			return nil, err
-		}
-		integration.ResourceVersion = clone.ResourceVersion
-		err = sdk.Update(&integration)
-	}
-
-	if err != nil {
-		return nil, err
-	}
-
-	if !existed {
-		fmt.Printf("integration \"%s\" created\n", name)
-	} else {
-		fmt.Printf("integration \"%s\" updated\n", name)
-	}
-	return &integration, nil
-}
-
-func (*runCmdOptions) loadData(fileName string, compress bool) (string, error) {
-	var content []byte
-	var err error
-
-	if !strings.HasPrefix(fileName, "http://") && !strings.HasPrefix(fileName, "https://") {
-		content, err = ioutil.ReadFile(fileName)
-		if err != nil {
-			return "", err
-		}
-	} else {
-		resp, err := http.Get(fileName)
-		if err != nil {
-			return "", err
-		}
-		defer resp.Body.Close()
-
-		content, err = ioutil.ReadAll(resp.Body)
-		if err != nil {
-			return "", err
-		}
-	}
-
-	if compress {
-		var b bytes.Buffer
-
-		if err := gzip.Compress(&b, content); err != nil {
-			return "", err
-		}
-
-		return base64.StdEncoding.EncodeToString(b.Bytes()), nil
-	}
-
-	return string(content), nil
-}
-
-func (*runCmdOptions) configureTrait(integration *v1alpha1.Integration, config string) error {
-	if integration.Spec.Traits == nil {
-		integration.Spec.Traits = make(map[string]v1alpha1.IntegrationTraitSpec)
-	}
-
-	parts := traitConfigRegexp.FindStringSubmatch(config)
-	if len(parts) < 4 {
-		return errors.New("unrecognized config format (expected \"<trait>.<prop>=<val>\"): " + config)
-	}
-	traitID := parts[1]
-	prop := parts[2][1:]
-	val := parts[3]
-
-	spec, ok := integration.Spec.Traits[traitID]
-	if !ok {
-		spec = v1alpha1.IntegrationTraitSpec{
-			Configuration: make(map[string]string),
-		}
-	}
-
-	spec.Configuration[prop] = val
-	integration.Spec.Traits[traitID] = spec
-	return nil
-}
diff --git a/pkg/client/cmd/util.go b/pkg/client/cmd/util.go
deleted file mode 100644
index e99aac99..00000000
--- a/pkg/client/cmd/util.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cmd
-
-import (
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// DeleteIntegration --
-func DeleteIntegration(name string, namespace string) error {
-	integration := v1alpha1.Integration{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       v1alpha1.IntegrationKind,
-			APIVersion: v1alpha1.SchemeGroupVersion.String(),
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: namespace,
-			Name:      name,
-		},
-	}
-	return sdk.Delete(&integration)
-}
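
The DeleteIntegration helper survives the move to pkg/cmd, but its new body is not shown in this diff.
A hypothetical reconstruction, with the argument order inferred from the new call site further down in
pkg/cmd/delete.go (DeleteIntegration(command.Context, c, arg, command.Namespace)):

    // hypothetical reconstruction -- signature inferred from the call site, body mirrors the old one
    func DeleteIntegration(ctx context.Context, c client.Client, name string, namespace string) error {
        integration := v1alpha1.Integration{
            TypeMeta: metav1.TypeMeta{
                Kind:       v1alpha1.IntegrationKind,
                APIVersion: v1alpha1.SchemeGroupVersion.String(),
            },
            ObjectMeta: metav1.ObjectMeta{
                Namespace: namespace,
                Name:      name,
            },
        }
        // the global sdk.Delete(&integration) becomes a call on the injected client
        return c.Delete(ctx, &integration)
    }
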
diff --git a/pkg/client/cmd/completion.go b/pkg/cmd/completion.go
similarity index 100%
rename from pkg/client/cmd/completion.go
rename to pkg/cmd/completion.go
diff --git a/pkg/cmd/completion_bash.go b/pkg/cmd/completion_bash.go
new file mode 100644
index 00000000..836b3912
--- /dev/null
+++ b/pkg/cmd/completion_bash.go
@@ -0,0 +1,251 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/apache/camel-k/pkg/trait"
+
+	"github.com/apache/camel-k/pkg/util/camel"
+	"github.com/spf13/cobra"
+)
+
+// ******************************
+//
+//
+//
+// ******************************
+
+const bashCompletionCmdLongDescription = `
+To load completion, run
+
+. <(kamel completion bash)
+
+To configure your bash shell to load completions for each session, add to your bashrc
+
+# ~/.bashrc or ~/.profile
+. <(kamel completion bash)
+`
+
+var bashCompletionFunction = `
+__kamel_dependency_type() {
+    case ${cur} in
+    c*)
+        local type_list="` + computeCamelDependencies() + `"
+        COMPREPLY=( $( compgen -W "${type_list}" -- "$cur") )
+        ;;
+    m*)
+        local type_list="mvn:"
+        COMPREPLY=( $( compgen -W "${type_list}" -- "$cur") )
+        compopt -o nospace
+        ;;
+    f*)
+        local type_list="file:"
+        COMPREPLY=( $( compgen -W "${type_list}" -- "$cur") )
+        compopt -o nospace
+        ;;
+    *)
+        local type_list="camel mvn: file:"
+        COMPREPLY=( $( compgen -W "${type_list}" -- "$cur") )
+        compopt -o nospace
+    esac
+}
+
+__kamel_traits() {
+    local type_list="` + strings.Join(trait.NewCatalog(context.TODO(), nil).ComputeTraitsProperties(), " ") + `"
+    COMPREPLY=( $( compgen -W "${type_list}" -- "$cur") )
+    compopt -o nospace
+}
+
+__kamel_languages() {
+    local type_list="js groovy kotlin java xml"
+    COMPREPLY=( $( compgen -W "${type_list}" -- "$cur") )
+}
+
+__kamel_runtimes() {
+    local type_list="jvm groovy kotlin"
+    COMPREPLY=( $( compgen -W "${type_list}" -- "$cur") )
+}
+
+__kamel_kubectl_get_configmap() {
+    local template
+    local kubectl_out
+
+    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
+
+    if kubectl_out=$(kubectl get -o template --template="${template}" configmap 2>/dev/null); then
+        COMPREPLY=( $( compgen -W "${kubectl_out}" -- "$cur" ) )
+    fi
+}
+
+__kamel_kubectl_get_secret() {
+    local template
+    local kubectl_out
+
+    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
+
+    if kubectl_out=$(kubectl get -o template --template="${template}" secret 2>/dev/null); then
+        COMPREPLY=( $( compgen -W "${kubectl_out}" -- "$cur" ) )
+    fi
+}
+
+__kamel_kubectl_get_integrations() {
+    local template
+    local kubectl_out
+
+    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
+
+    if kubectl_out=$(kubectl get -o template --template="${template}" integrations 2>/dev/null); then
+        COMPREPLY=( $( compgen -W "${kubectl_out}" -- "$cur" ) )
+    fi
+}
+
+__kamel_kubectl_get_integrationcontexts() {
+    local template
+    local kubectl_out
+
+    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
+
+    if kubectl_out=$(kubectl get -o template --template="${template}" integrationcontexts 2>/dev/null); then
+        COMPREPLY=( $( compgen -W "${kubectl_out}" -- "$cur" ) )
+    fi
+}
+
+__kamel_kubectl_get_user_integrationcontexts() {
+    local template
+    local kubectl_out
+
+    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
+
+    if kubectl_out=$(kubectl get -l camel.apache.org/context.type=user -o template --template="${template}" integrationcontexts 2>/dev/null); then
+        COMPREPLY=( $( compgen -W "${kubectl_out}" -- "$cur" ) )
+    fi
+}
+
+__custom_func() {
+    case ${last_command} in
+        kamel_delete)
+            __kamel_kubectl_get_integrations
+            return
+            ;;
+        kamel_log)
+            __kamel_kubectl_get_integrations
+            return
+            ;;
+        kamel_context_delete)
+            __kamel_kubectl_get_user_integrationcontexts
+            return
+            ;;
+        *)
+            ;;
+    esac
+}
+`
+
+// ******************************
+//
+// COMMAND
+//
+// ******************************
+
+func newCmdCompletionBash(root *cobra.Command) *cobra.Command {
+	return &cobra.Command{
+		Use:   "bash",
+		Short: "Generates bash completion scripts",
+		Long:  bashCompletionCmdLongDescription,
+		Run: func(cmd *cobra.Command, args []string) {
+			err := root.GenBashCompletion(os.Stdout)
+			if err != nil {
+				fmt.Print(err.Error())
+			}
+		},
+	}
+}
+
+func configureKnownBashCompletions(command *cobra.Command) {
+	configureBashAnnotationForFlag(
+		command,
+		"dependency",
+		map[string][]string{
+			cobra.BashCompCustom: {"__kamel_dependency_type"},
+		},
+	)
+	configureBashAnnotationForFlag(
+		command,
+		"configmap",
+		map[string][]string{
+			cobra.BashCompCustom: {"__kamel_kubectl_get_configmap"},
+		},
+	)
+	configureBashAnnotationForFlag(
+		command,
+		"secret",
+		map[string][]string{
+			cobra.BashCompCustom: {"__kamel_kubectl_get_secret"},
+		},
+	)
+	configureBashAnnotationForFlag(
+		command,
+		"context",
+		map[string][]string{
+			cobra.BashCompCustom: {"__kamel_kubectl_get_user_integrationcontexts"},
+		},
+	)
+	configureBashAnnotationForFlag(
+		command,
+		"language",
+		map[string][]string{
+			cobra.BashCompCustom: {"__kamel_languages"},
+		},
+	)
+	configureBashAnnotationForFlag(
+		command,
+		"runtime",
+		map[string][]string{
+			cobra.BashCompCustom: {"__kamel_runtimes"},
+		},
+	)
+	configureBashAnnotationForFlag(
+		command,
+		"trait",
+		map[string][]string{
+			cobra.BashCompCustom: {"__kamel_traits"},
+		},
+	)
+}
+
+func configureBashAnnotationForFlag(command *cobra.Command, flagName string, annotations map[string][]string) {
+	flag := command.Flag(flagName)
+	if flag != nil {
+		flag.Annotations = annotations
+	}
+}
+
+func computeCamelDependencies() string {
+	results := make([]string, 0, len(camel.Runtime.Artifacts))
+
+	for k := range camel.Runtime.Artifacts {
+		results = append(results, k)
+	}
+
+	return strings.Join(results, " ")
+}
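
One detail worth noting in computeCamelDependencies: ranging over a Go map yields keys in a
nondeterministic order, so the completion word list embedded in the bash function comes out in a
different order on every build. Completion still works (compgen does the matching), but if stable
output were wanted, a sort would do it -- a sketch, not part of this PR:

    // hypothetical variant with deterministic ordering; would need "sort" in the imports
    results := make([]string, 0, len(camel.Runtime.Artifacts))
    for k := range camel.Runtime.Artifacts {
        results = append(results, k)
    }
    sort.Strings(results) // map iteration order is random; sorting makes the list reproducible
    return strings.Join(results, " ")
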
diff --git a/pkg/client/cmd/completion_zsh.go b/pkg/cmd/completion_zsh.go
similarity index 100%
rename from pkg/client/cmd/completion_zsh.go
rename to pkg/cmd/completion_zsh.go
diff --git a/pkg/client/cmd/context.go b/pkg/cmd/context.go
similarity index 100%
rename from pkg/client/cmd/context.go
rename to pkg/cmd/context.go
diff --git a/pkg/cmd/context_create.go b/pkg/cmd/context_create.go
new file mode 100644
index 00000000..7090d9b8
--- /dev/null
+++ b/pkg/cmd/context_create.go
@@ -0,0 +1,175 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/apache/camel-k/pkg/util"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/util/kubernetes"
+
+	"github.com/spf13/cobra"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// newContextCreateCmd --
+func newContextCreateCmd(rootCmdOptions *RootCmdOptions) *cobra.Command {
+	impl := &contextCreateCommand{
+		RootCmdOptions: rootCmdOptions,
+	}
+
+	cmd := cobra.Command{
+		Use:   "create",
+		Short: "Create an Integration Context",
+		Long:  `Create an Integration Context.`,
+		Args:  impl.validateArgs,
+		RunE:  impl.run,
+	}
+
+	cmd.Flags().StringVarP(&impl.runtime, "runtime", "r", "jvm", "Runtime provided by the context")
+	cmd.Flags().StringSliceVarP(&impl.dependencies, "dependency", "d", nil, "Add a dependency")
+	cmd.Flags().StringSliceVarP(&impl.properties, "property", "p", nil, "Add a Camel property")
+	cmd.Flags().StringSliceVar(&impl.configmaps, "configmap", nil, "Add a ConfigMap")
+	cmd.Flags().StringSliceVar(&impl.secrets, "secret", nil, "Add a Secret")
+	cmd.Flags().StringSliceVar(&impl.Repositories, "repository", nil, "Add a Maven repository")
+
+	// completion support
+	configureKnownCompletions(&cmd)
+
+	return &cmd
+}
+
+type contextCreateCommand struct {
+	*RootCmdOptions
+
+	runtime      string
+	dependencies []string
+	properties   []string
+	configmaps   []string
+	secrets      []string
+	Repositories []string
+}
+
+func (command *contextCreateCommand) validateArgs(cmd *cobra.Command, args []string) error {
+	if len(args) != 1 {
+		return errors.New("accepts 1 arg, received " + strconv.Itoa(len(args)))
+	}
+
+	return nil
+}
+
+func (command *contextCreateCommand) run(cmd *cobra.Command, args []string) error {
+	c, err := command.GetCmdClient()
+	if err != nil {
+		return err
+	}
+	ctx := v1alpha1.NewIntegrationContext(command.Namespace, args[0])
+	key := k8sclient.ObjectKey{
+		Namespace: command.Namespace,
+		Name:      args[0],
+	}
+	if err := c.Get(command.Context, key, &ctx); err == nil {
+		// the integration context already exists, let's check that it is
+		// not a platform one which is supposed to be "read only"
+
+		if ctx.Labels["camel.apache.org/context.type"] == v1alpha1.IntegrationContextTypePlatform {
+			fmt.Printf("integration context \"%s\" is not editable\n", ctx.Name)
+			return nil
+		}
+	}
+
+	ctx = v1alpha1.NewIntegrationContext(command.Namespace, kubernetes.SanitizeName(args[0]))
+	ctx.Labels = map[string]string{
+		"camel.apache.org/context.type": "user",
+	}
+	ctx.Spec = v1alpha1.IntegrationContextSpec{
+		Dependencies:  make([]string, 0, len(command.dependencies)),
+		Configuration: make([]v1alpha1.ConfigurationSpec, 0),
+		Repositories:  command.Repositories,
+	}
+
+	for _, item := range command.dependencies {
+		switch {
+		case strings.HasPrefix(item, "mvn:"):
+			ctx.Spec.Dependencies = append(ctx.Spec.Dependencies, item)
+		case strings.HasPrefix(item, "file:"):
+			ctx.Spec.Dependencies = append(ctx.Spec.Dependencies, item)
+		case strings.HasPrefix(item, "camel-"):
+			ctx.Spec.Dependencies = append(ctx.Spec.Dependencies, "camel:"+strings.TrimPrefix(item, "camel-"))
+		}
+	}
+
+	// jvm runtime required by default
+	util.StringSliceUniqueAdd(&ctx.Spec.Dependencies, "runtime:jvm")
+
+	if command.runtime != "" {
+		util.StringSliceUniqueAdd(&ctx.Spec.Dependencies, "runtime:"+command.runtime)
+	}
+
+	for _, item := range command.properties {
+		ctx.Spec.Configuration = append(ctx.Spec.Configuration, v1alpha1.ConfigurationSpec{
+			Type:  "property",
+			Value: item,
+		})
+	}
+	for _, item := range command.configmaps {
+		ctx.Spec.Configuration = append(ctx.Spec.Configuration, v1alpha1.ConfigurationSpec{
+			Type:  "configmap",
+			Value: item,
+		})
+	}
+	for _, item := range command.secrets {
+		ctx.Spec.Configuration = append(ctx.Spec.Configuration, v1alpha1.ConfigurationSpec{
+			Type:  "secret",
+			Value: item,
+		})
+	}
+
+	existed := false
+	err = c.Create(command.Context, &ctx)
+	if err != nil && k8serrors.IsAlreadyExists(err) {
+		existed = true
+		clone := ctx.DeepCopy()
+		err = c.Get(command.Context, key, clone)
+		if err != nil {
+			fmt.Print(err.Error())
+			return nil
+		}
+		ctx.ResourceVersion = clone.ResourceVersion
+		err = c.Update(command.Context, &ctx)
+	}
+
+	if err != nil {
+		fmt.Print(err.Error())
+		return nil
+	}
+
+	if !existed {
+		fmt.Printf("integration context \"%s\" created\n", ctx.Name)
+	} else {
+		fmt.Printf("integration context \"%s\" updated\n", ctx.Name)
+	}
+
+	return nil
+}
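
The tail of run() above is the create-or-update idiom this PR uses in several commands: try Create,
and on IsAlreadyExists fetch the live object, carry its ResourceVersion over to the desired state,
then Update. Extracted into a standalone sketch (a hypothetical helper, not one that exists in the
codebase; ctx, c, key and the imports are as in the file above):

    // hypothetical helper illustrating the pattern, specialized to IntegrationContext
    func createOrUpdateContext(ctx context.Context, c client.Client, key k8sclient.ObjectKey, ictx *v1alpha1.IntegrationContext) (created bool, err error) {
        if err = c.Create(ctx, ictx); err == nil {
            return true, nil
        }
        if !k8serrors.IsAlreadyExists(err) {
            return false, err
        }
        clone := ictx.DeepCopy()
        if err = c.Get(ctx, key, clone); err != nil {
            return false, err
        }
        // Update is an optimistic-concurrency write: the server's ResourceVersion must be carried over
        ictx.ResourceVersion = clone.ResourceVersion
        return false, c.Update(ctx, ictx)
    }
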
diff --git a/pkg/cmd/context_delete.go b/pkg/cmd/context_delete.go
new file mode 100644
index 00000000..6306a144
--- /dev/null
+++ b/pkg/cmd/context_delete.go
@@ -0,0 +1,152 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"errors"
+	"fmt"
+
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/spf13/cobra"
+
+	k8errors "k8s.io/apimachinery/pkg/api/errors"
+)
+
+func newContextDeleteCmd(rootCmdOptions *RootCmdOptions) *cobra.Command {
+	impl := contextDeleteCommand{
+		RootCmdOptions: rootCmdOptions,
+	}
+
+	cmd := cobra.Command{
+		Use:   "delete",
+		Short: "Delete an Integration Context",
+		Long:  `Delete an Integration Context.`,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if err := impl.validate(args); err != nil {
+				return err
+			}
+			if err := impl.run(args); err != nil {
+				fmt.Println(err.Error())
+			}
+
+			return nil
+		},
+	}
+
+	cmd.Flags().BoolVar(&impl.all, "all", false, "Delete all integration contexts")
+
+	return &cmd
+}
+
+type contextDeleteCommand struct {
+	*RootCmdOptions
+	all bool
+}
+
+func (command *contextDeleteCommand) validate(args []string) error {
+	if command.all && len(args) > 0 {
+		return errors.New("invalid combination: both all flag and named contexts are set")
+	}
+	if !command.all && len(args) == 0 {
+		return errors.New("invalid combination: neither all flag nor named contexts are set")
+	}
+
+	return nil
+}
+
+func (command *contextDeleteCommand) run(args []string) error {
+	names := args
+
+	c, err := command.GetCmdClient()
+	if err != nil {
+		return err
+	}
+
+	if command.all {
+		ctxList := v1alpha1.NewIntegrationContextList()
+		if err := c.List(command.Context, &k8sclient.ListOptions{Namespace: command.Namespace}, &ctxList); err != nil {
+			return err
+		}
+
+		names = make([]string, 0, len(ctxList.Items))
+		for _, item := range ctxList.Items {
+			// only include non platform contexts
+			if item.Labels["camel.apache.org/context.type"] != v1alpha1.IntegrationContextTypePlatform {
+				names = append(names, item.Name)
+			}
+		}
+	}
+
+	for _, name := range names {
+		if err := command.delete(name); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (command *contextDeleteCommand) delete(name string) error {
+	ctx := v1alpha1.NewIntegrationContext(command.Namespace, name)
+	key := k8sclient.ObjectKey{
+		Namespace: command.Namespace,
+		Name:      name,
+	}
+	c, err := command.GetCmdClient()
+	if err != nil {
+		return err
+	}
+
+	err = c.Get(command.Context, key, &ctx)
+
+	// pass through if the context is not found
+	if err != nil && k8errors.IsNotFound(err) {
+	// return an explicit error if the context is not found
+	}
+
+	// fail otherwise
+	if err != nil {
+		return err
+	}
+
+	// check that it is not a platform one which is supposed to be "read only"
+	// thus not managed by the end user
+	if ctx.Labels["camel.apache.org/context.type"] == v1alpha1.IntegrationContextTypePlatform {
+		// skip platform contexts while deleting all contexts
+		if command.all {
+			return nil
+		}
+
+		return fmt.Errorf("integration context \"%s\" is not editable", ctx.Name)
+	}
+
+	err = c.Delete(command.Context, &ctx)
+
+	if err != nil && !k8errors.IsNotFound(err) {
+		return fmt.Errorf("error deleting integration context \"%s\", %s", ctx.Name, err)
+	}
+	if err != nil && k8errors.IsNotFound(err) {
+		return fmt.Errorf("no integration context found with name \"%s\"", ctx.Name)
+	}
+
+	fmt.Printf("integration context \"%s\" has been deleted\n", ctx.Name)
+
+	return err
+}
diff --git a/pkg/cmd/context_get.go b/pkg/cmd/context_get.go
new file mode 100644
index 00000000..eab23908
--- /dev/null
+++ b/pkg/cmd/context_get.go
@@ -0,0 +1,93 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"fmt"
+	"os"
+	"text/tabwriter"
+
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/spf13/cobra"
+)
+
+func newContextGetCmd(rootCmdOptions *RootCmdOptions) *cobra.Command {
+	impl := contextGetCommand{
+		RootCmdOptions: rootCmdOptions,
+	}
+
+	cmd := cobra.Command{
+		Use:   "get",
+		Short: "Get defined Integration Contexts",
+		Long:  `Get defined Integration Contexts.`,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if err := impl.validate(cmd, args); err != nil {
+				return err
+			}
+			if err := impl.run(); err != nil {
+				fmt.Println(err.Error())
+			}
+
+			return nil
+		},
+	}
+
+	cmd.Flags().BoolVar(&impl.user, "user", true, "Includes user contexts")
+	cmd.Flags().BoolVar(&impl.platform, v1alpha1.IntegrationContextTypePlatform, true, "Includes platform contexts")
+
+	return &cmd
+}
+
+type contextGetCommand struct {
+	*RootCmdOptions
+	user     bool
+	platform bool
+}
+
+func (command *contextGetCommand) validate(cmd *cobra.Command, args []string) error {
+	return nil
+}
+
+func (command *contextGetCommand) run() error {
+	ctxList := v1alpha1.NewIntegrationContextList()
+	c, err := command.GetCmdClient()
+	if err != nil {
+		return err
+	}
+	if err := c.List(command.Context, &k8sclient.ListOptions{Namespace: command.Namespace}, &ctxList); err != nil {
+		return err
+	}
+
+	w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
+	fmt.Fprintln(w, "NAME\tTYPE\tSTATUS")
+	for _, ctx := range ctxList.Items {
+		t := ctx.Labels["camel.apache.org/context.type"]
+		u := command.user && t == "user"
+		p := command.platform && t == v1alpha1.IntegrationContextTypePlatform
+
+		if u || p {
+			fmt.Fprintf(w, "%s\t%s\t%s\n", ctx.Name, t, string(ctx.Status.Phase))
+		}
+	}
+	w.Flush()
+
+	return nil
+}
diff --git a/pkg/cmd/delete.go b/pkg/cmd/delete.go
new file mode 100644
index 00000000..15378d75
--- /dev/null
+++ b/pkg/cmd/delete.go
@@ -0,0 +1,120 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/spf13/cobra"
+	k8errors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// newCmdDelete --
+func newCmdDelete(rootCmdOptions *RootCmdOptions) *cobra.Command {
+	impl := deleteCmdOptions{
+		RootCmdOptions: rootCmdOptions,
+	}
+	cmd := cobra.Command{
+		Use:   "delete [integration1] [integration2] ...",
+		Short: "Delete integrations deployed on Kubernetes",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if err := impl.validate(args); err != nil {
+				return err
+			}
+			if err := impl.run(args); err != nil {
+				fmt.Println(err.Error())
+			}
+
+			return nil
+		},
+	}
+
+	cmd.Flags().BoolVar(&impl.deleteAll, "all", false, "Delete all integrations")
+
+	return &cmd
+}
+
+type deleteCmdOptions struct {
+	*RootCmdOptions
+	deleteAll bool
+}
+
+func (command *deleteCmdOptions) validate(args []string) error {
+	if command.deleteAll && len(args) > 0 {
+		return errors.New("invalid combination: both all flag and named integrations are set")
+	}
+	if !command.deleteAll && len(args) == 0 {
+		return errors.New("invalid combination: neither all flag nor named integrations are set")
+	}
+
+	return nil
+}
+
+func (command *deleteCmdOptions) run(args []string) error {
+	c, err := command.GetCmdClient()
+	if err != nil {
+		return err
+	}
+	if len(args) != 0 && !command.deleteAll {
+		for _, arg := range args {
+
+			err := DeleteIntegration(command.Context, c, arg, command.Namespace)
+			if err != nil {
+				if k8errors.IsNotFound(err) {
+					fmt.Println("Integration " + arg + " not found. Skipped.")
+				} else {
+					return err
+				}
+			} else {
+				fmt.Println("Integration " + arg + " deleted")
+			}
+		}
+	} else if command.deleteAll {
+		integrationList := v1alpha1.IntegrationList{
+			TypeMeta: metav1.TypeMeta{
+				APIVersion: v1alpha1.SchemeGroupVersion.String(),
+				Kind:       v1alpha1.IntegrationKind,
+			},
+		}
+
+		// Looks like the Operator SDK doesn't support deleting all objects with one command
+		err := c.List(command.Context, &k8sclient.ListOptions{Namespace: command.Namespace}, &integrationList)
+		if err != nil {
+			return err
+		}
+		for _, integration := range integrationList.Items {
+			integration := integration // pin
+			err := c.Delete(command.Context, &integration)
+			if err != nil {
+				return err
+			}
+		}
+		if len(integrationList.Items) == 0 {
+			fmt.Println("Nothing to delete")
+		} else {
+			fmt.Println(strconv.Itoa(len(integrationList.Items)) + " integration(s) deleted")
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/cmd/get.go b/pkg/cmd/get.go
new file mode 100644
index 00000000..619333c1
--- /dev/null
+++ b/pkg/cmd/get.go
@@ -0,0 +1,78 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"fmt"
+	"os"
+	"text/tabwriter"
+
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/spf13/cobra"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+type getCmdOptions struct {
+	*RootCmdOptions
+}
+
+func newCmdGet(rootCmdOptions *RootCmdOptions) *cobra.Command {
+	options := getCmdOptions{
+		RootCmdOptions: rootCmdOptions,
+	}
+	cmd := cobra.Command{
+		Use:   "get",
+		Short: "Get all integrations deployed on Kubernetes",
+		Long:  `Get the status of all integrations deployed on Kubernetes.`,
+		RunE:  options.run,
+	}
+
+	return &cmd
+}
+
+func (o *getCmdOptions) run(cmd *cobra.Command, args []string) error {
+	c, err := o.GetCmdClient()
+	if err != nil {
+		return err
+	}
+
+	integrationList := v1alpha1.IntegrationList{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: v1alpha1.SchemeGroupVersion.String(),
+			Kind:       "Integration",
+		},
+	}
+
+	namespace := o.Namespace
+
+	err = c.List(o.Context, &k8sclient.ListOptions{Namespace: namespace}, &integrationList)
+	if err != nil {
+		return err
+	}
+
+	w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
+	fmt.Fprintln(w, "NAME\tCONTEXT\tSTATUS")
+	for _, integration := range integrationList.Items {
+		fmt.Fprintf(w, "%s\t%s\t%s\n", integration.Name, integration.Status.Context, string(integration.Status.Phase))
+	}
+	w.Flush()
+
+	return nil
+}
diff --git a/pkg/cmd/install.go b/pkg/cmd/install.go
new file mode 100644
index 00000000..342c4a94
--- /dev/null
+++ b/pkg/cmd/install.go
@@ -0,0 +1,175 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/apache/camel-k/pkg/client"
+	"github.com/apache/camel-k/pkg/install"
+	"github.com/apache/camel-k/pkg/util/kubernetes"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+)
+
+func newCmdInstall(rootCmdOptions *RootCmdOptions) *cobra.Command {
+	options := installCmdOptions{
+		RootCmdOptions: rootCmdOptions,
+	}
+	cmd := cobra.Command{
+		Use:   "install",
+		Short: "Install Camel K on a Kubernetes cluster",
+		Long:  `Installs Camel K on a Kubernetes or OpenShift cluster.`,
+		RunE:  options.install,
+	}
+
+	cmd.Flags().BoolVar(&options.clusterSetupOnly, "cluster-setup", false, "Execute cluster-wide operations only (may require admin rights)")
+	cmd.Flags().BoolVar(&options.skipClusterSetup, "skip-cluster-setup", false, "Skip the cluster-setup phase")
+	cmd.Flags().BoolVar(&options.exampleSetup, "example", false, "Install example integration")
+	cmd.Flags().StringVar(&options.registry, "registry", "", "A Docker registry that can be used to publish images")
+	cmd.Flags().StringVarP(&options.outputFormat, "output", "o", "", "Output format. One of: json|yaml")
+	cmd.Flags().StringVar(&options.organization, "organization", "", "An organization on the Docker registry that can be used to publish images")
+	cmd.Flags().StringVar(&options.pushSecret, "push-secret", "", "A secret used to push images to the Docker registry")
+	cmd.Flags().StringSliceVar(&options.repositories, "repository", nil, "Add a Maven repository")
+	cmd.Flags().StringSliceVarP(&options.properties, "property", "p", nil, "Add a Camel property")
+	cmd.Flags().StringVar(&options.camelVersion, "camel-version", "", "Set the Camel version")
+
+	return &cmd
+}
+
+type installCmdOptions struct {
+	*RootCmdOptions
+	clusterSetupOnly bool
+	skipClusterSetup bool
+	exampleSetup     bool
+	registry         string
+	outputFormat     string
+	organization     string
+	pushSecret       string
+	camelVersion     string
+	repositories     []string
+	properties       []string
+}
+
+func (o *installCmdOptions) install(cmd *cobra.Command, args []string) error {
+	var collection *kubernetes.Collection
+	if o.outputFormat != "" {
+		collection = kubernetes.NewCollection()
+	}
+
+	if !o.skipClusterSetup {
+		// Let's use a client provider during cluster installation, to eliminate the problem of CRD object caching
+		clientProvider := client.Provider{Get: o.NewCmdClient}
+
+		err := install.SetupClusterwideResourcesOrCollect(o.Context, clientProvider, collection)
+		if err != nil && k8serrors.IsForbidden(err) {
+			fmt.Println("Current user is not authorized to create cluster-wide objects like custom resource definitions or cluster roles: ", err)
+
+			msg := `please log in as cluster-admin and execute "kamel install --cluster-setup" to install cluster-wide resources (one-time operation)`
+			return errors.New(msg)
+		} else if err != nil {
+			return err
+		}
+	}
+
+	if o.clusterSetupOnly {
+		if collection == nil {
+			fmt.Println("Camel K cluster setup completed successfully")
+		}
+	} else {
+		c, err := o.GetCmdClient()
+		if err != nil {
+			return err
+		}
+
+		namespace := o.Namespace
+
+		err = install.OperatorOrCollect(o.Context, c, namespace, collection)
+		if err != nil {
+			return err
+		}
+
+		platform, err := install.PlatformOrCollect(o.Context, c, namespace, o.registry, o.organization, o.pushSecret, collection)
+		if err != nil {
+			return err
+		}
+
+		if len(o.properties) > 0 {
+			platform.Spec.Build.Properties = make(map[string]string)
+
+			for _, property := range o.properties {
+				kv := strings.Split(property, "=")
+
+				if len(kv) == 2 {
+					platform.Spec.Build.Properties[kv[0]] = kv[1]
+				}
+			}
+		}
+		if len(o.repositories) > 0 {
+			platform.Spec.Build.Repositories = o.repositories
+		}
+		if o.camelVersion != "" {
+			platform.Spec.Build.CamelVersion = o.camelVersion
+		}
+
+		err = install.RuntimeObjectOrCollect(o.Context, c, namespace, collection, platform)
+		if err != nil {
+			return err
+		}
+
+		if o.exampleSetup {
+			err = install.ExampleOrCollect(o.Context, c, namespace, collection)
+			if err != nil {
+				return err
+			}
+		}
+
+		if collection == nil {
+			fmt.Println("Camel K installed in namespace", namespace)
+		}
+	}
+
+	if collection != nil {
+		return o.printOutput(collection)
+	}
+
+	return nil
+}
+
+func (o *installCmdOptions) printOutput(collection *kubernetes.Collection) error {
+	lst := collection.AsKubernetesList()
+	switch o.outputFormat {
+	case "yaml":
+		data, err := kubernetes.ToYAML(lst)
+		if err != nil {
+			return err
+		}
+		fmt.Print(string(data))
+	case "json":
+		data, err := kubernetes.ToJSON(lst)
+		if err != nil {
+			return err
+		}
+		fmt.Print(string(data))
+	default:
+		return errors.New("unknown output format: " + o.outputFormat)
+	}
+	return nil
+}
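
A small caveat in the property handling above: strings.Split(property, "=") returns more than two
parts when the value itself contains '=', so such properties fail the len(kv) == 2 guard and are
silently dropped. The trait parsing in run.go (visible in the deleted version above) uses
strings.SplitN instead; the same approach here would accept '=' inside values -- a sketch, not what
was merged:

    // hypothetical variant: split on the first '=' only, so values may contain '='
    kv := strings.SplitN(property, "=", 2)
    if len(kv) == 2 {
        platform.Spec.Build.Properties[kv[0]] = kv[1]
    }
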
diff --git a/pkg/cmd/log.go b/pkg/cmd/log.go
new file mode 100644
index 00000000..b847c499
--- /dev/null
+++ b/pkg/cmd/log.go
@@ -0,0 +1,92 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"fmt"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/util/log"
+	"github.com/spf13/cobra"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func newCmdLog(rootCmdOptions *RootCmdOptions) *cobra.Command {
+	options := logCmdOptions{
+		RootCmdOptions: rootCmdOptions,
+	}
+
+	cmd := cobra.Command{
+		Use:   "log integration",
+		Short: "Print the logs of an integration",
+		Long:  `Print the logs of an integration.`,
+		Args:  options.validate,
+		RunE:  options.run,
+	}
+
+	// completion support
+	configureKnownCompletions(&cmd)
+
+	return &cmd
+}
+
+type logCmdOptions struct {
+	*RootCmdOptions
+}
+
+func (o *logCmdOptions) validate(cmd *cobra.Command, args []string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("accepts 1 arg, received %d", len(args))
+	}
+
+	return nil
+}
+
+func (o *logCmdOptions) run(cmd *cobra.Command, args []string) error {
+	c, err := o.GetCmdClient()
+	if err != nil {
+		return err
+	}
+	integration := v1alpha1.Integration{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       v1alpha1.IntegrationKind,
+			APIVersion: v1alpha1.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: o.Namespace,
+			Name:      args[0],
+		},
+	}
+	key := k8sclient.ObjectKey{
+		Namespace: o.Namespace,
+		Name:      args[0],
+	}
+
+	if err := c.Get(o.Context, key, &integration); err != nil {
+		return err
+	}
+	if err := log.Print(o.Context, c, &integration); err != nil {
+		return err
+	}
+
+	// Let's add a wait point, otherwise the script terminates
+	<-o.Context.Done()
+
+	return nil
+}
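
Note that after wiring up the log printer, run() blocks on <-o.Context.Done(), so the command only
returns once the root context is cancelled. This diff doesn't show where that context comes from; one
plausible caller-side wiring, mirroring the signal handling seen in the dev-mode path of the old
run.go above, would be:

    // hypothetical caller-side setup: cancel the root context on Ctrl+C
    ctx, cancel := context.WithCancel(context.Background())
    signals := make(chan os.Signal, 1)
    signal.Notify(signals, os.Interrupt, syscall.SIGTERM)
    go func() {
        <-signals
        cancel() // unblocks <-o.Context.Done() in commands like log
    }()
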
diff --git a/pkg/cmd/reset.go b/pkg/cmd/reset.go
new file mode 100644
index 00000000..6946f7e3
--- /dev/null
+++ b/pkg/cmd/reset.go
@@ -0,0 +1,113 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"fmt"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/client"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func newCmdReset(rootCmdOptions *RootCmdOptions) *cobra.Command {
+	options := resetCmdOptions{
+		RootCmdOptions: rootCmdOptions,
+	}
+	cmd := cobra.Command{
+		Use:   "reset",
+		Short: "Reset the Camel K installation",
+		Long:  `Reset the Camel K installation by deleting everything except current platform configuration.`,
+		RunE:  options.reset,
+	}
+
+	return &cmd
+}
+
+type resetCmdOptions struct {
+	*RootCmdOptions
+}
+
+func (o *resetCmdOptions) reset(cmd *cobra.Command, args []string) (err error) {
+	c, err := o.GetCmdClient()
+	if err != nil {
+		return err
+	}
+	var n int
+	if n, err = o.deleteAllIntegrations(c); err != nil {
+		return err
+	}
+	fmt.Printf("%d integrations deleted from namespace %s\n", n, o.Namespace)
+
+	if n, err = o.deleteAllIntegrationContexts(c); err != nil {
+		return err
+	}
+	fmt.Printf("%d integration contexts deleted from namespace %s\n", n, o.Namespace)
+
+	if err = o.resetIntegrationPlatform(c); err != nil {
+		return err
+	}
+	fmt.Println("Camel K platform has been reset successfully!")
+	return err
+}
+
+func (o *resetCmdOptions) deleteAllIntegrations(c client.Client) (int, error) {
+	list := v1alpha1.NewIntegrationList()
+	if err := c.List(o.Context, &k8sclient.ListOptions{Namespace: o.Namespace}, &list); err != nil {
+		return 0, errors.Wrap(err, fmt.Sprintf("could not retrieve integrations from namespace %s", o.Namespace))
+	}
+	for _, i := range list.Items {
+		it := i
+		if err := c.Delete(o.Context, &it); err != nil {
+			return 0, errors.Wrap(err, fmt.Sprintf("could not delete integration %s from namespace %s", it.Name, it.Namespace))
+		}
+	}
+	return len(list.Items), nil
+}
+
+func (o *resetCmdOptions) deleteAllIntegrationContexts(c client.Client) (int, error) {
+	list := v1alpha1.NewIntegrationContextList()
+	if err := c.List(o.Context, &k8sclient.ListOptions{Namespace: o.Namespace}, &list); err != nil {
+		return 0, errors.Wrap(err, fmt.Sprintf("could not retrieve integration contexts from namespace %s", o.Namespace))
+	}
+	for _, i := range list.Items {
+		ictx := i
+		if err := c.Delete(o.Context, &ictx); err != nil {
+			return 0, errors.Wrap(err, fmt.Sprintf("could not delete integration context %s from namespace %s", ictx.Name, ictx.Namespace))
+		}
+	}
+	return len(list.Items), nil
+}
+
+func (o *resetCmdOptions) resetIntegrationPlatform(c client.Client) error {
+	list := v1alpha1.NewIntegrationPlatformList()
+	if err := c.List(o.Context, &k8sclient.ListOptions{Namespace: o.Namespace}, &list); err != nil {
+		return errors.Wrap(err, fmt.Sprintf("could not retrieve integration platform from namespace %s", o.Namespace))
+	}
+	if len(list.Items) > 1 {
+		return errors.New(fmt.Sprintf("expected 1 integration platform in the namespace, found: %d", len(list.Items)))
+	} else if len(list.Items) == 0 {
+		return errors.New("no integration platforms found in the namespace: run \"kamel install\" to install the platform")
+	}
+	platform := list.Items[0]
+	// Let's reset the status
+	platform.Status = v1alpha1.IntegrationPlatformStatus{}
+	return c.Update(o.Context, &platform)
+}
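
One stylistic leftover in resetIntegrationPlatform: errors.New(fmt.Sprintf(...)) is the long way to
spell a formatted error, and linters usually flag it. Since the file already imports
github.com/pkg/errors, the equivalent one-liner would be:

    // equivalent formulation, not part of the merged diff
    return errors.Errorf("expected 1 integration platform in the namespace, found: %d", len(list.Items))
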
diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go
new file mode 100644
index 00000000..bf6aeef5
--- /dev/null
+++ b/pkg/cmd/root.go
@@ -0,0 +1,98 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"context"
+
+	"github.com/apache/camel-k/pkg/client"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+)
+
+const kamelCommandLongDescription = `Apache Camel K is a lightweight integration framework
+built from Apache Camel that runs natively on Kubernetes and is
+specifically designed for serverless and microservice architectures.
+`
+
+// RootCmdOptions --
+type RootCmdOptions struct {
+	Context    context.Context
+	_client    client.Client
+	KubeConfig string
+	Namespace  string
+}
+
+// NewKamelCommand --
+func NewKamelCommand(ctx context.Context) (*cobra.Command, error) {
+	options := RootCmdOptions{
+		Context: ctx,
+	}
+	var cmd = cobra.Command{
+		BashCompletionFunction: bashCompletionFunction,
+		PersistentPreRunE:      options.preRun,
+		Use:                    "kamel",
+		Short:                  "Kamel is a awesome client tool for running Apache Camel integrations natively on Kubernetes",
+		Long:                   kamelCommandLongDescription,
+	}
+
+	cmd.PersistentFlags().StringVar(&options.KubeConfig, "config", "", "Path to the config file to use for CLI requests")
+	cmd.PersistentFlags().StringVarP(&options.Namespace, "namespace", "n", "", "Namespace to use for all operations")
+
+	cmd.AddCommand(newCmdCompletion(&cmd))
+	cmd.AddCommand(newCmdVersion())
+	cmd.AddCommand(newCmdRun(&options))
+	cmd.AddCommand(newCmdGet(&options))
+	cmd.AddCommand(newCmdDelete(&options))
+	cmd.AddCommand(newCmdInstall(&options))
+	cmd.AddCommand(newCmdLog(&options))
+	cmd.AddCommand(newCmdContext(&options))
+	cmd.AddCommand(newCmdReset(&options))
+
+	return &cmd, nil
+}
+
+func (command *RootCmdOptions) preRun(cmd *cobra.Command, args []string) error {
+	if command.Namespace == "" {
+		current, err := client.GetCurrentNamespace(command.KubeConfig)
+		if err != nil {
+			return errors.Wrap(err, "cannot get current namespace")
+		}
+		err = cmd.Flag("namespace").Value.Set(current)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetCmdClient returns the client that can be used from command line tools
+func (command *RootCmdOptions) GetCmdClient() (client.Client, error) {
+	// Get the pre-computed client
+	if command._client != nil {
+		return command._client, nil
+	}
+	var err error
+	command._client, err = command.NewCmdClient()
+	return command._client, err
+}
+
+// NewCmdClient returns a new client that can be used from command line tools
+func (command *RootCmdOptions) NewCmdClient() (client.Client, error) {
+	return client.NewOutOfClusterClient(command.KubeConfig)
+}
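
For orientation, NewKamelCommand is the single entry point that wires every subcommand. The actual main package is not part of this hunk, so the following is only a minimal sketch of how it might be invoked (the import path matches the new pkg/cmd package above; everything else is illustrative):

    package main

    import (
        "context"
        "fmt"
        "os"

        "github.com/apache/camel-k/pkg/cmd"
    )

    func main() {
        // cancellable context propagated to every subcommand via RootCmdOptions
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        kamel, err := cmd.NewKamelCommand(ctx)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        if err := kamel.Execute(); err != nil {
            os.Exit(1)
        }
    }
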
diff --git a/pkg/cmd/run.go b/pkg/cmd/run.go
new file mode 100644
index 00000000..f7addb21
--- /dev/null
+++ b/pkg/cmd/run.go
@@ -0,0 +1,459 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"os/signal"
+	"path"
+	"regexp"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/client"
+	"github.com/apache/camel-k/pkg/gzip"
+	"github.com/apache/camel-k/pkg/trait"
+	"github.com/apache/camel-k/pkg/util"
+	"github.com/apache/camel-k/pkg/util/kubernetes"
+	"github.com/apache/camel-k/pkg/util/log"
+	"github.com/apache/camel-k/pkg/util/sync"
+	"github.com/apache/camel-k/pkg/util/watch"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var (
+	traitConfigRegexp = regexp.MustCompile(`^([a-z-]+)((?:\.[a-z-]+)+)=(.*)$`)
+)
+
+func newCmdRun(rootCmdOptions *RootCmdOptions) *cobra.Command {
+	options := runCmdOptions{
+		RootCmdOptions: rootCmdOptions,
+	}
+
+	cmd := cobra.Command{
+		Use:   "run [file to run]",
+		Short: "Run a integration on Kubernetes",
+		Long:  `Deploys and execute a integration pod on Kubernetes.`,
+		Args:  options.validateArgs,
+		RunE:  options.run,
+	}
+
+	cmd.Flags().StringVarP(&options.Runtime, "runtime", "r", "", "Runtime used by the integration")
+	cmd.Flags().StringVar(&options.IntegrationName, "name", "", "The integration name")
+	cmd.Flags().StringSliceVarP(&options.Dependencies, "dependency", "d", nil, "The integration dependency")
+	cmd.Flags().BoolVarP(&options.Wait, "wait", "w", false, "Waits for the integration to be running")
+	cmd.Flags().StringVarP(&options.IntegrationContext, "context", "x", "", "The contex used to run the integration")
+	cmd.Flags().StringSliceVarP(&options.Properties, "property", "p", nil, "Add a camel property")
+	cmd.Flags().StringSliceVar(&options.ConfigMaps, "configmap", nil, "Add a ConfigMap")
+	cmd.Flags().StringSliceVar(&options.Secrets, "secret", nil, "Add a Secret")
+	cmd.Flags().StringSliceVar(&options.Repositories, "repository", nil, "Add a maven repository")
+	cmd.Flags().BoolVar(&options.Logs, "logs", false, "Print integration logs")
+	cmd.Flags().BoolVar(&options.Sync, "sync", false, "Synchronize the local source file with the cluster, republishing at each change")
+	cmd.Flags().BoolVar(&options.Dev, "dev", false, "Enable Dev mode (equivalent to \"-w --logs --sync\")")
+	cmd.Flags().StringVar(&options.Profile, "profile", "", "Trait profile used for deployment")
+	cmd.Flags().StringSliceVarP(&options.Traits, "trait", "t", nil, "Configure a trait. E.g. \"-t service.enabled=false\"")
+	cmd.Flags().StringSliceVar(&options.LoggingLevels, "logging-level", nil, "Configure the logging level. "+
+		"E.g. \"--logging-level org.apache.camel=DEBUG\"")
+	cmd.Flags().StringVarP(&options.OutputFormat, "output", "o", "", "Output format. One of: json|yaml")
+	cmd.Flags().BoolVar(&options.Compression, "compression", false, "Enable store source as a compressed binary blob")
+	cmd.Flags().StringSliceVar(&options.Resources, "resource", nil, "Add a resource")
+
+	// completion support
+	configureKnownCompletions(&cmd)
+
+	return &cmd
+}
+
+type runCmdOptions struct {
+	*RootCmdOptions
+	Compression        bool
+	Wait               bool
+	Logs               bool
+	Sync               bool
+	Dev                bool
+	IntegrationContext string
+	Runtime            string
+	IntegrationName    string
+	Profile            string
+	OutputFormat       string
+	Resources          []string
+	Dependencies       []string
+	Properties         []string
+	ConfigMaps         []string
+	Secrets            []string
+	Repositories       []string
+	Traits             []string
+	LoggingLevels      []string
+}
+
+func (o *runCmdOptions) validateArgs(cmd *cobra.Command, args []string) error {
+	if len(args) < 1 {
+		return errors.New("accepts at least 1 arg, received 0")
+	}
+	if len(args) > 1 && o.IntegrationName == "" {
+		return errors.New("integration name is mandatory when using multiple sources")
+	}
+
+	for _, fileName := range args {
+		if !strings.HasPrefix(fileName, "http://") && !strings.HasPrefix(fileName, "https://") {
+			if _, err := os.Stat(fileName); err != nil && os.IsNotExist(err) {
+				return errors.Wrap(err, "file "+fileName+" does not exist")
+			} else if err != nil {
+				return errors.Wrap(err, "error while accessing file "+fileName)
+			}
+		} else {
+			resp, err := http.Get(fileName)
+			if err != nil {
+				return errors.Wrap(err, "The URL provided is not reachable")
+			} else if resp.StatusCode != 200 {
+				return errors.New("The URL provided is not reachable " + fileName + " The error code returned is " + strconv.Itoa(resp.StatusCode))
+			}
+		}
+	}
+
+	return nil
+}
+
+func (o *runCmdOptions) run(cmd *cobra.Command, args []string) error {
+	c, err := o.GetCmdClient()
+	if err != nil {
+		return err
+	}
+
+	catalog := trait.NewCatalog(o.Context, c)
+	tp := catalog.ComputeTraitsProperties()
+	for _, t := range o.Traits {
+		kv := strings.SplitN(t, "=", 2)
+
+		if !util.StringSliceExists(tp, kv[0]) {
+			fmt.Printf("Error: %s is not a valid trait property\n", t)
+			return nil
+		}
+	}
+
+	integration, err := o.createIntegration(c, args)
+	if err != nil {
+		return err
+	}
+
+	if o.Dev {
+		cs := make(chan os.Signal, 1) // buffered, as required by signal.Notify
+		signal.Notify(cs, os.Interrupt, syscall.SIGTERM)
+		go func() {
+			<-cs
+			fmt.Printf("Run integration terminating\n")
+			err := DeleteIntegration(o.Context, c, integration.Name, integration.Namespace)
+			if err != nil {
+				fmt.Println(err)
+			}
+			os.Exit(1)
+		}()
+	}
+
+	if o.Sync || o.Dev {
+		err = o.syncIntegration(c, args)
+		if err != nil {
+			return err
+		}
+	}
+	if o.Wait || o.Dev {
+		err = o.waitForIntegrationReady(integration)
+		if err != nil {
+			return err
+		}
+	}
+	if o.Logs || o.Dev {
+		err = log.Print(o.Context, c, integration)
+		if err != nil {
+			return err
+		}
+	}
+
+	if o.Sync && !o.Logs && !o.Dev {
+		// Block on the context, otherwise the process terminates while sync is still running
+		<-o.Context.Done()
+	}
+	return nil
+}
+
+func (o *runCmdOptions) waitForIntegrationReady(integration *v1alpha1.Integration) error {
+	handler := func(i *v1alpha1.Integration) bool {
+		//
+		// TODO when we add health checks, we should wait until they are passed
+		//
+		if i.Status.Phase != "" {
+			fmt.Println("integration \""+integration.Name+"\" in phase", i.Status.Phase)
+
+			if i.Status.Phase == v1alpha1.IntegrationPhaseRunning {
+				// TODO display some error info when available in the status
+				return false
+			}
+
+			if i.Status.Phase == v1alpha1.IntegrationPhaseError {
+				fmt.Println("integration deployment failed")
+				return false
+			}
+		}
+
+		return true
+	}
+
+	return watch.HandleStateChanges(o.Context, integration, handler)
+}
+
+func (o *runCmdOptions) syncIntegration(c client.Client, sources []string) error {
+	for _, s := range sources {
+		changes, err := sync.File(o.Context, s)
+		if err != nil {
+			return err
+		}
+		go func() {
+			for {
+				select {
+				case <-o.Context.Done():
+					return
+				case <-changes:
+					_, err := o.updateIntegrationCode(c, sources)
+					if err != nil {
+						logrus.Error("Unable to sync integration: ", err)
+					}
+				}
+			}
+		}()
+	}
+
+	return nil
+}
+
+func (o *runCmdOptions) createIntegration(c client.Client, sources []string) (*v1alpha1.Integration, error) {
+	return o.updateIntegrationCode(c, sources)
+}
+
+func (o *runCmdOptions) updateIntegrationCode(c client.Client, sources []string) (*v1alpha1.Integration, error) {
+	namespace := o.Namespace
+
+	name := ""
+	if o.IntegrationName != "" {
+		name = o.IntegrationName
+		name = kubernetes.SanitizeName(name)
+	} else if len(sources) == 1 {
+		name = kubernetes.SanitizeName(sources[0])
+	}
+
+	if name == "" {
+		return nil, errors.New("unable to determine integration name")
+	}
+
+	integration := v1alpha1.Integration{
+		TypeMeta: v1.TypeMeta{
+			Kind:       v1alpha1.IntegrationKind,
+			APIVersion: v1alpha1.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: v1.ObjectMeta{
+			Namespace: namespace,
+			Name:      name,
+		},
+		Spec: v1alpha1.IntegrationSpec{
+			Dependencies:  make([]string, 0, len(o.Dependencies)),
+			Context:       o.IntegrationContext,
+			Configuration: make([]v1alpha1.ConfigurationSpec, 0),
+			Repositories:  o.Repositories,
+			Profile:       v1alpha1.TraitProfileByName(o.Profile),
+		},
+	}
+
+	for _, source := range sources {
+		data, err := o.loadData(source, o.Compression)
+		if err != nil {
+			return nil, err
+		}
+
+		integration.Spec.AddSources(v1alpha1.SourceSpec{
+			DataSpec: v1alpha1.DataSpec{
+				Name:        path.Base(source),
+				Content:     data,
+				Compression: o.Compression,
+			},
+		})
+	}
+
+	for _, resource := range o.Resources {
+		data, err := o.loadData(resource, o.Compression)
+		if err != nil {
+			return nil, err
+		}
+
+		integration.Spec.AddResources(v1alpha1.ResourceSpec{
+			DataSpec: v1alpha1.DataSpec{
+				Name:        path.Base(resource),
+				Content:     data,
+				Compression: o.Compression,
+			},
+		})
+	}
+
+	if o.Runtime != "" {
+		integration.Spec.AddDependency("runtime:" + o.Runtime)
+	}
+
+	for _, item := range o.Dependencies {
+		integration.Spec.AddDependency(item)
+	}
+	for _, item := range o.Properties {
+		integration.Spec.AddConfiguration("property", item)
+	}
+	for _, item := range o.LoggingLevels {
+		integration.Spec.AddConfiguration("property", "logging.level."+item)
+	}
+	for _, item := range o.ConfigMaps {
+		integration.Spec.AddConfiguration("configmap", item)
+	}
+	for _, item := range o.Secrets {
+		integration.Spec.AddConfiguration("secret", item)
+	}
+
+	for _, traitConf := range o.Traits {
+		if err := o.configureTrait(&integration, traitConf); err != nil {
+			return nil, err
+		}
+	}
+
+	switch o.OutputFormat {
+	case "":
+		// continue..
+	case "yaml":
+		data, err := kubernetes.ToYAML(&integration)
+		if err != nil {
+			return nil, err
+		}
+		fmt.Print(string(data))
+		return nil, nil
+
+	case "json":
+		data, err := kubernetes.ToJSON(&integration)
+		if err != nil {
+			return nil, err
+		}
+		fmt.Print(string(data))
+		return nil, nil
+
+	default:
+		return nil, fmt.Errorf("invalid output format option '%s', should be one of: yaml|json", o.OutputFormat)
+	}
+
+	existed := false
+	err := c.Create(o.Context, &integration)
+	if err != nil && k8serrors.IsAlreadyExists(err) {
+		existed = true
+		clone := integration.DeepCopy()
+		var key k8sclient.ObjectKey
+		key, err = k8sclient.ObjectKeyFromObject(clone)
+		if err != nil {
+			return nil, err
+		}
+		err = c.Get(o.Context, key, clone)
+		if err != nil {
+			return nil, err
+		}
+		integration.ResourceVersion = clone.ResourceVersion
+		err = c.Update(o.Context, &integration)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	if !existed {
+		fmt.Printf("integration \"%s\" created\n", name)
+	} else {
+		fmt.Printf("integration \"%s\" updated\n", name)
+	}
+	return &integration, nil
+}
+
+func (*runCmdOptions) loadData(fileName string, compress bool) (string, error) {
+	var content []byte
+	var err error
+
+	if !strings.HasPrefix(fileName, "http://") && !strings.HasPrefix(fileName, "https://") {
+		content, err = ioutil.ReadFile(fileName)
+		if err != nil {
+			return "", err
+		}
+	} else {
+		resp, err := http.Get(fileName)
+		if err != nil {
+			return "", err
+		}
+		defer resp.Body.Close()
+
+		content, err = ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	if compress {
+		var b bytes.Buffer
+
+		if err := gzip.Compress(&b, content); err != nil {
+			return "", err
+		}
+
+		return base64.StdEncoding.EncodeToString(b.Bytes()), nil
+	}
+
+	return string(content), nil
+}
+
+func (*runCmdOptions) configureTrait(integration *v1alpha1.Integration, config string) error {
+	if integration.Spec.Traits == nil {
+		integration.Spec.Traits = make(map[string]v1alpha1.IntegrationTraitSpec)
+	}
+
+	parts := traitConfigRegexp.FindStringSubmatch(config)
+	if len(parts) < 4 {
+		return errors.New("unrecognized config format (expected \"<trait>.<prop>=<val>\"): " + config)
+	}
+	traitID := parts[1]
+	prop := parts[2][1:]
+	val := parts[3]
+
+	spec, ok := integration.Spec.Traits[traitID]
+	if !ok {
+		spec = v1alpha1.IntegrationTraitSpec{
+			Configuration: make(map[string]string),
+		}
+	}
+
+	spec.Configuration[prop] = val
+	integration.Spec.Traits[traitID] = spec
+	return nil
+}
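
The --trait flag format enforced by traitConfigRegexp is <trait>.<prop>=<val>, where the property path may be nested. A self-contained sketch of how a value such as service.enabled=false decomposes under the same pattern:

    package main

    import (
        "fmt"
        "regexp"
    )

    var traitConfigRegexp = regexp.MustCompile(`^([a-z-]+)((?:\.[a-z-]+)+)=(.*)$`)

    func main() {
        parts := traitConfigRegexp.FindStringSubmatch("service.enabled=false")
        // parts[1] is the trait ID, parts[2] the dotted property path
        // (with its leading dot), parts[3] the raw value
        fmt.Println(parts[1])     // service
        fmt.Println(parts[2][1:]) // enabled
        fmt.Println(parts[3])     // false
    }
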
diff --git a/pkg/cmd/util.go b/pkg/cmd/util.go
new file mode 100644
index 00000000..d6cb4eed
--- /dev/null
+++ b/pkg/cmd/util.go
@@ -0,0 +1,41 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"context"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/client"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// DeleteIntegration --
+func DeleteIntegration(ctx context.Context, c client.Client, name string, namespace string) error {
+	integration := v1alpha1.Integration{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       v1alpha1.IntegrationKind,
+			APIVersion: v1alpha1.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace,
+			Name:      name,
+		},
+	}
+	return c.Delete(ctx, &integration)
+}
diff --git a/pkg/client/cmd/version.go b/pkg/cmd/version.go
similarity index 100%
rename from pkg/client/cmd/version.go
rename to pkg/cmd/version.go
diff --git a/pkg/controller/add_integration.go b/pkg/controller/add_integration.go
new file mode 100644
index 00000000..95573320
--- /dev/null
+++ b/pkg/controller/add_integration.go
@@ -0,0 +1,10 @@
+package controller
+
+import (
+	"github.com/apache/camel-k/pkg/controller/integration"
+)
+
+func init() {
+	// AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
+	AddToManagerFuncs = append(AddToManagerFuncs, integration.Add)
+}
diff --git a/pkg/controller/add_integrationcontext.go b/pkg/controller/add_integrationcontext.go
new file mode 100644
index 00000000..e10781e1
--- /dev/null
+++ b/pkg/controller/add_integrationcontext.go
@@ -0,0 +1,10 @@
+package controller
+
+import (
+	"github.com/apache/camel-k/pkg/controller/integrationcontext"
+)
+
+func init() {
+	// AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
+	AddToManagerFuncs = append(AddToManagerFuncs, integrationcontext.Add)
+}
diff --git a/pkg/controller/add_integrationplatform.go b/pkg/controller/add_integrationplatform.go
new file mode 100644
index 00000000..1bf1d748
--- /dev/null
+++ b/pkg/controller/add_integrationplatform.go
@@ -0,0 +1,10 @@
+package controller
+
+import (
+	"github.com/apache/camel-k/pkg/controller/integrationplatform"
+)
+
+func init() {
+	// AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
+	AddToManagerFuncs = append(AddToManagerFuncs, integrationplatform.Add)
+}
diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go
new file mode 100644
index 00000000..7c069f3e
--- /dev/null
+++ b/pkg/controller/controller.go
@@ -0,0 +1,18 @@
+package controller
+
+import (
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+)
+
+// AddToManagerFuncs is a list of functions to add all Controllers to the Manager
+var AddToManagerFuncs []func(manager.Manager) error
+
+// AddToManager adds all Controllers to the Manager
+func AddToManager(m manager.Manager) error {
+	for _, f := range AddToManagerFuncs {
+		if err := f(m); err != nil {
+			return err
+		}
+	}
+	return nil
+}
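
Note that the add_*.go files above live in the same controller package, so their init() functions run as soon as the package is imported, and AddToManager then registers everything against a controller-runtime manager. The following is only a rough sketch of the operator wiring, assuming the controller-runtime APIs of the version vendored here; the real entry point is generated by the operator-sdk scaffolding and also registers the camel-k API scheme, which is omitted below:

    package main

    import (
        "log"

        "github.com/apache/camel-k/pkg/controller"
        "sigs.k8s.io/controller-runtime/pkg/client/config"
        "sigs.k8s.io/controller-runtime/pkg/manager"
        "sigs.k8s.io/controller-runtime/pkg/runtime/signals"
    )

    func main() {
        cfg, err := config.GetConfig()
        if err != nil {
            log.Fatal(err)
        }

        mgr, err := manager.New(cfg, manager.Options{})
        if err != nil {
            log.Fatal(err)
        }

        // registers the Integration, IntegrationContext and IntegrationPlatform
        // controllers collected in AddToManagerFuncs
        if err := controller.AddToManager(mgr); err != nil {
            log.Fatal(err)
        }

        // block until a termination signal is received
        if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
            log.Fatal(err)
        }
    }
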
diff --git a/pkg/controller/integration/action.go b/pkg/controller/integration/action.go
new file mode 100644
index 00000000..eebadef0
--- /dev/null
+++ b/pkg/controller/integration/action.go
@@ -0,0 +1,47 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+	"context"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/client"
+)
+
+// Action --
+type Action interface {
+	client.Injectable
+
+	// a user friendly name for the action
+	Name() string
+
+	// returns true if the action can handle the integration
+	CanHandle(integration *v1alpha1.Integration) bool
+
+	// executes the handling function
+	Handle(ctx context.Context, integration *v1alpha1.Integration) error
+}
+
+type baseAction struct {
+	client client.Client
+}
+
+func (action *baseAction) InjectClient(client client.Client) {
+	action.client = client
+}
diff --git a/pkg/controller/integration/build_context.go b/pkg/controller/integration/build_context.go
new file mode 100644
index 00000000..427794c0
--- /dev/null
+++ b/pkg/controller/integration/build_context.go
@@ -0,0 +1,156 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/apache/camel-k/pkg/trait"
+
+	"github.com/sirupsen/logrus"
+
+	"github.com/apache/camel-k/pkg/util"
+	"github.com/apache/camel-k/pkg/util/digest"
+
+	"github.com/rs/xid"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+)
+
+// NewBuildContextAction creates an action that handles the integration context build
+func NewBuildContextAction(namespace string) Action {
+	return &buildContextAction{
+		namespace: namespace,
+	}
+}
+
+type buildContextAction struct {
+	baseAction
+	namespace string
+}
+
+func (action *buildContextAction) Name() string {
+	return "build-context"
+}
+
+func (action *buildContextAction) CanHandle(integration *v1alpha1.Integration) bool {
+	return integration.Status.Phase == v1alpha1.IntegrationPhaseBuildingContext
+}
+
+func (action *buildContextAction) Handle(ctx context.Context, integration *v1alpha1.Integration) error {
+	ictx, err := LookupContextForIntegration(ctx, action.client, integration)
+	if err != nil {
+		// TODO: we may need to add a wait strategy, e.g. give up after some time
+		return err
+	}
+
+	if ictx != nil {
+		if ictx.Labels["camel.apache.org/context.type"] == v1alpha1.IntegrationContextTypePlatform {
+			// This is a platform context; as it is auto-generated, it may get
+			// out of sync if the integration that generated it has been
+			// amended to add/remove dependencies
+
+			//TODO: this is a very simple check, we may need to provide a deps comparison strategy
+			if !util.StringSliceContains(ictx.Spec.Dependencies, integration.Status.Dependencies) {
+				// We need to re-generate a context or search for a new one that
+				// satisfies the integration's needs, so let's remove the association
+				// with the context
+				target := integration.DeepCopy()
+				target.Status.Context = ""
+				return action.client.Update(ctx, target)
+			}
+		}
+
+		if ictx.Status.Phase == v1alpha1.IntegrationContextPhaseError {
+			target := integration.DeepCopy()
+			target.Status.Image = ictx.ImageForIntegration()
+			target.Status.Context = ictx.Name
+			target.Status.Phase = v1alpha1.IntegrationPhaseError
+
+			target.Status.Digest, err = digest.ComputeForIntegration(target)
+			if err != nil {
+				return err
+			}
+
+			logrus.Info("Integration ", target.Name, " transitioning to state ", target.Status.Phase)
+
+			return action.client.Update(ctx, target)
+		}
+
+		if ictx.Status.Phase == v1alpha1.IntegrationContextPhaseReady {
+			target := integration.DeepCopy()
+			target.Status.Image = ictx.ImageForIntegration()
+			target.Status.Context = ictx.Name
+
+			dgst, err := digest.ComputeForIntegration(target)
+			if err != nil {
+				return err
+			}
+
+			target.Status.Digest = dgst
+
+			if _, err := trait.Apply(ctx, action.client, target, ictx); err != nil {
+				return err
+			}
+
+			logrus.Info("Integration ", target.Name, " transitioning to state ", target.Status.Phase)
+
+			return action.client.Update(ctx, target)
+		}
+
+		if integration.Status.Context == "" {
+			// We need to set the context
+			target := integration.DeepCopy()
+			target.Status.Context = ictx.Name
+			return action.client.Update(ctx, target)
+		}
+
+		return nil
+	}
+
+	platformCtxName := fmt.Sprintf("ctx-%s", xid.New())
+	platformCtx := v1alpha1.NewIntegrationContext(action.namespace, platformCtxName)
+
+	// Add some information for post-processing; this may need to be refactored
+	// to a proper data structure
+	platformCtx.Labels = map[string]string{
+		"camel.apache.org/context.type":               v1alpha1.IntegrationContextTypePlatform,
+		"camel.apache.org/context.created.by.kind":    v1alpha1.IntegrationKind,
+		"camel.apache.org/context.created.by.name":    integration.Name,
+		"camel.apache.org/context.created.by.version": integration.ResourceVersion,
+	}
+
+	// Set the context to have the same dependencies as the integration
+	platformCtx.Spec = v1alpha1.IntegrationContextSpec{
+		Dependencies: integration.Status.Dependencies,
+		Repositories: integration.Spec.Repositories,
+		Traits:       integration.Spec.Traits,
+	}
+
+	if err := action.client.Create(ctx, &platformCtx); err != nil {
+		return err
+	}
+
+	// Set the context name so that the next handling loop will follow the
+	// same path as an integration with a user-defined context
+	target := integration.DeepCopy()
+	target.Status.Context = platformCtxName
+
+	return action.client.Update(ctx, target)
+}
diff --git a/pkg/controller/integration/build_image.go b/pkg/controller/integration/build_image.go
new file mode 100644
index 00000000..222b7535
--- /dev/null
+++ b/pkg/controller/integration/build_image.go
@@ -0,0 +1,158 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+	"context"
+	"fmt"
+	"path"
+
+	"github.com/pkg/errors"
+
+	"github.com/apache/camel-k/pkg/util/digest"
+
+	"github.com/apache/camel-k/pkg/trait"
+
+	"github.com/apache/camel-k/pkg/builder"
+	"github.com/apache/camel-k/pkg/platform"
+	"github.com/sirupsen/logrus"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// NewBuildImageAction creates an action that handles the integration image build
+func NewBuildImageAction(namespace string) Action {
+	return &buildImageAction{
+		namespace: namespace,
+	}
+}
+
+type buildImageAction struct {
+	baseAction
+	namespace string
+}
+
+func (action *buildImageAction) Name() string {
+	return "build-image"
+}
+
+func (action *buildImageAction) CanHandle(integration *v1alpha1.Integration) bool {
+	return integration.Status.Phase == v1alpha1.IntegrationPhaseBuildingImage
+}
+
+func (action *buildImageAction) Handle(ctx context.Context, integration *v1alpha1.Integration) error {
+
+	// in this phase the integration needs to be associated with a context whose
+	// image will be used as the base image for the integration image
+	if integration.Status.Context == "" {
+		return fmt.Errorf("context is not set for integration: %s", integration.Name)
+	}
+
+	// look up the integration context associated with this integration; this is
+	// needed to determine the base image
+	ictx := v1alpha1.NewIntegrationContext(integration.Namespace, integration.Status.Context)
+	ikey := k8sclient.ObjectKey{
+		Namespace: integration.Namespace,
+		Name:      integration.Status.Context,
+	}
+	if err := action.client.Get(ctx, ikey, &ictx); err != nil {
+		return errors.Wrapf(err, "unable to find integration context %s, %s", ikey.Name, err)
+	}
+
+	b, err := platform.GetPlatformBuilder(ctx, action.client, action.namespace)
+	if err != nil {
+		return err
+	}
+	env, err := trait.Apply(ctx, action.client, integration, &ictx)
+	if err != nil {
+		return err
+	}
+
+	// This build does not require determining dependencies or a project; the
+	// builder steps remove them
+	r := builder.Request{
+		Meta:     integration.ObjectMeta,
+		Steps:    env.Steps,
+		BuildDir: env.BuildDir,
+		Platform: env.Platform.Spec,
+		Image:    ictx.Status.Image,
+	}
+
+	// Sources are added as part of the standard deployment bits
+	r.Resources = make([]builder.Resource, 0, len(integration.Spec.Sources))
+
+	for _, source := range integration.Spec.Sources {
+		r.Resources = append(r.Resources, builder.Resource{
+			Content: []byte(source.Content),
+			Target:  path.Join("sources", source.Name),
+		})
+	}
+	for _, resource := range integration.Spec.Resources {
+		r.Resources = append(r.Resources, builder.Resource{
+			Content: []byte(resource.Content),
+			Target:  path.Join("resources", resource.Name),
+		})
+	}
+
+	res := b.Submit(r)
+
+	switch res.Status {
+	case builder.StatusSubmitted:
+		logrus.Info("Build submitted")
+	case builder.StatusStarted:
+		logrus.Info("Build started")
+	case builder.StatusError:
+		target := integration.DeepCopy()
+		target.Status.Phase = v1alpha1.IntegrationPhaseError
+
+		logrus.Infof("Integration %s transitioning to state %s, reason: %s", target.Name, target.Status.Phase, res.Error.Error())
+
+		// remove the build from cache
+		defer b.Purge(r)
+
+		return action.client.Update(ctx, target)
+	case builder.StatusCompleted:
+		target := integration.DeepCopy()
+		target.Status.Phase = v1alpha1.IntegrationPhaseDeploying
+		if res.PublicImage != "" {
+			target.Status.Image = res.PublicImage
+		} else {
+			target.Status.Image = res.Image
+		}
+
+		dgst, err := digest.ComputeForIntegration(integration)
+		if err != nil {
+			return err
+		}
+
+		target.Status.Digest = dgst
+
+		logrus.Info("Integration ", target.Name, " transitioning to state ", target.Status.Phase)
+
+		// remove the build from cache
+		defer b.Purge(r)
+
+		if err := action.client.Update(ctx, target); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/controller/integration/deploy.go b/pkg/controller/integration/deploy.go
new file mode 100644
index 00000000..a8ac0871
--- /dev/null
+++ b/pkg/controller/integration/deploy.go
@@ -0,0 +1,79 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+	"context"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/trait"
+	"github.com/apache/camel-k/pkg/util/kubernetes"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// NewDeployAction creates an action that handles the integration deployment
+func NewDeployAction() Action {
+	return &deployAction{}
+}
+
+type deployAction struct {
+	baseAction
+}
+
+func (action *deployAction) Name() string {
+	return "deploy"
+}
+
+func (action *deployAction) CanHandle(integration *v1alpha1.Integration) bool {
+	return integration.Status.Phase == v1alpha1.IntegrationPhaseDeploying
+}
+
+func (action *deployAction) Handle(ctx context.Context, integration *v1alpha1.Integration) error {
+	ictxName := integration.Status.Context
+	if ictxName == "" {
+		return errors.Errorf("no context set on integration %s", integration.Name)
+	}
+	ictx := v1alpha1.NewIntegrationContext(integration.Namespace, ictxName)
+	ictxKey := k8sclient.ObjectKey{
+		Namespace: integration.Namespace,
+		Name:      ictxName,
+	}
+
+	if err := action.client.Get(ctx, ictxKey, &ictx); err != nil {
+		return errors.Wrapf(err, "unable to find integration context %s, %s", ictxName, err)
+	}
+
+	env, err := trait.Apply(ctx, action.client, integration, &ictx)
+	if err != nil {
+		return err
+	}
+
+	// TODO we should look for objects that are no longer present in the collection and remove them
+	err = kubernetes.ReplaceResources(ctx, action.client, env.Resources.Items())
+	if err != nil {
+		return err
+	}
+
+	target := integration.DeepCopy()
+	target.Status.Phase = v1alpha1.IntegrationPhaseRunning
+	logrus.Info("Integration ", target.Name, " transitioning to state ", target.Status.Phase)
+
+	return action.client.Update(ctx, target)
+}
diff --git a/pkg/controller/integration/initialize.go b/pkg/controller/integration/initialize.go
new file mode 100644
index 00000000..aa824afe
--- /dev/null
+++ b/pkg/controller/integration/initialize.go
@@ -0,0 +1,80 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+	"context"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/platform"
+	"github.com/apache/camel-k/pkg/trait"
+	"github.com/apache/camel-k/pkg/util/digest"
+	"github.com/sirupsen/logrus"
+)
+
+// NewInitializeAction creates a new initialize action
+func NewInitializeAction() Action {
+	return &initializeAction{}
+}
+
+type initializeAction struct {
+	baseAction
+}
+
+// Name returns a common name of the action
+func (action *initializeAction) Name() string {
+	return "initialize"
+}
+
+// CanHandle tells whether this action can handle the integration
+func (action *initializeAction) CanHandle(integration *v1alpha1.Integration) bool {
+	return integration.Status.Phase == ""
+}
+
+// Handle handles the integration
+func (action *initializeAction) Handle(ctx context.Context, integration *v1alpha1.Integration) error {
+	// The integration platform needs to be ready before starting to create integrations
+	if pl, err := platform.GetCurrentPlatform(ctx, action.client, integration.Namespace); err != nil || pl.Status.Phase != v1alpha1.IntegrationPlatformPhaseReady {
+		logrus.Info("Waiting for a integration platform to be ready")
+		return nil
+	}
+
+	target := integration.DeepCopy()
+	// better not to change the spec section of the target because it may be used
+	// for comparison by a higher-level controller (e.g. Knative source controller)
+
+	// execute custom initialization
+	if _, err := trait.Apply(ctx, action.client, target, nil); err != nil {
+		return err
+	}
+
+	// update the status
+	dgst, err := digest.ComputeForIntegration(integration)
+	if err != nil {
+		return err
+	}
+
+	target.Status.Phase = v1alpha1.IntegrationPhaseBuildingContext
+	target.Status.Digest = dgst
+	target.Status.Context = integration.Spec.Context
+	target.Status.Image = ""
+
+	logrus.Info("Integration ", target.Name, " transitioning to state ", target.Status.Phase)
+
+	return action.client.Update(ctx, target)
+}
diff --git a/pkg/controller/integration/integration_controller.go b/pkg/controller/integration/integration_controller.go
new file mode 100644
index 00000000..eb8e33a1
--- /dev/null
+++ b/pkg/controller/integration/integration_controller.go
@@ -0,0 +1,134 @@
+package integration
+
+import (
+	"context"
+	"time"
+
+	camelv1alpha1 "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/client"
+	"github.com/sirupsen/logrus"
+	appsv1 "k8s.io/api/apps/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+var log = logf.Log.WithName("controller_integration")
+
+/*
+USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
+business logic. Delete these comments after modifying this file.
+*/
+
+// Add creates a new Integration Controller and adds it to the Manager. The Manager will set fields on the Controller
+// and Start it when the Manager is Started.
+func Add(mgr manager.Manager) error {
+	c, err := client.FromManager(mgr)
+	if err != nil {
+		return err
+	}
+	return add(mgr, newReconciler(mgr, c))
+}
+
+// newReconciler returns a new reconcile.Reconciler
+func newReconciler(mgr manager.Manager, c client.Client) reconcile.Reconciler {
+	return &ReconcileIntegration{client: c, scheme: mgr.GetScheme()}
+}
+
+// add adds a new Controller to mgr with r as the reconcile.Reconciler
+func add(mgr manager.Manager, r reconcile.Reconciler) error {
+	// Create a new controller
+	c, err := controller.New("integration-controller", mgr, controller.Options{Reconciler: r})
+	if err != nil {
+		return err
+	}
+
+	// Watch for changes to primary resource Integration
+	err = c.Watch(&source.Kind{Type: &camelv1alpha1.Integration{}}, &handler.EnqueueRequestForObject{})
+	if err != nil {
+		return err
+	}
+
+	// Watch for changes to secondary resource Deployments and requeue the owner Integration
+	err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{
+		IsController: true,
+		OwnerType:    &camelv1alpha1.Integration{},
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+var _ reconcile.Reconciler = &ReconcileIntegration{}
+
+// ReconcileIntegration reconciles an Integration object
+type ReconcileIntegration struct {
+	// This client, initialized using mgr.Client() above, is a split client
+	// that reads objects from the cache and writes to the apiserver
+	client client.Client
+	scheme *runtime.Scheme
+}
+
+// Reconcile reads the state of the cluster for an Integration object and makes changes based on the state read
+// and what is in the Integration.Spec
+// Note:
+// The Controller will requeue the Request to be processed again if the returned error is non-nil or
+// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
+func (r *ReconcileIntegration) Reconcile(request reconcile.Request) (reconcile.Result, error) {
+	reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
+	reqLogger.Info("Reconciling Integration")
+
+	ctx := context.TODO()
+
+	// Fetch the Integration instance
+	instance := &camelv1alpha1.Integration{}
+	err := r.client.Get(ctx, request.NamespacedName, instance)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			// Request object not found, could have been deleted after reconcile request.
+			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
+			// Return and don't requeue
+			return reconcile.Result{}, nil
+		}
+		// Error reading the object - requeue the request.
+		return reconcile.Result{}, err
+	}
+
+	integrationActionPool := []Action{
+		NewInitializeAction(),
+		NewBuildContextAction(request.Namespace),
+		NewBuildImageAction(request.Namespace),
+		NewDeployAction(),
+		NewMonitorAction(),
+	}
+
+	for _, a := range integrationActionPool {
+		a.InjectClient(r.client)
+		if a.CanHandle(instance) {
+			logrus.Debug("Invoking action ", a.Name(), " on integration ", instance.Name)
+			if err := a.Handle(ctx, instance); err != nil {
+				return reconcile.Result{}, err
+			}
+		}
+	}
+
+	// Fetch the Integration again and check the state
+	if err = r.client.Get(ctx, request.NamespacedName, instance); err != nil {
+		return reconcile.Result{}, err
+	}
+
+	if instance.Status.Phase == camelv1alpha1.IntegrationPhaseRunning {
+		return reconcile.Result{}, nil
+	}
+	// Requeue
+	return reconcile.Result{
+		RequeueAfter: 5 * time.Second,
+	}, nil
+}
diff --git a/pkg/controller/integration/monitor.go b/pkg/controller/integration/monitor.go
new file mode 100644
index 00000000..3f0d6a52
--- /dev/null
+++ b/pkg/controller/integration/monitor.go
@@ -0,0 +1,67 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+	"context"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/util/digest"
+	"github.com/sirupsen/logrus"
+)
+
+// NewMonitorAction creates a new monitoring action for an integration
+func NewMonitorAction() Action {
+	return &monitorAction{}
+}
+
+type monitorAction struct {
+	baseAction
+}
+
+func (action *monitorAction) Name() string {
+	return "monitor"
+}
+
+func (action *monitorAction) CanHandle(integration *v1alpha1.Integration) bool {
+	return integration.Status.Phase == v1alpha1.IntegrationPhaseRunning ||
+		integration.Status.Phase == v1alpha1.IntegrationPhaseError
+}
+
+func (action *monitorAction) Handle(ctx context.Context, integration *v1alpha1.Integration) error {
+
+	hash, err := digest.ComputeForIntegration(integration)
+	if err != nil {
+		return err
+	}
+
+	if hash != integration.Status.Digest {
+		logrus.Info("Integration ", integration.Name, " needs a rebuild")
+
+		target := integration.DeepCopy()
+		target.Status.Digest = hash
+		target.Status.Phase = ""
+
+		logrus.Info("Integration ", target.Name, " transitioning to state ", target.Status.Phase)
+
+		return action.client.Update(ctx, target)
+	}
+
+	// TODO check also if deployment matches (e.g. replicas)
+	return nil
+}
diff --git a/pkg/controller/integration/util.go b/pkg/controller/integration/util.go
new file mode 100644
index 00000000..9c31ec82
--- /dev/null
+++ b/pkg/controller/integration/util.go
@@ -0,0 +1,68 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+	"context"
+
+	"github.com/apache/camel-k/pkg/util"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/pkg/errors"
+)
+
+// LookupContextForIntegration --
+func LookupContextForIntegration(ctx context.Context, c k8sclient.Reader, integration *v1alpha1.Integration) (*v1alpha1.IntegrationContext, error) {
+	if integration.Status.Context != "" {
+		name := integration.Status.Context
+		ictx := v1alpha1.NewIntegrationContext(integration.Namespace, name)
+		key := k8sclient.ObjectKey{
+			Namespace: integration.Namespace,
+			Name:      name,
+		}
+		if err := c.Get(ctx, key, &ictx); err != nil {
+			return nil, errors.Wrapf(err, "unable to find integration context %s, %s", name, err)
+		}
+
+		return &ictx, nil
+	}
+
+	ctxList := v1alpha1.NewIntegrationContextList()
+	if err := c.List(ctx, &k8sclient.ListOptions{Namespace: integration.Namespace}, &ctxList); err != nil {
+		return nil, err
+	}
+
+	for _, item := range ctxList.Items {
+		ictx := item // pin, and avoid shadowing the context.Context parameter
+		if ictx.Labels["camel.apache.org/context.type"] == v1alpha1.IntegrationContextTypePlatform {
+			ideps := len(integration.Status.Dependencies)
+			cdeps := len(ictx.Spec.Dependencies)
+
+			if ideps != cdeps {
+				continue
+			}
+
+			if util.StringSliceContains(ictx.Spec.Dependencies, integration.Status.Dependencies) {
+				return &ictx, nil
+			}
+		}
+	}
+
+	return nil, nil
+}
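
LookupContextForIntegration first honors an explicit context reference and otherwise scans platform contexts for one whose dependency set matches the integration's. Assuming util.StringSliceContains(a, b) reports whether every element of b occurs in a (the helper itself is not part of this diff), the combination of the length check and the containment check amounts to set equality. A tiny standalone illustration of that matching rule:

    package main

    import "fmt"

    // stringSliceContains mirrors the assumed semantics of util.StringSliceContains:
    // it reports whether every element of required is present in available.
    func stringSliceContains(available, required []string) bool {
        set := make(map[string]bool, len(available))
        for _, a := range available {
            set[a] = true
        }
        for _, r := range required {
            if !set[r] {
                return false
            }
        }
        return true
    }

    func main() {
        ctxDeps := []string{"camel:core", "camel:http4"}
        itDeps := []string{"camel:http4", "camel:core"}

        // equal length plus containment => the platform context is reusable
        fmt.Println(len(ctxDeps) == len(itDeps) && stringSliceContains(ctxDeps, itDeps)) // true
    }
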
diff --git a/pkg/controller/integrationcontext/action.go b/pkg/controller/integrationcontext/action.go
new file mode 100644
index 00000000..6d1369a1
--- /dev/null
+++ b/pkg/controller/integrationcontext/action.go
@@ -0,0 +1,47 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integrationcontext
+
+import (
+	"context"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/client"
+)
+
+// Action --
+type Action interface {
+	client.Injectable
+
+	// a user friendly name for the action
+	Name() string
+
+	// returns true if the action can handle the integration context
+	CanHandle(integration *v1alpha1.IntegrationContext) bool
+
+	// executes the handling function
+	Handle(ctx context.Context, integration *v1alpha1.IntegrationContext) error
+}
+
+type baseAction struct {
+	client client.Client
+}
+
+func (action *baseAction) InjectClient(client client.Client) {
+	action.client = client
+}
diff --git a/pkg/controller/integrationcontext/build.go b/pkg/controller/integrationcontext/build.go
new file mode 100644
index 00000000..09fe4b03
--- /dev/null
+++ b/pkg/controller/integrationcontext/build.go
@@ -0,0 +1,152 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integrationcontext
+
+import (
+	"context"
+
+	"github.com/apache/camel-k/pkg/trait"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/builder"
+	"github.com/apache/camel-k/pkg/platform"
+
+	"github.com/sirupsen/logrus"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// NewBuildAction creates a new build handling action for the context
+func NewBuildAction(ctx context.Context) Action {
+	return &buildAction{
+		Context: ctx,
+	}
+}
+
+type buildAction struct {
+	baseAction
+	context.Context
+}
+
+func (action *buildAction) Name() string {
+	return "build"
+}
+
+func (action *buildAction) CanHandle(ictx *v1alpha1.IntegrationContext) bool {
+	return ictx.Status.Phase == v1alpha1.IntegrationContextPhaseBuilding
+}
+
+func (action *buildAction) Handle(ctx context.Context, ictx *v1alpha1.IntegrationContext) error {
+	p, err := platform.GetCurrentPlatform(ctx, action.client, ictx.Namespace)
+	if err != nil {
+		return err
+	}
+	b, err := platform.GetPlatformBuilder(action.Context, action.client, ictx.Namespace)
+	if err != nil {
+		return err
+	}
+	env, err := trait.Apply(ctx, action.client, nil, ictx)
+	if err != nil {
+		return err
+	}
+
+	// assume there is no duplication or conflict for now
+	repositories := make([]string, 0, len(ictx.Spec.Repositories)+len(p.Spec.Build.Repositories))
+	repositories = append(repositories, ictx.Spec.Repositories...)
+	repositories = append(repositories, p.Spec.Build.Repositories...)
+
+	r := builder.Request{
+		Meta:         ictx.ObjectMeta,
+		Dependencies: ictx.Spec.Dependencies,
+		Repositories: repositories,
+		Steps:        env.Steps,
+		BuildDir:     env.BuildDir,
+		Platform:     env.Platform.Spec,
+	}
+
+	res := b.Submit(r)
+	switch res.Status {
+	case builder.StatusSubmitted:
+		logrus.Info("Build submitted")
+	case builder.StatusStarted:
+		logrus.Info("Build started")
+	case builder.StatusError:
+		target := ictx.DeepCopy()
+		target.Status.Phase = v1alpha1.IntegrationContextPhaseError
+
+		logrus.Infof("Context %s transitioning to state %s, reason: %s", target.Name, target.Status.Phase, res.Error.Error())
+
+		// remove the build from cache
+		defer b.Purge(r)
+
+		return action.client.Update(ctx, target)
+	case builder.StatusCompleted:
+		target := ictx.DeepCopy()
+		target.Status.Image = res.Image
+		target.Status.PublicImage = res.PublicImage
+		target.Status.Phase = v1alpha1.IntegrationContextPhaseReady
+		target.Status.Artifacts = make([]v1alpha1.Artifact, 0, len(res.Artifacts))
+
+		for _, a := range res.Artifacts {
+			// do not include artifact location
+			target.Status.Artifacts = append(target.Status.Artifacts, v1alpha1.Artifact{
+				ID:       a.ID,
+				Location: "",
+				Target:   a.Target,
+			})
+		}
+
+		logrus.Info("Context ", target.Name, " transitioning to state ", target.Status.Phase)
+
+		// remove the build from cache
+		defer b.Purge(r)
+
+		if err := action.client.Update(ctx, target); err != nil {
+			return err
+		}
+		if err := action.informIntegrations(target); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// informIntegrations triggers the processing of all integrations waiting for this context to be built
+func (action *buildAction) informIntegrations(ictx *v1alpha1.IntegrationContext) error {
+	list := v1alpha1.NewIntegrationList()
+	err := action.client.List(action.Context, &k8sclient.ListOptions{Namespace: ictx.Namespace}, &list)
+	if err != nil {
+		return err
+	}
+	for _, integration := range list.Items {
+		integration := integration // pin
+		if integration.Status.Context != ictx.Name {
+			continue
+		}
+
+		if integration.Annotations == nil {
+			integration.Annotations = make(map[string]string)
+		}
+		integration.Annotations["camel.apache.org/context.digest"] = ictx.Status.Digest
+		err = action.client.Update(action.Context, &integration)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/pkg/controller/integrationcontext/initialize.go b/pkg/controller/integrationcontext/initialize.go
new file mode 100644
index 00000000..20302acd
--- /dev/null
+++ b/pkg/controller/integrationcontext/initialize.go
@@ -0,0 +1,70 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integrationcontext
+
+import (
+	"context"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/platform"
+	"github.com/apache/camel-k/pkg/util/digest"
+	"github.com/sirupsen/logrus"
+)
+
+// NewInitializeAction creates a new initialization handling action for the context
+func NewInitializeAction() Action {
+	return &initializeAction{}
+}
+
+type initializeAction struct {
+	baseAction
+}
+
+func (action *initializeAction) Name() string {
+	return "initialize"
+}
+
+func (action *initializeAction) CanHandle(ictx *v1alpha1.IntegrationContext) bool {
+	return ictx.Status.Phase == ""
+}
+
+func (action *initializeAction) Handle(ctx context.Context, ictx *v1alpha1.IntegrationContext) error {
+	// The integration platform needs to be initialized before starting to create contexts
+	if _, err := platform.GetCurrentPlatform(ctx, action.client, ictx.Namespace); err != nil {
+		logrus.Info("Waiting for a integration platform to be initialized")
+		return nil
+	}
+
+	target := ictx.DeepCopy()
+
+	// execute custom initialization
+	//if err := trait.apply(nil, context); err != nil {
+	//	return err
+	//}
+
+	// update the status
+	logrus.Info("Context ", target.Name, " transitioning to state ", v1alpha1.IntegrationContextPhaseBuilding)
+	target.Status.Phase = v1alpha1.IntegrationContextPhaseBuilding
+	dgst, err := digest.ComputeForIntegrationContext(ictx)
+	if err != nil {
+		return err
+	}
+	target.Status.Digest = dgst
+
+	return action.client.Update(ctx, target)
+}
diff --git a/pkg/controller/integrationcontext/integrationcontext_controller.go b/pkg/controller/integrationcontext/integrationcontext_controller.go
new file mode 100644
index 00000000..5eb0e469
--- /dev/null
+++ b/pkg/controller/integrationcontext/integrationcontext_controller.go
@@ -0,0 +1,117 @@
+package integrationcontext
+
+import (
+	"context"
+	"time"
+
+	camelv1alpha1 "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/client"
+	"github.com/sirupsen/logrus"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+var log = logf.Log.WithName("controller_integrationcontext")
+
+// Add creates a new IntegrationContext Controller and adds it to the Manager. The Manager will set fields on the Controller
+// and Start it when the Manager is Started.
+func Add(mgr manager.Manager) error {
+	c, err := client.FromManager(mgr)
+	if err != nil {
+		return err
+	}
+	return add(mgr, newReconciler(mgr, c))
+}
+
+// newReconciler returns a new reconcile.Reconciler
+func newReconciler(mgr manager.Manager, c client.Client) reconcile.Reconciler {
+	return &ReconcileIntegrationContext{client: c, scheme: mgr.GetScheme()}
+}
+
+// add adds a new Controller to mgr with r as the reconcile.Reconciler
+func add(mgr manager.Manager, r reconcile.Reconciler) error {
+	// Create a new controller
+	c, err := controller.New("integrationcontext-controller", mgr, controller.Options{Reconciler: r})
+	if err != nil {
+		return err
+	}
+
+	// Watch for changes to primary resource IntegrationContext
+	err = c.Watch(&source.Kind{Type: &camelv1alpha1.IntegrationContext{}}, &handler.EnqueueRequestForObject{})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+var _ reconcile.Reconciler = &ReconcileIntegrationContext{}
+
+// ReconcileIntegrationContext reconciles an IntegrationContext object
+type ReconcileIntegrationContext struct {
+	// This client, initialized via client.FromManager above, is a split client
+	// that reads objects from the cache and writes to the apiserver
+	client client.Client
+	scheme *runtime.Scheme
+}
+
+// Reconcile reads the state of the cluster for an IntegrationContext object and makes changes based on the state read
+// and what is in the IntegrationContext.Spec
+// Note:
+// The Controller will requeue the Request to be processed again if the returned error is non-nil or
+// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
+func (r *ReconcileIntegrationContext) Reconcile(request reconcile.Request) (reconcile.Result, error) {
+	reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
+	reqLogger.Info("Reconciling IntegrationContext")
+
+	ctx := context.TODO()
+
+	// Fetch the IntegrationContext instance
+	instance := &camelv1alpha1.IntegrationContext{}
+	err := r.client.Get(ctx, request.NamespacedName, instance)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			// Request object not found, could have been deleted after reconcile request.
+			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
+			// Return and don't requeue
+			return reconcile.Result{}, nil
+		}
+		// Error reading the object - requeue the request.
+		return reconcile.Result{}, err
+	}
+
+	integrationContextActionPool := []Action{
+		NewInitializeAction(),
+		NewBuildAction(ctx),
+		NewMonitorAction(),
+	}
+
+	for _, a := range integrationContextActionPool {
+		a.InjectClient(r.client)
+		if a.CanHandle(instance) {
+			logrus.Debug("Invoking action ", a.Name(), " on integration context ", instance.Name)
+			if err := a.Handle(ctx, instance); err != nil {
+				return reconcile.Result{}, err
+			}
+		}
+	}
+
+	// Fetch the IntegrationContext again and check the state
+	if err = r.client.Get(ctx, request.NamespacedName, instance); err != nil {
+		return reconcile.Result{}, err
+	}
+
+	if instance.Status.Phase == camelv1alpha1.IntegrationContextPhaseReady {
+		return reconcile.Result{}, nil
+	}
+	// Requeue
+	return reconcile.Result{
+		RequeueAfter: 5 * time.Second,
+	}, nil
+}
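
For context, this is roughly how such a controller is wired into a
controller-runtime manager in an operator-sdk 0.3.0 style main (a sketch
based on the standard scaffolding; the camel-k scheme registration helper
pkg/apis.AddToScheme is assumed from that scaffolding):

    package main

    import (
        "github.com/apache/camel-k/pkg/apis"
        "github.com/apache/camel-k/pkg/controller/integrationcontext"
        "sigs.k8s.io/controller-runtime/pkg/client/config"
        "sigs.k8s.io/controller-runtime/pkg/manager"
        "sigs.k8s.io/controller-runtime/pkg/runtime/signals"
    )

    func main() {
        cfg, err := config.GetConfig() // kubeconfig or in-cluster config
        if err != nil {
            panic(err)
        }
        mgr, err := manager.New(cfg, manager.Options{})
        if err != nil {
            panic(err)
        }
        // register the CRD types, then the controller, then block on Start
        if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
            panic(err)
        }
        if err := integrationcontext.Add(mgr); err != nil {
            panic(err)
        }
        if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
            panic(err)
        }
    }
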
diff --git a/pkg/controller/integrationcontext/monitor.go b/pkg/controller/integrationcontext/monitor.go
new file mode 100644
index 00000000..5265bca6
--- /dev/null
+++ b/pkg/controller/integrationcontext/monitor.go
@@ -0,0 +1,61 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integrationcontext
+
+import (
+	"context"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/util/digest"
+	"github.com/sirupsen/logrus"
+)
+
+// NewMonitorAction creates a new monitoring handling action for the context
+func NewMonitorAction() Action {
+	return &monitorAction{}
+}
+
+type monitorAction struct {
+	baseAction
+}
+
+func (action *monitorAction) Name() string {
+	return "monitor"
+}
+
+func (action *monitorAction) CanHandle(ictx *v1alpha1.IntegrationContext) bool {
+	return ictx.Status.Phase == v1alpha1.IntegrationContextPhaseReady || ictx.Status.Phase == v1alpha1.IntegrationContextPhaseError
+}
+
+func (action *monitorAction) Handle(ctx context.Context, ictx *v1alpha1.IntegrationContext) error {
+	hash, err := digest.ComputeForIntegrationContext(ictx)
+	if err != nil {
+		return err
+	}
+	if hash != ictx.Status.Digest {
+		logrus.Info("IntegrationContext ", ictx.Name, " needs a rebuild")
+
+		target := ictx.DeepCopy()
+		target.Status.Digest = hash
+		logrus.Info("Context ", target.Name, " transitioning to state ", v1alpha1.IntegrationContextPhaseBuilding)
+		target.Status.Phase = v1alpha1.IntegrationContextPhaseBuilding
+		return action.client.Update(ctx, target)
+	}
+
+	return nil
+}
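
The rebuild trigger hinges entirely on the spec digest stored in the
status. Conceptually the check amounts to hashing the user-editable spec
and comparing it with the stored value (a sketch of the idea only; the
real implementation lives in pkg/util/digest and may hash other fields):

    import (
        "crypto/sha256"
        "encoding/base64"
        "encoding/json"
    )

    // specChanged reports whether the current spec no longer matches the
    // stored digest, and returns the new digest to persist in the status.
    func specChanged(stored string, spec interface{}) (bool, string, error) {
        data, err := json.Marshal(spec)
        if err != nil {
            return false, "", err
        }
        sum := sha256.Sum256(data)
        current := base64.StdEncoding.EncodeToString(sum[:])
        return current != stored, current, nil
    }
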
diff --git a/pkg/controller/integrationplatform/action.go b/pkg/controller/integrationplatform/action.go
new file mode 100644
index 00000000..fccc36c3
--- /dev/null
+++ b/pkg/controller/integrationplatform/action.go
@@ -0,0 +1,47 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integrationplatform
+
+import (
+	"context"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/client"
+)
+
+// Action --
+type Action interface {
+	client.Injectable
+
+	// a user-friendly name for the action
+	Name() string
+
+	// returns true if the action can handle the integration platform
+	CanHandle(platform *v1alpha1.IntegrationPlatform) bool
+
+	// executes the handling function
+	Handle(ctx context.Context, platform *v1alpha1.IntegrationPlatform) error
+}
+
+type baseAction struct {
+	client client.Client
+}
+
+func (action *baseAction) InjectClient(client client.Client) {
+	action.client = client
+}
diff --git a/pkg/controller/integrationplatform/create.go b/pkg/controller/integrationplatform/create.go
new file mode 100644
index 00000000..82cfcd34
--- /dev/null
+++ b/pkg/controller/integrationplatform/create.go
@@ -0,0 +1,76 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integrationplatform
+
+import (
+	"context"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/install"
+	"github.com/sirupsen/logrus"
+)
+
+var resources = []string{
+	"platform-integration-context-jvm.yaml",
+	"platform-integration-context-groovy.yaml",
+	"platform-integration-context-kotlin.yaml",
+	"platform-integration-context-spring-boot.yaml",
+}
+
+var knativeResources = []string{
+	"platform-integration-context-knative.yaml",
+}
+
+// NewCreateAction returns an action that creates resources needed by the platform
+func NewCreateAction() Action {
+	return &createAction{}
+}
+
+type createAction struct {
+	baseAction
+}
+
+func (action *createAction) Name() string {
+	return "create"
+}
+
+func (action *createAction) CanHandle(platform *v1alpha1.IntegrationPlatform) bool {
+	return platform.Status.Phase == v1alpha1.IntegrationPlatformPhaseCreating
+}
+
+func (action *createAction) Handle(ctx context.Context, platform *v1alpha1.IntegrationPlatform) error {
+	logrus.Info("Installing platform resources")
+	err := install.Resources(ctx, action.client, platform.Namespace, resources...)
+	if err != nil {
+		return err
+	}
+
+	if platform.Spec.Profile == v1alpha1.TraitProfileKnative {
+		logrus.Info("Installing knative resources")
+		err := install.Resources(ctx, action.client, platform.Namespace, knativeResources...)
+		if err != nil {
+			return err
+		}
+	}
+
+	target := platform.DeepCopy()
+	target.Status.Phase = v1alpha1.IntegrationPlatformPhaseStarting
+	logrus.Info("Platform ", target.Name, " transitioning to state ", target.Status.Phase)
+
+	return action.client.Update(ctx, target)
+}
diff --git a/pkg/controller/integrationplatform/initialize.go b/pkg/controller/integrationplatform/initialize.go
new file mode 100644
index 00000000..a508a2cf
--- /dev/null
+++ b/pkg/controller/integrationplatform/initialize.go
@@ -0,0 +1,114 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integrationplatform
+
+import (
+	"context"
+	"errors"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	platformutils "github.com/apache/camel-k/pkg/platform"
+	"github.com/apache/camel-k/pkg/util/openshift"
+	"github.com/sirupsen/logrus"
+)
+
+// NewInitializeAction returns an action that initializes the platform configuration when not provided by the user
+func NewInitializeAction() Action {
+	return &initializeAction{}
+}
+
+type initializeAction struct {
+	baseAction
+}
+
+func (action *initializeAction) Name() string {
+	return "initialize"
+}
+
+func (action *initializeAction) CanHandle(platform *v1alpha1.IntegrationPlatform) bool {
+	return platform.Status.Phase == "" || platform.Status.Phase == v1alpha1.IntegrationPlatformPhaseDuplicate
+}
+
+func (action *initializeAction) Handle(ctx context.Context, platform *v1alpha1.IntegrationPlatform) error {
+	target := platform.DeepCopy()
+
+	duplicate, err := action.isDuplicate(ctx, platform)
+	if err != nil {
+		return err
+	}
+	if duplicate {
+		// another platform already present in the namespace
+		if platform.Status.Phase != v1alpha1.IntegrationPlatformPhaseDuplicate {
+			target := platform.DeepCopy()
+			logrus.Info("Platform ", target.Name, " transitioning to state ", v1alpha1.IntegrationPlatformPhaseDuplicate)
+			target.Status.Phase = v1alpha1.IntegrationPlatformPhaseDuplicate
+			return action.client.Update(ctx, target)
+		}
+		return nil
+	}
+
+	// update missing fields in the resource
+	if target.Spec.Cluster == "" {
+		// determine the kind of cluster the platform is installed into
+		isOpenshift, err := openshift.IsOpenShift(action.client)
+		switch {
+		case err != nil:
+			return err
+		case isOpenshift:
+			target.Spec.Cluster = v1alpha1.IntegrationPlatformClusterOpenShift
+		default:
+			target.Spec.Cluster = v1alpha1.IntegrationPlatformClusterKubernetes
+		}
+	}
+
+	if target.Spec.Build.PublishStrategy == "" {
+		if target.Spec.Cluster == v1alpha1.IntegrationPlatformClusterOpenShift {
+			target.Spec.Build.PublishStrategy = v1alpha1.IntegrationPlatformBuildPublishStrategyS2I
+		} else {
+			target.Spec.Build.PublishStrategy = v1alpha1.IntegrationPlatformBuildPublishStrategyKaniko
+		}
+	}
+
+	if target.Spec.Build.PublishStrategy == v1alpha1.IntegrationPlatformBuildPublishStrategyKaniko && target.Spec.Build.Registry == "" {
+		return errors.New("no registry specified for publishing images")
+	}
+
+	if target.Spec.Profile == "" {
+		target.Spec.Profile = platformutils.GetProfile(target)
+	}
+
+	// next status
+	logrus.Info("Platform ", target.Name, " transitioning to state ", v1alpha1.IntegrationPlatformPhaseCreating)
+	target.Status.Phase = v1alpha1.IntegrationPlatformPhaseCreating
+	return action.client.Update(ctx, target)
+}
+
+func (action *initializeAction) isDuplicate(ctx context.Context, thisPlatform *v1alpha1.IntegrationPlatform) (bool, error) {
+	platforms, err := platformutils.ListPlatforms(ctx, action.client, thisPlatform.Namespace)
+	if err != nil {
+		return false, err
+	}
+	for _, platform := range platforms.Items {
+		platform := platform // pin
+		if platform.Name != thisPlatform.Name && platformutils.IsActive(&platform) {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
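
The defaulting above reduces to a small decision table: OpenShift clusters
get the S2I publish strategy, every other cluster gets Kaniko, and Kaniko
additionally requires Spec.Build.Registry to be set. Distilled (type names
assumed from the constants used above):

    func defaultPublishStrategy(cluster v1alpha1.IntegrationPlatformCluster) v1alpha1.IntegrationPlatformBuildPublishStrategy {
        if cluster == v1alpha1.IntegrationPlatformClusterOpenShift {
            return v1alpha1.IntegrationPlatformBuildPublishStrategyS2I
        }
        // non-OpenShift clusters build with Kaniko and need a registry
        return v1alpha1.IntegrationPlatformBuildPublishStrategyKaniko
    }
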
diff --git a/pkg/controller/integrationplatform/integrationplatform_controller.go b/pkg/controller/integrationplatform/integrationplatform_controller.go
new file mode 100644
index 00000000..0859f95a
--- /dev/null
+++ b/pkg/controller/integrationplatform/integrationplatform_controller.go
@@ -0,0 +1,118 @@
+package integrationplatform
+
+import (
+	"context"
+	"time"
+
+	camelv1alpha1 "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/client"
+	"github.com/sirupsen/logrus"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+var log = logf.Log.WithName("controller_integrationplatform")
+
+// Add creates a new IntegrationPlatform Controller and adds it to the Manager. The Manager will set fields on the Controller
+// and Start it when the Manager is Started.
+func Add(mgr manager.Manager) error {
+	c, err := client.FromManager(mgr)
+	if err != nil {
+		return err
+	}
+	return add(mgr, newReconciler(mgr, c))
+}
+
+// newReconciler returns a new reconcile.Reconciler
+func newReconciler(mgr manager.Manager, c client.Client) reconcile.Reconciler {
+	return &ReconcileIntegrationPlatform{client: c, scheme: mgr.GetScheme()}
+}
+
+// add adds a new Controller to mgr with r as the reconcile.Reconciler
+func add(mgr manager.Manager, r reconcile.Reconciler) error {
+	// Create a new controller
+	c, err := controller.New("integrationplatform-controller", mgr, controller.Options{Reconciler: r})
+	if err != nil {
+		return err
+	}
+
+	// Watch for changes to primary resource IntegrationPlatform
+	err = c.Watch(&source.Kind{Type: &camelv1alpha1.IntegrationPlatform{}}, &handler.EnqueueRequestForObject{})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+var _ reconcile.Reconciler = &ReconcileIntegrationPlatform{}
+
+// ReconcileIntegrationPlatform reconciles an IntegrationPlatform object
+type ReconcileIntegrationPlatform struct {
+	// This client, initialized via client.FromManager above, is a split client
+	// that reads objects from the cache and writes to the apiserver
+	client client.Client
+	scheme *runtime.Scheme
+}
+
+// Reconcile reads the state of the cluster for an IntegrationPlatform object and makes changes based on the state read
+// and what is in the IntegrationPlatform.Spec
+// Note:
+// The Controller will requeue the Request to be processed again if the returned error is non-nil or
+// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
+func (r *ReconcileIntegrationPlatform) Reconcile(request reconcile.Request) (reconcile.Result, error) {
+	reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
+	reqLogger.Info("Reconciling IntegrationPlatform")
+
+	ctx := context.TODO()
+
+	// Fetch the IntegrationPlatform instance
+	instance := &camelv1alpha1.IntegrationPlatform{}
+	err := r.client.Get(ctx, request.NamespacedName, instance)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			// Request object not found, could have been deleted after reconcile request.
+			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
+			// Return and don't requeue
+			return reconcile.Result{}, nil
+		}
+		// Error reading the object - requeue the request.
+		return reconcile.Result{}, err
+	}
+
+	integrationPlatformActionPool := []Action{
+		NewInitializeAction(),
+		NewCreateAction(),
+		NewStartAction(),
+	}
+
+	for _, a := range integrationPlatformActionPool {
+		a.InjectClient(r.client)
+		if a.CanHandle(instance) {
+			logrus.Debug("Invoking action ", a.Name(), " on integration platform ", instance.Name)
+			if err := a.Handle(ctx, instance); err != nil {
+				return reconcile.Result{}, err
+			}
+		}
+	}
+
+	// Fetch the IntegrationPlatform again and check the state
+	if err = r.client.Get(ctx, request.NamespacedName, instance); err != nil {
+		return reconcile.Result{}, err
+	}
+
+	if instance.Status.Phase == camelv1alpha1.IntegrationPlatformPhaseReady {
+		return reconcile.Result{}, nil
+	}
+	// Requeue
+	return reconcile.Result{
+		RequeueAfter: 5 * time.Second,
+	}, nil
+
+}
diff --git a/pkg/controller/integrationplatform/start.go b/pkg/controller/integrationplatform/start.go
new file mode 100644
index 00000000..6d69bf6e
--- /dev/null
+++ b/pkg/controller/integrationplatform/start.go
@@ -0,0 +1,87 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integrationplatform
+
+import (
+	"context"
+
+	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/sirupsen/logrus"
+	"k8s.io/apimachinery/pkg/labels"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// NewStartAction returns an action that waits for all required platform resources to start
+func NewStartAction() Action {
+	return &startAction{}
+}
+
+type startAction struct {
+	baseAction
+}
+
+func (action *startAction) Name() string {
+	return "start"
+}
+
+func (action *startAction) CanHandle(platform *v1alpha1.IntegrationPlatform) bool {
+	return platform.Status.Phase == v1alpha1.IntegrationPlatformPhaseStarting || platform.Status.Phase == v1alpha1.IntegrationPlatformPhaseError
+}
+
+func (action *startAction) Handle(ctx context.Context, platform *v1alpha1.IntegrationPlatform) error {
+	aggregatePhase, err := action.aggregatePlatformPhaseFromContexts(ctx, platform.Namespace)
+	if err != nil {
+		return err
+	}
+	if platform.Status.Phase != aggregatePhase {
+		target := platform.DeepCopy()
+		logrus.Info("Platform ", target.Name, " transitioning to state ", aggregatePhase)
+		target.Status.Phase = aggregatePhase
+		return action.client.Update(ctx, target)
+	}
+	// wait
+	return nil
+}
+
+func (action *startAction) aggregatePlatformPhaseFromContexts(ctx context.Context, namespace string) (v1alpha1.IntegrationPlatformPhase, error) {
+	ctxs := v1alpha1.NewIntegrationContextList()
+	options := k8sclient.ListOptions{
+		LabelSelector: labels.SelectorFromSet(labels.Set{
+			"camel.apache.org/context.type": "platform",
+		}),
+		Namespace: namespace,
+	}
+	if err := action.client.List(ctx, &options, &ctxs); err != nil {
+		return "", err
+	}
+
+	countReady := 0
+	for _, ctx := range ctxs.Items {
+		if ctx.Status.Phase == v1alpha1.IntegrationContextPhaseError {
+			return v1alpha1.IntegrationPlatformPhaseError, nil
+		} else if ctx.Status.Phase == v1alpha1.IntegrationContextPhaseReady {
+			countReady++
+		}
+	}
+
+	if countReady < len(ctxs.Items) {
+		return v1alpha1.IntegrationPlatformPhaseStarting, nil
+	}
+
+	return v1alpha1.IntegrationPlatformPhaseReady, nil
+}
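
The aggregation rule reduces to: any platform context in error puts the
platform in error, all contexts ready makes it ready, anything in between
keeps it starting. As a pure function over the phases (phase type names
assumed from the constants used above):

    func aggregate(phases []v1alpha1.IntegrationContextPhase) v1alpha1.IntegrationPlatformPhase {
        ready := 0
        for _, p := range phases {
            if p == v1alpha1.IntegrationContextPhaseError {
                return v1alpha1.IntegrationPlatformPhaseError
            }
            if p == v1alpha1.IntegrationContextPhaseReady {
                ready++
            }
        }
        if ready < len(phases) {
            return v1alpha1.IntegrationPlatformPhaseStarting
        }
        return v1alpha1.IntegrationPlatformPhaseReady
    }
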
diff --git a/pkg/install/cluster.go b/pkg/install/cluster.go
index 314de5ee..31d10966 100644
--- a/pkg/install/cluster.go
+++ b/pkg/install/cluster.go
@@ -18,59 +18,112 @@ limitations under the License.
 package install
 
 import (
+	"context"
+	"errors"
+	"strconv"
+	"time"
+
 	"github.com/apache/camel-k/deploy"
+	"github.com/apache/camel-k/pkg/client"
 	"github.com/apache/camel-k/pkg/util/kubernetes"
 	"github.com/apache/camel-k/pkg/util/kubernetes/customclient"
-	"github.com/operator-framework/operator-sdk/pkg/k8sclient"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
 	"k8s.io/api/rbac/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/yaml"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 // SetupClusterwideResources --
-func SetupClusterwideResources() error {
-	return SetupClusterwideResourcesOrCollect(nil)
+func SetupClusterwideResources(ctx context.Context, clientProvider client.Provider) error {
+	return SetupClusterwideResourcesOrCollect(ctx, clientProvider, nil)
 }
 
 // SetupClusterwideResourcesOrCollect --
-func SetupClusterwideResourcesOrCollect(collection *kubernetes.Collection) error {
+func SetupClusterwideResourcesOrCollect(ctx context.Context, clientProvider client.Provider, collection *kubernetes.Collection) error {
+	// Get a client to install the CRD
+	c, err := clientProvider.Get()
+	if err != nil {
+		return err
+	}
 
 	// Install CRD for Integration Platform (if needed)
-	if err := installCRD("IntegrationPlatform", "crd-integration-platform.yaml", collection); err != nil {
+	if err := installCRD(ctx, c, "IntegrationPlatform", "crd-integration-platform.yaml", collection); err != nil {
 		return err
 	}
 
 	// Install CRD for Integration Context (if needed)
-	if err := installCRD("IntegrationContext", "crd-integration-context.yaml", collection); err != nil {
+	if err := installCRD(ctx, c, "IntegrationContext", "crd-integration-context.yaml", collection); err != nil {
 		return err
 	}
 
 	// Install CRD for Integration (if needed)
-	if err := installCRD("Integration", "crd-integration.yaml", collection); err != nil {
+	if err := installCRD(ctx, c, "Integration", "crd-integration.yaml", collection); err != nil {
 		return err
 	}
 
 	// Installing ClusterRole
-	clusterRoleInstalled, err := IsClusterRoleInstalled()
+	clusterRoleInstalled, err := IsClusterRoleInstalled(ctx, c)
 	if err != nil {
 		return err
 	}
 	if !clusterRoleInstalled || collection != nil {
-		err := installClusterRole(collection)
+		err := installClusterRole(ctx, c, collection)
 		if err != nil {
 			return err
 		}
 	}
 
+	// Wait for all CRDs to be installed before proceeding
+	if err := WaitForAllCRDInstallation(ctx, clientProvider, 25*time.Second); err != nil {
+		return err
+	}
+
 	return nil
 }
 
-// IsCRDInstalled check if the given CRT kind is installed
-func IsCRDInstalled(kind string) (bool, error) {
-	lst, err := k8sclient.GetKubeClient().Discovery().ServerResourcesForGroupVersion("camel.apache.org/v1alpha1")
-	if err != nil && errors.IsNotFound(err) {
+// WaitForAllCRDInstallation waits until all CRDs are installed
+func WaitForAllCRDInstallation(ctx context.Context, clientProvider client.Provider, timeout time.Duration) error {
+	deadline := time.Now().Add(timeout)
+	for {
+		var c client.Client
+		var err error
+		if c, err = clientProvider.Get(); err != nil {
+			return err
+		}
+		var inst bool
+		if inst, err = AreAllCRDInstalled(ctx, c); err != nil {
+			return err
+		} else if inst {
+			return nil
+		}
+		// Check after 2 seconds if not expired
+		if time.Now().After(deadline) {
+			return errors.New("cannot check CRD installation after " + strconv.FormatInt(timeout.Nanoseconds()/1000000000, 10) + " seconds")
+		}
+		time.Sleep(2 * time.Second)
+	}
+}
+
+// AreAllCRDInstalled checks if all the required CRDs are installed
+func AreAllCRDInstalled(ctx context.Context, c client.Client) (bool, error) {
+	if ok, err := IsCRDInstalled(ctx, c, "IntegrationPlatform"); err != nil {
+		return ok, err
+	} else if !ok {
+		return false, nil
+	}
+	if ok, err := IsCRDInstalled(ctx, c, "IntegrationContext"); err != nil {
+		return ok, err
+	} else if !ok {
+		return false, nil
+	}
+	return IsCRDInstalled(ctx, c, "Integration")
+}
+
+// IsCRDInstalled checks if the given CRD kind is installed
+func IsCRDInstalled(ctx context.Context, c client.Client, kind string) (bool, error) {
+	lst, err := c.Discovery().ServerResourcesForGroupVersion("camel.apache.org/v1alpha1")
+	if err != nil && k8serrors.IsNotFound(err) {
 		return false, nil
 	} else if err != nil {
 		return false, err
@@ -83,7 +136,7 @@ func IsCRDInstalled(kind string) (bool, error) {
 	return false, nil
 }
 
-func installCRD(kind string, resourceName string, collection *kubernetes.Collection) error {
+func installCRD(ctx context.Context, c client.Client, kind string, resourceName string, collection *kubernetes.Collection) error {
 	crd := []byte(deploy.Resources[resourceName])
 	if collection != nil {
 		unstr, err := kubernetes.LoadRawResourceFromYaml(string(crd))
@@ -95,7 +148,7 @@ func installCRD(kind string, resourceName string, collection *kubernetes.Collect
 	}
 
 	// Installing Integration CRD
-	installed, err := IsCRDInstalled(kind)
+	installed, err := IsCRDInstalled(ctx, c, kind)
 	if err != nil {
 		return err
 	}
@@ -107,7 +160,7 @@ func installCRD(kind string, resourceName string, collection *kubernetes.Collect
 	if err != nil {
 		return err
 	}
-	restClient, err := customclient.GetClientFor("apiextensions.k8s.io", "v1beta1")
+	restClient, err := customclient.GetClientFor(c, "apiextensions.k8s.io", "v1beta1")
 	if err != nil {
 		return err
 	}
@@ -118,7 +171,7 @@ func installCRD(kind string, resourceName string, collection *kubernetes.Collect
 		Resource("customresourcedefinitions").
 		Do()
 	// Check result
-	if result.Error() != nil && !errors.IsAlreadyExists(result.Error()) {
+	if result.Error() != nil && !k8serrors.IsAlreadyExists(result.Error()) {
 		return result.Error()
 	}
 
@@ -126,7 +179,7 @@ func installCRD(kind string, resourceName string, collection *kubernetes.Collect
 }
 
 // IsClusterRoleInstalled check if cluster role camel-k:edit is installed
-func IsClusterRoleInstalled() (bool, error) {
+func IsClusterRoleInstalled(ctx context.Context, c client.Client) (bool, error) {
 	clusterRole := v1.ClusterRole{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "ClusterRole",
@@ -136,8 +189,12 @@ func IsClusterRoleInstalled() (bool, error) {
 			Name: "camel-k:edit",
 		},
 	}
-	err := sdk.Get(&clusterRole)
-	if err != nil && errors.IsNotFound(err) {
+	key, err := k8sclient.ObjectKeyFromObject(&clusterRole)
+	if err != nil {
+		return false, err
+	}
+	err = c.Get(ctx, key, &clusterRole)
+	if err != nil && k8serrors.IsNotFound(err) {
 		return false, nil
 	} else if err != nil {
 		return false, err
@@ -145,8 +202,8 @@ func IsClusterRoleInstalled() (bool, error) {
 	return true, nil
 }
 
-func installClusterRole(collection *kubernetes.Collection) error {
-	obj, err := kubernetes.LoadResourceFromYaml(deploy.Resources["user-cluster-role.yaml"])
+func installClusterRole(ctx context.Context, c client.Client, collection *kubernetes.Collection) error {
+	obj, err := kubernetes.LoadResourceFromYaml(c.GetScheme(), deploy.Resources["user-cluster-role.yaml"])
 	if err != nil {
 		return err
 	}
@@ -155,5 +212,5 @@ func installClusterRole(collection *kubernetes.Collection) error {
 		collection.Add(obj)
 		return nil
 	}
-	return sdk.Create(obj)
+	return c.Create(ctx, obj)
 }
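
SetupClusterwideResources now already waits up to 25 seconds for the CRDs
to be served; a caller that needs a longer deadline can wait again
explicitly. A minimal caller sketch (the 60-second value is an example):

    func ensureCluster(ctx context.Context, provider client.Provider) error {
        // install CRDs and the camel-k:edit cluster role
        if err := install.SetupClusterwideResources(ctx, provider); err != nil {
            return err
        }
        // extend the wait beyond the built-in 25s for slow clusters
        return install.WaitForAllCRDInstallation(ctx, provider, 60*time.Second)
    }
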
diff --git a/pkg/install/common.go b/pkg/install/common.go
index ea11af12..a2edac4e 100644
--- a/pkg/install/common.go
+++ b/pkg/install/common.go
@@ -18,23 +18,26 @@ limitations under the License.
 package install
 
 import (
+	"context"
+
 	"github.com/apache/camel-k/deploy"
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/client"
 	"github.com/apache/camel-k/pkg/util/kubernetes"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 )
 
 // Resources installs named resources from the project resource directory
-func Resources(namespace string, names ...string) error {
-	return ResourcesOrCollect(namespace, nil, names...)
+func Resources(ctx context.Context, c client.Client, namespace string, names ...string) error {
+	return ResourcesOrCollect(ctx, c, namespace, nil, names...)
 }
 
-func ResourcesOrCollect(namespace string, collection *kubernetes.Collection, names ...string) error {
+// ResourcesOrCollect --
+func ResourcesOrCollect(ctx context.Context, c client.Client, namespace string, collection *kubernetes.Collection, names ...string) error {
 	for _, name := range names {
-		if err := ResourceOrCollect(namespace, collection, name); err != nil {
+		if err := ResourceOrCollect(ctx, c, namespace, collection, name); err != nil {
 			return err
 		}
 	}
@@ -42,25 +45,27 @@ func ResourcesOrCollect(namespace string, collection *kubernetes.Collection, nam
 }
 
 // Resource installs a single named resource from the project resource directory
-func Resource(namespace string, name string) error {
-	return ResourceOrCollect(namespace, nil, name)
+func Resource(ctx context.Context, c client.Client, namespace string, name string) error {
+	return ResourceOrCollect(ctx, c, namespace, nil, name)
 }
 
-func ResourceOrCollect(namespace string, collection *kubernetes.Collection, name string) error {
-	obj, err := kubernetes.LoadResourceFromYaml(deploy.Resources[name])
+// ResourceOrCollect --
+func ResourceOrCollect(ctx context.Context, c client.Client, namespace string, collection *kubernetes.Collection, name string) error {
+	obj, err := kubernetes.LoadResourceFromYaml(c.GetScheme(), deploy.Resources[name])
 	if err != nil {
 		return err
 	}
 
-	return RuntimeObjectOrCollect(namespace, collection, obj)
+	return RuntimeObjectOrCollect(ctx, c, namespace, collection, obj)
 }
 
 // RuntimeObject installs a single runtime object
-func RuntimeObject(namespace string, obj runtime.Object) error {
-	return RuntimeObjectOrCollect(namespace, nil, obj)
+func RuntimeObject(ctx context.Context, c client.Client, namespace string, obj runtime.Object) error {
+	return RuntimeObjectOrCollect(ctx, c, namespace, nil, obj)
 }
 
-func RuntimeObjectOrCollect(namespace string, collection *kubernetes.Collection, obj runtime.Object) error {
+// RuntimeObjectOrCollect --
+func RuntimeObjectOrCollect(ctx context.Context, c client.Client, namespace string, collection *kubernetes.Collection, obj runtime.Object) error {
 	if collection != nil {
 		// Adding to the collection before setting the namespace
 		collection.Add(obj)
@@ -71,7 +76,7 @@ func RuntimeObjectOrCollect(namespace string, collection *kubernetes.Collection,
 		metaObject.SetNamespace(namespace)
 	}
 
-	err := sdk.Create(obj)
+	err := c.Create(ctx, obj)
 	if err != nil && errors.IsAlreadyExists(err) {
 		// Don't recreate Service object
 		if obj.GetObjectKind().GroupVersionKind().Kind == "Service" {
@@ -87,7 +92,7 @@ func RuntimeObjectOrCollect(namespace string, collection *kubernetes.Collection,
 		if obj.GetObjectKind().GroupVersionKind().Kind == "PersistentVolumeClaim" {
 			return nil
 		}
-		return sdk.Update(obj)
+		return c.Update(ctx, obj)
 	}
 	return err
 }
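
Every installer here has the same dual mode: with a nil collection the
objects are created (or updated) on the cluster, with a non-nil collection
they are only recorded, e.g. for printing YAML instead of applying it. A
usage sketch (the kubernetes.NewCollection constructor is an assumption):

    // apply directly to the cluster
    if err := install.Resources(ctx, c, ns, "operator-service-account.yaml"); err != nil {
        return err
    }

    // or only collect the objects instead of creating them
    collection := kubernetes.NewCollection()
    if err := install.ResourcesOrCollect(ctx, c, ns, collection, "operator-service-account.yaml"); err != nil {
        return err
    }
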
diff --git a/pkg/install/operator.go b/pkg/install/operator.go
index a35c344d..365592f4 100644
--- a/pkg/install/operator.go
+++ b/pkg/install/operator.go
@@ -18,52 +18,51 @@ limitations under the License.
 package install
 
 import (
+	"context"
 	"errors"
-	"strconv"
-	"time"
 
 	"github.com/apache/camel-k/deploy"
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/client"
 	"github.com/apache/camel-k/pkg/util/knative"
 	"github.com/apache/camel-k/pkg/util/kubernetes"
 	"github.com/apache/camel-k/pkg/util/minishift"
 	"github.com/apache/camel-k/pkg/util/openshift"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
 )
 
 // Operator installs the operator resources in the given namespace
-func Operator(namespace string) error {
-	return OperatorOrCollect(namespace, nil)
+func Operator(ctx context.Context, c client.Client, namespace string) error {
+	return OperatorOrCollect(ctx, c, namespace, nil)
 }
 
 // OperatorOrCollect installs the operator resources or adds them to the collector if present
-func OperatorOrCollect(namespace string, collection *kubernetes.Collection) error {
-	isOpenshift, err := openshift.IsOpenShift()
+func OperatorOrCollect(ctx context.Context, c client.Client, namespace string, collection *kubernetes.Collection) error {
+	isOpenshift, err := openshift.IsOpenShift(c)
 	if err != nil {
 		return err
 	}
 	if isOpenshift {
-		if err := installOpenshift(namespace, collection); err != nil {
+		if err := installOpenshift(ctx, c, namespace, collection); err != nil {
 			return err
 		}
 	} else {
-		if err := installKubernetes(namespace, collection); err != nil {
+		if err := installKubernetes(ctx, c, namespace, collection); err != nil {
 			return err
 		}
 	}
 	// Additionally, install Knative resources (roles and bindings)
-	isKnative, err := knative.IsInstalled()
+	isKnative, err := knative.IsInstalled(ctx, c)
 	if err != nil {
 		return err
 	}
 	if isKnative {
-		return installKnative(namespace, collection)
+		return installKnative(ctx, c, namespace, collection)
 	}
 	return nil
 }
 
-func installOpenshift(namespace string, collection *kubernetes.Collection) error {
-	return ResourcesOrCollect(namespace, collection,
+func installOpenshift(ctx context.Context, c client.Client, namespace string, collection *kubernetes.Collection) error {
+	return ResourcesOrCollect(ctx, c, namespace, collection,
 		"operator-service-account.yaml",
 		"operator-role-openshift.yaml",
 		"operator-role-binding.yaml",
@@ -72,8 +71,8 @@ func installOpenshift(namespace string, collection *kubernetes.Collection) error
 	)
 }
 
-func installKubernetes(namespace string, collection *kubernetes.Collection) error {
-	return ResourcesOrCollect(namespace, collection,
+func installKubernetes(ctx context.Context, c client.Client, namespace string, collection *kubernetes.Collection) error {
+	return ResourcesOrCollect(ctx, c, namespace, collection,
 		"operator-service-account.yaml",
 		"operator-role-kubernetes.yaml",
 		"operator-role-binding.yaml",
@@ -83,29 +82,26 @@ func installKubernetes(namespace string, collection *kubernetes.Collection) erro
 	)
 }
 
-func installKnative(namespace string, collection *kubernetes.Collection) error {
-	return ResourcesOrCollect(namespace, collection,
+func installKnative(ctx context.Context, c client.Client, namespace string, collection *kubernetes.Collection) error {
+	return ResourcesOrCollect(ctx, c, namespace, collection,
 		"operator-role-knative.yaml",
 		"operator-role-binding-knative.yaml",
 	)
 }
 
 // Platform installs the platform custom resource
-func Platform(namespace string, registry string, organization string, pushSecret string) (*v1alpha1.IntegrationPlatform, error) {
-	return PlatformOrCollect(namespace, registry, organization, pushSecret, nil)
+func Platform(ctx context.Context, c client.Client, namespace string, registry string, organization string, pushSecret string) (*v1alpha1.IntegrationPlatform, error) {
+	return PlatformOrCollect(ctx, c, namespace, registry, organization, pushSecret, nil)
 }
 
 // PlatformOrCollect --
 // nolint: lll
-func PlatformOrCollect(namespace string, registry string, organization string, pushSecret string, collection *kubernetes.Collection) (*v1alpha1.IntegrationPlatform, error) {
-	if err := waitForPlatformCRDAvailable(namespace, 25*time.Second); err != nil {
-		return nil, err
-	}
-	isOpenshift, err := openshift.IsOpenShift()
+func PlatformOrCollect(ctx context.Context, c client.Client, namespace string, registry string, organization string, pushSecret string, collection *kubernetes.Collection) (*v1alpha1.IntegrationPlatform, error) {
+	isOpenshift, err := openshift.IsOpenShift(c)
 	if err != nil {
 		return nil, err
 	}
-	platformObject, err := kubernetes.LoadResourceFromYaml(deploy.Resources["platform-cr.yaml"])
+	platformObject, err := kubernetes.LoadResourceFromYaml(c.GetScheme(), deploy.Resources["platform-cr.yaml"])
 	if err != nil {
 		return nil, err
 	}
@@ -116,7 +112,7 @@ func PlatformOrCollect(namespace string, registry string, organization string, p
 		if registry == "" {
 			// This operation should be done here in the installer
 			// because the operator is not allowed to look into the "kube-system" namespace
-			minishiftRegistry, err := minishift.FindRegistry()
+			minishiftRegistry, err := minishift.FindRegistry(ctx, c)
 			if err != nil {
 				return nil, err
 			}
@@ -131,7 +127,7 @@ func PlatformOrCollect(namespace string, registry string, organization string, p
 	}
 
 	var knativeInstalled bool
-	if knativeInstalled, err = knative.IsInstalled(); err != nil {
+	if knativeInstalled, err = knative.IsInstalled(ctx, c); err != nil {
 		return nil, err
 	}
 	if knativeInstalled {
@@ -141,28 +137,14 @@ func PlatformOrCollect(namespace string, registry string, organization string, p
 	return pl, nil
 }
 
-func waitForPlatformCRDAvailable(namespace string, timeout time.Duration) error {
-	deadline := time.Now().Add(timeout)
-	for {
-		pla := v1alpha1.NewIntegrationPlatformList()
-		if err := sdk.List(namespace, &pla); err == nil {
-			return nil
-		}
-		if time.Now().After(deadline) {
-			return errors.New("cannot list integration platforms after " + strconv.FormatInt(timeout.Nanoseconds()/1000000000, 10) + " seconds")
-		}
-		time.Sleep(2 * time.Second)
-	}
-}
-
 // Example --
-func Example(namespace string) error {
-	return ExampleOrCollect(namespace, nil)
+func Example(ctx context.Context, c client.Client, namespace string) error {
+	return ExampleOrCollect(ctx, c, namespace, nil)
 }
 
 // ExampleOrCollect --
-func ExampleOrCollect(namespace string, collection *kubernetes.Collection) error {
-	return ResourcesOrCollect(namespace, collection,
+func ExampleOrCollect(ctx context.Context, c client.Client, namespace string, collection *kubernetes.Collection) error {
+	return ResourcesOrCollect(ctx, c, namespace, collection,
 		"cr-example.yaml",
 	)
 }
diff --git a/pkg/platform/platform.go b/pkg/platform/platform.go
index cf82ed71..48cffcb4 100644
--- a/pkg/platform/platform.go
+++ b/pkg/platform/platform.go
@@ -23,7 +23,8 @@ import (
 
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
 	"github.com/apache/camel-k/pkg/builder"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
+	"github.com/apache/camel-k/pkg/client"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 // gBuilder is the current builder
@@ -31,19 +32,19 @@ import (
 var gBuilder builder.Builder
 
 // GetPlatformBuilder --
-func GetPlatformBuilder(ctx context.Context, namespace string) (builder.Builder, error) {
+func GetPlatformBuilder(ctx context.Context, c client.Client, namespace string) (builder.Builder, error) {
 	if gBuilder != nil {
 		return gBuilder, nil
 	}
 
-	gBuilder = builder.New(ctx, namespace)
+	gBuilder = builder.New(ctx, c, namespace)
 
 	return gBuilder, nil
 }
 
 // GetCurrentPlatform returns the currently installed platform
-func GetCurrentPlatform(namespace string) (*v1alpha1.IntegrationPlatform, error) {
-	lst, err := ListPlatforms(namespace)
+func GetCurrentPlatform(ctx context.Context, c client.Client, namespace string) (*v1alpha1.IntegrationPlatform, error) {
+	lst, err := ListPlatforms(ctx, c, namespace)
 	if err != nil {
 		return nil, err
 	}
@@ -58,9 +59,9 @@ func GetCurrentPlatform(namespace string) (*v1alpha1.IntegrationPlatform, error)
 }
 
 // ListPlatforms returns all platforms installed in a given namespace (only one will be active)
-func ListPlatforms(namespace string) (*v1alpha1.IntegrationPlatformList, error) {
+func ListPlatforms(ctx context.Context, c client.Client, namespace string) (*v1alpha1.IntegrationPlatformList, error) {
 	lst := v1alpha1.NewIntegrationPlatformList()
-	if err := sdk.List(namespace, &lst); err != nil {
+	if err := c.List(ctx, &k8sclient.ListOptions{Namespace: namespace}, &lst); err != nil {
 		return nil, err
 	}
 	return &lst, nil
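
A caller-side sketch of the new signatures; the reconcile actions above
use exactly this shape, treating a missing platform as "not ready yet"
rather than as a hard failure:

    // inside a reconcile action, with c the injected client
    pl, err := platform.GetCurrentPlatform(ctx, c, namespace)
    if err != nil {
        // no active IntegrationPlatform yet: skip and let the controller requeue
        return nil
    }
    logrus.Info("using platform ", pl.Name)
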
diff --git a/pkg/stub/action/context/action.go b/pkg/stub/action/context/action.go
deleted file mode 100644
index 97f35c41..00000000
--- a/pkg/stub/action/context/action.go
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package context
-
-import (
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-)
-
-// Action --
-type Action interface {
-
-	// a user friendly name for the action
-	Name() string
-
-	// returns true if the action can handle the integration context
-	CanHandle(integration *v1alpha1.IntegrationContext) bool
-
-	// executes the handling function
-	Handle(integration *v1alpha1.IntegrationContext) error
-}
diff --git a/pkg/stub/action/context/build.go b/pkg/stub/action/context/build.go
deleted file mode 100644
index eebf87e7..00000000
--- a/pkg/stub/action/context/build.go
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package context
-
-import (
-	"context"
-
-	"github.com/apache/camel-k/pkg/trait"
-
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/apache/camel-k/pkg/builder"
-	"github.com/apache/camel-k/pkg/platform"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/sirupsen/logrus"
-)
-
-// NewBuildAction creates a new build handling action for the context
-func NewBuildAction(ctx context.Context) Action {
-	return &buildAction{
-		ctx,
-	}
-}
-
-type buildAction struct {
-	context.Context
-}
-
-func (action *buildAction) Name() string {
-	return "build"
-}
-
-func (action *buildAction) CanHandle(context *v1alpha1.IntegrationContext) bool {
-	return context.Status.Phase == v1alpha1.IntegrationContextPhaseBuilding
-}
-
-func (action *buildAction) Handle(context *v1alpha1.IntegrationContext) error {
-	p, err := platform.GetCurrentPlatform(context.Namespace)
-	if err != nil {
-		return err
-	}
-	b, err := platform.GetPlatformBuilder(action.Context, context.Namespace)
-	if err != nil {
-		return err
-	}
-	env, err := trait.Apply(nil, context)
-	if err != nil {
-		return err
-	}
-
-	// assume there's no duplication nor conflict for now
-	repositories := make([]string, 0, len(context.Spec.Repositories)+len(p.Spec.Build.Repositories))
-	repositories = append(repositories, context.Spec.Repositories...)
-	repositories = append(repositories, p.Spec.Build.Repositories...)
-
-	r := builder.Request{
-		Meta:         context.ObjectMeta,
-		Dependencies: context.Spec.Dependencies,
-		Repositories: repositories,
-		Steps:        env.Steps,
-		BuildDir:     env.BuildDir,
-		Platform:     env.Platform.Spec,
-	}
-
-	res := b.Submit(r)
-	switch res.Status {
-	case builder.StatusSubmitted:
-		logrus.Info("Build submitted")
-	case builder.StatusStarted:
-		logrus.Info("Build started")
-	case builder.StatusError:
-		target := context.DeepCopy()
-		target.Status.Phase = v1alpha1.IntegrationContextPhaseError
-
-		logrus.Infof("Context %s transitioning to state %s, reason: %s", target.Name, target.Status.Phase, res.Error.Error())
-
-		// remove the build from cache
-		defer b.Purge(r)
-
-		return sdk.Update(target)
-	case builder.StatusCompleted:
-		target := context.DeepCopy()
-		target.Status.Image = res.Image
-		target.Status.PublicImage = res.PublicImage
-		target.Status.Phase = v1alpha1.IntegrationContextPhaseReady
-		target.Status.Artifacts = make([]v1alpha1.Artifact, 0, len(res.Artifacts))
-
-		for _, a := range res.Artifacts {
-			// do not include artifact location
-			target.Status.Artifacts = append(target.Status.Artifacts, v1alpha1.Artifact{
-				ID:       a.ID,
-				Location: "",
-				Target:   a.Target,
-			})
-		}
-
-		logrus.Info("Context ", target.Name, " transitioning to state ", target.Status.Phase)
-
-		// remove the build from cache
-		defer b.Purge(r)
-
-		if err := sdk.Update(target); err != nil {
-			return err
-		}
-		if err := action.informIntegrations(target); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// informIntegrations triggers the processing of all integrations waiting for this context to be built
-func (action *buildAction) informIntegrations(context *v1alpha1.IntegrationContext) error {
-	list := v1alpha1.NewIntegrationList()
-	err := sdk.List(context.Namespace, &list, sdk.WithListOptions(&metav1.ListOptions{}))
-	if err != nil {
-		return err
-	}
-	for _, integration := range list.Items {
-		integration := integration // pin
-		if integration.Status.Context != context.Name {
-			continue
-		}
-
-		if integration.Annotations == nil {
-			integration.Annotations = make(map[string]string)
-		}
-		integration.Annotations["camel.apache.org/context.digest"] = context.Status.Digest
-		err = sdk.Update(&integration)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/pkg/stub/action/context/initialize.go b/pkg/stub/action/context/initialize.go
deleted file mode 100644
index 17c49418..00000000
--- a/pkg/stub/action/context/initialize.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package context
-
-import (
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/apache/camel-k/pkg/platform"
-	"github.com/apache/camel-k/pkg/util/digest"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/sirupsen/logrus"
-)
-
-// NewInitializeAction creates a new initialization handling action for the context
-func NewInitializeAction() Action {
-	return &initializeAction{}
-}
-
-type initializeAction struct {
-}
-
-func (action *initializeAction) Name() string {
-	return "initialize"
-}
-
-func (action *initializeAction) CanHandle(context *v1alpha1.IntegrationContext) bool {
-	return context.Status.Phase == ""
-}
-
-func (action *initializeAction) Handle(context *v1alpha1.IntegrationContext) error {
-	// The integration platform needs to be initialized before starting to create contexts
-	if _, err := platform.GetCurrentPlatform(context.Namespace); err != nil {
-		logrus.Info("Waiting for a integration platform to be initialized")
-		return nil
-	}
-
-	target := context.DeepCopy()
-
-	// execute custom initialization
-	//if err := trait.apply(nil, context); err != nil {
-	//	return err
-	//}
-
-	// update the status
-	logrus.Info("Context ", target.Name, " transitioning to state ", v1alpha1.IntegrationContextPhaseBuilding)
-	target.Status.Phase = v1alpha1.IntegrationContextPhaseBuilding
-	dgst, err := digest.ComputeForIntegrationContext(context)
-	if err != nil {
-		return err
-	}
-	target.Status.Digest = dgst
-
-	return sdk.Update(target)
-}
diff --git a/pkg/stub/action/context/monitor.go b/pkg/stub/action/context/monitor.go
deleted file mode 100644
index 41e8c45c..00000000
--- a/pkg/stub/action/context/monitor.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package context
-
-import (
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/apache/camel-k/pkg/util/digest"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/sirupsen/logrus"
-)
-
-// NewMonitorAction creates a new monitoring handling action for the context
-func NewMonitorAction() Action {
-	return &monitorAction{}
-}
-
-type monitorAction struct {
-}
-
-func (action *monitorAction) Name() string {
-	return "monitor"
-}
-
-func (action *monitorAction) CanHandle(context *v1alpha1.IntegrationContext) bool {
-	return context.Status.Phase == v1alpha1.IntegrationContextPhaseReady || context.Status.Phase == v1alpha1.IntegrationContextPhaseError
-}
-
-func (action *monitorAction) Handle(context *v1alpha1.IntegrationContext) error {
-	hash, err := digest.ComputeForIntegrationContext(context)
-	if err != nil {
-		return err
-	}
-	if hash != context.Status.Digest {
-		logrus.Info("IntegrationContext ", context.Name, " needs a rebuild")
-
-		target := context.DeepCopy()
-		target.Status.Digest = hash
-		logrus.Info("Context ", target.Name, " transitioning to state ", v1alpha1.IntegrationContextPhaseBuilding)
-		target.Status.Phase = v1alpha1.IntegrationContextPhaseBuilding
-		return sdk.Update(target)
-	}
-
-	return nil
-}
diff --git a/pkg/stub/action/integration/action.go b/pkg/stub/action/integration/action.go
deleted file mode 100644
index 729ec5ce..00000000
--- a/pkg/stub/action/integration/action.go
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package integration
-
-import (
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-)
-
-// Action --
-type Action interface {
-
-	// a user friendly name for the action
-	Name() string
-
-	// returns true if the action can handle the integration
-	CanHandle(integration *v1alpha1.Integration) bool
-
-	// executes the handling function
-	Handle(integration *v1alpha1.Integration) error
-}
diff --git a/pkg/stub/action/integration/build_context.go b/pkg/stub/action/integration/build_context.go
deleted file mode 100644
index 9f252258..00000000
--- a/pkg/stub/action/integration/build_context.go
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package integration
-
-import (
-	"fmt"
-
-	"github.com/apache/camel-k/pkg/trait"
-
-	"github.com/sirupsen/logrus"
-
-	"github.com/apache/camel-k/pkg/util"
-	"github.com/apache/camel-k/pkg/util/digest"
-
-	"github.com/rs/xid"
-
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-)
-
-// NewBuildContextAction create an action that handles integration context build
-func NewBuildContextAction(namespace string) Action {
-	return &buildContextAction{
-		namespace: namespace,
-	}
-}
-
-type buildContextAction struct {
-	namespace string
-}
-
-func (action *buildContextAction) Name() string {
-	return "build-context"
-}
-
-func (action *buildContextAction) CanHandle(integration *v1alpha1.Integration) bool {
-	return integration.Status.Phase == v1alpha1.IntegrationPhaseBuildingContext
-}
-
-func (action *buildContextAction) Handle(integration *v1alpha1.Integration) error {
-	ctx, err := LookupContextForIntegration(integration)
-	if err != nil {
-		//TODO: we may need to add a wait strategy, i.e give up after some time
-		return err
-	}
-
-	if ctx != nil {
-		if ctx.Labels["camel.apache.org/context.type"] == v1alpha1.IntegrationContextTypePlatform {
-			// This is a platform context and as it is auto generated it may get
-			// out of sync if the integration that has generated it, has been
-			// amended to add/remove dependencies
-
-			//TODO: this is a very simple check, we may need to provide a deps comparison strategy
-			if !util.StringSliceContains(ctx.Spec.Dependencies, integration.Status.Dependencies) {
-				// We need to re-generate a context, or search for a new one that
-				// satisfies the integration's needs, so let's remove the association
-				// with the current context
-				target := integration.DeepCopy()
-				target.Status.Context = ""
-				return sdk.Update(target)
-			}
-		}
-
-		if ctx.Status.Phase == v1alpha1.IntegrationContextPhaseError {
-			target := integration.DeepCopy()
-			target.Status.Image = ctx.ImageForIntegration()
-			target.Status.Context = ctx.Name
-			target.Status.Phase = v1alpha1.IntegrationPhaseError
-
-			target.Status.Digest, err = digest.ComputeForIntegration(target)
-			if err != nil {
-				return err
-			}
-
-			logrus.Info("Integration ", target.Name, " transitioning to state ", target.Status.Phase)
-
-			return sdk.Update(target)
-		}
-
-		if ctx.Status.Phase == v1alpha1.IntegrationContextPhaseReady {
-			target := integration.DeepCopy()
-			target.Status.Image = ctx.ImageForIntegration()
-			target.Status.Context = ctx.Name
-
-			dgst, err := digest.ComputeForIntegration(target)
-			if err != nil {
-				return err
-			}
-
-			target.Status.Digest = dgst
-
-			if _, err := trait.Apply(target, ctx); err != nil {
-				return err
-			}
-
-			logrus.Info("Integration ", target.Name, " transitioning to state ", target.Status.Phase)
-
-			return sdk.Update(target)
-		}
-
-		if integration.Status.Context == "" {
-			// We need to set the context
-			target := integration.DeepCopy()
-			target.Status.Context = ctx.Name
-			return sdk.Update(target)
-		}
-
-		return nil
-	}
-
-	platformCtxName := fmt.Sprintf("ctx-%s", xid.New())
-	platformCtx := v1alpha1.NewIntegrationContext(action.namespace, platformCtxName)
-
-	// Add some information for post-processing; this may need to be refactored
-	// into a proper data structure
-	platformCtx.Labels = map[string]string{
-		"camel.apache.org/context.type":               v1alpha1.IntegrationContextTypePlatform,
-		"camel.apache.org/context.created.by.kind":    v1alpha1.IntegrationKind,
-		"camel.apache.org/context.created.by.name":    integration.Name,
-		"camel.apache.org/context.created.by.version": integration.ResourceVersion,
-	}
-
-	// Set the context to have the same dependencies as the integration
-	platformCtx.Spec = v1alpha1.IntegrationContextSpec{
-		Dependencies: integration.Status.Dependencies,
-		Repositories: integration.Spec.Repositories,
-		Traits:       integration.Spec.Traits,
-	}
-
-	if err := sdk.Create(&platformCtx); err != nil {
-		return err
-	}
-
-	// Set the context name so that the next handling loop will follow the
-	// same path as an integration with a user-defined context
-	target := integration.DeepCopy()
-	target.Status.Context = platformCtxName
-
-	return sdk.Update(target)
-}
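The context-reuse decision above hinges on util.StringSliceContains(ctx.Spec.Dependencies, integration.Status.Dependencies): a platform context can serve an integration only if it already provides every dependency the integration needs. A minimal runnable sketch of that subset semantics (containsAll below is a local stand-in for the camel-k helper, not its actual code):

    package main

    import "fmt"

    // containsAll reports whether every element of required is present in
    // provided; this mirrors the semantics assumed for util.StringSliceContains.
    func containsAll(provided, required []string) bool {
        set := make(map[string]struct{}, len(provided))
        for _, d := range provided {
            set[d] = struct{}{}
        }
        for _, d := range required {
            if _, ok := set[d]; !ok {
                return false
            }
        }
        return true
    }

    func main() {
        ctxDeps := []string{"camel:core", "camel:log", "camel:timer"}
        integrationDeps := []string{"camel:log", "camel:timer"}
        fmt.Println(containsAll(ctxDeps, integrationDeps)) // true: the context can be reused
    }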
diff --git a/pkg/stub/action/integration/build_image.go b/pkg/stub/action/integration/build_image.go
deleted file mode 100644
index 62058f52..00000000
--- a/pkg/stub/action/integration/build_image.go
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package integration
-
-import (
-	"context"
-	"fmt"
-	"path"
-
-	"github.com/pkg/errors"
-
-	"github.com/apache/camel-k/pkg/util/digest"
-
-	"github.com/apache/camel-k/pkg/trait"
-
-	"github.com/apache/camel-k/pkg/builder"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/sirupsen/logrus"
-
-	"github.com/apache/camel-k/pkg/platform"
-
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-)
-
-// NewBuildImageAction create an action that handles integration image build
-func NewBuildImageAction(ctx context.Context, namespace string) Action {
-	return &buildImageAction{
-		Context:   ctx,
-		namespace: namespace,
-	}
-}
-
-type buildImageAction struct {
-	context.Context
-	namespace string
-}
-
-func (action *buildImageAction) Name() string {
-	return "build-image"
-}
-
-func (action *buildImageAction) CanHandle(integration *v1alpha1.Integration) bool {
-	return integration.Status.Phase == v1alpha1.IntegrationPhaseBuildingImage
-}
-
-func (action *buildImageAction) Handle(integration *v1alpha1.Integration) error {
-
-	// in this phase the integration needs to be associated with a context whose image
-	// will be used as the base image for the integration image
-	if integration.Status.Context == "" {
-		return fmt.Errorf("context is not set for integration: %s", integration.Name)
-	}
-
-	// look up the integration context associated with this integration; this is needed
-	// to determine the base image
-	ctx := v1alpha1.NewIntegrationContext(integration.Namespace, integration.Status.Context)
-	if err := sdk.Get(&ctx); err != nil {
-		return errors.Wrapf(err, "unable to find integration context %s, %s", ctx.Name, err)
-	}
-
-	b, err := platform.GetPlatformBuilder(action.Context, action.namespace)
-	if err != nil {
-		return err
-	}
-	env, err := trait.Apply(integration, &ctx)
-	if err != nil {
-		return err
-	}
-
-	// This build does not require determining dependencies or a project; the
-	// builder steps remove them
-	r := builder.Request{
-		Meta:     integration.ObjectMeta,
-		Steps:    env.Steps,
-		BuildDir: env.BuildDir,
-		Platform: env.Platform.Spec,
-		Image:    ctx.Status.Image,
-	}
-
-	// Sources are added as part of the standard deployment bits
-	r.Resources = make([]builder.Resource, 0, len(integration.Spec.Sources))
-
-	for _, source := range integration.Spec.Sources {
-		r.Resources = append(r.Resources, builder.Resource{
-			Content: []byte(source.Content),
-			Target:  path.Join("sources", source.Name),
-		})
-	}
-	for _, resource := range integration.Spec.Resources {
-		r.Resources = append(r.Resources, builder.Resource{
-			Content: []byte(resource.Content),
-			Target:  path.Join("resources", resource.Name),
-		})
-	}
-
-	res := b.Submit(r)
-
-	switch res.Status {
-	case builder.StatusSubmitted:
-		logrus.Info("Build submitted")
-	case builder.StatusStarted:
-		logrus.Info("Build started")
-	case builder.StatusError:
-		target := integration.DeepCopy()
-		target.Status.Phase = v1alpha1.IntegrationPhaseError
-
-		logrus.Infof("Integration %s transitioning to state %s, reason: %s", target.Name, target.Status.Phase, res.Error.Error())
-
-		// remove the build from cache
-		defer b.Purge(r)
-
-		return sdk.Update(target)
-	case builder.StatusCompleted:
-		target := integration.DeepCopy()
-		target.Status.Phase = v1alpha1.IntegrationPhaseDeploying
-		if res.PublicImage != "" {
-			target.Status.Image = res.PublicImage
-		} else {
-			target.Status.Image = res.Image
-		}
-
-		dgst, err := digest.ComputeForIntegration(integration)
-		if err != nil {
-			return err
-		}
-
-		target.Status.Digest = dgst
-
-		logrus.Info("Integration ", target.Name, " transitioning to state ", target.Status.Phase)
-
-		// remove the build from cache
-		defer b.Purge(r)
-
-		if err := sdk.Update(target); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
diff --git a/pkg/stub/action/integration/deploy.go b/pkg/stub/action/integration/deploy.go
deleted file mode 100644
index 7ceaeef4..00000000
--- a/pkg/stub/action/integration/deploy.go
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package integration
-
-import (
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/apache/camel-k/pkg/trait"
-	"github.com/apache/camel-k/pkg/util/kubernetes"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-)
-
-// NewDeployAction create an action that handles integration deploy
-func NewDeployAction() Action {
-	return &deployAction{}
-}
-
-type deployAction struct {
-}
-
-func (action *deployAction) Name() string {
-	return "deploy"
-}
-
-func (action *deployAction) CanHandle(integration *v1alpha1.Integration) bool {
-	return integration.Status.Phase == v1alpha1.IntegrationPhaseDeploying
-}
-
-func (action *deployAction) Handle(integration *v1alpha1.Integration) error {
-	ctxName := integration.Status.Context
-	if ctxName == "" {
-		return errors.Errorf("no context set on integration %s", integration.Name)
-	}
-	ctx := v1alpha1.NewIntegrationContext(integration.Namespace, ctxName)
-
-	if err := sdk.Get(&ctx); err != nil {
-		return errors.Wrapf(err, "unable to find integration context %s, %s", ctxName, err)
-	}
-
-	env, err := trait.Apply(integration, &ctx)
-	if err != nil {
-		return err
-	}
-
-	// TODO we should look for objects that are no longer present in the collection and remove them
-	err = kubernetes.ReplaceResources(env.Resources.Items())
-	if err != nil {
-		return err
-	}
-
-	target := integration.DeepCopy()
-	target.Status.Phase = v1alpha1.IntegrationPhaseRunning
-	logrus.Info("Integration ", target.Name, " transitioning to state ", target.Status.Phase)
-
-	return sdk.Update(target)
-}
diff --git a/pkg/stub/action/integration/initialize.go b/pkg/stub/action/integration/initialize.go
deleted file mode 100644
index aa4f5dcb..00000000
--- a/pkg/stub/action/integration/initialize.go
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package integration
-
-import (
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/apache/camel-k/pkg/platform"
-	"github.com/apache/camel-k/pkg/trait"
-	"github.com/apache/camel-k/pkg/util/digest"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/sirupsen/logrus"
-)
-
-// NewInitializeAction creates a new initialize action
-func NewInitializeAction() Action {
-	return &initializeAction{}
-}
-
-type initializeAction struct {
-}
-
-// Name returns a common name of the action
-func (action *initializeAction) Name() string {
-	return "initialize"
-}
-
-// CanHandle tells whether this action can handle the integration
-func (action *initializeAction) CanHandle(integration *v1alpha1.Integration) bool {
-	return integration.Status.Phase == ""
-}
-
-// Handle handles the integrations
-func (action *initializeAction) Handle(integration *v1alpha1.Integration) error {
-	// The integration platform needs to be ready before starting to create integrations
-	if pl, err := platform.GetCurrentPlatform(integration.Namespace); err != nil || pl.Status.Phase != v1alpha1.IntegrationPlatformPhaseReady {
-		logrus.Info("Waiting for a integration platform to be ready")
-		return nil
-	}
-
-	target := integration.DeepCopy()
-	// better not to change the spec section of the target, because it may be used for comparison by a
-	// higher-level controller (e.g. the Knative source controller)
-
-	// execute custom initialization
-	if _, err := trait.Apply(target, nil); err != nil {
-		return err
-	}
-
-	// update the status
-	dgst, err := digest.ComputeForIntegration(integration)
-	if err != nil {
-		return err
-	}
-
-	target.Status.Phase = v1alpha1.IntegrationPhaseBuildingContext
-	target.Status.Digest = dgst
-	target.Status.Context = integration.Spec.Context
-	target.Status.Dependencies = integration.Spec.Dependencies
-	target.Status.Image = ""
-
-	logrus.Info("Integration ", target.Name, " transitioning to state ", target.Status.Phase)
-
-	return sdk.Update(target)
-}
diff --git a/pkg/stub/action/integration/monitor.go b/pkg/stub/action/integration/monitor.go
deleted file mode 100644
index 7f3bf335..00000000
--- a/pkg/stub/action/integration/monitor.go
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package integration
-
-import (
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/apache/camel-k/pkg/util/digest"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/sirupsen/logrus"
-)
-
-// NewMonitorAction creates a new monitoring action for an integration
-func NewMonitorAction() Action {
-	return &monitorAction{}
-}
-
-type monitorAction struct {
-}
-
-func (action *monitorAction) Name() string {
-	return "monitor"
-}
-
-func (action *monitorAction) CanHandle(integration *v1alpha1.Integration) bool {
-	return integration.Status.Phase == v1alpha1.IntegrationPhaseRunning ||
-		integration.Status.Phase == v1alpha1.IntegrationPhaseError
-}
-
-func (action *monitorAction) Handle(integration *v1alpha1.Integration) error {
-
-	hash, err := digest.ComputeForIntegration(integration)
-	if err != nil {
-		return err
-	}
-
-	if hash != integration.Status.Digest {
-		logrus.Info("Integration ", integration.Name, " needs a rebuild")
-
-		target := integration.DeepCopy()
-		target.Status.Digest = hash
-		target.Status.Phase = ""
-
-		logrus.Info("Integration ", target.Name, " transitioning to state ", target.Status.Phase)
-
-		return sdk.Update(target)
-	}
-
-	// TODO check also if deployment matches (e.g. replicas)
-	return nil
-}
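The monitor action works only as well as digest.ComputeForIntegration: the digest must change whenever a field that affects the build changes, and stay stable otherwise. A hedged sketch of the idea, hashing a canonical rendering of sources and dependencies (the fields the real implementation hashes may differ):

    package main

    import (
        "crypto/sha256"
        "encoding/base64"
        "fmt"
        "sort"
    )

    // computeDigest fingerprints the inputs that should trigger a rebuild.
    // Dependencies are sorted first so that ordering does not affect the digest.
    func computeDigest(sources, dependencies []string) string {
        h := sha256.New()
        for _, s := range sources {
            h.Write([]byte(s))
        }
        deps := append([]string(nil), dependencies...)
        sort.Strings(deps)
        for _, d := range deps {
            h.Write([]byte(d))
        }
        return base64.RawStdEncoding.EncodeToString(h.Sum(nil))
    }

    func main() {
        before := computeDigest([]string{"from('timer:tick').log('hi')"}, []string{"camel:log"})
        after := computeDigest([]string{"from('timer:tick').log('hi')"}, []string{"camel:log", "camel:http"})
        fmt.Println(before != after) // true: the added dependency forces a rebuild
    }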
diff --git a/pkg/stub/action/integration/util.go b/pkg/stub/action/integration/util.go
deleted file mode 100644
index 15230c76..00000000
--- a/pkg/stub/action/integration/util.go
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package integration
-
-import (
-	"github.com/apache/camel-k/pkg/util"
-
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/pkg/errors"
-)
-
-// LookupContextForIntegration --
-func LookupContextForIntegration(integration *v1alpha1.Integration) (*v1alpha1.IntegrationContext, error) {
-	if integration.Status.Context != "" {
-		name := integration.Status.Context
-		ctx := v1alpha1.NewIntegrationContext(integration.Namespace, name)
-
-		if err := sdk.Get(&ctx); err != nil {
-			return nil, errors.Wrapf(err, "unable to find integration context %s, %s", ctx.Name, err)
-		}
-
-		return &ctx, nil
-	}
-
-	ctxList := v1alpha1.NewIntegrationContextList()
-	if err := sdk.List(integration.Namespace, &ctxList); err != nil {
-		return nil, err
-	}
-
-	for _, ctx := range ctxList.Items {
-		ctx := ctx // pin
-		if ctx.Labels["camel.apache.org/context.type"] == v1alpha1.IntegrationContextTypePlatform {
-			ideps := len(integration.Status.Dependencies)
-			cdeps := len(ctx.Spec.Dependencies)
-
-			if ideps != cdeps {
-				continue
-			}
-
-			if util.StringSliceContains(ctx.Spec.Dependencies, integration.Status.Dependencies) {
-				return &ctx, nil
-			}
-		}
-	}
-
-	return nil, nil
-}
diff --git a/pkg/stub/action/platform/action.go b/pkg/stub/action/platform/action.go
deleted file mode 100644
index cb9ba4c7..00000000
--- a/pkg/stub/action/platform/action.go
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package platform
-
-import (
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-)
-
-// Action --
-type Action interface {
-	// a user-friendly name for the action
-	Name() string
-
-	// returns true if the action can handle the integration platform
-	CanHandle(platform *v1alpha1.IntegrationPlatform) bool
-
-	// executes the handling function
-	Handle(platform *v1alpha1.IntegrationPlatform) error
-}
diff --git a/pkg/stub/action/platform/create.go b/pkg/stub/action/platform/create.go
deleted file mode 100644
index c5b70fd7..00000000
--- a/pkg/stub/action/platform/create.go
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package platform
-
-import (
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/apache/camel-k/pkg/install"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/sirupsen/logrus"
-)
-
-var resources = []string{
-	"platform-integration-context-jvm.yaml",
-	"platform-integration-context-groovy.yaml",
-	"platform-integration-context-kotlin.yaml",
-	"platform-integration-context-spring-boot.yaml",
-}
-
-var knativeResources = []string{
-	"platform-integration-context-knative.yaml",
-}
-
-// NewCreateAction returns an action that creates the resources needed by the platform
-func NewCreateAction() Action {
-	return &createAction{}
-}
-
-type createAction struct {
-}
-
-func (action *createAction) Name() string {
-	return "create"
-}
-
-func (action *createAction) CanHandle(platform *v1alpha1.IntegrationPlatform) bool {
-	return platform.Status.Phase == v1alpha1.IntegrationPlatformPhaseCreating
-}
-
-func (action *createAction) Handle(platform *v1alpha1.IntegrationPlatform) error {
-	logrus.Info("Installing platform resources")
-	err := install.Resources(platform.Namespace, resources...)
-	if err != nil {
-		return err
-	}
-
-	if platform.Spec.Profile == v1alpha1.TraitProfileKnative {
-		logrus.Info("Installing knative resources")
-		err := install.Resources(platform.Namespace, knativeResources...)
-		if err != nil {
-			return err
-		}
-	}
-
-	target := platform.DeepCopy()
-	target.Status.Phase = v1alpha1.IntegrationPlatformPhaseStarting
-	logrus.Info("Platform ", target.Name, " transitioning to state ", target.Status.Phase)
-
-	return sdk.Update(target)
-}
diff --git a/pkg/stub/action/platform/initialize.go b/pkg/stub/action/platform/initialize.go
deleted file mode 100644
index 06e6b796..00000000
--- a/pkg/stub/action/platform/initialize.go
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package platform
-
-import (
-	"errors"
-
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	platformutils "github.com/apache/camel-k/pkg/platform"
-	"github.com/apache/camel-k/pkg/util/openshift"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/sirupsen/logrus"
-)
-
-// NewInitializeAction returns an action that initializes the platform configuration when it is not provided by the user
-func NewInitializeAction() Action {
-	return &initializeAction{}
-}
-
-type initializeAction struct {
-}
-
-func (action *initializeAction) Name() string {
-	return "initialize"
-}
-
-func (action *initializeAction) CanHandle(platform *v1alpha1.IntegrationPlatform) bool {
-	return platform.Status.Phase == "" || platform.Status.Phase == v1alpha1.IntegrationPlatformPhaseDuplicate
-}
-
-func (action *initializeAction) Handle(platform *v1alpha1.IntegrationPlatform) error {
-	target := platform.DeepCopy()
-
-	duplicate, err := action.isDuplicate(platform)
-	if err != nil {
-		return err
-	}
-	if duplicate {
-		// another platform already present in the namespace
-		if platform.Status.Phase != v1alpha1.IntegrationPlatformPhaseDuplicate {
-			target := platform.DeepCopy()
-			logrus.Info("Platform ", target.Name, " transitioning to state ", v1alpha1.IntegrationPlatformPhaseDuplicate)
-			target.Status.Phase = v1alpha1.IntegrationPlatformPhaseDuplicate
-			return sdk.Update(target)
-		}
-		return nil
-	}
-
-	// update missing fields in the resource
-	if target.Spec.Cluster == "" {
-		// determine the kind of cluster the platform is installed into
-		isOpenshift, err := openshift.IsOpenShift()
-		switch {
-		case err != nil:
-			return err
-		case isOpenshift:
-			target.Spec.Cluster = v1alpha1.IntegrationPlatformClusterOpenShift
-		default:
-			target.Spec.Cluster = v1alpha1.IntegrationPlatformClusterKubernetes
-		}
-	}
-
-	if target.Spec.Build.PublishStrategy == "" {
-		if target.Spec.Cluster == v1alpha1.IntegrationPlatformClusterOpenShift {
-			target.Spec.Build.PublishStrategy = v1alpha1.IntegrationPlatformBuildPublishStrategyS2I
-		} else {
-			target.Spec.Build.PublishStrategy = v1alpha1.IntegrationPlatformBuildPublishStrategyKaniko
-		}
-	}
-
-	if target.Spec.Build.PublishStrategy == v1alpha1.IntegrationPlatformBuildPublishStrategyKaniko && target.Spec.Build.Registry == "" {
-		return errors.New("no registry specified for publishing images")
-	}
-
-	if target.Spec.Profile == "" {
-		target.Spec.Profile = platformutils.GetProfile(target)
-	}
-
-	// next status
-	logrus.Info("Platform ", target.Name, " transitioning to state ", v1alpha1.IntegrationPlatformPhaseCreating)
-	target.Status.Phase = v1alpha1.IntegrationPlatformPhaseCreating
-	return sdk.Update(target)
-}
-
-func (action *initializeAction) isDuplicate(thisPlatform *v1alpha1.IntegrationPlatform) (bool, error) {
-	platforms, err := platformutils.ListPlatforms(thisPlatform.Namespace)
-	if err != nil {
-		return false, err
-	}
-	for _, platform := range platforms.Items {
-		platform := platform // pin
-		if platform.Name != thisPlatform.Name && platformutils.IsActive(&platform) {
-			return true, nil
-		}
-	}
-
-	return false, nil
-}
diff --git a/pkg/stub/action/platform/start.go b/pkg/stub/action/platform/start.go
deleted file mode 100644
index df205dbc..00000000
--- a/pkg/stub/action/platform/start.go
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package platform
-
-import (
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/sirupsen/logrus"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// NewStartAction returns an action that waits for all required platform resources to start
-func NewStartAction() Action {
-	return &startAction{}
-}
-
-type startAction struct {
-}
-
-func (action *startAction) Name() string {
-	return "start"
-}
-
-func (action *startAction) CanHandle(platform *v1alpha1.IntegrationPlatform) bool {
-	return platform.Status.Phase == v1alpha1.IntegrationPlatformPhaseStarting || platform.Status.Phase == v1alpha1.IntegrationPlatformPhaseError
-}
-
-func (action *startAction) Handle(platform *v1alpha1.IntegrationPlatform) error {
-	aggregatePhase, err := action.aggregatePlatformPhaseFromContexts(platform.Namespace)
-	if err != nil {
-		return err
-	}
-	if platform.Status.Phase != aggregatePhase {
-		target := platform.DeepCopy()
-		logrus.Info("Platform ", target.Name, " transitioning to state ", aggregatePhase)
-		target.Status.Phase = aggregatePhase
-		return sdk.Update(target)
-	}
-	// wait
-	return nil
-}
-
-func (action *startAction) aggregatePlatformPhaseFromContexts(namespace string) (v1alpha1.IntegrationPlatformPhase, error) {
-	ctxs := v1alpha1.NewIntegrationContextList()
-	options := metav1.ListOptions{
-		LabelSelector: "camel.apache.org/context.type=platform",
-	}
-	if err := sdk.List(namespace, &ctxs, sdk.WithListOptions(&options)); err != nil {
-		return "", err
-	}
-
-	countReady := 0
-	for _, ctx := range ctxs.Items {
-		if ctx.Status.Phase == v1alpha1.IntegrationContextPhaseError {
-			return v1alpha1.IntegrationPlatformPhaseError, nil
-		} else if ctx.Status.Phase == v1alpha1.IntegrationContextPhaseReady {
-			countReady++
-		}
-	}
-
-	if countReady < len(ctxs.Items) {
-		return v1alpha1.IntegrationPlatformPhaseStarting, nil
-	}
-
-	return v1alpha1.IntegrationPlatformPhaseReady, nil
-}
diff --git a/pkg/stub/handler.go b/pkg/stub/handler.go
deleted file mode 100644
index f4d3eaa1..00000000
--- a/pkg/stub/handler.go
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package stub
-
-import (
-	ctx "context"
-
-	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/apache/camel-k/pkg/stub/action/platform"
-
-	"github.com/apache/camel-k/pkg/stub/action/context"
-	"github.com/apache/camel-k/pkg/stub/action/integration"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/sirupsen/logrus"
-)
-
-// NewHandler --
-func NewHandler(ctx ctx.Context, namespace string) sdk.Handler {
-	return &handler{
-		integrationActionPool: []integration.Action{
-			integration.NewInitializeAction(),
-			integration.NewBuildContextAction(namespace),
-			integration.NewBuildImageAction(ctx, namespace),
-			integration.NewDeployAction(),
-			integration.NewMonitorAction(),
-		},
-		integrationContextActionPool: []context.Action{
-			context.NewInitializeAction(),
-			context.NewBuildAction(ctx),
-			context.NewMonitorAction(),
-		},
-		integrationPlatformActionPool: []platform.Action{
-			platform.NewInitializeAction(),
-			platform.NewCreateAction(),
-			platform.NewStartAction(),
-		},
-	}
-}
-
-type handler struct {
-	integrationActionPool         []integration.Action
-	integrationContextActionPool  []context.Action
-	integrationPlatformActionPool []platform.Action
-}
-
-func (h *handler) Handle(ctx ctx.Context, event sdk.Event) error {
-	switch o := event.Object.(type) {
-	case *v1alpha1.Integration:
-		for _, a := range h.integrationActionPool {
-			if a.CanHandle(o) {
-				logrus.Debug("Invoking action ", a.Name(), " on integration ", o.Name)
-				if err := a.Handle(o); err != nil {
-					return err
-				}
-			}
-		}
-	case *v1alpha1.IntegrationContext:
-		for _, a := range h.integrationContextActionPool {
-			if a.CanHandle(o) {
-				logrus.Debug("Invoking action ", a.Name(), " on context ", o.Name)
-				if err := a.Handle(o); err != nil {
-					return err
-				}
-			}
-		}
-	case *v1alpha1.IntegrationPlatform:
-		for _, a := range h.integrationPlatformActionPool {
-			if a.CanHandle(o) {
-				logrus.Debug("Invoking action ", a.Name(), " on platform ", o.Name)
-				if err := a.Handle(o); err != nil {
-					return err
-				}
-			}
-		}
-	}
-	return nil
-}
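The whole pkg/stub tree goes away because operator-sdk 0.3.0 abandons the sdk.Handler event loop in favour of controller-runtime reconcilers; the per-phase Action pattern survives, just driven from a Reconcile function. A sketch of that shape, assuming the era's controller-runtime API (reconcile.Request/reconcile.Result); the actual controller code added elsewhere in this PR may differ:

    package example

    import (
        "context"

        k8serrors "k8s.io/apimachinery/pkg/api/errors"
        k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
        "sigs.k8s.io/controller-runtime/pkg/reconcile"

        "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
    )

    // Action keeps the per-phase contract of the removed handler.
    type Action interface {
        Name() string
        CanHandle(*v1alpha1.Integration) bool
        Handle(*v1alpha1.Integration) error
    }

    // ReconcileIntegration replaces the Integration branch of the handler above.
    type ReconcileIntegration struct {
        client  k8sclient.Client
        actions []Action
    }

    // Reconcile fetches the resource named in the request and lets every
    // matching action handle it, mirroring the removed event loop.
    func (r *ReconcileIntegration) Reconcile(request reconcile.Request) (reconcile.Result, error) {
        instance := &v1alpha1.Integration{}
        if err := r.client.Get(context.TODO(), request.NamespacedName, instance); err != nil {
            if k8serrors.IsNotFound(err) {
                return reconcile.Result{}, nil // deleted in the meantime: nothing to do
            }
            return reconcile.Result{}, err
        }
        for _, a := range r.actions {
            if a.CanHandle(instance) {
                if err := a.Handle(instance); err != nil {
                    return reconcile.Result{}, err
                }
            }
        }
        return reconcile.Result{}, nil
    }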
diff --git a/pkg/trait/builder_test.go b/pkg/trait/builder_test.go
index bf863301..1afefabc 100644
--- a/pkg/trait/builder_test.go
+++ b/pkg/trait/builder_test.go
@@ -18,6 +18,7 @@ limitations under the License.
 package trait
 
 import (
+	"context"
 	"testing"
 
 	"k8s.io/api/core/v1"
@@ -42,7 +43,7 @@ func TestBuilderTraitNotAppliedBecauseOfNilContext(t *testing.T) {
 		e.Context = nil
 
 		t.Run(string(e.Platform.Spec.Cluster), func(t *testing.T) {
-			err := NewCatalog().apply(e)
+			err := NewBuilderTestCatalog().apply(e)
 
 			assert.Nil(t, err)
 			assert.NotEmpty(t, e.ExecutedTraits)
@@ -63,7 +64,7 @@ func TestBuilderTraitNotAppliedBecauseOfNilPhase(t *testing.T) {
 		e.Context.Status.Phase = ""
 
 		t.Run(string(e.Platform.Spec.Cluster), func(t *testing.T) {
-			err := NewCatalog().apply(e)
+			err := NewBuilderTestCatalog().apply(e)
 
 			assert.Nil(t, err)
 			assert.NotEmpty(t, e.ExecutedTraits)
@@ -75,7 +76,7 @@ func TestBuilderTraitNotAppliedBecauseOfNilPhase(t *testing.T) {
 
 func TestS2IBuilderTrait(t *testing.T) {
 	env := createBuilderTestEnv(v1alpha1.IntegrationPlatformClusterOpenShift, v1alpha1.IntegrationPlatformBuildPublishStrategyS2I)
-	err := NewCatalog().apply(env)
+	err := NewBuilderTestCatalog().apply(env)
 
 	assert.Nil(t, err)
 	assert.NotEmpty(t, env.ExecutedTraits)
@@ -95,7 +96,7 @@ func TestS2IBuilderTrait(t *testing.T) {
 
 func TestKanikoBuilderTrait(t *testing.T) {
 	env := createBuilderTestEnv(v1alpha1.IntegrationPlatformClusterKubernetes, v1alpha1.IntegrationPlatformBuildPublishStrategyKaniko)
-	err := NewCatalog().apply(env)
+	err := NewBuilderTestCatalog().apply(env)
 
 	assert.Nil(t, err)
 	assert.NotEmpty(t, env.ExecutedTraits)
@@ -153,3 +154,7 @@ func TestIPReplacement(t *testing.T) {
 	assert.Equal(t, "gcr.io/camel-k/camel-k:latest", getImageWithOpenShiftHost("gcr.io/camel-k/camel-k:latest"))
 	assert.Equal(t, "docker.io/camel-k:latest", getImageWithOpenShiftHost("docker.io/camel-k:latest"))
 }
+
+func NewBuilderTestCatalog() *Catalog {
+	return NewCatalog(context.TODO(), nil)
+}
diff --git a/pkg/trait/catalog.go b/pkg/trait/catalog.go
index 0e5b9255..47decb06 100644
--- a/pkg/trait/catalog.go
+++ b/pkg/trait/catalog.go
@@ -18,13 +18,14 @@ limitations under the License.
 package trait
 
 import (
+	"context"
 	"reflect"
 	"strings"
 
-	"github.com/sirupsen/logrus"
-
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/client"
 	"github.com/fatih/structs"
+	"github.com/sirupsen/logrus"
 )
 
 // Catalog collects all information about traits in one place
@@ -45,8 +46,8 @@ type Catalog struct {
 }
 
 // NewCatalog creates a new trait Catalog
-func NewCatalog() *Catalog {
-	return &Catalog{
+func NewCatalog(ctx context.Context, c client.Client) *Catalog {
+	catalog := Catalog{
 		tDebug:        newDebugTrait(),
 		tDependencies: newDependenciesTrait(),
 		tDeployment:   newDeploymentTrait(),
@@ -61,6 +62,16 @@ func NewCatalog() *Catalog {
 		tEnvironment:  newEnvironmentTrait(),
 		tClasspath:    newClasspathTrait(),
 	}
+
+	for _, t := range catalog.allTraits() {
+		if ctx != nil {
+			t.InjectContext(ctx)
+		}
+		if c != nil {
+			t.InjectClient(c)
+		}
+	}
+	return &catalog
 }
 
 func (c *Catalog) allTraits() []Trait {
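Every existing caller of NewCatalog has to change with this signature; the pattern the tests in this diff settle on is a small per-file helper. A condensed sketch of the two call sites, assuming an operator code path that already holds a context and a camel-k client:

    package example

    import (
        "context"

        "github.com/apache/camel-k/pkg/client"
        "github.com/apache/camel-k/pkg/trait"
    )

    // operatorCatalog is the production path: both the context and the client
    // get injected into every trait by the loop above.
    func operatorCatalog(ctx context.Context, c client.Client) *trait.Catalog {
        return trait.NewCatalog(ctx, c)
    }

    // testCatalog mirrors the New*TestCatalog helpers in this diff: no cluster
    // is available, so the nil client is simply skipped by the injection loop.
    func testCatalog() *trait.Catalog {
        return trait.NewCatalog(context.TODO(), nil)
    }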
diff --git a/pkg/trait/classpath.go b/pkg/trait/classpath.go
index 619e6b31..2eec71a9 100644
--- a/pkg/trait/classpath.go
+++ b/pkg/trait/classpath.go
@@ -21,11 +21,11 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
 	"github.com/pkg/errors"
 
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
 	"github.com/apache/camel-k/pkg/util/envvar"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 type classpathTrait struct {
@@ -57,8 +57,12 @@ func (t *classpathTrait) Apply(e *Environment) error {
 	if ctx == nil && e.Integration.Status.Context != "" {
 		name := e.Integration.Status.Context
 		c := v1alpha1.NewIntegrationContext(e.Integration.Namespace, name)
+		key := k8sclient.ObjectKey{
+			Namespace: e.Integration.Namespace,
+			Name:      name,
+		}
 
-		if err := sdk.Get(&c); err != nil {
+		if err := t.client.Get(t.ctx, key, &c); err != nil {
 			return errors.Wrapf(err, "unable to find integration context %s", name)
 		}
 
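This hunk shows the migration's recurring mechanical change: operator-sdk's global sdk.Get(obj), which derived the key from the object's own metadata, becomes an explicit client.Get(ctx, key, obj) on an injected client. A runnable illustration of the new lookup shape, using a ConfigMap and controller-runtime's fake client in place of the camel-k types:

    package main

    import (
        "context"
        "fmt"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
        "sigs.k8s.io/controller-runtime/pkg/client/fake"
    )

    func main() {
        seed := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "ctx-1"}}
        c := fake.NewFakeClient(seed)

        // explicit context, explicit key, caller-owned target object
        target := &corev1.ConfigMap{}
        key := k8sclient.ObjectKey{Namespace: "ns", Name: "ctx-1"}
        if err := c.Get(context.TODO(), key, target); err != nil {
            panic(err)
        }
        fmt.Println("found", target.Name)
    }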
diff --git a/pkg/trait/environment_test.go b/pkg/trait/environment_test.go
index 430bef2a..0fd35f15 100644
--- a/pkg/trait/environment_test.go
+++ b/pkg/trait/environment_test.go
@@ -18,6 +18,7 @@ limitations under the License.
 package trait
 
 import (
+	"context"
 	"testing"
 
 	corev1 "k8s.io/api/core/v1"
@@ -51,7 +52,7 @@ func TestDefaultEnvironment(t *testing.T) {
 		Resources:      kubernetes.NewCollection(),
 	}
 
-	err := NewCatalog().apply(&env)
+	err := NewEnvironmentTestCatalog().apply(&env)
 
 	assert.Nil(t, err)
 
@@ -106,7 +107,7 @@ func TestEnabledContainerMetaDataEnvVars(t *testing.T) {
 		Resources:      kubernetes.NewCollection(),
 	}
 
-	err := NewCatalog().apply(&env)
+	err := NewEnvironmentTestCatalog().apply(&env)
 
 	assert.Nil(t, err)
 
@@ -161,7 +162,7 @@ func TestDisabledContainerMetaDataEnvVars(t *testing.T) {
 		Resources:      kubernetes.NewCollection(),
 	}
 
-	err := NewCatalog().apply(&env)
+	err := NewEnvironmentTestCatalog().apply(&env)
 
 	assert.Nil(t, err)
 
@@ -187,3 +188,7 @@ func TestDisabledContainerMetaDataEnvVars(t *testing.T) {
 	assert.False(t, name)
 	assert.True(t, ck)
 }
+
+func NewEnvironmentTestCatalog() *Catalog {
+	return NewCatalog(context.TODO(), nil)
+}
diff --git a/pkg/trait/knative.go b/pkg/trait/knative.go
index f5f02a3c..cc6e990b 100644
--- a/pkg/trait/knative.go
+++ b/pkg/trait/knative.go
@@ -19,24 +19,20 @@ package trait
 
 import (
 	"fmt"
-
-	"github.com/apache/camel-k/pkg/util/envvar"
-
 	"strconv"
 	"strings"
 
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	"github.com/pkg/errors"
-
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-
 	knativeapi "github.com/apache/camel-k/pkg/apis/camel/v1alpha1/knative"
 	"github.com/apache/camel-k/pkg/metadata"
+	"github.com/apache/camel-k/pkg/util/envvar"
 	knativeutil "github.com/apache/camel-k/pkg/util/knative"
 	eventing "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
 	serving "github.com/knative/serving/pkg/apis/serving/v1alpha1"
+	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 const (
@@ -403,7 +399,7 @@ func (*knativeTrait) getSinkChannels(e *Environment) []string {
 	return channels
 }
 
-func (*knativeTrait) retrieveChannel(namespace string, name string) (*eventing.Channel, error) {
+func (t *knativeTrait) retrieveChannel(namespace string, name string) (*eventing.Channel, error) {
 	channel := eventing.Channel{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "Channel",
@@ -414,7 +410,11 @@ func (*knativeTrait) retrieveChannel(namespace string, name string) (*eventing.C
 			Name:      name,
 		},
 	}
-	if err := sdk.Get(&channel); err != nil {
+	key := k8sclient.ObjectKey{
+		Namespace: namespace,
+		Name:      name,
+	}
+	if err := t.client.Get(t.ctx, key, &channel); err != nil {
 		return nil, errors.Wrap(err, "could not retrieve channel "+name+" in namespace "+namespace)
 	}
 	return &channel, nil
diff --git a/pkg/trait/knative_test.go b/pkg/trait/knative_test.go
index 9827ef90..73406e2a 100644
--- a/pkg/trait/knative_test.go
+++ b/pkg/trait/knative_test.go
@@ -18,6 +18,7 @@ limitations under the License.
 package trait
 
 import (
+	"context"
 	"testing"
 
 	"github.com/apache/camel-k/pkg/util/envvar"
@@ -90,7 +91,7 @@ func TestKnativeTraitWithCompressedSources(t *testing.T) {
 		Resources:      kubernetes.NewCollection(),
 	}
 
-	err := NewCatalog().apply(&environment)
+	err := NewKnativeTestCatalog().apply(&environment)
 
 	assert.Nil(t, err)
 	assert.NotEmpty(t, environment.ExecutedTraits)
@@ -131,3 +132,7 @@ func TestKnativeTraitWithCompressedSources(t *testing.T) {
 	assert.True(t, services > 0)
 	assert.True(t, environment.Resources.Size() > 0)
 }
+
+func NewKnativeTestCatalog() *Catalog {
+	return NewCatalog(context.TODO(), nil)
+}
diff --git a/pkg/trait/trait.go b/pkg/trait/trait.go
index fdc54285..8c280868 100644
--- a/pkg/trait/trait.go
+++ b/pkg/trait/trait.go
@@ -18,7 +18,10 @@ limitations under the License.
 package trait
 
 import (
+	"context"
+
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/client"
 	"github.com/apache/camel-k/pkg/platform"
 	"github.com/apache/camel-k/pkg/util/kubernetes"
 	"github.com/pkg/errors"
@@ -29,13 +32,13 @@ import (
 const True = "true"
 
 // Apply --
-func Apply(integration *v1alpha1.Integration, ctx *v1alpha1.IntegrationContext) (*Environment, error) {
-	environment, err := newEnvironment(integration, ctx)
+func Apply(ctx context.Context, c client.Client, integration *v1alpha1.Integration, ictx *v1alpha1.IntegrationContext) (*Environment, error) {
+	environment, err := newEnvironment(ctx, c, integration, ictx)
 	if err != nil {
 		return nil, err
 	}
 
-	catalog := NewCatalog()
+	catalog := NewCatalog(ctx, c)
 
 	// invoke the trait framework to determine the needed resources
 	if err := catalog.apply(environment); err != nil {
@@ -46,7 +49,7 @@ func Apply(integration *v1alpha1.Integration, ctx *v1alpha1.IntegrationContext)
 }
 
 // newEnvironment creates a Environment from the given data
-func newEnvironment(integration *v1alpha1.Integration, ctx *v1alpha1.IntegrationContext) (*Environment, error) {
+func newEnvironment(ctx context.Context, c client.Client, integration *v1alpha1.Integration, ictx *v1alpha1.IntegrationContext) (*Environment, error) {
 	if integration == nil && ictx == nil {
 		return nil, errors.New("neither integration nor context is set")
 	}
@@ -54,17 +57,17 @@ func newEnvironment(integration *v1alpha1.Integration, ctx *v1alpha1.Integration
 	namespace := ""
 	if integration != nil {
 		namespace = integration.Namespace
-	} else if ctx != nil {
-		namespace = ctx.Namespace
+	} else if ictx != nil {
+		namespace = ictx.Namespace
 	}
 
-	pl, err := platform.GetCurrentPlatform(namespace)
+	pl, err := platform.GetCurrentPlatform(ctx, c, namespace)
 	if err != nil {
 		return nil, err
 	}
 
-	if ctx == nil {
-		ctx, err = GetIntegrationContext(integration)
+	if ictx == nil {
+		ictx, err = GetIntegrationContext(ctx, c, integration)
 		if err != nil {
 			return nil, err
 		}
@@ -72,7 +75,7 @@ func newEnvironment(integration *v1alpha1.Integration, ctx *v1alpha1.Integration
 
 	return &Environment{
 		Platform:       pl,
-		Context:        ctx,
+		Context:        ictx,
 		Integration:    integration,
 		ExecutedTraits: make([]Trait, 0),
 		Resources:      kubernetes.NewCollection(),
diff --git a/pkg/trait/trait_test.go b/pkg/trait/trait_test.go
index e2490d2e..3ea9b7d5 100644
--- a/pkg/trait/trait_test.go
+++ b/pkg/trait/trait_test.go
@@ -18,6 +18,7 @@ limitations under the License.
 package trait
 
 import (
+	"context"
 	"testing"
 
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
@@ -160,7 +161,7 @@ func TestTraitDecode(t *testing.T) {
 }
 
 func processTestEnv(t *testing.T, env *Environment) *kubernetes.Collection {
-	catalog := NewCatalog()
+	catalog := NewTraitTestCatalog()
 	err := catalog.apply(env)
 	assert.Nil(t, err)
 	return env.Resources
@@ -199,3 +200,7 @@ func createTestEnv(cluster v1alpha1.IntegrationPlatformCluster, script string) *
 		Resources:      kubernetes.NewCollection(),
 	}
 }
+
+func NewTraitTestCatalog() *Catalog {
+	return NewCatalog(context.TODO(), nil)
+}
diff --git a/pkg/trait/types.go b/pkg/trait/types.go
index bbba5acc..b9d892dd 100644
--- a/pkg/trait/types.go
+++ b/pkg/trait/types.go
@@ -18,8 +18,11 @@ limitations under the License.
 package trait
 
 import (
+	"context"
+
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
 	"github.com/apache/camel-k/pkg/builder"
+	"github.com/apache/camel-k/pkg/client"
 	"github.com/apache/camel-k/pkg/platform"
 	"github.com/apache/camel-k/pkg/util/kubernetes"
 	"k8s.io/api/core/v1"
@@ -36,6 +39,10 @@ type ID string
 // Trait is the interface of all traits
 type Trait interface {
 	Identifiable
+	client.Injectable
+
+	// InjectContext to inject a context
+	InjectContext(context.Context)
 
 	// Configure the trait
 	Configure(environment *Environment) (bool, error)
@@ -50,6 +57,8 @@ type Trait interface {
 type BaseTrait struct {
 	id      ID
 	Enabled *bool `property:"enabled"`
+	client  client.Client
+	ctx     context.Context
 }
 
 // ID returns the identifier of the trait
@@ -57,6 +66,16 @@ func (trait *BaseTrait) ID() ID {
 	return trait.id
 }
 
+// InjectClient implements client.Injectable and allows injecting a client into the trait
+func (trait *BaseTrait) InjectClient(c client.Client) {
+	trait.client = c
+}
+
+// InjectContext allows injecting a context into the trait
+func (trait *BaseTrait) InjectContext(ctx context.Context) {
+	trait.ctx = ctx
+}
+
 /* Environment */
 
 // An Environment provides the context in which the trait is executed
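Because every concrete trait embeds BaseTrait, adding the client and ctx fields here gives all of them API access for free once the catalog injects the two values. A runnable toy mirroring that embedding (the real traits also implement Configure and Apply against the Environment):

    package main

    import (
        "context"
        "fmt"
    )

    // baseTrait mirrors the shape above: shared fields plus injection methods.
    type baseTrait struct {
        id  string
        ctx context.Context
    }

    func (t *baseTrait) InjectContext(ctx context.Context) { t.ctx = ctx }

    // loggingTrait gets InjectContext for free by embedding baseTrait.
    type loggingTrait struct {
        baseTrait
    }

    func (t *loggingTrait) apply() {
        // t.ctx is available here without the trait declaring it itself
        fmt.Println("applying", t.id, "with injected ctx:", t.ctx != nil)
    }

    func main() {
        lt := &loggingTrait{baseTrait{id: "logging"}}
        lt.InjectContext(context.TODO())
        lt.apply()
    }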
diff --git a/pkg/trait/util.go b/pkg/trait/util.go
index f9875a5a..14554e77 100644
--- a/pkg/trait/util.go
+++ b/pkg/trait/util.go
@@ -18,22 +18,28 @@ limitations under the License.
 package trait
 
 import (
+	"context"
 	"strings"
 
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
+	"github.com/apache/camel-k/pkg/client"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 // GetIntegrationContext retrieves the context set on the integration
-func GetIntegrationContext(integration *v1alpha1.Integration) (*v1alpha1.IntegrationContext, error) {
+func GetIntegrationContext(ctx context.Context, c client.Client, integration *v1alpha1.Integration) (*v1alpha1.IntegrationContext, error) {
 	if integration.Status.Context == "" {
 		return nil, nil
 	}
 
 	name := integration.Status.Context
-	ctx := v1alpha1.NewIntegrationContext(integration.Namespace, name)
-	err := sdk.Get(&ctx)
-	return &ctx, err
+	ictx := v1alpha1.NewIntegrationContext(integration.Namespace, name)
+	key := k8sclient.ObjectKey{
+		Namespace: integration.Namespace,
+		Name:      name,
+	}
+	err := c.Get(ctx, key, &ictx)
+	return &ictx, err
 }
 
 // VisitConfigurations --
diff --git a/pkg/util/knative/knative.go b/pkg/util/knative/knative.go
index c17d7c0d..7f86ba9c 100644
--- a/pkg/util/knative/knative.go
+++ b/pkg/util/knative/knative.go
@@ -18,13 +18,15 @@ limitations under the License.
 package knative
 
 import (
-	"github.com/operator-framework/operator-sdk/pkg/k8sclient"
+	"context"
+
 	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/client-go/kubernetes"
 )
 
 // IsInstalled returns true if we are connected to a cluster with Knative installed
-func IsInstalled() (bool, error) {
-	_, err := k8sclient.GetKubeClient().Discovery().ServerResourcesForGroupVersion("serving.knative.dev/v1alpha1")
+func IsInstalled(ctx context.Context, c kubernetes.Interface) (bool, error) {
+	_, err := c.Discovery().ServerResourcesForGroupVersion("serving.knative.dev/v1alpha1")
 	if err != nil && errors.IsNotFound(err) {
 		return false, nil
 	} else if err != nil {
diff --git a/pkg/util/kubernetes/config.go b/pkg/util/kubernetes/config.go
deleted file mode 100644
index 634e9b80..00000000
--- a/pkg/util/kubernetes/config.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kubernetes
-
-import (
-	"os"
-	"os/user"
-	"path/filepath"
-
-	"github.com/operator-framework/operator-sdk/pkg/util/k8sutil"
-)
-
-// InitKubeClient initializes the k8s client
-func InitKubeClient(kubeconfig string) error {
-	if kubeconfig == "" {
-		kubeconfig = getDefaultKubeConfigFile()
-	}
-	os.Setenv(k8sutil.KubeConfigEnvVar, kubeconfig)
-	return nil
-}
-
-func getDefaultKubeConfigFile() string {
-	usr, err := user.Current()
-	if err != nil {
-		panic(err) // TODO handle error
-	}
-
-	return filepath.Join(usr.HomeDir, ".kube", "config")
-}
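config.go disappears because controller-runtime resolves the kubeconfig itself, so the operator no longer needs to pre-seed the KUBECONFIG environment variable. A minimal sketch of the replacement call (the same config.GetConfig used in customclient.go below), which falls back through the --kubeconfig flag, $KUBECONFIG, in-cluster config and ~/.kube/config, roughly in that order:

    package main

    import (
        "fmt"

        "sigs.k8s.io/controller-runtime/pkg/client/config"
    )

    func main() {
        // resolves the rest.Config from the usual locations; no env shuffling needed
        cfg, err := config.GetConfig()
        if err != nil {
            panic(err)
        }
        fmt.Println("API server:", cfg.Host)
    }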
diff --git a/pkg/util/kubernetes/customclient/customclient.go b/pkg/util/kubernetes/customclient/customclient.go
index c0e3b649..345be1db 100644
--- a/pkg/util/kubernetes/customclient/customclient.go
+++ b/pkg/util/kubernetes/customclient/customclient.go
@@ -18,28 +18,50 @@ limitations under the License.
 package customclient
 
 import (
-	"github.com/operator-framework/operator-sdk/pkg/k8sclient"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client/config"
 )
 
 // GetClientFor returns a RESTClient for the given group and version
-func GetClientFor(group string, version string) (*rest.RESTClient, error) {
-	inConfig := k8sclient.GetKubeConfig()
-	config := rest.CopyConfig(inConfig)
-	config.GroupVersion = &schema.GroupVersion{
+func GetClientFor(c kubernetes.Interface, group string, version string) (*rest.RESTClient, error) {
+	inConfig, err := config.GetConfig()
+	if err != nil {
+		return nil, err
+	}
+	conf := rest.CopyConfig(inConfig)
+	conf.GroupVersion = &schema.GroupVersion{
 		Group:   group,
 		Version: version,
 	}
-	config.APIPath = "/apis"
-	config.AcceptContentTypes = "application/json"
-	config.ContentType = "application/json"
+	conf.APIPath = "/apis"
+	conf.AcceptContentTypes = "application/json"
+	conf.ContentType = "application/json"
 
 	// this gets used for discovery and error handling types
-	config.NegotiatedSerializer = basicNegotiatedSerializer{}
-	if config.UserAgent == "" {
-		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	conf.NegotiatedSerializer = basicNegotiatedSerializer{}
+	if conf.UserAgent == "" {
+		conf.UserAgent = rest.DefaultKubernetesUserAgent()
 	}
 
-	return rest.RESTClientFor(config)
+	return rest.RESTClientFor(conf)
+}
+
+// GetDynamicClientFor returns a dynamic client for a given kind
+func GetDynamicClientFor(group string, version string, kind string, namespace string) (dynamic.ResourceInterface, error) {
+	conf, err := config.GetConfig()
+	if err != nil {
+		return nil, err
+	}
+	dynamicClient, err := dynamic.NewForConfig(conf)
+	if err != nil {
+		return nil, err
+	}
+	return dynamicClient.Resource(schema.GroupVersionResource{
+		Group:    group,
+		Version:  version,
+		Resource: kind,
+	}).Namespace(namespace), nil
 }
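One caveat on the new helper: despite the parameter name, the kind argument is fed straight into GroupVersionResource.Resource, so callers must pass the lowercase plural resource name. A hedged usage sketch listing camel-k integrations (the List signature matches the client-go dynamic client of this era, which did not yet take a context):

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        "github.com/apache/camel-k/pkg/util/kubernetes/customclient"
    )

    func main() {
        // "integrations" is a resource (plural) name, not a Kind
        dyn, err := customclient.GetDynamicClientFor("camel.apache.org", "v1alpha1", "integrations", "default")
        if err != nil {
            panic(err)
        }
        list, err := dyn.List(metav1.ListOptions{})
        if err != nil {
            panic(err)
        }
        fmt.Println("integrations found:", len(list.Items))
    }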
diff --git a/pkg/util/kubernetes/loader.go b/pkg/util/kubernetes/loader.go
index 19085f18..aba97005 100644
--- a/pkg/util/kubernetes/loader.go
+++ b/pkg/util/kubernetes/loader.go
@@ -19,15 +19,16 @@ package kubernetes
 
 import (
 	"encoding/json"
+	"fmt"
 
-	"github.com/operator-framework/operator-sdk/pkg/util/k8sutil"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/apimachinery/pkg/util/yaml"
 )
 
 // LoadResourceFromYaml loads a k8s resource from a yaml definition
-func LoadResourceFromYaml(data string) (runtime.Object, error) {
+func LoadResourceFromYaml(scheme *runtime.Scheme, data string) (runtime.Object, error) {
 	source := []byte(data)
 	jsonSource, err := yaml.ToJSON(source)
 	if err != nil {
@@ -38,8 +39,7 @@ func LoadResourceFromYaml(data string) (runtime.Object, error) {
 	if err != nil {
 		return nil, err
 	}
-
-	return k8sutil.RuntimeObjectFromUnstructured(&u)
+	return RuntimeObjectFromUnstructured(scheme, &u)
 }
 
 // LoadRawResourceFromYaml loads a k8s resource from a yaml definition without making assumptions on the underlying type
@@ -57,3 +57,20 @@ func LoadRawResourceFromYaml(data string) (runtime.Object, error) {
 		Object: objmap,
 	}, nil
 }
+
+// RuntimeObjectFromUnstructured converts an unstructured object into a typed runtime object
+func RuntimeObjectFromUnstructured(scheme *runtime.Scheme, u *unstructured.Unstructured) (runtime.Object, error) {
+	gvk := u.GroupVersionKind()
+	codecs := serializer.NewCodecFactory(scheme)
+	decoder := codecs.UniversalDecoder(gvk.GroupVersion())
+
+	b, err := u.MarshalJSON()
+	if err != nil {
+		return nil, fmt.Errorf("error running MarshalJSON on unstructured object: %v", err)
+	}
+	ro, _, err := decoder.Decode(b, &gvk, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode json data with gvk(%v): %v", gvk.String(), err)
+	}
+	return ro, nil
+}
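LoadResourceFromYaml now takes the scheme explicitly instead of relying on operator-sdk's global one, so callers must register every type their YAML can contain. A runnable sketch with a core-v1 scheme (the package alias avoids clashing with client-go's kubernetes package):

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/runtime"

        k8sutil "github.com/apache/camel-k/pkg/util/kubernetes"
    )

    func main() {
        scheme := runtime.NewScheme()
        if err := corev1.AddToScheme(scheme); err != nil {
            panic(err)
        }

        obj, err := k8sutil.LoadResourceFromYaml(scheme, `
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: example
    data:
      key: value
    `)
        if err != nil {
            panic(err)
        }
        fmt.Printf("decoded %T\n", obj) // *v1.ConfigMap
    }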
diff --git a/pkg/util/kubernetes/namespace.go b/pkg/util/kubernetes/namespace.go
deleted file mode 100644
index 664330cc..00000000
--- a/pkg/util/kubernetes/namespace.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kubernetes
-
-import (
-	"io/ioutil"
-
-	"github.com/pkg/errors"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/client-go/tools/clientcmd"
-	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-	clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
-)
-
-// GetClientCurrentNamespace --
-func GetClientCurrentNamespace(kubeconfig string) (string, error) {
-	if kubeconfig == "" {
-		kubeconfig = getDefaultKubeConfigFile()
-	}
-	if kubeconfig == "" {
-		return "default", nil
-	}
-
-	data, err := ioutil.ReadFile(kubeconfig)
-	if err != nil {
-		return "", err
-	}
-	config := clientcmdapi.NewConfig()
-	if len(data) == 0 {
-		return "", errors.New("kubernetes config file is empty")
-	}
-
-	decoded, _, err := clientcmdlatest.Codec.Decode(data, &schema.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"}, config)
-	if err != nil {
-		return "", err
-	}
-
-	clientcmdconfig := decoded.(*clientcmdapi.Config)
-
-	cc := clientcmd.NewDefaultClientConfig(*clientcmdconfig, &clientcmd.ConfigOverrides{})
-	ns, _, err := cc.Namespace()
-	return ns, err
-}
diff --git a/pkg/util/kubernetes/replace.go b/pkg/util/kubernetes/replace.go
index 6d8ea490..ba7b3477 100644
--- a/pkg/util/kubernetes/replace.go
+++ b/pkg/util/kubernetes/replace.go
@@ -18,20 +18,23 @@ limitations under the License.
 package kubernetes
 
 import (
+	"context"
+
+	"github.com/apache/camel-k/pkg/client"
 	eventing "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
 	routev1 "github.com/openshift/api/route/v1"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 // ReplaceResources completely replaces a list of resources on Kubernetes, taking care of immutable fields and resource versions
-func ReplaceResources(objects []runtime.Object) error {
+func ReplaceResources(ctx context.Context, c client.Client, objects []runtime.Object) error {
 	for _, object := range objects {
-		err := ReplaceResource(object)
+		err := ReplaceResource(ctx, c, object)
 		if err != nil {
 			return err
 		}
@@ -40,11 +43,16 @@ func ReplaceResources(objects []runtime.Object) error {
 }
 
 // ReplaceResource completely replaces a resource on Kubernetes, taking care of immutable fields and resource versions
-func ReplaceResource(res runtime.Object) error {
-	err := sdk.Create(res)
+func ReplaceResource(ctx context.Context, c client.Client, res runtime.Object) error {
+	err := c.Create(ctx, res)
 	if err != nil && k8serrors.IsAlreadyExists(err) {
 		existing := res.DeepCopyObject()
-		err = sdk.Get(existing)
+		var key k8sclient.ObjectKey
+		key, err = k8sclient.ObjectKeyFromObject(existing)
+		if err != nil {
+			return err
+		}
+		err = c.Get(ctx, key, existing)
 		if err != nil {
 			return err
 		}
@@ -52,7 +60,7 @@ func ReplaceResource(res runtime.Object) error {
 		mapRequiredServiceData(existing, res)
 		mapRequiredRouteData(existing, res)
 		mapRequiredKnativeData(existing, res)
-		err = sdk.Update(res)
+		err = c.Update(ctx, res)
 	}
 	if err != nil {
 		return errors.Wrap(err, "could not create or replace "+findResourceDetails(res))
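
The hunk above shows the core migration pattern of this PR: global sdk.Create/sdk.Get/sdk.Update calls become methods on an injected controller-runtime client, with the object key derived from the object itself. A minimal standalone sketch of the same create-or-replace flow, assuming the controller-runtime API of this vintage (ObjectKeyFromObject still returns an error) and a ConfigMap as a placeholder resource; the explicit resourceVersion copy stands in for the mapRequired*Data helpers, which are not shown here:

    package example

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        k8serrors "k8s.io/apimachinery/pkg/api/errors"
        k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // createOrReplace creates the object and, if it already exists, reads the
    // server copy to recover its resourceVersion before issuing the Update.
    func createOrReplace(ctx context.Context, c k8sclient.Client, cm *corev1.ConfigMap) error {
        err := c.Create(ctx, cm)
        if err == nil || !k8serrors.IsAlreadyExists(err) {
            return err
        }
        existing := cm.DeepCopy()
        key, err := k8sclient.ObjectKeyFromObject(existing)
        if err != nil {
            return err
        }
        if err := c.Get(ctx, key, existing); err != nil {
            return err
        }
        // The API server rejects updates that lack the current resourceVersion.
        cm.SetResourceVersion(existing.GetResourceVersion())
        return c.Update(ctx, cm)
    }
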
diff --git a/pkg/util/kubernetes/util.go b/pkg/util/kubernetes/util.go
index ae109080..6da33adb 100644
--- a/pkg/util/kubernetes/util.go
+++ b/pkg/util/kubernetes/util.go
@@ -18,26 +18,16 @@ limitations under the License.
 package kubernetes
 
 import (
-	"encoding/json"
 	"fmt"
 
-	"github.com/operator-framework/operator-sdk/pkg/util/k8sutil"
-	yaml "gopkg.in/yaml.v2"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"gopkg.in/yaml.v2"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/json"
 )
 
 // ToJSON --
 func ToJSON(value runtime.Object) ([]byte, error) {
-	u, err := k8sutil.UnstructuredFromRuntimeObject(value)
-	if err != nil {
-		return nil, fmt.Errorf("error creating unstructured data: %v", err)
-	}
-	data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, u)
-	if err != nil {
-		return nil, fmt.Errorf("error marshalling to json: %v", err)
-	}
-	return data, nil
+	return json.Marshal(value)
 }
 
 // ToYAML --
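
The simplified ToJSON leans on apimachinery's json package, which marshals a typed object directly instead of round-tripping through an unstructured map. A small runnable sketch of the call; the ConfigMap contents are illustrative:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/json"
    )

    func main() {
        cm := corev1.ConfigMap{
            TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
            ObjectMeta: metav1.ObjectMeta{Name: "example"},
            Data:       map[string]string{"greeting": "hello"},
        }
        // Marshal honors the Kubernetes JSON tags on the typed struct.
        data, err := json.Marshal(&cm)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(data))
    }
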
diff --git a/pkg/util/kubernetes/wait.go b/pkg/util/kubernetes/wait.go
index 9ae69480..e4895d74 100644
--- a/pkg/util/kubernetes/wait.go
+++ b/pkg/util/kubernetes/wait.go
@@ -18,11 +18,13 @@ limitations under the License.
 package kubernetes
 
 import (
+	"context"
 	"time"
 
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
+	"github.com/apache/camel-k/pkg/client"
 	"github.com/pkg/errors"
 	"k8s.io/apimachinery/pkg/runtime"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 // ResourceRetrieveFunction --
@@ -36,11 +38,14 @@ const (
 )
 
 // WaitCondition --
-func WaitCondition(obj runtime.Object, condition ResourceCheckFunction, maxDuration time.Duration) error {
+func WaitCondition(ctx context.Context, c client.Client, obj runtime.Object, condition ResourceCheckFunction, maxDuration time.Duration) error {
 	start := time.Now()
-
+	key, err := k8sclient.ObjectKeyFromObject(obj)
+	if err != nil {
+		return err
+	}
 	for start.Add(maxDuration).After(time.Now()) {
-		err := sdk.Get(obj)
+		err := c.Get(ctx, key, obj)
 		if err != nil {
 			time.Sleep(sleepTime)
 			continue
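
WaitCondition now resolves the object's key once, then polls Get on the injected client until the condition holds or the deadline passes. A self-contained sketch of that loop, with a hypothetical pod-running check standing in for the ResourceCheckFunction:

    package example

    import (
        "context"
        "errors"
        "time"

        corev1 "k8s.io/api/core/v1"
        k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // waitPodRunning polls the cluster until the pod reports Running or the
    // timeout elapses, mirroring the WaitCondition loop above.
    func waitPodRunning(ctx context.Context, c k8sclient.Client, pod *corev1.Pod, maxDuration time.Duration) error {
        key, err := k8sclient.ObjectKeyFromObject(pod)
        if err != nil {
            return err
        }
        deadline := time.Now().Add(maxDuration)
        for time.Now().Before(deadline) {
            if err := c.Get(ctx, key, pod); err == nil && pod.Status.Phase == corev1.PodRunning {
                return nil
            }
            time.Sleep(400 * time.Millisecond) // placeholder for the package's sleepTime
        }
        return errors.New("timeout reached while waiting for the pod to run")
    }
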
diff --git a/pkg/util/log/annotation_scraper.go b/pkg/util/log/annotation_scraper.go
index 5aad25d4..31aaba7f 100644
--- a/pkg/util/log/annotation_scraper.go
+++ b/pkg/util/log/annotation_scraper.go
@@ -26,14 +26,15 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
 	"github.com/sirupsen/logrus"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
 )
 
 // SelectorScraper scrapes all pods with a given selector
 type SelectorScraper struct {
+	client        kubernetes.Interface
 	namespace     string
 	labelSelector string
 	podScrapers   sync.Map
@@ -41,8 +42,9 @@ type SelectorScraper struct {
 }
 
 // NewSelectorScraper creates a new SelectorScraper
-func NewSelectorScraper(namespace string, labelSelector string) *SelectorScraper {
+func NewSelectorScraper(client kubernetes.Interface, namespace string, labelSelector string) *SelectorScraper {
 	return &SelectorScraper{
+		client:        client,
 		namespace:     namespace,
 		labelSelector: labelSelector,
 	}
@@ -121,7 +123,7 @@ func (s *SelectorScraper) synchronize(ctx context.Context, out *bufio.Writer) er
 }
 
 func (s *SelectorScraper) addPodScraper(ctx context.Context, name string, out *bufio.Writer) {
-	podScraper := NewPodScraper(s.namespace, name)
+	podScraper := NewPodScraper(s.client, s.namespace, name)
 	podCtx, podCancel := context.WithCancel(ctx)
 	id := atomic.AddUint64(&s.counter, 1)
 	prefix := "[" + strconv.FormatUint(id, 10) + "] "
@@ -156,20 +158,12 @@ func (s *SelectorScraper) addPodScraper(ctx context.Context, name string, out *b
 }
 
 func (s *SelectorScraper) listPods() (*v1.PodList, error) {
-	list := v1.PodList{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "Pod",
-			APIVersion: v1.SchemeGroupVersion.String(),
-		},
-	}
-
-	err := sdk.List(s.namespace, &list, sdk.WithListOptions(&metav1.ListOptions{
+	list, err := s.client.CoreV1().Pods(s.namespace).List(metav1.ListOptions{
 		LabelSelector: s.labelSelector,
-	}))
-
+	})
 	if err != nil {
 		return nil, err
 	}
 
-	return &list, nil
+	return list, nil
 }
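
listPods moves to the typed client-go clientset, which also removes the need to pre-fill TypeMeta on the list object. A sketch of the call shape, using the List signature of the client-go vintage vendored here (options by value, no context argument); namespace and selector become parameters for the example:

    package example

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // printPodNames lists pods matching a label selector through the typed clientset.
    func printPodNames(client kubernetes.Interface, namespace, selector string) error {
        list, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{
            LabelSelector: selector,
        })
        if err != nil {
            return err
        }
        for _, pod := range list.Items {
            fmt.Println(pod.Name)
        }
        return nil
    }
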
diff --git a/pkg/util/log/pod_scraper.go b/pkg/util/log/pod_scraper.go
index c46f2bed..a0c3c16c 100644
--- a/pkg/util/log/pod_scraper.go
+++ b/pkg/util/log/pod_scraper.go
@@ -23,15 +23,15 @@ import (
 	"io"
 	"time"
 
-	"github.com/operator-framework/operator-sdk/pkg/k8sclient"
-	"github.com/operator-framework/operator-sdk/pkg/util/k8sutil"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/json"
 	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/kubernetes"
 )
 
 var commonUserContainerNames = map[string]bool{
@@ -43,13 +43,15 @@ var commonUserContainerNames = map[string]bool{
 type PodScraper struct {
 	namespace string
 	name      string
+	client    kubernetes.Interface
 }
 
 // NewPodScraper creates a new pod scraper
-func NewPodScraper(namespace string, name string) *PodScraper {
+func NewPodScraper(c kubernetes.Interface, namespace string, name string) *PodScraper {
 	return &PodScraper{
 		namespace: namespace,
 		name:      name,
+		client:    c,
 	}
 }
 
@@ -76,7 +78,7 @@ func (s *PodScraper) doScrape(ctx context.Context, out *bufio.Writer, clientClos
 		Follow:    true,
 		Container: containerName,
 	}
-	byteReader, err := k8sclient.GetKubeClient().CoreV1().Pods(s.namespace).GetLogs(s.name, &logOptions).Context(ctx).Stream()
+	byteReader, err := s.client.CoreV1().Pods(s.namespace).GetLogs(s.name, &logOptions).Context(ctx).Stream()
 	if err != nil {
 		s.handleAndRestart(ctx, err, 5*time.Second, out, clientCloser)
 		return
@@ -142,11 +144,8 @@ func (s *PodScraper) waitForPodRunning(ctx context.Context, namespace string, na
 			Namespace: namespace,
 		},
 	}
-	resourceClient, _, err := k8sclient.GetResourceClient(pod.APIVersion, pod.Kind, pod.Namespace)
-	if err != nil {
-		return "", err
-	}
-	watcher, err := resourceClient.Watch(metav1.ListOptions{
+	podClient := s.client.CoreV1().Pods(pod.Namespace)
+	watcher, err := podClient.Watch(metav1.ListOptions{
 		FieldSelector: "metadata.name=" + pod.Name,
 	})
 	if err != nil {
@@ -163,19 +162,26 @@ func (s *PodScraper) waitForPodRunning(ctx context.Context, namespace string, na
 			}
 
 			if e.Object != nil {
+				var recvPod *v1.Pod
 				if runtimeUnstructured, ok := e.Object.(runtime.Unstructured); ok {
 					unstr := unstructured.Unstructured{
 						Object: runtimeUnstructured.UnstructuredContent(),
 					}
-					pcopy := pod.DeepCopy()
-					err := k8sutil.UnstructuredIntoRuntimeObject(&unstr, pcopy)
+					jsondata, err := unstr.MarshalJSON()
 					if err != nil {
 						return "", err
 					}
-
-					if pcopy.Status.Phase == v1.PodRunning {
-						return s.chooseContainer(pcopy), nil
+					recvPod = pod.DeepCopy()
+					if err := json.Unmarshal(jsondata, recvPod); err != nil {
+						return "", err
 					}
+
+				} else if gotPod, ok := e.Object.(*v1.Pod); ok {
+					recvPod = gotPod
+				}
+
+				if recvPod != nil && recvPod.Status.Phase == v1.PodRunning {
+					return s.chooseContainer(recvPod), nil
 				}
 			} else if e.Type == watch.Deleted || e.Type == watch.Error {
 				return "", errors.New("unable to watch pod " + s.name)
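
The watch loop above replaces the removed k8sutil.UnstructuredIntoRuntimeObject helper with an explicit JSON round-trip and also accepts already-typed Pod events. A compact sketch of just that decode step:

    package example

    import (
        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/util/json"
    )

    // podFromEvent converts a watch event payload into a typed Pod, handling
    // both unstructured and typed objects; it returns nil for other types.
    func podFromEvent(obj runtime.Object) (*corev1.Pod, error) {
        if ru, ok := obj.(runtime.Unstructured); ok {
            unstr := unstructured.Unstructured{Object: ru.UnstructuredContent()}
            jsondata, err := unstr.MarshalJSON()
            if err != nil {
                return nil, err
            }
            pod := &corev1.Pod{}
            if err := json.Unmarshal(jsondata, pod); err != nil {
                return nil, err
            }
            return pod, nil
        }
        pod, _ := obj.(*corev1.Pod)
        return pod, nil
    }
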
diff --git a/pkg/util/log/util.go b/pkg/util/log/util.go
index c32ae15b..eb20eb9a 100644
--- a/pkg/util/log/util.go
+++ b/pkg/util/log/util.go
@@ -25,11 +25,12 @@ import (
 	"os"
 
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"k8s.io/client-go/kubernetes"
 )
 
 // Print prints integration logs to stdout
-func Print(ctx context.Context, integration *v1alpha1.Integration) error {
-	scraper := NewSelectorScraper(integration.Namespace, "camel.apache.org/integration="+integration.Name)
+func Print(ctx context.Context, client kubernetes.Interface, integration *v1alpha1.Integration) error {
+	scraper := NewSelectorScraper(client, integration.Namespace, "camel.apache.org/integration="+integration.Name)
 	reader := scraper.Start(ctx)
 
 	if _, err := io.Copy(os.Stdout, ioutil.NopCloser(reader)); err != nil {
diff --git a/pkg/util/minishift/minishift.go b/pkg/util/minishift/minishift.go
index b9f6bbca..1a106e2a 100644
--- a/pkg/util/minishift/minishift.go
+++ b/pkg/util/minishift/minishift.go
@@ -19,11 +19,14 @@ limitations under the License.
 package minishift
 
 import (
+	"context"
 	"strconv"
 
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
-	v1 "k8s.io/api/core/v1"
+	"github.com/apache/camel-k/pkg/client"
+	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 const (
@@ -31,17 +34,20 @@ const (
 )
 
 // FindRegistry returns the Minishift registry location if any
-func FindRegistry() (*string, error) {
+func FindRegistry(ctx context.Context, c client.Client) (*string, error) {
 	svcs := v1.ServiceList{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: v1.SchemeGroupVersion.String(),
 			Kind:       "Service",
 		},
 	}
-	options := metav1.ListOptions{
-		LabelSelector: "kubernetes.io/minikube-addons=registry",
+	options := k8sclient.ListOptions{
+		LabelSelector: labels.SelectorFromSet(labels.Set{
+			"kubernetes.io/minikube-addons": "registry",
+		}),
+		Namespace: registryNamespace,
 	}
-	if err := sdk.List(registryNamespace, &svcs, sdk.WithListOptions(&options)); err != nil {
+	if err := c.List(ctx, &options, &svcs); err != nil {
 		return nil, err
 	}
 	if len(svcs.Items) == 0 {
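
FindRegistry adopts the controller-runtime List call, whose signature in this release takes the options before the list object, with a typed label selector instead of a selector string. A standalone sketch; "kube-system" is a placeholder for the registryNamespace constant, whose value is not shown in this hunk:

    package example

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/labels"
        k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // listRegistryServices lists services carrying the minikube registry label.
    func listRegistryServices(ctx context.Context, c k8sclient.Client) (*corev1.ServiceList, error) {
        svcs := corev1.ServiceList{}
        options := k8sclient.ListOptions{
            Namespace: "kube-system", // assumption; FindRegistry uses registryNamespace
            LabelSelector: labels.SelectorFromSet(labels.Set{
                "kubernetes.io/minikube-addons": "registry",
            }),
        }
        if err := c.List(ctx, &options, &svcs); err != nil {
            return nil, err
        }
        return &svcs, nil
    }
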
diff --git a/pkg/util/openshift/openshift.go b/pkg/util/openshift/openshift.go
index 0826dfda..7b27ee00 100644
--- a/pkg/util/openshift/openshift.go
+++ b/pkg/util/openshift/openshift.go
@@ -18,13 +18,13 @@ limitations under the License.
 package openshift
 
 import (
-	"github.com/operator-framework/operator-sdk/pkg/k8sclient"
 	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/client-go/kubernetes"
 )
 
 // IsOpenShift returns true if we are connected to an OpenShift cluster
-func IsOpenShift() (bool, error) {
-	_, err := k8sclient.GetKubeClient().Discovery().ServerResourcesForGroupVersion("image.openshift.io/v1")
+func IsOpenShift(client kubernetes.Interface) (bool, error) {
+	_, err := client.Discovery().ServerResourcesForGroupVersion("image.openshift.io/v1")
 	if err != nil && errors.IsNotFound(err) {
 		return false, nil
 	} else if err != nil {
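
IsOpenShift now takes the clientset explicitly and probes the discovery API for an OpenShift-only API group. The probe generalizes to any group/version; a short sketch:

    package example

    import (
        k8serrors "k8s.io/apimachinery/pkg/api/errors"
        "k8s.io/client-go/kubernetes"
    )

    // hasGroupVersion reports whether the cluster serves the given group/version,
    // e.g. hasGroupVersion(client, "image.openshift.io/v1") to detect OpenShift.
    func hasGroupVersion(client kubernetes.Interface, gv string) (bool, error) {
        _, err := client.Discovery().ServerResourcesForGroupVersion(gv)
        if err != nil && k8serrors.IsNotFound(err) {
            return false, nil
        }
        if err != nil {
            return false, err
        }
        return true, nil
    }
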
diff --git a/pkg/util/openshift/register.go b/pkg/util/openshift/register.go
index 373e42ed..3e00ffe8 100644
--- a/pkg/util/openshift/register.go
+++ b/pkg/util/openshift/register.go
@@ -24,19 +24,14 @@ import (
 	image "github.com/openshift/api/image/v1"
 	route "github.com/openshift/api/route/v1"
 	template "github.com/openshift/api/template/v1"
-	"github.com/operator-framework/operator-sdk/pkg/util/k8sutil"
 	"github.com/sirupsen/logrus"
 	"k8s.io/apimachinery/pkg/runtime"
 )
 
-// Register all OpenShift types that we want to manage.
-func init() {
-	k8sutil.AddToSDKScheme(addKnownTypes)
-}
-
 type registerFunction func(*runtime.Scheme) error
 
-func addKnownTypes(scheme *runtime.Scheme) error {
+// AddToScheme adds OpenShift types to the scheme
+func AddToScheme(scheme *runtime.Scheme) error {
 
 	var err error
 
@@ -48,14 +43,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 	err = doAdd(build.AddToScheme, scheme, err)
 	err = doAdd(authorization.AddToScheme, scheme, err)
 
-	// Legacy "oapi" resources
-	err = doAdd(apps.AddToSchemeInCoreGroup, scheme, err)
-	err = doAdd(template.AddToSchemeInCoreGroup, scheme, err)
-	err = doAdd(image.AddToSchemeInCoreGroup, scheme, err)
-	err = doAdd(route.AddToSchemeInCoreGroup, scheme, err)
-	err = doAdd(build.AddToSchemeInCoreGroup, scheme, err)
-	err = doAdd(authorization.AddToSchemeInCoreGroup, scheme, err)
-
 	return err
 }
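
With the SDK's global-scheme init() hook removed, type registration becomes an explicit step at client construction time. A sketch of how a caller might wire the new AddToScheme into a fresh scheme; AddToScheme is the function introduced above, the rest is standard apimachinery:

    package example

    import (
        "k8s.io/apimachinery/pkg/runtime"

        "github.com/apache/camel-k/pkg/util/openshift"
    )

    // newScheme builds a fresh scheme and registers the OpenShift types on it,
    // replacing the implicit SDK-scheme registration that was removed.
    func newScheme() (*runtime.Scheme, error) {
        scheme := runtime.NewScheme()
        if err := openshift.AddToScheme(scheme); err != nil {
            return nil, err
        }
        return scheme, nil
    }
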
 
diff --git a/pkg/util/watch/watch.go b/pkg/util/watch/watch.go
index 18473fb5..dcf35ae5 100644
--- a/pkg/util/watch/watch.go
+++ b/pkg/util/watch/watch.go
@@ -21,69 +21,14 @@ import (
 	"context"
 
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-	"github.com/operator-framework/operator-sdk/pkg/k8sclient"
-	"github.com/operator-framework/operator-sdk/pkg/util/k8sutil"
+	"github.com/apache/camel-k/pkg/util/kubernetes/customclient"
 	"github.com/sirupsen/logrus"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/json"
 )
 
-// StateChanges watches a integration resource and send it through a channel when its status changes
-func StateChanges(ctx context.Context, integration *v1alpha1.Integration) (<-chan *v1alpha1.Integration, error) {
-	resourceClient, _, err := k8sclient.GetResourceClient(integration.APIVersion, integration.Kind, integration.Namespace)
-	if err != nil {
-		return nil, err
-	}
-	watcher, err := resourceClient.Watch(metav1.ListOptions{
-		FieldSelector: "metadata.name=" + integration.Name,
-	})
-	if err != nil {
-		return nil, err
-	}
-	events := watcher.ResultChan()
-
-	out := make(chan *v1alpha1.Integration)
-	var lastObservedState *v1alpha1.IntegrationPhase
-
-	go func() {
-		defer watcher.Stop()
-		defer close(out)
-
-		for {
-			select {
-			case <-ctx.Done():
-				return
-			case e, ok := <-events:
-				if !ok {
-					return
-				}
-
-				if e.Object != nil {
-					if runtimeUnstructured, ok := e.Object.(runtime.Unstructured); ok {
-						unstr := unstructured.Unstructured{
-							Object: runtimeUnstructured.UnstructuredContent(),
-						}
-						icopy := integration.DeepCopy()
-						err := k8sutil.UnstructuredIntoRuntimeObject(&unstr, icopy)
-						if err != nil {
-							logrus.Error("Unexpected error detected when watching resource", err)
-							return // closes the channel
-						}
-
-						if lastObservedState == nil || *lastObservedState != icopy.Status.Phase {
-							lastObservedState = &icopy.Status.Phase
-							out <- icopy
-						}
-					}
-				}
-			}
-		}
-	}()
-
-	return out, nil
-}
-
 //
 // HandleStateChanges watches an integration resource and invokes the given handler when its status changes.
 //
@@ -98,11 +43,11 @@ func StateChanges(ctx context.Context, integration *v1alpha1.Integration) (<-cha
 // This function blocks until the handler function returns true or either the events channel or the context is closed.
 //
 func HandleStateChanges(ctx context.Context, integration *v1alpha1.Integration, handler func(integration *v1alpha1.Integration) bool) error {
-	resourceClient, _, err := k8sclient.GetResourceClient(integration.APIVersion, integration.Kind, integration.Namespace)
+	dynamicClient, err := customclient.GetDynamicClientFor(v1alpha1.SchemeGroupVersion.Group, v1alpha1.SchemeGroupVersion.Version, "integrations", integration.Namespace)
 	if err != nil {
 		return err
 	}
-	watcher, err := resourceClient.Watch(metav1.ListOptions{
+	watcher, err := dynamicClient.Watch(metav1.ListOptions{
 		FieldSelector: "metadata.name=" + integration.Name,
 	})
 	if err != nil {
@@ -128,8 +73,12 @@ func HandleStateChanges(ctx context.Context, integration *v1alpha1.Integration,
 					unstr := unstructured.Unstructured{
 						Object: runtimeUnstructured.UnstructuredContent(),
 					}
+					jsondata, err := unstr.MarshalJSON()
+					if err != nil {
+						return err
+					}
 					icopy := integration.DeepCopy()
-					err := k8sutil.UnstructuredIntoRuntimeObject(&unstr, icopy)
+					err = json.Unmarshal(jsondata, icopy)
 					if err != nil {
 						logrus.Error("Unexpected error detected when watching resource", err)
 						return nil
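
HandleStateChanges now builds its watcher from camel-k's own dynamic-client helper instead of the SDK resource client. A hypothetical caller, reusing only the GetDynamicClientFor and Watch signatures visible in this hunk:

    package example

    import (
        "fmt"

        "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
        "github.com/apache/camel-k/pkg/util/kubernetes/customclient"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // watchIntegration follows watch events for a single named integration.
    func watchIntegration(namespace, name string) error {
        dynamicClient, err := customclient.GetDynamicClientFor(
            v1alpha1.SchemeGroupVersion.Group, v1alpha1.SchemeGroupVersion.Version, "integrations", namespace)
        if err != nil {
            return err
        }
        watcher, err := dynamicClient.Watch(metav1.ListOptions{
            FieldSelector: "metadata.name=" + name,
        })
        if err != nil {
            return err
        }
        defer watcher.Stop()
        for event := range watcher.ResultChan() {
            fmt.Println("event:", event.Type)
        }
        return nil
    }
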
diff --git a/script/Makefile b/script/Makefile
new file mode 100644
index 00000000..a3815f77
--- /dev/null
+++ b/script/Makefile
@@ -0,0 +1,79 @@
+build: build-runtime build-operator build-kamel build-compile-integration-tests test
+
+build-go: build-embed-resources build-operator build-kamel
+
+build-operator: build-embed-resources
+	go build -o camel-k ./cmd/manager/*.go
+
+build-kamel:
+	go build -o kamel ./cmd/kamel/*.go
+
+build-embed-resources:
+	./script/embed_resources.sh deploy
+
+build-compile-integration-tests:
+	go test -c -tags=integration ./test/*.go
+
+build-runtime:
+	./mvnw clean install -f ./runtime/pom.xml
+
+release: clean prepare-release build images-build images-push cross-compile package-examples git-tag
+
+prepare-release:
+	./script/prepare_release.sh
+
+new-version: increment-snapshot build images-build images-push
+
+increment-snapshot:
+	./script/next_snapshot.sh
+
+cross-compile:
+	./script/cross_compile.sh
+
+package-examples:
+	./script/package_examples.sh
+
+git-tag:
+	./script/git_tag.sh
+
+dep:
+	dep ensure -v
+
+generate:
+	operator-sdk generate k8s
+
+clean:
+	./mvnw clean -f ./runtime/pom.xml
+	go clean
+	rm -f camel-k
+	rm -f kamel
+	rm -rf build/_maven_output
+	rm -rf build/_output
+
+images: images-build
+
+images-build:
+	./script/images_build.sh
+
+images-push:
+	./script/images_push.sh
+
+install: install-minishift
+install-minishift:
+	./script/install_minishift.sh
+
+install-minikube:
+	./script/install_minikube.sh
+
+test: check
+check:
+	go test ./...
+
+test-integration: check-integration
+check-integration:
+	go test ./... -tags=integration
+
+lint:
+	golangci-lint run
+
+.PHONY: build build-go build-operator build-kamel build-embed-resources build-runtime build-compile-integration-tests dep generate images images-build images-push test check test-integration check-integration clean release prepare-release cross-compile package-examples new-version git-tag increment-snapshot install install-minishift install-minikube lint
diff --git a/build/cross_compile.sh b/script/cross_compile.sh
similarity index 100%
rename from build/cross_compile.sh
rename to script/cross_compile.sh
diff --git a/script/embed_resources.sh b/script/embed_resources.sh
new file mode 100755
index 00000000..eec75c98
--- /dev/null
+++ b/script/embed_resources.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+
+if [ $# -ne 1 ]; then
+    echo "Error invoking embed_resources.sh: directory argument required"
+    exit 1
+fi
+
+location=$(dirname $0)
+destdir=$location/../$1
+destfile=$location/../$1/resources.go
+
+cat > $destfile << EOM
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by script/embed_resources.sh. DO NOT EDIT.
+
+package deploy
+
+var Resources map[string]string
+
+func init() {
+	Resources = make(map[string]string)
+
+EOM
+
+for f in $(ls $destdir | grep -e "\.yaml$" | grep -v -e "^operator\.yaml$"); do
+	printf "\tResources[\"$f\"] =\n\t\t\`\n" >> $destfile
+	cat $destdir/$f >> $destfile
+	printf "\n\`\n" >> $destfile
+done
+
+printf "\n}\n" >> $destfile
\ No newline at end of file
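
For orientation, the script emits a Go file of roughly the following shape; the map key and YAML body are placeholders, not actual camel-k resources:

    // Code generated by script/embed_resources.sh. DO NOT EDIT.

    package deploy

    // Resources maps a deploy/*.yaml file name to its raw YAML content.
    var Resources map[string]string

    func init() {
        Resources = make(map[string]string)

        Resources["example-crd.yaml"] =
            `
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    `
    }
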
diff --git a/build/get_version.sh b/script/get_version.sh
similarity index 100%
rename from build/get_version.sh
rename to script/get_version.sh
diff --git a/build/git_tag.sh b/script/git_tag.sh
similarity index 100%
rename from build/git_tag.sh
rename to script/git_tag.sh
diff --git a/build/images_build.sh b/script/images_build.sh
similarity index 100%
rename from build/images_build.sh
rename to script/images_build.sh
diff --git a/build/images_push.sh b/script/images_push.sh
similarity index 100%
rename from build/images_push.sh
rename to script/images_push.sh
diff --git a/build/install_minikube.sh b/script/install_minikube.sh
similarity index 100%
rename from build/install_minikube.sh
rename to script/install_minikube.sh
diff --git a/build/install_minishift.sh b/script/install_minishift.sh
similarity index 100%
rename from build/install_minishift.sh
rename to script/install_minishift.sh
diff --git a/build/next_snapshot.sh b/script/next_snapshot.sh
similarity index 100%
rename from build/next_snapshot.sh
rename to script/next_snapshot.sh
diff --git a/build/package_examples.sh b/script/package_examples.sh
similarity index 100%
rename from build/package_examples.sh
rename to script/package_examples.sh
diff --git a/script/package_maven_artifacts.sh b/script/package_maven_artifacts.sh
new file mode 100755
index 00000000..0454a7d0
--- /dev/null
+++ b/script/package_maven_artifacts.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+location=$(dirname $0)
+cd $location/../
+./mvnw clean install -DskipTests -f runtime/pom.xml -s build/maven/settings.xml
diff --git a/build/prepare_release.sh b/script/prepare_release.sh
similarity index 100%
rename from build/prepare_release.sh
rename to script/prepare_release.sh
diff --git a/build/set_version.sh b/script/set_version.sh
similarity index 100%
rename from build/set_version.sh
rename to script/set_version.sh
diff --git a/script/travis_build.sh b/script/travis_build.sh
new file mode 100755
index 00000000..2590f6bd
--- /dev/null
+++ b/script/travis_build.sh
@@ -0,0 +1,72 @@
+#!/bin/sh
+
+set -e
+
+# Find the JAVA_HOME and set the KOTLIN_JDK_HOME
+echo "Java home: $JAVA_HOME"
+export KOTLIN_JDK_HOME=$JAVA_HOME
+
+# First build the whole project
+make
+
+# set docker0 to promiscuous mode
+sudo ip link set docker0 promisc on
+
+# Download and install the oc binary
+sudo mount --make-shared /
+sudo service docker stop
+sudo sed -i 's/DOCKER_OPTS=\"/DOCKER_OPTS=\"--insecure-registry 172.30.0.0\/16 /' /etc/default/docker
+sudo service docker start
+wget https://github.com/openshift/origin/releases/download/v$OPENSHIFT_VERSION/openshift-origin-client-tools-v$OPENSHIFT_VERSION-$OPENSHIFT_COMMIT-linux-64bit.tar.gz
+tar xvzOf openshift-origin-client-tools-v$OPENSHIFT_VERSION-$OPENSHIFT_COMMIT-linux-64bit.tar.gz > oc.bin
+sudo mv oc.bin /usr/local/bin/oc
+sudo chmod 755 /usr/local/bin/oc
+
+# Figure out this host's IP address
+IP_ADDR="$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)"
+
+# Start OpenShift
+oc cluster up --public-hostname=$IP_ADDR
+
+oc login -u system:admin
+
+# Wait until we have a ready node in openshift
+TIMEOUT=0
+TIMEOUT_COUNT=60
+until [ $TIMEOUT -eq $TIMEOUT_COUNT ]; do
+  if [ -n "$(oc get nodes | grep Ready)" ]; then
+    break
+  fi
+
+  echo "openshift is not up yet"
+  TIMEOUT=$((TIMEOUT+1))
+  sleep 5
+done
+
+if [ $TIMEOUT -eq $TIMEOUT_COUNT ]; then
+  echo "Failed to start openshift"
+  exit 1
+fi
+
+echo "openshift is deployed and reachable"
+oc describe nodes
+
+echo "Adding maven artifacts to the image context"
+./script/package_maven_artifacts.sh
+
+echo "Copying binary file to docker dir"
+mkdir -p ./build/_output/bin
+cp ./camel-k ./build/_output/bin/
+
+echo "Building the images"
+export IMAGE=docker.io/apache/camel-k:$(./script/get_version.sh)
+docker build -t "${IMAGE}" -f build/Dockerfile .
+
+echo "installing camel k cluster resources"
+./kamel install --cluster-setup
+
+oc login -u developer
+
+# Then run integration tests
+make test-integration
diff --git a/test/build_manager_integration_test.go b/test/build_manager_integration_test.go
index b315ec45..b1f33428 100644
--- a/test/build_manager_integration_test.go
+++ b/test/build_manager_integration_test.go
@@ -22,7 +22,6 @@ limitations under the License.
 package test
 
 import (
-	"context"
 	"testing"
 	"time"
 
@@ -35,9 +34,8 @@ import (
 )
 
 func TestBuildManagerBuild(t *testing.T) {
-	ctx := context.TODO()
 	namespace := getTargetNamespace()
-	b := builder.New(ctx, namespace)
+	b := builder.New(testContext, testClient, namespace)
 
 	r := builder.Request{
 		Meta: v1.ObjectMeta{
@@ -76,9 +74,8 @@ func TestBuildManagerBuild(t *testing.T) {
 }
 
 func TestBuildManagerFailedBuild(t *testing.T) {
-	ctx := context.TODO()
 	namespace := getTargetNamespace()
-	b := builder.New(ctx, namespace)
+	b := builder.New(testContext, testClient, namespace)
 
 	r := builder.Request{
 		Meta: v1.ObjectMeta{
diff --git a/test/cluster_integration_test.go b/test/cluster_integration_test.go
index 28fc854b..93897b8d 100644
--- a/test/cluster_integration_test.go
+++ b/test/cluster_integration_test.go
@@ -29,15 +29,15 @@ import (
 )
 
 func TestInstallation(t *testing.T) {
-	installedCtxCRD, err := install.IsCRDInstalled("IntegrationContext")
+	installedCtxCRD, err := install.IsCRDInstalled(testContext, testClient, "IntegrationContext")
 	assert.Nil(t, err)
 	assert.True(t, installedCtxCRD)
 
-	installedCRD, err := install.IsCRDInstalled("Integration")
+	installedCRD, err := install.IsCRDInstalled(testContext, testClient, "Integration")
 	assert.Nil(t, err)
 	assert.True(t, installedCRD)
 
-	installedClusterRole, err := install.IsClusterRoleInstalled()
+	installedClusterRole, err := install.IsClusterRoleInstalled(testContext, testClient)
 	assert.Nil(t, err)
 	assert.True(t, installedClusterRole)
 }
diff --git a/test/log_scrape_integration_test.go b/test/log_scrape_integration_test.go
index e790e998..9a74b572 100644
--- a/test/log_scrape_integration_test.go
+++ b/test/log_scrape_integration_test.go
@@ -28,19 +28,18 @@ import (
 	"time"
 
 	"github.com/apache/camel-k/pkg/util/log"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
 	"github.com/stretchr/testify/assert"
 )
 
 func TestPodLogScrape(t *testing.T) {
 	token := "Hello Camel K!"
 	pod, err := createDummyPod("scraped", "/bin/sh", "-c", "for i in `seq 1 50`; do echo \""+token+"\" && sleep 2; done")
-	defer sdk.Delete(pod)
+	defer testClient.Delete(testContext, pod)
 	assert.Nil(t, err)
 
 	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(30*time.Second))
 	defer cancel()
-	scraper := log.NewPodScraper(pod.Namespace, pod.Name)
+	scraper := log.NewPodScraper(testClient, pod.Namespace, pod.Name)
 	in := scraper.Start(ctx)
 
 	res := make(chan bool)
@@ -70,12 +69,12 @@ func TestSelectorLogScrape(t *testing.T) {
 	token := "Hello Camel K!"
 	replicas := int32(3)
 	deployment, err := createDummyDeployment("scraped-deployment", &replicas, "scrape", "me", "/bin/sh", "-c", "for i in `seq 1 50`; do echo \""+token+"\" && sleep 2; done")
-	defer sdk.Delete(deployment)
+	defer testClient.Delete(testContext, deployment)
 	assert.Nil(t, err)
 
 	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(30*time.Second))
 	defer cancel()
-	scraper := log.NewSelectorScraper(deployment.Namespace, "scrape=me")
+	scraper := log.NewSelectorScraper(testClient, deployment.Namespace, "scrape=me")
 	in := scraper.Start(ctx)
 
 	res := make(chan string)
diff --git a/test/testing_env.go b/test/testing_env.go
index d8265c4e..dbeae00e 100644
--- a/test/testing_env.go
+++ b/test/testing_env.go
@@ -22,34 +22,47 @@ limitations under the License.
 package test
 
 import (
+	"context"
 	"time"
 
+	"github.com/apache/camel-k/pkg/client"
 	"github.com/apache/camel-k/pkg/install"
-	"github.com/apache/camel-k/pkg/util/kubernetes"
-	"github.com/operator-framework/operator-sdk/pkg/sdk"
 	appsv1 "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
+var testContext context.Context
+var testClient client.Client
+
+func newTestClient() (client.Client, error) {
+	return client.NewOutOfClusterClient("")
+}
+
 func init() {
-	// Initializes the kubernetes client to auto-detect the context
-	kubernetes.InitKubeClient("")
+	testContext = context.TODO()
+
+	err := install.SetupClusterwideResources(testContext, client.Provider{Get: newTestClient})
+	if err != nil {
+		panic(err)
+	}
 
-	err := install.SetupClusterwideResources()
+	testClient, err = newTestClient()
 	if err != nil {
 		panic(err)
 	}
 
-	err = install.Operator(getTargetNamespace())
+	err = install.Operator(testContext, testClient, getTargetNamespace())
 	if err != nil {
 		panic(err)
 	}
 }
 
 func getTargetNamespace() string {
-	ns, err := kubernetes.GetClientCurrentNamespace("")
+	ns, err := client.GetCurrentNamespace("")
 	if err != nil {
 		panic(err)
 	}
@@ -58,8 +71,7 @@ func getTargetNamespace() string {
 
 func createDummyDeployment(name string, replicas *int32, labelKey string, labelValue string, command ...string) (*appsv1.Deployment, error) {
 	deployment := getDummyDeployment(name, replicas, labelKey, labelValue, command...)
-	gracePeriod := int64(0)
-	err := sdk.Delete(&deployment, sdk.WithDeleteOptions(&metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}))
+	err := testClient.Delete(testContext, &deployment, k8sclient.GracePeriodSeconds(0))
 	if err != nil && !k8serrors.IsNotFound(err) {
 		return nil, err
 	}
@@ -70,10 +82,13 @@ func createDummyDeployment(name string, replicas *int32, labelKey string, labelV
 				APIVersion: v1.SchemeGroupVersion.String(),
 			},
 		}
-
-		err := sdk.List(getTargetNamespace(), &list, sdk.WithListOptions(&metav1.ListOptions{
-			LabelSelector: labelKey + "=" + labelValue,
-		}))
+		options := k8sclient.ListOptions{
+			Namespace: getTargetNamespace(),
+			LabelSelector: labels.SelectorFromSet(labels.Set{
+				labelKey: labelValue,
+			}),
+		}
+		err := testClient.List(testContext, &options, &list)
 		if err != nil {
 			return nil, err
 		}
@@ -84,7 +99,7 @@ func createDummyDeployment(name string, replicas *int32, labelKey string, labelV
 			break
 		}
 	}
-	err = sdk.Create(&deployment)
+	err = testClient.Create(testContext, &deployment)
 	return &deployment, err
 }
 
@@ -119,13 +134,12 @@ func getDummyDeployment(name string, replicas *int32, labelKey string, labelValu
 
 func createDummyPod(name string, command ...string) (*v1.Pod, error) {
 	pod := getDummyPod(name, command...)
-	gracePeriod := int64(0)
-	err := sdk.Delete(&pod, sdk.WithDeleteOptions(&metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}))
+	err := testClient.Delete(testContext, &pod, k8sclient.GracePeriodSeconds(0))
 	if err != nil && !k8serrors.IsNotFound(err) {
 		return nil, err
 	}
 	for {
-		err := sdk.Create(&pod)
+		err := testClient.Create(testContext, &pod)
 		if err != nil && k8serrors.IsAlreadyExists(err) {
 			time.Sleep(1 * time.Second)
 		} else if err != nil {
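
Both test helpers share a delete-then-recreate pattern: delete any leftover object with a zero grace period, then retry Create while the old instance finishes terminating. A condensed sketch against the same client API:

    package example

    import (
        "context"
        "time"

        corev1 "k8s.io/api/core/v1"
        k8serrors "k8s.io/apimachinery/pkg/api/errors"
        k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // recreatePod deletes any previous instance without a grace period, then
    // retries Create until the old object has finished terminating.
    func recreatePod(ctx context.Context, c k8sclient.Client, pod *corev1.Pod) error {
        err := c.Delete(ctx, pod, k8sclient.GracePeriodSeconds(0))
        if err != nil && !k8serrors.IsNotFound(err) {
            return err
        }
        for {
            err := c.Create(ctx, pod)
            if err != nil && k8serrors.IsAlreadyExists(err) {
                time.Sleep(1 * time.Second) // previous pod still terminating
                continue
            }
            return err
        }
    }
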
diff --git a/tmp/build/Dockerfile b/tmp/build/Dockerfile
deleted file mode 100644
index f0952d44..00000000
--- a/tmp/build/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-FROM fabric8/s2i-java:2.3
-
-#RUN adduser -D camel-k
-#USER camel-k
-
-ADD tmp/_maven_output /tmp/artifacts/m2
-
-ADD tmp/_output/bin/camel-k /usr/local/bin/camel-k
-
-USER 0
-RUN chgrp -R 0 /tmp/artifacts/m2 \
- && chmod -R g=u /tmp/artifacts/m2
-
-USER 1000
diff --git a/tmp/build/build.sh b/tmp/build/build.sh
deleted file mode 100755
index 6755c89f..00000000
--- a/tmp/build/build.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-if ! which go > /dev/null; then
-	echo "golang needs to be installed"
-	exit 1
-fi
-
-BIN_DIR="$(pwd)/tmp/_output/bin"
-mkdir -p ${BIN_DIR}
-PROJECT_NAME="camel-k"
-REPO_PATH="github.com/apache/camel-k"
-BUILD_PATH="${REPO_PATH}/cmd/${PROJECT_NAME}"
-echo "building "${PROJECT_NAME}"..."
-GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o ${BIN_DIR}/${PROJECT_NAME} $BUILD_PATH
diff --git a/tmp/build/docker_build.sh b/tmp/build/docker_build.sh
deleted file mode 100755
index da98858d..00000000
--- a/tmp/build/docker_build.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env bash
-
-if ! which docker > /dev/null; then
-	echo "docker needs to be installed"
-	exit 1
-fi
-
-: ${IMAGE:?"Need to set IMAGE, e.g. gcr.io/<repo>/<your>-operator"}
-
-echo "building container ${IMAGE}..."
-docker build -t "${IMAGE}" -f tmp/build/Dockerfile .
diff --git a/tmp/codegen/boilerplate.go.txt b/tmp/codegen/boilerplate.go.txt
deleted file mode 100644
index c61bcc7c..00000000
--- a/tmp/codegen/boilerplate.go.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
diff --git a/tmp/codegen/update-generated.sh b/tmp/codegen/update-generated.sh
deleted file mode 100755
index 060b6289..00000000
--- a/tmp/codegen/update-generated.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-vendor/k8s.io/code-generator/generate-groups.sh \
-deepcopy \
-github.com/apache/camel-k/pkg/generated \
-github.com/apache/camel-k/pkg/apis \
-camel:v1alpha1 \
---go-header-file "./tmp/codegen/boilerplate.go.txt"
diff --git a/tmp/maven/settings.xml b/tmp/maven/settings.xml
deleted file mode 100644
index 6d16002f..00000000
--- a/tmp/maven/settings.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
-          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-          xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
-                          https://maven.apache.org/xsd/settings-1.0.0.xsd">
-    <localRepository>tmp/_maven_output</localRepository>
-</settings>
\ No newline at end of file
diff --git a/vendor/cloud.google.com/go/asset/apiv1beta1/asset_client.go b/vendor/cloud.google.com/go/asset/apiv1beta1/asset_client.go
new file mode 100644
index 00000000..4ff3bdb3
--- /dev/null
+++ b/vendor/cloud.google.com/go/asset/apiv1beta1/asset_client.go
@@ -0,0 +1,248 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package asset
+
+import (
+	"context"
+	"time"
+
+	"cloud.google.com/go/longrunning"
+	lroauto "cloud.google.com/go/longrunning/autogen"
+	gax "github.com/googleapis/gax-go"
+	"google.golang.org/api/option"
+	"google.golang.org/api/transport"
+	assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
+	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+)
+
+// CallOptions contains the retry settings for each method of Client.
+type CallOptions struct {
+	ExportAssets          []gax.CallOption
+	BatchGetAssetsHistory []gax.CallOption
+}
+
+func defaultClientOptions() []option.ClientOption {
+	return []option.ClientOption{
+		option.WithEndpoint("cloudasset.googleapis.com:443"),
+		option.WithScopes(DefaultAuthScopes()...),
+	}
+}
+
+func defaultCallOptions() *CallOptions {
+	retry := map[[2]string][]gax.CallOption{
+		{"default", "idempotent"}: {
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.3,
+				})
+			}),
+		},
+	}
+	return &CallOptions{
+		ExportAssets:          retry[[2]string{"default", "non_idempotent"}],
+		BatchGetAssetsHistory: retry[[2]string{"default", "idempotent"}],
+	}
+}
+
+// Client is a client for interacting with Cloud Asset API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type Client struct {
+	// The connection to the service.
+	conn *grpc.ClientConn
+
+	// The gRPC API client.
+	client assetpb.AssetServiceClient
+
+	// LROClient is used internally to handle longrunning operations.
+	// It is exposed so that its CallOptions can be modified if required.
+	// Users should not Close this client.
+	LROClient *lroauto.OperationsClient
+
+	// The call options for this service.
+	CallOptions *CallOptions
+
+	// The x-goog-* metadata to be sent with each request.
+	xGoogMetadata metadata.MD
+}
+
+// NewClient creates a new asset service client.
+//
+// Asset service definition.
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
+	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
+	if err != nil {
+		return nil, err
+	}
+	c := &Client{
+		conn:        conn,
+		CallOptions: defaultCallOptions(),
+
+		client: assetpb.NewAssetServiceClient(conn),
+	}
+	c.setGoogleClientInfo()
+
+	c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
+	if err != nil {
+		// This error "should not happen", since we are just reusing old connection
+		// and never actually need to dial.
+		// If this does happen, we could leak conn. However, we cannot close conn:
+		// If the user invoked the function with option.WithGRPCConn,
+		// we would close a connection that's still in use.
+		// TODO(pongad): investigate error conditions.
+		return nil, err
+	}
+	return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *Client) Connection() *grpc.ClientConn {
+	return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *Client) Close() error {
+	return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *Client) setGoogleClientInfo(keyval ...string) {
+	kv := append([]string{"gl-go", versionGo()}, keyval...)
+	kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// ExportAssets exports assets with time and resource types to a given Cloud Storage
+// location. The output format is newline-delimited JSON.
+// This API implements the [google.longrunning.Operation][google.longrunning.Operation] API allowing you
+// to keep track of the export.
+func (c *Client) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest, opts ...gax.CallOption) (*ExportAssetsOperation, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.ExportAssets[0:len(c.CallOptions.ExportAssets):len(c.CallOptions.ExportAssets)], opts...)
+	var resp *longrunningpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.ExportAssets(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return &ExportAssetsOperation{
+		lro: longrunning.InternalNewOperation(c.LROClient, resp),
+	}, nil
+}
+
+// BatchGetAssetsHistory batch gets the update history of assets that overlap a time window.
+// For RESOURCE content, this API outputs history with asset in both
+// non-delete or deleted status.
+// For IAM_POLICY content, this API outputs history when the asset and its
+// attached IAM POLICY both exist. This can create gaps in the output history.
+func (c *Client) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest, opts ...gax.CallOption) (*assetpb.BatchGetAssetsHistoryResponse, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.BatchGetAssetsHistory[0:len(c.CallOptions.BatchGetAssetsHistory):len(c.CallOptions.BatchGetAssetsHistory)], opts...)
+	var resp *assetpb.BatchGetAssetsHistoryResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.BatchGetAssetsHistory(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ExportAssetsOperation manages a long-running operation from ExportAssets.
+type ExportAssetsOperation struct {
+	lro *longrunning.Operation
+}
+
+// ExportAssetsOperation returns a new ExportAssetsOperation from a given name.
+// The name must be that of a previously created ExportAssetsOperation, possibly from a different process.
+func (c *Client) ExportAssetsOperation(name string) *ExportAssetsOperation {
+	return &ExportAssetsOperation{
+		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
+	}
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *ExportAssetsOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
+	var resp assetpb.ExportAssetsResponse
+	if err := op.lro.WaitWithInterval(ctx, &resp, 5000*time.Millisecond, opts...); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *ExportAssetsOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
+	var resp assetpb.ExportAssetsResponse
+	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+		return nil, err
+	}
+	if !op.Done() {
+		return nil, nil
+	}
+	return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *ExportAssetsOperation) Metadata() (*assetpb.ExportAssetsRequest, error) {
+	var meta assetpb.ExportAssetsRequest
+	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+		return nil, nil
+	} else if err != nil {
+		return nil, err
+	}
+	return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *ExportAssetsOperation) Done() bool {
+	return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *ExportAssetsOperation) Name() string {
+	return op.lro.Name()
+}
diff --git a/vendor/cloud.google.com/go/asset/apiv1beta1/doc.go b/vendor/cloud.google.com/go/asset/apiv1beta1/doc.go
new file mode 100644
index 00000000..7ba09aff
--- /dev/null
+++ b/vendor/cloud.google.com/go/asset/apiv1beta1/doc.go
@@ -0,0 +1,89 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package asset is an auto-generated package for the
+// Cloud Asset API.
+//
+//   NOTE: This package is in alpha. It is not stable, and is likely to change.
+//
+// The cloud asset API manages the history and inventory of cloud resources.
+package asset // import "cloud.google.com/go/asset/apiv1beta1"
+
+import (
+	"context"
+	"runtime"
+	"strings"
+	"unicode"
+
+	"google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+	out, _ := metadata.FromOutgoingContext(ctx)
+	out = out.Copy()
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return metadata.NewOutgoingContext(ctx, out)
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+	return []string{
+		"https://www.googleapis.com/auth/cloud-platform",
+	}
+}
+
+// versionGo returns the Go runtime version. The returned string
+// has no whitespace, suitable for reporting in header.
+func versionGo() string {
+	const develPrefix = "devel +"
+
+	s := runtime.Version()
+	if strings.HasPrefix(s, develPrefix) {
+		s = s[len(develPrefix):]
+		if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+			s = s[:p]
+		}
+		return s
+	}
+
+	notSemverRune := func(r rune) bool {
+		return strings.IndexRune("0123456789.", r) < 0
+	}
+
+	if strings.HasPrefix(s, "go1") {
+		s = s[2:]
+		var prerelease string
+		if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
+			s, prerelease = s[:p], s[p:]
+		}
+		if strings.HasSuffix(s, ".") {
+			s += "0"
+		} else if strings.Count(s, ".") < 2 {
+			s += ".0"
+		}
+		if prerelease != "" {
+			s += "-" + prerelease
+		}
+		return s
+	}
+	return "UNKNOWN"
+}
+
+const versionClient = "20181129"
diff --git a/vendor/cloud.google.com/go/asset/v1beta1/asset_client.go b/vendor/cloud.google.com/go/asset/v1beta1/asset_client.go
new file mode 100644
index 00000000..4ff3bdb3
--- /dev/null
+++ b/vendor/cloud.google.com/go/asset/v1beta1/asset_client.go
@@ -0,0 +1,248 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package asset
+
+import (
+	"context"
+	"time"
+
+	"cloud.google.com/go/longrunning"
+	lroauto "cloud.google.com/go/longrunning/autogen"
+	gax "github.com/googleapis/gax-go"
+	"google.golang.org/api/option"
+	"google.golang.org/api/transport"
+	assetpb "google.golang.org/genproto/googleapis/cloud/asset/v1beta1"
+	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+)
+
+// CallOptions contains the retry settings for each method of Client.
+type CallOptions struct {
+	ExportAssets          []gax.CallOption
+	BatchGetAssetsHistory []gax.CallOption
+}
+
+func defaultClientOptions() []option.ClientOption {
+	return []option.ClientOption{
+		option.WithEndpoint("cloudasset.googleapis.com:443"),
+		option.WithScopes(DefaultAuthScopes()...),
+	}
+}
+
+func defaultCallOptions() *CallOptions {
+	retry := map[[2]string][]gax.CallOption{
+		{"default", "idempotent"}: {
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.3,
+				})
+			}),
+		},
+	}
+	return &CallOptions{
+		ExportAssets:          retry[[2]string{"default", "non_idempotent"}],
+		BatchGetAssetsHistory: retry[[2]string{"default", "idempotent"}],
+	}
+}
+
+// Client is a client for interacting with Cloud Asset API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type Client struct {
+	// The connection to the service.
+	conn *grpc.ClientConn
+
+	// The gRPC API client.
+	client assetpb.AssetServiceClient
+
+	// LROClient is used internally to handle longrunning operations.
+	// It is exposed so that its CallOptions can be modified if required.
+	// Users should not Close this client.
+	LROClient *lroauto.OperationsClient
+
+	// The call options for this service.
+	CallOptions *CallOptions
+
+	// The x-goog-* metadata to be sent with each request.
+	xGoogMetadata metadata.MD
+}
+
+// NewClient creates a new asset service client.
+//
+// Asset service definition.
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
+	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
+	if err != nil {
+		return nil, err
+	}
+	c := &Client{
+		conn:        conn,
+		CallOptions: defaultCallOptions(),
+
+		client: assetpb.NewAssetServiceClient(conn),
+	}
+	c.setGoogleClientInfo()
+
+	c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
+	if err != nil {
+		// This error "should not happen", since we are just reusing old connection
+		// and never actually need to dial.
+		// If this does happen, we could leak conn. However, we cannot close conn:
+		// If the user invoked the function with option.WithGRPCConn,
+		// we would close a connection that's still in use.
+		// TODO(pongad): investigate error conditions.
+		return nil, err
+	}
+	return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *Client) Connection() *grpc.ClientConn {
+	return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *Client) Close() error {
+	return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *Client) setGoogleClientInfo(keyval ...string) {
+	kv := append([]string{"gl-go", versionGo()}, keyval...)
+	kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// ExportAssets exports assets with time and resource types to a given Cloud Storage
+// location. The output format is newline-delimited JSON.
+// This API implements the [google.longrunning.Operation][google.longrunning.Operation] API allowing you
+// to keep track of the export.
+func (c *Client) ExportAssets(ctx context.Context, req *assetpb.ExportAssetsRequest, opts ...gax.CallOption) (*ExportAssetsOperation, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.ExportAssets[0:len(c.CallOptions.ExportAssets):len(c.CallOptions.ExportAssets)], opts...)
+	var resp *longrunningpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.ExportAssets(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return &ExportAssetsOperation{
+		lro: longrunning.InternalNewOperation(c.LROClient, resp),
+	}, nil
+}
+
+// BatchGetAssetsHistory batch gets the update history of assets that overlap a time window.
+// For RESOURCE content, this API outputs history with asset in both
+// non-delete or deleted status.
+// For IAM_POLICY content, this API outputs history when the asset and its
+// attached IAM POLICY both exist. This can create gaps in the output history.
+func (c *Client) BatchGetAssetsHistory(ctx context.Context, req *assetpb.BatchGetAssetsHistoryRequest, opts ...gax.CallOption) (*assetpb.BatchGetAssetsHistoryResponse, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.BatchGetAssetsHistory[0:len(c.CallOptions.BatchGetAssetsHistory):len(c.CallOptions.BatchGetAssetsHistory)], opts...)
+	var resp *assetpb.BatchGetAssetsHistoryResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.BatchGetAssetsHistory(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ExportAssetsOperation manages a long-running operation from ExportAssets.
+type ExportAssetsOperation struct {
+	lro *longrunning.Operation
+}
+
+// ExportAssetsOperation returns a new ExportAssetsOperation from a given name.
+// The name must be that of a previously created ExportAssetsOperation, possibly from a different process.
+func (c *Client) ExportAssetsOperation(name string) *ExportAssetsOperation {
+	return &ExportAssetsOperation{
+		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
+	}
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *ExportAssetsOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
+	var resp assetpb.ExportAssetsResponse
+	if err := op.lro.WaitWithInterval(ctx, &resp, 5000*time.Millisecond, opts...); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *ExportAssetsOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*assetpb.ExportAssetsResponse, error) {
+	var resp assetpb.ExportAssetsResponse
+	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+		return nil, err
+	}
+	if !op.Done() {
+		return nil, nil
+	}
+	return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *ExportAssetsOperation) Metadata() (*assetpb.ExportAssetsRequest, error) {
+	var meta assetpb.ExportAssetsRequest
+	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+		return nil, nil
+	} else if err != nil {
+		return nil, err
+	}
+	return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *ExportAssetsOperation) Done() bool {
+	return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *ExportAssetsOperation) Name() string {
+	return op.lro.Name()
+}
diff --git a/vendor/cloud.google.com/go/asset/v1beta1/doc.go b/vendor/cloud.google.com/go/asset/v1beta1/doc.go
new file mode 100644
index 00000000..a2a639b4
--- /dev/null
+++ b/vendor/cloud.google.com/go/asset/v1beta1/doc.go
@@ -0,0 +1,89 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package asset is an auto-generated package for the
+// Cloud Asset API.
+//
+//   NOTE: This package is in alpha. It is not stable, and is likely to change.
+//
+// The Cloud Asset API manages the history and inventory of cloud resources.
+package asset // import "cloud.google.com/go/asset/v1beta1"
+
+import (
+	"context"
+	"runtime"
+	"strings"
+	"unicode"
+
+	"google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+	out, _ := metadata.FromOutgoingContext(ctx)
+	out = out.Copy()
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return metadata.NewOutgoingContext(ctx, out)
+}
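+
+// For example (a sketch): insertMetadata merges a client-wide header with a
+// per-call routing header so that both appear on the outgoing context:
+//
+//	md := metadata.Pairs("x-goog-request-params", "parent=projects/my-project")
+//	ctx = insertMetadata(ctx, clientMetadata, md) // clientMetadata is hypothetical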
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+	return []string{
+		"https://www.googleapis.com/auth/cloud-platform",
+	}
+}
+
+// versionGo returns the Go runtime version. The returned string
+// contains no whitespace, making it suitable for reporting in a header.
+func versionGo() string {
+	const develPrefix = "devel +"
+
+	s := runtime.Version()
+	if strings.HasPrefix(s, develPrefix) {
+		s = s[len(develPrefix):]
+		if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+			s = s[:p]
+		}
+		return s
+	}
+
+	notSemverRune := func(r rune) bool {
+		return strings.IndexRune("0123456789.", r) < 0
+	}
+
+	if strings.HasPrefix(s, "go1") {
+		s = s[2:]
+		var prerelease string
+		if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
+			s, prerelease = s[:p], s[p:]
+		}
+		if strings.HasSuffix(s, ".") {
+			s += "0"
+		} else if strings.Count(s, ".") < 2 {
+			s += ".0"
+		}
+		if prerelease != "" {
+			s += "-" + prerelease
+		}
+		return s
+	}
+	return "UNKNOWN"
+}
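+
+// A few illustrative mappings from runtime.Version() to versionGo():
+//
+//	"go1.11"                  -> "1.11.0"
+//	"go1.12beta1"             -> "1.12.0-beta1"
+//	"devel +abc123 Thu Jan 1" -> "abc123"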
+
+const versionClient = "20181129"
diff --git a/vendor/cloud.google.com/go/bigquery/benchmarks/bench.go b/vendor/cloud.google.com/go/bigquery/benchmarks/bench.go
new file mode 100644
index 00000000..7d608c88
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/benchmarks/bench.go
@@ -0,0 +1,85 @@
+// Copyright 2017 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build ignore
+
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"flag"
+	"io/ioutil"
+	"log"
+	"time"
+
+	"cloud.google.com/go/bigquery"
+	"google.golang.org/api/iterator"
+)
+
+func main() {
+	flag.Parse()
+
+	ctx := context.Background()
+	c, err := bigquery.NewClient(ctx, flag.Arg(0))
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	queriesJSON, err := ioutil.ReadFile(flag.Arg(1))
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	var queries []string
+	if err := json.Unmarshal(queriesJSON, &queries); err != nil {
+		log.Fatal(err)
+	}
+
+	for _, q := range queries {
+		doQuery(ctx, c, q)
+	}
+}
+
+func doQuery(ctx context.Context, c *bigquery.Client, qt string) {
+	startTime := time.Now()
+	q := c.Query(qt)
+	it, err := q.Read(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	numRows, numCols := 0, 0
+	var firstByte time.Duration
+
+	for {
+		var values []bigquery.Value
+		err := it.Next(&values)
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			log.Fatal(err)
+		}
+		if numRows == 0 {
+			numCols = len(values)
+			firstByte = time.Since(startTime)
+		} else if numCols != len(values) {
+			log.Fatalf("got %d columns, want %d", len(values), numCols)
+		}
+		numRows++
+	}
+	log.Printf("query %q: %d rows, %d cols, first byte %f sec, total %f sec",
+		qt, numRows, numCols, firstByte.Seconds(), time.Since(startTime).Seconds())
+}
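+
+// Usage sketch (the project ID and file name are illustrative): the first
+// argument is a GCP project, the second a JSON file holding an array of SQL
+// query strings:
+//
+//	go run bench.go my-project queries.json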
diff --git a/vendor/cloud.google.com/go/bigquery/bigquery.go b/vendor/cloud.google.com/go/bigquery/bigquery.go
new file mode 100644
index 00000000..83e160e3
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/bigquery.go
@@ -0,0 +1,162 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	"cloud.google.com/go/internal"
+	"cloud.google.com/go/internal/version"
+	gax "github.com/googleapis/gax-go"
+	bq "google.golang.org/api/bigquery/v2"
+	"google.golang.org/api/googleapi"
+	"google.golang.org/api/option"
+	htransport "google.golang.org/api/transport/http"
+)
+
+const (
+	prodAddr = "https://www.googleapis.com/bigquery/v2/"
+	// Scope is the OAuth2 scope for the service.
+	Scope     = "https://www.googleapis.com/auth/bigquery"
+	userAgent = "gcloud-golang-bigquery/20160429"
+)
+
+var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)
+
+func setClientHeader(headers http.Header) {
+	headers.Set("x-goog-api-client", xGoogHeader)
+}
+
+// Client may be used to perform BigQuery operations.
+type Client struct {
+	// Location, if set, will be used as the default location for all subsequent
+	// dataset creation and job operations. A location specified directly in one of
+	// those operations will override this value.
+	Location string
+
+	projectID string
+	bqs       *bq.Service
+}
+
+// NewClient constructs a new Client which can perform BigQuery operations.
+// Operations performed via the client are billed to the specified GCP project.
+func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
+	o := []option.ClientOption{
+		option.WithEndpoint(prodAddr),
+		option.WithScopes(Scope),
+		option.WithUserAgent(userAgent),
+	}
+	o = append(o, opts...)
+	httpClient, endpoint, err := htransport.NewClient(ctx, o...)
+	if err != nil {
+		return nil, fmt.Errorf("bigquery: dialing: %v", err)
+	}
+	bqs, err := bq.New(httpClient)
+	if err != nil {
+		return nil, fmt.Errorf("bigquery: constructing client: %v", err)
+	}
+	bqs.BasePath = endpoint
+	c := &Client{
+		projectID: projectID,
+		bqs:       bqs,
+	}
+	return c, nil
+}
+
+// Close closes any resources held by the client.
+// Close should be called when the client is no longer needed.
+// It need not be called at program exit.
+func (c *Client) Close() error {
+	return nil
+}
+
+// insertJob calls the Jobs.Insert RPC and returns a Job.
+func (c *Client) insertJob(ctx context.Context, job *bq.Job, media io.Reader) (*Job, error) {
+	call := c.bqs.Jobs.Insert(c.projectID, job).Context(ctx)
+	setClientHeader(call.Header())
+	if media != nil {
+		call.Media(media)
+	}
+	var res *bq.Job
+	var err error
+	invoke := func() error {
+		res, err = call.Do()
+		return err
+	}
+	// A job with a client-generated ID can be retried; the presence of the
+	// ID makes the insert operation idempotent.
+	// We don't retry if there is media, because it is an io.Reader. We'd
+	// have to read the contents and keep them in memory, and that could be expensive.
+	// TODO(jba): Look into retrying if media != nil.
+	if job.JobReference != nil && media == nil {
+		err = runWithRetry(ctx, invoke)
+	} else {
+		err = invoke()
+	}
+	if err != nil {
+		return nil, err
+	}
+	return bqToJob(res, c)
+}
+
+// unixMillisToTime converts a number of milliseconds since the Unix epoch to
+// a time.Time. An input of zero is treated specially: it is converted to the
+// zero time rather than the start of the epoch.
+func unixMillisToTime(m int64) time.Time {
+	if m == 0 {
+		return time.Time{}
+	}
+	return time.Unix(0, m*1e6)
+}
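+
+// For example, unixMillisToTime(0) is the zero time.Time, while
+// unixMillisToTime(1000) is one second past the Unix epoch.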
+
+// runWithRetry calls the function until it returns nil or a non-retryable error, or
+// the context is done.
+// See the similar function in ../storage/invoke.go. The main difference is the
+// reason for retrying.
+func runWithRetry(ctx context.Context, call func() error) error {
+	// These parameters match the suggestions in https://cloud.google.com/bigquery/sla.
+	backoff := gax.Backoff{
+		Initial:    1 * time.Second,
+		Max:        32 * time.Second,
+		Multiplier: 2,
+	}
+	return internal.Retry(ctx, backoff, func() (stop bool, err error) {
+		err = call()
+		if err == nil {
+			return true, nil
+		}
+		return !retryableError(err), err
+	})
+}
+
+// retryableError reports whether err should be retried. This is the correct
+// definition of retryable according to the BigQuery team. It also considers
+// 502 ("Bad Gateway") and 503 ("Service Unavailable") errors retryable; these
+// are returned by systems between the client and the BigQuery service.
+func retryableError(err error) bool {
+	e, ok := err.(*googleapi.Error)
+	if !ok {
+		return false
+	}
+	var reason string
+	if len(e.Errors) > 0 {
+		reason = e.Errors[0].Reason
+	}
+	return e.Code == http.StatusServiceUnavailable || e.Code == http.StatusBadGateway || reason == "backendError" || reason == "rateLimitExceeded"
+}
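+
+// For example (a sketch): a 503 from the backend is retried, while a 404 is
+// surfaced immediately:
+//
+//	retryableError(&googleapi.Error{Code: 503}) // true
+//	retryableError(&googleapi.Error{Code: 404}) // false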
diff --git a/vendor/cloud.google.com/go/bigquery/copy.go b/vendor/cloud.google.com/go/bigquery/copy.go
new file mode 100644
index 00000000..44cc68d1
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/copy.go
@@ -0,0 +1,107 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"context"
+
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// CopyConfig holds the configuration for a copy job.
+type CopyConfig struct {
+	// Srcs are the tables from which data will be copied.
+	Srcs []*Table
+
+	// Dst is the table into which the data will be copied.
+	Dst *Table
+
+	// CreateDisposition specifies the circumstances under which the destination table will be created.
+	// The default is CreateIfNeeded.
+	CreateDisposition TableCreateDisposition
+
+	// WriteDisposition specifies how existing data in the destination table is treated.
+	// The default is WriteEmpty.
+	WriteDisposition TableWriteDisposition
+
+	// The labels associated with this job.
+	Labels map[string]string
+
+	// Custom encryption configuration (e.g., Cloud KMS keys).
+	DestinationEncryptionConfig *EncryptionConfig
+}
+
+func (c *CopyConfig) toBQ() *bq.JobConfiguration {
+	var ts []*bq.TableReference
+	for _, t := range c.Srcs {
+		ts = append(ts, t.toBQ())
+	}
+	return &bq.JobConfiguration{
+		Labels: c.Labels,
+		Copy: &bq.JobConfigurationTableCopy{
+			CreateDisposition:                  string(c.CreateDisposition),
+			WriteDisposition:                   string(c.WriteDisposition),
+			DestinationTable:                   c.Dst.toBQ(),
+			DestinationEncryptionConfiguration: c.DestinationEncryptionConfig.toBQ(),
+			SourceTables:                       ts,
+		},
+	}
+}
+
+func bqToCopyConfig(q *bq.JobConfiguration, c *Client) *CopyConfig {
+	cc := &CopyConfig{
+		Labels:                      q.Labels,
+		CreateDisposition:           TableCreateDisposition(q.Copy.CreateDisposition),
+		WriteDisposition:            TableWriteDisposition(q.Copy.WriteDisposition),
+		Dst:                         bqToTable(q.Copy.DestinationTable, c),
+		DestinationEncryptionConfig: bqToEncryptionConfig(q.Copy.DestinationEncryptionConfiguration),
+	}
+	for _, t := range q.Copy.SourceTables {
+		cc.Srcs = append(cc.Srcs, bqToTable(t, c))
+	}
+	return cc
+}
+
+// A Copier copies data into a BigQuery table from one or more BigQuery tables.
+type Copier struct {
+	JobIDConfig
+	CopyConfig
+	c *Client
+}
+
+// CopierFrom returns a Copier which can be used to copy data into a
+// BigQuery table from one or more BigQuery tables.
+// The returned Copier may optionally be further configured before its Run method is called.
+func (t *Table) CopierFrom(srcs ...*Table) *Copier {
+	return &Copier{
+		c: t.c,
+		CopyConfig: CopyConfig{
+			Srcs: srcs,
+			Dst:  t,
+		},
+	}
+}
+
+// Run initiates a copy job.
+func (c *Copier) Run(ctx context.Context) (*Job, error) {
+	return c.c.insertJob(ctx, c.newJob(), nil)
+}
+
+func (c *Copier) newJob() *bq.Job {
+	return &bq.Job{
+		JobReference:  c.JobIDConfig.createJobRef(c.c),
+		Configuration: c.CopyConfig.toBQ(),
+	}
+}
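+
+// Illustrative use of Copier (a sketch; dataset and table IDs are
+// hypothetical):
+//
+//	dst := client.Dataset("mydataset").Table("dst_table")
+//	copier := dst.CopierFrom(client.Dataset("mydataset").Table("src_table"))
+//	copier.WriteDisposition = bigquery.WriteTruncate
+//	job, err := copier.Run(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	status, err := job.Wait(ctx) // block until the copy job finishes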
diff --git a/vendor/cloud.google.com/go/bigquery/dataset.go b/vendor/cloud.google.com/go/bigquery/dataset.go
new file mode 100644
index 00000000..7bdb70a5
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/dataset.go
@@ -0,0 +1,536 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"cloud.google.com/go/internal/optional"
+	"cloud.google.com/go/internal/trace"
+	bq "google.golang.org/api/bigquery/v2"
+	"google.golang.org/api/iterator"
+)
+
+// Dataset is a reference to a BigQuery dataset.
+type Dataset struct {
+	ProjectID string
+	DatasetID string
+	c         *Client
+}
+
+// DatasetMetadata contains information about a BigQuery dataset.
+type DatasetMetadata struct {
+	// These fields can be set when creating a dataset.
+	Name                   string            // The user-friendly name for this dataset.
+	Description            string            // The user-friendly description of this dataset.
+	Location               string            // The geo location of the dataset.
+	DefaultTableExpiration time.Duration     // The default expiration time for new tables.
+	Labels                 map[string]string // User-provided labels.
+	Access                 []*AccessEntry    // Access permissions.
+
+	// These fields are read-only.
+	CreationTime     time.Time
+	LastModifiedTime time.Time // When the dataset or any of its tables were modified.
+	FullID           string    // The full dataset ID in the form projectID:datasetID.
+
+	// ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to
+	// ensure that the metadata hasn't changed since it was read.
+	ETag string
+}
+
+// DatasetMetadataToUpdate is used when updating a dataset's metadata.
+// Only non-nil fields will be updated.
+type DatasetMetadataToUpdate struct {
+	Description optional.String // The user-friendly description of this dataset.
+	Name        optional.String // The user-friendly name for this dataset.
+
+	// DefaultTableExpiration is the default expiration time for new tables.
+	// If set to time.Duration(0), new tables never expire.
+	DefaultTableExpiration optional.Duration
+
+	// The entire access list. It is not possible to replace individual entries.
+	Access []*AccessEntry
+
+	labelUpdater
+}
+
+// Dataset creates a handle to a BigQuery dataset in the client's project.
+func (c *Client) Dataset(id string) *Dataset {
+	return c.DatasetInProject(c.projectID, id)
+}
+
+// DatasetInProject creates a handle to a BigQuery dataset in the specified project.
+func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
+	return &Dataset{
+		ProjectID: projectID,
+		DatasetID: datasetID,
+		c:         c,
+	}
+}
+
+// Create creates a dataset in the BigQuery service. An error will be returned if the
+// dataset already exists. Pass in a DatasetMetadata value to configure the dataset.
+func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) (err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Create")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	ds, err := md.toBQ()
+	if err != nil {
+		return err
+	}
+	ds.DatasetReference = &bq.DatasetReference{DatasetId: d.DatasetID}
+	// Use Client.Location as a default.
+	if ds.Location == "" {
+		ds.Location = d.c.Location
+	}
+	call := d.c.bqs.Datasets.Insert(d.ProjectID, ds).Context(ctx)
+	setClientHeader(call.Header())
+	_, err = call.Do()
+	return err
+}
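+
+// For example (a sketch; the dataset ID and location are hypothetical):
+//
+//	md := &bigquery.DatasetMetadata{Location: "US"}
+//	if err := client.Dataset("my_dataset").Create(ctx, md); err != nil {
+//		return err
+//	}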
+
+func (dm *DatasetMetadata) toBQ() (*bq.Dataset, error) {
+	ds := &bq.Dataset{}
+	if dm == nil {
+		return ds, nil
+	}
+	ds.FriendlyName = dm.Name
+	ds.Description = dm.Description
+	ds.Location = dm.Location
+	ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond)
+	ds.Labels = dm.Labels
+	var err error
+	ds.Access, err = accessListToBQ(dm.Access)
+	if err != nil {
+		return nil, err
+	}
+	if !dm.CreationTime.IsZero() {
+		return nil, errors.New("bigquery: Dataset.CreationTime is not writable")
+	}
+	if !dm.LastModifiedTime.IsZero() {
+		return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable")
+	}
+	if dm.FullID != "" {
+		return nil, errors.New("bigquery: Dataset.FullID is not writable")
+	}
+	if dm.ETag != "" {
+		return nil, errors.New("bigquery: Dataset.ETag is not writable")
+	}
+	return ds, nil
+}
+
+func accessListToBQ(a []*AccessEntry) ([]*bq.DatasetAccess, error) {
+	var q []*bq.DatasetAccess
+	for _, e := range a {
+		a, err := e.toBQ()
+		if err != nil {
+			return nil, err
+		}
+		q = append(q, a)
+	}
+	return q, nil
+}
+
+// Delete deletes the dataset. Delete will fail if the dataset is not empty.
+func (d *Dataset) Delete(ctx context.Context) (err error) {
+	return d.deleteInternal(ctx, false)
+}
+
+// DeleteWithContents deletes the dataset, as well as contained resources.
+func (d *Dataset) DeleteWithContents(ctx context.Context) (err error) {
+	return d.deleteInternal(ctx, true)
+}
+
+func (d *Dataset) deleteInternal(ctx context.Context, deleteContents bool) (err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Delete")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx).DeleteContents(deleteContents)
+	setClientHeader(call.Header())
+	return call.Do()
+}
+
+// Metadata fetches the metadata for the dataset.
+func (d *Dataset) Metadata(ctx context.Context) (md *DatasetMetadata, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Metadata")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	call := d.c.bqs.Datasets.Get(d.ProjectID, d.DatasetID).Context(ctx)
+	setClientHeader(call.Header())
+	var ds *bq.Dataset
+	if err := runWithRetry(ctx, func() (err error) {
+		ds, err = call.Do()
+		return err
+	}); err != nil {
+		return nil, err
+	}
+	return bqToDatasetMetadata(ds)
+}
+
+func bqToDatasetMetadata(d *bq.Dataset) (*DatasetMetadata, error) {
+	dm := &DatasetMetadata{
+		CreationTime:           unixMillisToTime(d.CreationTime),
+		LastModifiedTime:       unixMillisToTime(d.LastModifiedTime),
+		DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
+		Description:            d.Description,
+		Name:                   d.FriendlyName,
+		FullID:                 d.Id,
+		Location:               d.Location,
+		Labels:                 d.Labels,
+		ETag:                   d.Etag,
+	}
+	for _, a := range d.Access {
+		e, err := bqToAccessEntry(a, nil)
+		if err != nil {
+			return nil, err
+		}
+		dm.Access = append(dm.Access, e)
+	}
+	return dm, nil
+}
+
+// Update modifies specific Dataset metadata fields.
+// To perform a read-modify-write that protects against intervening reads,
+// set the etag argument to the DatasetMetadata.ETag field from the read.
+// Pass the empty string for etag for a "blind write" that will always succeed.
+func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (md *DatasetMetadata, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Update")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	ds, err := dm.toBQ()
+	if err != nil {
+		return nil, err
+	}
+	call := d.c.bqs.Datasets.Patch(d.ProjectID, d.DatasetID, ds).Context(ctx)
+	setClientHeader(call.Header())
+	if etag != "" {
+		call.Header().Set("If-Match", etag)
+	}
+	var ds2 *bq.Dataset
+	if err := runWithRetry(ctx, func() (err error) {
+		ds2, err = call.Do()
+		return err
+	}); err != nil {
+		return nil, err
+	}
+	return bqToDatasetMetadata(ds2)
+}
+
+func (dm *DatasetMetadataToUpdate) toBQ() (*bq.Dataset, error) {
+	ds := &bq.Dataset{}
+	forceSend := func(field string) {
+		ds.ForceSendFields = append(ds.ForceSendFields, field)
+	}
+
+	if dm.Description != nil {
+		ds.Description = optional.ToString(dm.Description)
+		forceSend("Description")
+	}
+	if dm.Name != nil {
+		ds.FriendlyName = optional.ToString(dm.Name)
+		forceSend("FriendlyName")
+	}
+	if dm.DefaultTableExpiration != nil {
+		dur := optional.ToDuration(dm.DefaultTableExpiration)
+		if dur == 0 {
+			// Send a null to delete the field.
+			ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
+		} else {
+			ds.DefaultTableExpirationMs = int64(dur / time.Millisecond)
+		}
+	}
+	if dm.Access != nil {
+		var err error
+		ds.Access, err = accessListToBQ(dm.Access)
+		if err != nil {
+			return nil, err
+		}
+		if len(ds.Access) == 0 {
+			ds.NullFields = append(ds.NullFields, "Access")
+		}
+	}
+	labels, forces, nulls := dm.update()
+	ds.Labels = labels
+	ds.ForceSendFields = append(ds.ForceSendFields, forces...)
+	ds.NullFields = append(ds.NullFields, nulls...)
+	return ds, nil
+}
+
+// Table creates a handle to a BigQuery table in the dataset.
+// To determine if a table exists, call Table.Metadata.
+// If the table does not already exist, use Table.Create to create it.
+func (d *Dataset) Table(tableID string) *Table {
+	return &Table{ProjectID: d.ProjectID, DatasetID: d.DatasetID, TableID: tableID, c: d.c}
+}
+
+// Tables returns an iterator over the tables in the Dataset.
+func (d *Dataset) Tables(ctx context.Context) *TableIterator {
+	it := &TableIterator{
+		ctx:     ctx,
+		dataset: d,
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
+		it.fetch,
+		func() int { return len(it.tables) },
+		func() interface{} { b := it.tables; it.tables = nil; return b })
+	return it
+}
+
+// A TableIterator is an iterator over Tables.
+type TableIterator struct {
+	ctx      context.Context
+	dataset  *Dataset
+	tables   []*Table
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+}
+
+// Next returns the next result. Its second return value is iterator.Done if
+// there are no more results. Once Next returns Done, all subsequent calls
+// will return Done.
+func (it *TableIterator) Next() (*Table, error) {
+	if err := it.nextFunc(); err != nil {
+		return nil, err
+	}
+	t := it.tables[0]
+	it.tables = it.tables[1:]
+	return t, nil
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
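+
+// The usual iteration pattern (a sketch):
+//
+//	it := client.Dataset("my_dataset").Tables(ctx)
+//	for {
+//		t, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println(t.TableID)
+//	}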
+
+// for testing
+var listTables = func(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
+	call := it.dataset.c.bqs.Tables.List(it.dataset.ProjectID, it.dataset.DatasetID).
+		PageToken(pageToken).
+		Context(it.ctx)
+	setClientHeader(call.Header())
+	if pageSize > 0 {
+		call.MaxResults(int64(pageSize))
+	}
+	var res *bq.TableList
+	err := runWithRetry(it.ctx, func() (err error) {
+		res, err = call.Do()
+		return err
+	})
+	return res, err
+}
+
+func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
+	res, err := listTables(it, pageSize, pageToken)
+	if err != nil {
+		return "", err
+	}
+	for _, t := range res.Tables {
+		it.tables = append(it.tables, bqToTable(t.TableReference, it.dataset.c))
+	}
+	return res.NextPageToken, nil
+}
+
+func bqToTable(tr *bq.TableReference, c *Client) *Table {
+	if tr == nil {
+		return nil
+	}
+	return &Table{
+		ProjectID: tr.ProjectId,
+		DatasetID: tr.DatasetId,
+		TableID:   tr.TableId,
+		c:         c,
+	}
+}
+
+// Datasets returns an iterator over the datasets in a project.
+// The Client's project is used by default, but that can be
+// changed by setting ProjectID on the returned iterator before calling Next.
+func (c *Client) Datasets(ctx context.Context) *DatasetIterator {
+	return c.DatasetsInProject(ctx, c.projectID)
+}
+
+// DatasetsInProject returns an iterator over the datasets in the provided project.
+//
+// Deprecated: call Client.Datasets, then set ProjectID on the returned iterator.
+func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator {
+	it := &DatasetIterator{
+		ctx:       ctx,
+		c:         c,
+		ProjectID: projectID,
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
+		it.fetch,
+		func() int { return len(it.items) },
+		func() interface{} { b := it.items; it.items = nil; return b })
+	return it
+}
+
+// DatasetIterator iterates over the datasets in a project.
+type DatasetIterator struct {
+	// ListHidden causes hidden datasets to be listed when set to true.
+	// Set before the first call to Next.
+	ListHidden bool
+
+	// Filter restricts the datasets returned by label. The filter syntax is described in
+	// https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels
+	// Set before the first call to Next.
+	Filter string
+
+	// The project ID of the listed datasets.
+	// Set before the first call to Next.
+	ProjectID string
+
+	ctx      context.Context
+	c        *Client
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+	items    []*Dataset
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
+
+// Next returns the next Dataset. Its second return value is iterator.Done if
+// there are no more results. Once Next returns Done, all subsequent calls will
+// return Done.
+func (it *DatasetIterator) Next() (*Dataset, error) {
+	if err := it.nextFunc(); err != nil {
+		return nil, err
+	}
+	item := it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+// for testing
+var listDatasets = func(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
+	call := it.c.bqs.Datasets.List(it.ProjectID).
+		Context(it.ctx).
+		PageToken(pageToken).
+		All(it.ListHidden)
+	setClientHeader(call.Header())
+	if pageSize > 0 {
+		call.MaxResults(int64(pageSize))
+	}
+	if it.Filter != "" {
+		call.Filter(it.Filter)
+	}
+	var res *bq.DatasetList
+	err := runWithRetry(it.ctx, func() (err error) {
+		res, err = call.Do()
+		return err
+	})
+	return res, err
+}
+
+func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) {
+	res, err := listDatasets(it, pageSize, pageToken)
+	if err != nil {
+		return "", err
+	}
+	for _, d := range res.Datasets {
+		it.items = append(it.items, &Dataset{
+			ProjectID: d.DatasetReference.ProjectId,
+			DatasetID: d.DatasetReference.DatasetId,
+			c:         it.c,
+		})
+	}
+	return res.NextPageToken, nil
+}
+
+// An AccessEntry describes the permissions that an entity has on a dataset.
+type AccessEntry struct {
+	Role       AccessRole // The role of the entity
+	EntityType EntityType // The type of entity
+	Entity     string     // The entity (individual or group) granted access
+	View       *Table     // The view granted access (EntityType must be ViewEntity)
+}
+
+// AccessRole is the level of access to grant to a dataset.
+type AccessRole string
+
+const (
+	// OwnerRole is the OWNER AccessRole.
+	OwnerRole AccessRole = "OWNER"
+	// ReaderRole is the READER AccessRole.
+	ReaderRole AccessRole = "READER"
+	// WriterRole is the WRITER AccessRole.
+	WriterRole AccessRole = "WRITER"
+)
+
+// EntityType is the type of entity in an AccessEntry.
+type EntityType int
+
+const (
+	// DomainEntity is a domain (e.g. "example.com").
+	DomainEntity EntityType = iota + 1
+
+	// GroupEmailEntity is an email address of a Google Group.
+	GroupEmailEntity
+
+	// UserEmailEntity is an email address of an individual user.
+	UserEmailEntity
+
+	// SpecialGroupEntity is a special group: one of projectOwners, projectReaders, projectWriters or
+	// allAuthenticatedUsers.
+	SpecialGroupEntity
+
+	// ViewEntity is a BigQuery view.
+	ViewEntity
+)
+
+func (e *AccessEntry) toBQ() (*bq.DatasetAccess, error) {
+	q := &bq.DatasetAccess{Role: string(e.Role)}
+	switch e.EntityType {
+	case DomainEntity:
+		q.Domain = e.Entity
+	case GroupEmailEntity:
+		q.GroupByEmail = e.Entity
+	case UserEmailEntity:
+		q.UserByEmail = e.Entity
+	case SpecialGroupEntity:
+		q.SpecialGroup = e.Entity
+	case ViewEntity:
+		q.View = e.View.toBQ()
+	default:
+		return nil, fmt.Errorf("bigquery: unknown entity type %d", e.EntityType)
+	}
+	return q, nil
+}
+
+func bqToAccessEntry(q *bq.DatasetAccess, c *Client) (*AccessEntry, error) {
+	e := &AccessEntry{Role: AccessRole(q.Role)}
+	switch {
+	case q.Domain != "":
+		e.Entity = q.Domain
+		e.EntityType = DomainEntity
+	case q.GroupByEmail != "":
+		e.Entity = q.GroupByEmail
+		e.EntityType = GroupEmailEntity
+	case q.UserByEmail != "":
+		e.Entity = q.UserByEmail
+		e.EntityType = UserEmailEntity
+	case q.SpecialGroup != "":
+		e.Entity = q.SpecialGroup
+		e.EntityType = SpecialGroupEntity
+	case q.View != nil:
+		e.View = c.DatasetInProject(q.View.ProjectId, q.View.DatasetId).Table(q.View.TableId)
+		e.EntityType = ViewEntity
+	default:
+		return nil, errors.New("bigquery: invalid access value")
+	}
+	return e, nil
+}
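+
+// Illustrative AccessEntry granting read access to a group (a sketch; the
+// email address is hypothetical):
+//
+//	entry := &bigquery.AccessEntry{
+//		Role:       bigquery.ReaderRole,
+//		EntityType: bigquery.GroupEmailEntity,
+//		Entity:     "analysts@example.com",
+//	}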
diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go
new file mode 100644
index 00000000..fea963e0
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go
@@ -0,0 +1,625 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package datatransfer
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	gax "github.com/googleapis/gax-go"
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+	"google.golang.org/api/transport"
+	datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+)
+
+// CallOptions contains the retry settings for each method of Client.
+type CallOptions struct {
+	GetDataSource        []gax.CallOption
+	ListDataSources      []gax.CallOption
+	CreateTransferConfig []gax.CallOption
+	UpdateTransferConfig []gax.CallOption
+	DeleteTransferConfig []gax.CallOption
+	GetTransferConfig    []gax.CallOption
+	ListTransferConfigs  []gax.CallOption
+	ScheduleTransferRuns []gax.CallOption
+	GetTransferRun       []gax.CallOption
+	DeleteTransferRun    []gax.CallOption
+	ListTransferRuns     []gax.CallOption
+	ListTransferLogs     []gax.CallOption
+	CheckValidCreds      []gax.CallOption
+}
+
+func defaultClientOptions() []option.ClientOption {
+	return []option.ClientOption{
+		option.WithEndpoint("bigquerydatatransfer.googleapis.com:443"),
+		option.WithScopes(DefaultAuthScopes()...),
+	}
+}
+
+func defaultCallOptions() *CallOptions {
+	retry := map[[2]string][]gax.CallOption{
+		{"default", "idempotent"}: {
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.3,
+				})
+			}),
+		},
+	}
+	return &CallOptions{
+		GetDataSource:        retry[[2]string{"default", "idempotent"}],
+		ListDataSources:      retry[[2]string{"default", "idempotent"}],
+		CreateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
+		UpdateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
+		DeleteTransferConfig: retry[[2]string{"default", "idempotent"}],
+		GetTransferConfig:    retry[[2]string{"default", "idempotent"}],
+		ListTransferConfigs:  retry[[2]string{"default", "idempotent"}],
+		ScheduleTransferRuns: retry[[2]string{"default", "non_idempotent"}],
+		GetTransferRun:       retry[[2]string{"default", "idempotent"}],
+		DeleteTransferRun:    retry[[2]string{"default", "idempotent"}],
+		ListTransferRuns:     retry[[2]string{"default", "idempotent"}],
+		ListTransferLogs:     retry[[2]string{"default", "idempotent"}],
+		CheckValidCreds:      retry[[2]string{"default", "idempotent"}],
+	}
+}
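+
+// Callers may override these defaults per invocation by passing gax options,
+// e.g. (a sketch) retrying only on Unavailable with a tighter backoff:
+//
+//	opt := gax.WithRetry(func() gax.Retryer {
+//		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
+//			Initial: 50 * time.Millisecond,
+//			Max:     5 * time.Second,
+//		})
+//	})
+//	ds, err := c.GetDataSource(ctx, req, opt)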
+
+// Client is a client for interacting with BigQuery Data Transfer API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type Client struct {
+	// The connection to the service.
+	conn *grpc.ClientConn
+
+	// The gRPC API client.
+	client datatransferpb.DataTransferServiceClient
+
+	// The call options for this service.
+	CallOptions *CallOptions
+
+	// The x-goog-* metadata to be sent with each request.
+	xGoogMetadata metadata.MD
+}
+
+// NewClient creates a new data transfer service client.
+//
+// The Google BigQuery Data Transfer Service API enables BigQuery users to
+// configure the transfer of their data from other Google products into
+// BigQuery. This service contains the methods that are exposed to end users;
+// it backs the frontend.
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
+	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
+	if err != nil {
+		return nil, err
+	}
+	c := &Client{
+		conn:        conn,
+		CallOptions: defaultCallOptions(),
+
+		client: datatransferpb.NewDataTransferServiceClient(conn),
+	}
+	c.setGoogleClientInfo()
+	return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *Client) Connection() *grpc.ClientConn {
+	return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *Client) Close() error {
+	return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *Client) setGoogleClientInfo(keyval ...string) {
+	kv := append([]string{"gl-go", versionGo()}, keyval...)
+	kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// GetDataSource retrieves a supported data source and returns its settings,
+// which can be used for UI rendering.
+func (c *Client) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest, opts ...gax.CallOption) (*datatransferpb.DataSource, error) {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.GetDataSource[0:len(c.CallOptions.GetDataSource):len(c.CallOptions.GetDataSource)], opts...)
+	var resp *datatransferpb.DataSource
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.GetDataSource(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ListDataSources lists supported data sources and returns their settings,
+// which can be used for UI rendering.
+func (c *Client) ListDataSources(ctx context.Context, req *datatransferpb.ListDataSourcesRequest, opts ...gax.CallOption) *DataSourceIterator {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.ListDataSources[0:len(c.CallOptions.ListDataSources):len(c.CallOptions.ListDataSources)], opts...)
+	it := &DataSourceIterator{}
+	req = proto.Clone(req).(*datatransferpb.ListDataSourcesRequest)
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.DataSource, string, error) {
+		var resp *datatransferpb.ListDataSourcesResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.client.ListDataSources(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.DataSources, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.PageSize)
+	return it
+}
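+
+// The returned iterator follows the standard pattern (a sketch):
+//
+//	it := c.ListDataSources(ctx, req)
+//	for {
+//		ds, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		_ = ds // use the data source
+//	}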
+
+// CreateTransferConfig creates a new data transfer configuration.
+func (c *Client) CreateTransferConfig(ctx context.Context, req *datatransferpb.CreateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.CreateTransferConfig[0:len(c.CallOptions.CreateTransferConfig):len(c.CallOptions.CreateTransferConfig)], opts...)
+	var resp *datatransferpb.TransferConfig
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.CreateTransferConfig(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// UpdateTransferConfig updates a data transfer configuration.
+// All fields must be set, even if they are not updated.
+func (c *Client) UpdateTransferConfig(ctx context.Context, req *datatransferpb.UpdateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "transfer_config.name", req.GetTransferConfig().GetName()))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.UpdateTransferConfig[0:len(c.CallOptions.UpdateTransferConfig):len(c.CallOptions.UpdateTransferConfig)], opts...)
+	var resp *datatransferpb.TransferConfig
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.UpdateTransferConfig(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// DeleteTransferConfig deletes a data transfer configuration,
+// including any associated transfer runs and logs.
+func (c *Client) DeleteTransferConfig(ctx context.Context, req *datatransferpb.DeleteTransferConfigRequest, opts ...gax.CallOption) error {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.DeleteTransferConfig[0:len(c.CallOptions.DeleteTransferConfig):len(c.CallOptions.DeleteTransferConfig)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.client.DeleteTransferConfig(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// GetTransferConfig returns information about a data transfer config.
+func (c *Client) GetTransferConfig(ctx context.Context, req *datatransferpb.GetTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.GetTransferConfig[0:len(c.CallOptions.GetTransferConfig):len(c.CallOptions.GetTransferConfig)], opts...)
+	var resp *datatransferpb.TransferConfig
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.GetTransferConfig(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ListTransferConfigs returns information about all data transfers in the project.
+func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.ListTransferConfigsRequest, opts ...gax.CallOption) *TransferConfigIterator {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.ListTransferConfigs[0:len(c.CallOptions.ListTransferConfigs):len(c.CallOptions.ListTransferConfigs)], opts...)
+	it := &TransferConfigIterator{}
+	req = proto.Clone(req).(*datatransferpb.ListTransferConfigsRequest)
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferConfig, string, error) {
+		var resp *datatransferpb.ListTransferConfigsResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.client.ListTransferConfigs(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.TransferConfigs, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.PageSize)
+	return it
+}
+
+// ScheduleTransferRuns creates transfer runs for a time range [start_time, end_time].
+// For each date - or whatever granularity the data source supports - in the
+// range, one transfer run is created.
+// Note that runs are created per UTC time in the time range.
+func (c *Client) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.ScheduleTransferRunsRequest, opts ...gax.CallOption) (*datatransferpb.ScheduleTransferRunsResponse, error) {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.ScheduleTransferRuns[0:len(c.CallOptions.ScheduleTransferRuns):len(c.CallOptions.ScheduleTransferRuns)], opts...)
+	var resp *datatransferpb.ScheduleTransferRunsResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.ScheduleTransferRuns(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// GetTransferRun returns information about the particular transfer run.
+func (c *Client) GetTransferRun(ctx context.Context, req *datatransferpb.GetTransferRunRequest, opts ...gax.CallOption) (*datatransferpb.TransferRun, error) {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.GetTransferRun[0:len(c.CallOptions.GetTransferRun):len(c.CallOptions.GetTransferRun)], opts...)
+	var resp *datatransferpb.TransferRun
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.GetTransferRun(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// DeleteTransferRun deletes the specified transfer run.
+func (c *Client) DeleteTransferRun(ctx context.Context, req *datatransferpb.DeleteTransferRunRequest, opts ...gax.CallOption) error {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.DeleteTransferRun[0:len(c.CallOptions.DeleteTransferRun):len(c.CallOptions.DeleteTransferRun)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.client.DeleteTransferRun(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// ListTransferRuns returns information about running and completed jobs.
+func (c *Client) ListTransferRuns(ctx context.Context, req *datatransferpb.ListTransferRunsRequest, opts ...gax.CallOption) *TransferRunIterator {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.ListTransferRuns[0:len(c.CallOptions.ListTransferRuns):len(c.CallOptions.ListTransferRuns)], opts...)
+	it := &TransferRunIterator{}
+	req = proto.Clone(req).(*datatransferpb.ListTransferRunsRequest)
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferRun, string, error) {
+		var resp *datatransferpb.ListTransferRunsResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.client.ListTransferRuns(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.TransferRuns, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.PageSize)
+	return it
+}
+
+// ListTransferLogs returns user-facing log messages for the data transfer run.
+func (c *Client) ListTransferLogs(ctx context.Context, req *datatransferpb.ListTransferLogsRequest, opts ...gax.CallOption) *TransferMessageIterator {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.ListTransferLogs[0:len(c.CallOptions.ListTransferLogs):len(c.CallOptions.ListTransferLogs)], opts...)
+	it := &TransferMessageIterator{}
+	req = proto.Clone(req).(*datatransferpb.ListTransferLogsRequest)
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferMessage, string, error) {
+		var resp *datatransferpb.ListTransferLogsResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.client.ListTransferLogs(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.TransferMessages, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.PageSize)
+	return it
+}
+
+// CheckValidCreds returns true if valid credentials exist for the given data source and
+// requesting user.
+// Some data sources don't support service accounts, so we need to talk to
+// them on behalf of the end user. This API just checks whether we have an
+// OAuth token for the particular user, which is a prerequisite before the
+// user can create a transfer config.
+func (c *Client) CheckValidCreds(ctx context.Context, req *datatransferpb.CheckValidCredsRequest, opts ...gax.CallOption) (*datatransferpb.CheckValidCredsResponse, error) {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.CheckValidCreds[0:len(c.CallOptions.CheckValidCreds):len(c.CallOptions.CheckValidCreds)], opts...)
+	var resp *datatransferpb.CheckValidCredsResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.CheckValidCreds(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// DataSourceIterator manages a stream of *datatransferpb.DataSource.
+type DataSourceIterator struct {
+	items    []*datatransferpb.DataSource
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.DataSource, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *DataSourceIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *DataSourceIterator) Next() (*datatransferpb.DataSource, error) {
+	var item *datatransferpb.DataSource
+	if err := it.nextFunc(); err != nil {
+		return item, err
+	}
+	item = it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *DataSourceIterator) bufLen() int {
+	return len(it.items)
+}
+
+func (it *DataSourceIterator) takeBuf() interface{} {
+	b := it.items
+	it.items = nil
+	return b
+}
+
+// TransferConfigIterator manages a stream of *datatransferpb.TransferConfig.
+type TransferConfigIterator struct {
+	items    []*datatransferpb.TransferConfig
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferConfig, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *TransferConfigIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *TransferConfigIterator) Next() (*datatransferpb.TransferConfig, error) {
+	var item *datatransferpb.TransferConfig
+	if err := it.nextFunc(); err != nil {
+		return item, err
+	}
+	item = it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *TransferConfigIterator) bufLen() int {
+	return len(it.items)
+}
+
+func (it *TransferConfigIterator) takeBuf() interface{} {
+	b := it.items
+	it.items = nil
+	return b
+}
+
+// TransferMessageIterator manages a stream of *datatransferpb.TransferMessage.
+type TransferMessageIterator struct {
+	items    []*datatransferpb.TransferMessage
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferMessage, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *TransferMessageIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *TransferMessageIterator) Next() (*datatransferpb.TransferMessage, error) {
+	var item *datatransferpb.TransferMessage
+	if err := it.nextFunc(); err != nil {
+		return item, err
+	}
+	item = it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *TransferMessageIterator) bufLen() int {
+	return len(it.items)
+}
+
+func (it *TransferMessageIterator) takeBuf() interface{} {
+	b := it.items
+	it.items = nil
+	return b
+}
+
+// TransferRunIterator manages a stream of *datatransferpb.TransferRun.
+type TransferRunIterator struct {
+	items    []*datatransferpb.TransferRun
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferRun, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *TransferRunIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *TransferRunIterator) Next() (*datatransferpb.TransferRun, error) {
+	var item *datatransferpb.TransferRun
+	if err := it.nextFunc(); err != nil {
+		return item, err
+	}
+	item = it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *TransferRunIterator) bufLen() int {
+	return len(it.items)
+}
+
+func (it *TransferRunIterator) takeBuf() interface{} {
+	b := it.items
+	it.items = nil
+	return b
+}
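+
+// Typical client construction (a sketch; credentials are taken from the
+// environment):
+//
+//	ctx := context.Background()
+//	c, err := datatransfer.NewClient(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	defer c.Close()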
diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go
new file mode 100644
index 00000000..6cd68697
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go
@@ -0,0 +1,90 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package datatransfer is an auto-generated package for the
+// BigQuery Data Transfer API.
+//
+//   NOTE: This package is in alpha. It is not stable, and is likely to change.
+//
+// Transfers data from partner SaaS applications to Google BigQuery on a
+// scheduled, managed basis.
+package datatransfer // import "cloud.google.com/go/bigquery/datatransfer/apiv1"
+
+import (
+	"context"
+	"runtime"
+	"strings"
+	"unicode"
+
+	"google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+	out, _ := metadata.FromOutgoingContext(ctx)
+	out = out.Copy()
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return metadata.NewOutgoingContext(ctx, out)
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+	return []string{
+		"https://www.googleapis.com/auth/cloud-platform",
+	}
+}
+
+// versionGo returns the Go runtime version. The returned string
+// has no whitespace, suitable for reporting in a header.
+func versionGo() string {
+	const develPrefix = "devel +"
+
+	s := runtime.Version()
+	if strings.HasPrefix(s, develPrefix) {
+		s = s[len(develPrefix):]
+		if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+			s = s[:p]
+		}
+		return s
+	}
+
+	notSemverRune := func(r rune) bool {
+		return strings.IndexRune("0123456789.", r) < 0
+	}
+
+	if strings.HasPrefix(s, "go1") {
+		s = s[2:]
+		var prerelease string
+		if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
+			s, prerelease = s[:p], s[p:]
+		}
+		if strings.HasSuffix(s, ".") {
+			s += "0"
+		} else if strings.Count(s, ".") < 2 {
+			s += ".0"
+		}
+		if prerelease != "" {
+			s += "-" + prerelease
+		}
+		return s
+	}
+	return "UNKNOWN"
+}
+
+const versionClient = "20181129"
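For reference, versionGo normalizes the output of runtime.Version() to a semver-like string. A few input/output pairs implied by the code above (a sketch, not part of the package):

    // runtime.Version() == "go1.11.2"               -> "1.11.2"
    // runtime.Version() == "go1.11"                 -> "1.11.0"
    // runtime.Version() == "go1.12beta1"            -> "1.12.0-beta1"
    // runtime.Version() == "devel +abc123 Thu Jan 1" -> "abc123"
    // any other form                                -> "UNKNOWN"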
diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/path_funcs.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/path_funcs.go
new file mode 100644
index 00000000..89eb5bb1
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/path_funcs.go
@@ -0,0 +1,135 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package datatransfer
+
+// ProjectPath returns the path for the project resource.
+//
+// Deprecated: Use
+//   fmt.Sprintf("projects/%s", project)
+// instead.
+func ProjectPath(project string) string {
+	return "" +
+		"projects/" +
+		project +
+		""
+}
+
+// LocationPath returns the path for the location resource.
+//
+// Deprecated: Use
+//   fmt.Sprintf("projects/%s/locations/%s", project, location)
+// instead.
+func LocationPath(project, location string) string {
+	return "" +
+		"projects/" +
+		project +
+		"/locations/" +
+		location +
+		""
+}
+
+// LocationDataSourcePath returns the path for the location data source resource.
+//
+// Deprecated: Use
+//   fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", project, location, dataSource)
+// instead.
+func LocationDataSourcePath(project, location, dataSource string) string {
+	return "" +
+		"projects/" +
+		project +
+		"/locations/" +
+		location +
+		"/dataSources/" +
+		dataSource +
+		""
+}
+
+// LocationTransferConfigPath returns the path for the location transfer config resource.
+//
+// Deprecated: Use
+//   fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", project, location, transferConfig)
+// instead.
+func LocationTransferConfigPath(project, location, transferConfig string) string {
+	return "" +
+		"projects/" +
+		project +
+		"/locations/" +
+		location +
+		"/transferConfigs/" +
+		transferConfig +
+		""
+}
+
+// LocationRunPath returns the path for the location run resource.
+//
+// Deprecated: Use
+//   fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", project, location, transferConfig, run)
+// instead.
+func LocationRunPath(project, location, transferConfig, run string) string {
+	return "" +
+		"projects/" +
+		project +
+		"/locations/" +
+		location +
+		"/transferConfigs/" +
+		transferConfig +
+		"/runs/" +
+		run +
+		""
+}
+
+// DataSourcePath returns the path for the data source resource.
+//
+// Deprecated: Use
+//   fmt.Sprintf("projects/%s/dataSources/%s", project, dataSource)
+// instead.
+func DataSourcePath(project, dataSource string) string {
+	return "" +
+		"projects/" +
+		project +
+		"/dataSources/" +
+		dataSource +
+		""
+}
+
+// TransferConfigPath returns the path for the transfer config resource.
+//
+// Deprecated: Use
+//   fmt.Sprintf("projects/%s/transferConfigs/%s", project, transferConfig)
+// instead.
+func TransferConfigPath(project, transferConfig string) string {
+	return "" +
+		"projects/" +
+		project +
+		"/transferConfigs/" +
+		transferConfig +
+		""
+}
+
+// RunPath returns the path for the run resource.
+//
+// Deprecated: Use
+//   fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", project, transferConfig, run)
+// instead.
+func RunPath(project, transferConfig, run string) string {
+	return "" +
+		"projects/" +
+		project +
+		"/transferConfigs/" +
+		transferConfig +
+		"/runs/" +
+		run +
+		""
+}
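Each deprecated helper is plain string concatenation, so the fmt.Sprintf forms shown in the doc comments produce identical output. For example:

    RunPath("p", "c", "r")
    // == fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "p", "c", "r")
    // == "projects/p/transferConfigs/c/runs/r"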
diff --git a/vendor/cloud.google.com/go/bigquery/doc.go b/vendor/cloud.google.com/go/bigquery/doc.go
new file mode 100644
index 00000000..43e491ce
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/doc.go
@@ -0,0 +1,310 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package bigquery provides a client for the BigQuery service.
+
+Note: This package is in beta.  Some backwards-incompatible changes may occur.
+
+The following assumes a basic familiarity with BigQuery concepts.
+See https://cloud.google.com/bigquery/docs.
+
+See https://godoc.org/cloud.google.com/go for authentication, timeouts,
+connection pooling and similar aspects of this package.
+
+
+Creating a Client
+
+To start working with this package, create a client:
+
+    ctx := context.Background()
+    client, err := bigquery.NewClient(ctx, projectID)
+    if err != nil {
+        // TODO: Handle error.
+    }
+
+Querying
+
+To query existing tables, create a Query and call its Read method:
+
+    q := client.Query(`
+        SELECT year, SUM(number) as num
+        FROM [bigquery-public-data:usa_names.usa_1910_2013]
+        WHERE name = "William"
+        GROUP BY year
+        ORDER BY year
+    `)
+    it, err := q.Read(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+
+Then iterate through the resulting rows. You can store a row using
+anything that implements the ValueLoader interface, or a slice or map of bigquery.Value.
+A slice is simplest:
+
+    for {
+        var values []bigquery.Value
+        err := it.Next(&values)
+        if err == iterator.Done {
+            break
+        }
+        if err != nil {
+            // TODO: Handle error.
+        }
+        fmt.Println(values)
+    }
+
+You can also use a struct whose exported fields match the query:
+
+    type Count struct {
+        Year int
+        Num  int
+    }
+    for {
+        var c Count
+        err := it.Next(&c)
+        if err == iterator.Done {
+            break
+        }
+        if err != nil {
+            // TODO: Handle error.
+        }
+        fmt.Println(c)
+    }
+
+You can also start the query running and get the results later.
+Create the query as above, but call Run instead of Read. This returns a Job,
+which represents an asynchronous operation.
+
+    job, err := q.Run(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+
+Get the job's ID, a printable string. You can save this string to retrieve
+the results at a later time, even in another process.
+
+    jobID := job.ID()
+    fmt.Printf("The job ID is %s\n", jobID)
+
+To retrieve the job's results from the ID, first look up the Job:
+
+    job, err = client.JobFromID(ctx, jobID)
+    if err != nil {
+        // TODO: Handle error.
+    }
+
+Use the Job.Read method to obtain an iterator, and loop over the rows.
+Query.Read is just a convenience method that combines Query.Run and Job.Read.
+
+    it, err = job.Read(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+    // Proceed with iteration as above.
+
+Datasets and Tables
+
+You can refer to datasets in the client's project with the Dataset method, and
+in other projects with the DatasetInProject method:
+
+    myDataset := client.Dataset("my_dataset")
+    yourDataset := client.DatasetInProject("your-project-id", "your_dataset")
+
+These methods create references to datasets, not the datasets themselves. You can have
+a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to
+create a dataset from a reference:
+
+    if err := myDataset.Create(ctx, nil); err != nil {
+        // TODO: Handle error.
+    }
+
+You can refer to tables with Dataset.Table. Like bigquery.Dataset, bigquery.Table is a reference
+to an object in BigQuery that may or may not exist.
+
+    table := myDataset.Table("my_table")
+
+You can create, delete and update the metadata of tables with methods on Table.
+For instance, you could create a temporary table with:
+
+    err = myDataset.Table("temp").Create(ctx, &bigquery.TableMetadata{
+        ExpirationTime: time.Now().Add(1*time.Hour)})
+    if err != nil {
+        // TODO: Handle error.
+    }
+
+We'll see how to create a table with a schema in the next section.
+
+Schemas
+
+There are two ways to construct schemas with this package.
+You can build a schema by hand, like so:
+
+    schema1 := bigquery.Schema{
+        {Name: "Name", Required: true, Type: bigquery.StringFieldType},
+        {Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType},
+        {Name: "Optional", Required: false, Type: bigquery.IntegerFieldType},
+    }
+
+Or you can infer the schema from a struct:
+
+    type student struct {
+        Name   string
+        Grades []int
+        Optional bigquery.NullInt64
+    }
+    schema2, err := bigquery.InferSchema(student{})
+    if err != nil {
+        // TODO: Handle error.
+    }
+    // schema1 and schema2 are identical.
+
+Struct inference supports tags like those of the encoding/json package, so you can
+change names, ignore fields, or mark a field as nullable (non-required). Fields
+declared as one of the Null types (NullInt64, NullFloat64, NullString, NullBool,
+NullTimestamp, NullDate, NullTime and NullDateTime) are automatically inferred as
+nullable, so the "nullable" tag is only needed for []byte, *big.Rat and
+pointer-to-struct fields.
+
+    type student2 struct {
+        Name     string `bigquery:"full_name"`
+        Grades   []int
+        Secret   string `bigquery:"-"`
+        Optional []byte `bigquery:",nullable"`
+    }
+    schema3, err := bigquery.InferSchema(student2{})
+    if err != nil {
+        // TODO: Handle error.
+    }
+    // schema3 has a required field "full_name", a repeated field "Grades", and a nullable BYTES field "Optional".
+
+Having constructed a schema, you can create a table with it like so:
+
+    if err := table.Create(ctx, &bigquery.TableMetadata{Schema: schema1}); err != nil {
+        // TODO: Handle error.
+    }
+
+Copying
+
+You can copy one or more tables to another table. Begin by constructing a Copier
+describing the copy. Then set any desired copy options, and finally call Run to get a Job:
+
+    copier := myDataset.Table("dest").CopierFrom(myDataset.Table("src"))
+    copier.WriteDisposition = bigquery.WriteTruncate
+    job, err = copier.Run(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+
+You can chain the call to Run if you don't want to set options:
+
+    job, err = myDataset.Table("dest").CopierFrom(myDataset.Table("src")).Run(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+
+You can wait for your job to complete:
+
+    status, err := job.Wait(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+
+Job.Wait polls with exponential backoff. You can also poll yourself, if you
+wish:
+
+    for {
+        status, err := job.Status(ctx)
+        if err != nil {
+            // TODO: Handle error.
+        }
+        if status.Done() {
+            if status.Err() != nil {
+                log.Fatalf("Job failed with error %v", status.Err())
+            }
+            break
+        }
+        time.Sleep(pollInterval)
+    }
+
+Loading and Uploading
+
+There are two ways to populate a table with this package: load the data from a Google Cloud Storage
+object, or upload rows directly from your program.
+
+For loading, first create a GCSReference, configuring it if desired. Then make a Loader, optionally configure
+it as well, and call its Run method.
+
+    gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
+    gcsRef.AllowJaggedRows = true
+    loader := myDataset.Table("dest").LoaderFrom(gcsRef)
+    loader.CreateDisposition = bigquery.CreateNever
+    job, err = loader.Run(ctx)
+    // Poll the job for completion if desired, as above.
+
+To upload, first define a type that implements the ValueSaver interface, which has a single method named Save.
+Then create an Uploader, and call its Put method with a slice of values.
+
+    u := table.Uploader()
+    // Item implements the ValueSaver interface.
+    items := []*Item{
+        {Name: "n1", Size: 32.6, Count: 7},
+        {Name: "n2", Size: 4, Count: 2},
+        {Name: "n3", Size: 101.5, Count: 1},
+    }
+    if err := u.Put(ctx, items); err != nil {
+        // TODO: Handle error.
+    }
+
+You can also upload a struct that doesn't implement ValueSaver. Use the StructSaver type
+to specify the schema and insert ID by hand, or just supply the struct or struct pointer
+directly and the schema will be inferred:
+
+    type Item2 struct {
+        Name  string
+        Size  float64
+        Count int
+    }
+    // Item2 does not implement ValueSaver; the schema is inferred from the struct type.
+    items2 := []*Item2{
+        {Name: "n1", Size: 32.6, Count: 7},
+        {Name: "n2", Size: 4, Count: 2},
+        {Name: "n3", Size: 101.5, Count: 1},
+    }
+    if err := u.Put(ctx, items2); err != nil {
+        // TODO: Handle error.
+    }
+
+Extracting
+
+If you've been following so far, extracting data from a BigQuery table
+into a Google Cloud Storage object will feel familiar. First create an
+Extractor, then optionally configure it, and lastly call its Run method.
+
+    extractor := table.ExtractorTo(gcsRef)
+    extractor.DisableHeader = true
+    job, err = extractor.Run(ctx)
+    // Poll the job for completion if desired, as above.
+
+Errors
+
+Errors returned by this client are often of the type googleapi.Error
+(see https://godoc.org/google.golang.org/api/googleapi#Error).
+These errors can be introspected for more information by type asserting to the richer *googleapi.Error type. For example:
+
+	if e, ok := err.(*googleapi.Error); ok {
+		if e.Code == 409 { ... }
+	}
+*/
+package bigquery // import "cloud.google.com/go/bigquery"
diff --git a/vendor/cloud.google.com/go/bigquery/error.go b/vendor/cloud.google.com/go/bigquery/error.go
new file mode 100644
index 00000000..27e86982
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/error.go
@@ -0,0 +1,83 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"fmt"
+
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// An Error contains detailed information about a failed bigquery operation.
+// Detailed description of possible Reasons can be found here: https://cloud.google.com/bigquery/troubleshooting-errors.
+type Error struct {
+	// Mirrors bq.ErrorProto, but drops DebugInfo
+	Location, Message, Reason string
+}
+
+func (e Error) Error() string {
+	return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason)
+}
+
+func bqToError(ep *bq.ErrorProto) *Error {
+	if ep == nil {
+		return nil
+	}
+	return &Error{
+		Location: ep.Location,
+		Message:  ep.Message,
+		Reason:   ep.Reason,
+	}
+}
+
+// A MultiError contains multiple related errors.
+type MultiError []error
+
+func (m MultiError) Error() string {
+	switch len(m) {
+	case 0:
+		return "(0 errors)"
+	case 1:
+		return m[0].Error()
+	case 2:
+		return m[0].Error() + " (and 1 other error)"
+	}
+	return fmt.Sprintf("%s (and %d other errors)", m[0].Error(), len(m)-1)
+}
+
+// RowInsertionError contains all errors that occurred when attempting to insert a row.
+type RowInsertionError struct {
+	InsertID string // The InsertID associated with the affected row.
+	RowIndex int    // The 0-based index of the affected row in the batch of rows being inserted.
+	Errors   MultiError
+}
+
+func (e *RowInsertionError) Error() string {
+	errFmt := "insertion of row [insertID: %q; insertIndex: %v] failed with error: %s"
+	return fmt.Sprintf(errFmt, e.InsertID, e.RowIndex, e.Errors.Error())
+}
+
+// PutMultiError contains an error for each row which was not successfully inserted
+// into a BigQuery table.
+type PutMultiError []RowInsertionError
+
+func (pme PutMultiError) Error() string {
+	plural := "s"
+	if len(pme) == 1 {
+		plural = ""
+	}
+
+	return fmt.Sprintf("%v row insertion%s failed", len(pme), plural)
+}
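A sketch of unpacking these types after a failed streaming insert, assuming an *bigquery.Inserter named ins and a batch items (both hypothetical):

    if err := ins.Put(ctx, items); err != nil {
        if pme, ok := err.(bigquery.PutMultiError); ok {
            for _, rie := range pme {
                log.Printf("row %d (insert ID %q) failed: %v", rie.RowIndex, rie.InsertID, rie.Errors)
            }
        } else {
            // TODO: Handle other errors.
        }
    }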
diff --git a/vendor/cloud.google.com/go/bigquery/external.go b/vendor/cloud.google.com/go/bigquery/external.go
new file mode 100644
index 00000000..2ceb38d5
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/external.go
@@ -0,0 +1,400 @@
+// Copyright 2017 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"encoding/base64"
+	"unicode/utf8"
+
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// DataFormat describes the format of BigQuery table data.
+type DataFormat string
+
+// Constants describing the format of BigQuery table data.
+const (
+	CSV             DataFormat = "CSV"
+	Avro            DataFormat = "AVRO"
+	JSON            DataFormat = "NEWLINE_DELIMITED_JSON"
+	DatastoreBackup DataFormat = "DATASTORE_BACKUP"
+	GoogleSheets    DataFormat = "GOOGLE_SHEETS"
+	Bigtable        DataFormat = "BIGTABLE"
+	Parquet         DataFormat = "PARQUET"
+	ORC             DataFormat = "ORC"
+)
+
+// ExternalData is a table which is stored outside of BigQuery. It is implemented by
+// *ExternalDataConfig.
+// GCSReference also implements it, for backwards compatibility.
+type ExternalData interface {
+	toBQ() bq.ExternalDataConfiguration
+}
+
+// ExternalDataConfig describes data external to BigQuery that can be used
+// in queries and to create external tables.
+type ExternalDataConfig struct {
+	// The format of the data. Required.
+	SourceFormat DataFormat
+
+	// The fully-qualified URIs that point to your
+	// data in Google Cloud. Required.
+	//
+	// For Google Cloud Storage URIs, each URI can contain one '*' wildcard character
+	// and it must come after the 'bucket' name. Size limits related to load jobs
+	// apply to external data sources.
+	//
+	// For Google Cloud Bigtable URIs, exactly one URI can be specified and it has to be
+	// a fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
+	//
+	// For Google Cloud Datastore backups, exactly one URI can be specified. Also,
+	// the '*' wildcard character is not allowed.
+	SourceURIs []string
+
+	// The schema of the data. Required for CSV and JSON; disallowed for the
+	// other formats.
+	Schema Schema
+
+	// Try to detect schema and format options automatically.
+	// Any option specified explicitly will be honored.
+	AutoDetect bool
+
+	// The compression type of the data.
+	Compression Compression
+
+	// IgnoreUnknownValues causes values not matching the schema to be
+	// tolerated. Unknown values are ignored. For CSV this ignores extra values
+	// at the end of a line. For JSON this ignores named values that do not
+	// match any column name. If this field is not set, records containing
+	// unknown values are treated as bad records. The MaxBadRecords field can
+	// be used to customize how bad records are handled.
+	IgnoreUnknownValues bool
+
+	// MaxBadRecords is the maximum number of bad records that will be ignored
+	// when reading data.
+	MaxBadRecords int64
+
+	// Additional options for CSV, GoogleSheets and Bigtable formats.
+	Options ExternalDataConfigOptions
+}
+
+func (e *ExternalDataConfig) toBQ() bq.ExternalDataConfiguration {
+	q := bq.ExternalDataConfiguration{
+		SourceFormat:        string(e.SourceFormat),
+		SourceUris:          e.SourceURIs,
+		Autodetect:          e.AutoDetect,
+		Compression:         string(e.Compression),
+		IgnoreUnknownValues: e.IgnoreUnknownValues,
+		MaxBadRecords:       e.MaxBadRecords,
+	}
+	if e.Schema != nil {
+		q.Schema = e.Schema.toBQ()
+	}
+	if e.Options != nil {
+		e.Options.populateExternalDataConfig(&q)
+	}
+	return q
+}
+
+func bqToExternalDataConfig(q *bq.ExternalDataConfiguration) (*ExternalDataConfig, error) {
+	e := &ExternalDataConfig{
+		SourceFormat:        DataFormat(q.SourceFormat),
+		SourceURIs:          q.SourceUris,
+		AutoDetect:          q.Autodetect,
+		Compression:         Compression(q.Compression),
+		IgnoreUnknownValues: q.IgnoreUnknownValues,
+		MaxBadRecords:       q.MaxBadRecords,
+		Schema:              bqToSchema(q.Schema),
+	}
+	switch {
+	case q.CsvOptions != nil:
+		e.Options = bqToCSVOptions(q.CsvOptions)
+	case q.GoogleSheetsOptions != nil:
+		e.Options = bqToGoogleSheetsOptions(q.GoogleSheetsOptions)
+	case q.BigtableOptions != nil:
+		var err error
+		e.Options, err = bqToBigtableOptions(q.BigtableOptions)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return e, nil
+}
+
+// ExternalDataConfigOptions are additional options for external data configurations.
+// This interface is implemented by CSVOptions, GoogleSheetsOptions and BigtableOptions.
+type ExternalDataConfigOptions interface {
+	populateExternalDataConfig(*bq.ExternalDataConfiguration)
+}
+
+// CSVOptions are additional options for CSV external data sources.
+type CSVOptions struct {
+	// AllowJaggedRows causes missing trailing optional columns to be tolerated
+	// when reading CSV data. Missing values are treated as nulls.
+	AllowJaggedRows bool
+
+	// AllowQuotedNewlines sets whether quoted data sections containing
+	// newlines are allowed when reading CSV data.
+	AllowQuotedNewlines bool
+
+	// Encoding is the character encoding of data to be read.
+	Encoding Encoding
+
+	// FieldDelimiter is the separator for fields in a CSV file, used when
+	// reading or exporting data. The default is ",".
+	FieldDelimiter string
+
+	// Quote is the value used to quote data sections in a CSV file. The
+	// default quotation character is the double quote ("), which is used if
+	// both Quote and ForceZeroQuote are unset.
+	// To specify that no character should be interpreted as a quotation
+	// character, set ForceZeroQuote to true.
+	// Only used when reading data.
+	Quote          string
+	ForceZeroQuote bool
+
+	// The number of rows at the top of a CSV file that BigQuery will skip when
+	// reading data.
+	SkipLeadingRows int64
+}
+
+func (o *CSVOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
+	c.CsvOptions = &bq.CsvOptions{
+		AllowJaggedRows:     o.AllowJaggedRows,
+		AllowQuotedNewlines: o.AllowQuotedNewlines,
+		Encoding:            string(o.Encoding),
+		FieldDelimiter:      o.FieldDelimiter,
+		Quote:               o.quote(),
+		SkipLeadingRows:     o.SkipLeadingRows,
+	}
+}
+
+// quote returns the CSV quote character, or nil if unset.
+func (o *CSVOptions) quote() *string {
+	if o.ForceZeroQuote {
+		quote := ""
+		return &quote
+	}
+	if o.Quote == "" {
+		return nil
+	}
+	return &o.Quote
+}
+
+func (o *CSVOptions) setQuote(ps *string) {
+	if ps != nil {
+		o.Quote = *ps
+		if o.Quote == "" {
+			o.ForceZeroQuote = true
+		}
+	}
+}
+
+func bqToCSVOptions(q *bq.CsvOptions) *CSVOptions {
+	o := &CSVOptions{
+		AllowJaggedRows:     q.AllowJaggedRows,
+		AllowQuotedNewlines: q.AllowQuotedNewlines,
+		Encoding:            Encoding(q.Encoding),
+		FieldDelimiter:      q.FieldDelimiter,
+		SkipLeadingRows:     q.SkipLeadingRows,
+	}
+	o.setQuote(q.Quote)
+	return o
+}
+
+// GoogleSheetsOptions are additional options for GoogleSheets external data sources.
+type GoogleSheetsOptions struct {
+	// The number of rows at the top of a sheet that BigQuery will skip when
+	// reading data.
+	SkipLeadingRows int64
+}
+
+func (o *GoogleSheetsOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
+	c.GoogleSheetsOptions = &bq.GoogleSheetsOptions{
+		SkipLeadingRows: o.SkipLeadingRows,
+	}
+}
+
+func bqToGoogleSheetsOptions(q *bq.GoogleSheetsOptions) *GoogleSheetsOptions {
+	return &GoogleSheetsOptions{
+		SkipLeadingRows: q.SkipLeadingRows,
+	}
+}
+
+// BigtableOptions are additional options for Bigtable external data sources.
+type BigtableOptions struct {
+	// A list of column families to expose in the table schema along with their
+	// types. If omitted, all column families are present in the table schema and
+	// their values are read as BYTES.
+	ColumnFamilies []*BigtableColumnFamily
+
+	// If true, then the column families that are not specified in columnFamilies
+	// list are not exposed in the table schema. Otherwise, they are read with BYTES
+	// type values. The default is false.
+	IgnoreUnspecifiedColumnFamilies bool
+
+	// If true, then the rowkey column families will be read and converted to string.
+	// Otherwise they are read with BYTES type values and users need to manually cast
+	// them with CAST if necessary. The default is false.
+	ReadRowkeyAsString bool
+}
+
+func (o *BigtableOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
+	q := &bq.BigtableOptions{
+		IgnoreUnspecifiedColumnFamilies: o.IgnoreUnspecifiedColumnFamilies,
+		ReadRowkeyAsString:              o.ReadRowkeyAsString,
+	}
+	for _, f := range o.ColumnFamilies {
+		q.ColumnFamilies = append(q.ColumnFamilies, f.toBQ())
+	}
+	c.BigtableOptions = q
+}
+
+func bqToBigtableOptions(q *bq.BigtableOptions) (*BigtableOptions, error) {
+	b := &BigtableOptions{
+		IgnoreUnspecifiedColumnFamilies: q.IgnoreUnspecifiedColumnFamilies,
+		ReadRowkeyAsString:              q.ReadRowkeyAsString,
+	}
+	for _, f := range q.ColumnFamilies {
+		f2, err := bqToBigtableColumnFamily(f)
+		if err != nil {
+			return nil, err
+		}
+		b.ColumnFamilies = append(b.ColumnFamilies, f2)
+	}
+	return b, nil
+}
+
+// BigtableColumnFamily describes how BigQuery should access a Bigtable column family.
+type BigtableColumnFamily struct {
+	// Identifier of the column family.
+	FamilyID string
+
+	// Lists of columns that should be exposed as individual fields as opposed to a
+	// list of (column name, value) pairs. All columns whose qualifier matches a
+	// qualifier in this list can be accessed as .. Other columns can be accessed as
+	// a list through .Column field.
+	Columns []*BigtableColumn
+
+	// The encoding of the values when the type is not STRING. Acceptable encoding values are:
+	// - TEXT - indicates values are alphanumeric text strings.
+	// - BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions.
+	// This can be overridden for a specific column by listing that column in 'columns' and
+	// specifying an encoding for it.
+	Encoding string
+
+	// If true, only the latest version of values are exposed for all columns in this
+	// column family. This can be overridden for a specific column by listing that
+	// column in 'columns' and specifying a different setting for that column.
+	OnlyReadLatest bool
+
+	// The type to convert the value in cells of this
+	// column family. The values are expected to be encoded using HBase
+	// Bytes.toBytes function when using the BINARY encoding value.
+	// Following BigQuery types are allowed (case-sensitive):
+	// BYTES STRING INTEGER FLOAT BOOLEAN.
+	// The default type is BYTES. This can be overridden for a specific column by
+	// listing that column in 'columns' and specifying a type for it.
+	Type string
+}
+
+func (b *BigtableColumnFamily) toBQ() *bq.BigtableColumnFamily {
+	q := &bq.BigtableColumnFamily{
+		FamilyId:       b.FamilyID,
+		Encoding:       b.Encoding,
+		OnlyReadLatest: b.OnlyReadLatest,
+		Type:           b.Type,
+	}
+	for _, col := range b.Columns {
+		q.Columns = append(q.Columns, col.toBQ())
+	}
+	return q
+}
+
+func bqToBigtableColumnFamily(q *bq.BigtableColumnFamily) (*BigtableColumnFamily, error) {
+	b := &BigtableColumnFamily{
+		FamilyID:       q.FamilyId,
+		Encoding:       q.Encoding,
+		OnlyReadLatest: q.OnlyReadLatest,
+		Type:           q.Type,
+	}
+	for _, col := range q.Columns {
+		c, err := bqToBigtableColumn(col)
+		if err != nil {
+			return nil, err
+		}
+		b.Columns = append(b.Columns, c)
+	}
+	return b, nil
+}
+
+// BigtableColumn describes how BigQuery should access a Bigtable column.
+type BigtableColumn struct {
+	// Qualifier of the column. Columns in the parent column family that have this
+	// exact qualifier are exposed as . field. The column field name is the
+	// same as the column qualifier.
+	Qualifier string
+
+	// If the qualifier is not a valid BigQuery field identifier i.e. does not match
+	// [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field
+	// name and is used as field name in queries.
+	FieldName string
+
+	// If true, only the latest version of values are exposed for this column.
+	// See BigtableColumnFamily.OnlyReadLatest.
+	OnlyReadLatest bool
+
+	// The encoding of the values when the type is not STRING.
+	// See BigtableColumnFamily.Encoding
+	Encoding string
+
+	// The type to convert the value in cells of this column.
+	// See BigtableColumnFamily.Type
+	Type string
+}
+
+func (b *BigtableColumn) toBQ() *bq.BigtableColumn {
+	q := &bq.BigtableColumn{
+		FieldName:      b.FieldName,
+		OnlyReadLatest: b.OnlyReadLatest,
+		Encoding:       b.Encoding,
+		Type:           b.Type,
+	}
+	if utf8.ValidString(b.Qualifier) {
+		q.QualifierString = b.Qualifier
+	} else {
+		q.QualifierEncoded = base64.RawStdEncoding.EncodeToString([]byte(b.Qualifier))
+	}
+	return q
+}
+
+func bqToBigtableColumn(q *bq.BigtableColumn) (*BigtableColumn, error) {
+	b := &BigtableColumn{
+		FieldName:      q.FieldName,
+		OnlyReadLatest: q.OnlyReadLatest,
+		Encoding:       q.Encoding,
+		Type:           q.Type,
+	}
+	if q.QualifierString != "" {
+		b.Qualifier = q.QualifierString
+	} else {
+		bytes, err := base64.RawStdEncoding.DecodeString(q.QualifierEncoded)
+		if err != nil {
+			return nil, err
+		}
+		b.Qualifier = string(bytes)
+	}
+	return b, nil
+}
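Tying the pieces together, a minimal sketch of a CSV-backed external data definition built from the types above (bucket name and schema are illustrative):

    edc := &bigquery.ExternalDataConfig{
        SourceFormat: bigquery.CSV,
        SourceURIs:   []string{"gs://my-bucket/data-*.csv"},
        Schema: bigquery.Schema{
            {Name: "name", Type: bigquery.StringFieldType},
            {Name: "count", Type: bigquery.IntegerFieldType},
        },
        Options: &bigquery.CSVOptions{SkipLeadingRows: 1},
    }
    // edc can then be used, e.g., as TableMetadata.ExternalDataConfig
    // when creating an external table.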
diff --git a/vendor/cloud.google.com/go/bigquery/extract.go b/vendor/cloud.google.com/go/bigquery/extract.go
new file mode 100644
index 00000000..a77f2759
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/extract.go
@@ -0,0 +1,110 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"context"
+
+	"cloud.google.com/go/internal/trace"
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// ExtractConfig holds the configuration for an extract job.
+type ExtractConfig struct {
+	// Src is the table from which data will be extracted.
+	Src *Table
+
+	// Dst is the destination into which the data will be extracted.
+	Dst *GCSReference
+
+	// DisableHeader disables the printing of a header row in exported data.
+	DisableHeader bool
+
+	// The labels associated with this job.
+	Labels map[string]string
+}
+
+func (e *ExtractConfig) toBQ() *bq.JobConfiguration {
+	var printHeader *bool
+	if e.DisableHeader {
+		f := false
+		printHeader = &f
+	}
+	return &bq.JobConfiguration{
+		Labels: e.Labels,
+		Extract: &bq.JobConfigurationExtract{
+			DestinationUris:   append([]string{}, e.Dst.URIs...),
+			Compression:       string(e.Dst.Compression),
+			DestinationFormat: string(e.Dst.DestinationFormat),
+			FieldDelimiter:    e.Dst.FieldDelimiter,
+			SourceTable:       e.Src.toBQ(),
+			PrintHeader:       printHeader,
+		},
+	}
+}
+
+func bqToExtractConfig(q *bq.JobConfiguration, c *Client) *ExtractConfig {
+	qe := q.Extract
+	return &ExtractConfig{
+		Labels: q.Labels,
+		Dst: &GCSReference{
+			URIs:              qe.DestinationUris,
+			Compression:       Compression(qe.Compression),
+			DestinationFormat: DataFormat(qe.DestinationFormat),
+			FileConfig: FileConfig{
+				CSVOptions: CSVOptions{
+					FieldDelimiter: qe.FieldDelimiter,
+				},
+			},
+		},
+		DisableHeader: qe.PrintHeader != nil && !*qe.PrintHeader,
+		Src:           bqToTable(qe.SourceTable, c),
+	}
+}
+
+// An Extractor extracts data from a BigQuery table into Google Cloud Storage.
+type Extractor struct {
+	JobIDConfig
+	ExtractConfig
+	c *Client
+}
+
+// ExtractorTo returns an Extractor which can be used to extract data from a
+// BigQuery table into Google Cloud Storage.
+// The returned Extractor may optionally be further configured before its Run method is called.
+func (t *Table) ExtractorTo(dst *GCSReference) *Extractor {
+	return &Extractor{
+		c: t.c,
+		ExtractConfig: ExtractConfig{
+			Src: t,
+			Dst: dst,
+		},
+	}
+}
+
+// Run initiates an extract job.
+func (e *Extractor) Run(ctx context.Context) (j *Job, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Extractor.Run")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	return e.c.insertJob(ctx, e.newJob(), nil)
+}
+
+func (e *Extractor) newJob() *bq.Job {
+	return &bq.Job{
+		JobReference:  e.JobIDConfig.createJobRef(e.c),
+		Configuration: e.ExtractConfig.toBQ(),
+	}
+}
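Because Extractor embeds JobIDConfig, callers can pin a job ID before Run. A sketch, assuming the JobIDConfig fields JobID and AddJobIDSuffix from this package's job.go, with table and gcsRef as in the package docs:

    extractor := table.ExtractorTo(gcsRef)
    extractor.JobID = "my-extract-job" // via the embedded JobIDConfig
    extractor.AddJobIDSuffix = true    // append a random suffix for uniqueness
    job, err := extractor.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }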
diff --git a/vendor/cloud.google.com/go/bigquery/file.go b/vendor/cloud.google.com/go/bigquery/file.go
new file mode 100644
index 00000000..8dd86f5d
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/file.go
@@ -0,0 +1,137 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"io"
+
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// A ReaderSource is a source for a load operation that gets
+// data from an io.Reader.
+//
+// When a ReaderSource is part of a LoadConfig obtained via Job.Config,
+// its internal io.Reader will be nil, so it cannot be used for a
+// subsequent load operation.
+type ReaderSource struct {
+	r io.Reader
+	FileConfig
+}
+
+// NewReaderSource creates a ReaderSource from an io.Reader. You may
+// optionally configure properties on the ReaderSource that describe the
+// data being read, before passing it to Table.LoaderFrom.
+func NewReaderSource(r io.Reader) *ReaderSource {
+	return &ReaderSource{r: r}
+}
+
+func (r *ReaderSource) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
+	r.FileConfig.populateLoadConfig(lc)
+	return r.r
+}
+
+// FileConfig contains configuration options that pertain to files, typically
+// text files that require interpretation to be used as a BigQuery table. A
+// file may live in Google Cloud Storage (see GCSReference), or it may be
+// loaded into a table via Table.LoaderFrom with a ReaderSource.
+type FileConfig struct {
+	// SourceFormat is the format of the data to be read.
+	// Allowed values are: Avro, CSV, DatastoreBackup, JSON, ORC, and Parquet.  The default is CSV.
+	SourceFormat DataFormat
+
+	// Indicates if we should automatically infer the options and
+	// schema for CSV and JSON sources.
+	AutoDetect bool
+
+	// MaxBadRecords is the maximum number of bad records that will be ignored
+	// when reading data.
+	MaxBadRecords int64
+
+	// IgnoreUnknownValues causes values not matching the schema to be
+	// tolerated. Unknown values are ignored. For CSV this ignores extra values
+	// at the end of a line. For JSON this ignores named values that do not
+	// match any column name. If this field is not set, records containing
+	// unknown values are treated as bad records. The MaxBadRecords field can
+	// be used to customize how bad records are handled.
+	IgnoreUnknownValues bool
+
+	// Schema describes the data. It is required when reading CSV or JSON data,
+	// unless the data is being loaded into a table that already exists.
+	Schema Schema
+
+	// Additional options for CSV files.
+	CSVOptions
+}
+
+func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
+	conf.SkipLeadingRows = fc.SkipLeadingRows
+	conf.SourceFormat = string(fc.SourceFormat)
+	conf.Autodetect = fc.AutoDetect
+	conf.AllowJaggedRows = fc.AllowJaggedRows
+	conf.AllowQuotedNewlines = fc.AllowQuotedNewlines
+	conf.Encoding = string(fc.Encoding)
+	conf.FieldDelimiter = fc.FieldDelimiter
+	conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
+	conf.MaxBadRecords = fc.MaxBadRecords
+	if fc.Schema != nil {
+		conf.Schema = fc.Schema.toBQ()
+	}
+	conf.Quote = fc.quote()
+}
+
+func bqPopulateFileConfig(conf *bq.JobConfigurationLoad, fc *FileConfig) {
+	fc.SourceFormat = DataFormat(conf.SourceFormat)
+	fc.AutoDetect = conf.Autodetect
+	fc.MaxBadRecords = conf.MaxBadRecords
+	fc.IgnoreUnknownValues = conf.IgnoreUnknownValues
+	fc.Schema = bqToSchema(conf.Schema)
+	fc.SkipLeadingRows = conf.SkipLeadingRows
+	fc.AllowJaggedRows = conf.AllowJaggedRows
+	fc.AllowQuotedNewlines = conf.AllowQuotedNewlines
+	fc.Encoding = Encoding(conf.Encoding)
+	fc.FieldDelimiter = conf.FieldDelimiter
+	fc.CSVOptions.setQuote(conf.Quote)
+}
+
+func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) {
+	format := fc.SourceFormat
+	if format == "" {
+		// Format must be explicitly set for external data sources.
+		format = CSV
+	}
+	conf.Autodetect = fc.AutoDetect
+	conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
+	conf.MaxBadRecords = fc.MaxBadRecords
+	conf.SourceFormat = string(format)
+	if fc.Schema != nil {
+		conf.Schema = fc.Schema.toBQ()
+	}
+	if format == CSV {
+		fc.CSVOptions.populateExternalDataConfig(conf)
+	}
+}
+
+// Encoding specifies the character encoding of data to be loaded into BigQuery.
+// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
+// for more details about how this is used.
+type Encoding string
+
+const (
+	// UTF_8 specifies the UTF-8 encoding type.
+	UTF_8 Encoding = "UTF-8"
+	// ISO_8859_1 specifies the ISO-8859-1 encoding type.
+	ISO_8859_1 Encoding = "ISO-8859-1"
+)
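A sketch of loading local data through a ReaderSource, with myDataset as in the package docs (the file name is illustrative):

    f, err := os.Open("data.csv")
    if err != nil {
        // TODO: Handle error.
    }
    defer f.Close()
    rs := bigquery.NewReaderSource(f)
    rs.SkipLeadingRows = 1 // from the embedded CSVOptions
    loader := myDataset.Table("dest").LoaderFrom(rs)
    job, err := loader.Run(ctx)
    // Poll the job for completion if desired, as in the package docs.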
diff --git a/vendor/cloud.google.com/go/bigquery/gcs.go b/vendor/cloud.google.com/go/bigquery/gcs.go
new file mode 100644
index 00000000..6b70126b
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/gcs.go
@@ -0,0 +1,75 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"io"
+
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
+// an input or output to a BigQuery operation.
+type GCSReference struct {
+	// URIs refer to Google Cloud Storage objects.
+	URIs []string
+
+	FileConfig
+
+	// DestinationFormat is the format to use when writing exported files.
+	// Allowed values are: CSV, Avro, JSON.  The default is CSV.
+	// CSV is not supported for tables with nested or repeated fields.
+	DestinationFormat DataFormat
+
+	// Compression specifies the type of compression to apply when writing data
+	// to Google Cloud Storage, or using this GCSReference as an ExternalData
+	// source with CSV or JSON SourceFormat. Default is None.
+	Compression Compression
+}
+
+// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination.
+// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object.
+// Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided.
+// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name.
+// For more information about the treatment of wildcards and multiple URIs,
+// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
+func NewGCSReference(uri ...string) *GCSReference {
+	return &GCSReference{URIs: uri}
+}
+
+// Compression is the type of compression to apply when writing data to Google Cloud Storage.
+type Compression string
+
+const (
+	// None specifies no compression.
+	None Compression = "NONE"
+	// Gzip specifies gzip compression.
+	Gzip Compression = "GZIP"
+)
+
+func (gcs *GCSReference) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
+	lc.SourceUris = gcs.URIs
+	gcs.FileConfig.populateLoadConfig(lc)
+	return nil
+}
+
+func (gcs *GCSReference) toBQ() bq.ExternalDataConfiguration {
+	conf := bq.ExternalDataConfiguration{
+		Compression: string(gcs.Compression),
+		SourceUris:  append([]string{}, gcs.URIs...),
+	}
+	gcs.FileConfig.populateExternalDataConfig(&conf)
+	return conf
+}
diff --git a/vendor/cloud.google.com/go/bigquery/inserter.go b/vendor/cloud.google.com/go/bigquery/inserter.go
new file mode 100644
index 00000000..132994e9
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/inserter.go
@@ -0,0 +1,238 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"reflect"
+
+	"cloud.google.com/go/internal/trace"
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// An Inserter does streaming inserts into a BigQuery table.
+// It is safe for concurrent use.
+type Inserter struct {
+	t *Table
+
+	// SkipInvalidRows causes rows containing invalid data to be silently
+	// ignored. The default value is false, which causes the entire request to
+	// fail if there is an attempt to insert an invalid row.
+	SkipInvalidRows bool
+
+	// IgnoreUnknownValues causes values not matching the schema to be ignored.
+	// The default value is false, which causes records containing such values
+	// to be treated as invalid records.
+	IgnoreUnknownValues bool
+
+	// A TableTemplateSuffix allows Inserters to create tables automatically.
+	//
+	// Experimental: this option is experimental and may be modified or removed in future versions,
+	// regardless of any other documented package stability guarantees.
+	//
+	// When you specify a suffix, the table you upload data to
+	// will be used as a template for creating a new table, with the same schema,
+	// called <table> + <suffix>.
+	//
+	// More information is available at
+	// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
+	TableTemplateSuffix string
+}
+
+// Inserter returns an Inserter that can be used to append rows to t.
+// The returned Inserter may optionally be further configured before its Put method is called.
+//
+// To stream rows into a date-partitioned table at a particular date, add the
+// $yyyymmdd suffix to the table name when constructing the Table.
+func (t *Table) Inserter() *Inserter {
+	return &Inserter{t: t}
+}
+
+// Uploader returns an Inserter, and is retained for backwards compatibility.
+// Deprecated: use Table.Inserter instead.
+func (t *Table) Uploader() *Inserter { return t.Inserter() }
+
+// Put uploads one or more rows to the BigQuery service.
+//
+// If src is ValueSaver, then its Save method is called to produce a row for uploading.
+//
+// If src is a struct or pointer to a struct, then a schema is inferred from it
+// and used to create a StructSaver. The InsertID of the StructSaver will be
+// empty.
+//
+// If src is a slice of ValueSavers, structs, or struct pointers, then each
+// element of the slice is treated as above, and multiple rows are uploaded.
+//
+// Put returns a PutMultiError if one or more rows failed to be uploaded.
+// The PutMultiError contains a RowInsertionError for each failed row.
+//
+// Put will retry on temporary errors (see
+// https://cloud.google.com/bigquery/troubleshooting-errors). This can result
+// in duplicate rows if you do not use insert IDs. Also, if the error persists,
+// the call will run indefinitely. Pass a context with a timeout to prevent
+// hanging calls.
+func (u *Inserter) Put(ctx context.Context, src interface{}) (err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Inserter.Put")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	savers, err := valueSavers(src)
+	if err != nil {
+		return err
+	}
+	return u.putMulti(ctx, savers)
+}
+
+func valueSavers(src interface{}) ([]ValueSaver, error) {
+	saver, ok, err := toValueSaver(src)
+	if err != nil {
+		return nil, err
+	}
+	if ok {
+		return []ValueSaver{saver}, nil
+	}
+	srcVal := reflect.ValueOf(src)
+	if srcVal.Kind() != reflect.Slice {
+		return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src)
+	}
+	var savers []ValueSaver
+	for i := 0; i < srcVal.Len(); i++ {
+		s := srcVal.Index(i).Interface()
+		saver, ok, err := toValueSaver(s)
+		if err != nil {
+			return nil, err
+		}
+		if !ok {
+			return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s)
+		}
+		savers = append(savers, saver)
+	}
+	return savers, nil
+}
+
+// Make a ValueSaver from x, which must implement ValueSaver already
+// or be a struct or pointer to struct.
+func toValueSaver(x interface{}) (ValueSaver, bool, error) {
+	if _, ok := x.(StructSaver); ok {
+		return nil, false, errors.New("bigquery: use &StructSaver, not StructSaver")
+	}
+	var insertID string
+	// Handle StructSavers specially so we can infer the schema if necessary.
+	if ss, ok := x.(*StructSaver); ok && ss.Schema == nil {
+		x = ss.Struct
+		insertID = ss.InsertID
+		// Fall through so we can infer the schema.
+	}
+	if saver, ok := x.(ValueSaver); ok {
+		return saver, ok, nil
+	}
+	v := reflect.ValueOf(x)
+	// Support Put with []interface{}
+	if v.Kind() == reflect.Interface {
+		v = v.Elem()
+	}
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+	if v.Kind() != reflect.Struct {
+		return nil, false, nil
+	}
+	schema, err := inferSchemaReflectCached(v.Type())
+	if err != nil {
+		return nil, false, err
+	}
+	return &StructSaver{
+		Struct:   x,
+		InsertID: insertID,
+		Schema:   schema,
+	}, true, nil
+}
+
+func (u *Inserter) putMulti(ctx context.Context, src []ValueSaver) error {
+	req, err := u.newInsertRequest(src)
+	if err != nil {
+		return err
+	}
+	if req == nil {
+		return nil
+	}
+	call := u.t.c.bqs.Tabledata.InsertAll(u.t.ProjectID, u.t.DatasetID, u.t.TableID, req)
+	call = call.Context(ctx)
+	setClientHeader(call.Header())
+	var res *bq.TableDataInsertAllResponse
+	err = runWithRetry(ctx, func() (err error) {
+		res, err = call.Do()
+		return err
+	})
+	if err != nil {
+		return err
+	}
+	return handleInsertErrors(res.InsertErrors, req.Rows)
+}
+
+func (u *Inserter) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) {
+	if savers == nil { // If there are no rows, do nothing.
+		return nil, nil
+	}
+	req := &bq.TableDataInsertAllRequest{
+		TemplateSuffix:      u.TableTemplateSuffix,
+		IgnoreUnknownValues: u.IgnoreUnknownValues,
+		SkipInvalidRows:     u.SkipInvalidRows,
+	}
+	for _, saver := range savers {
+		row, insertID, err := saver.Save()
+		if err != nil {
+			return nil, err
+		}
+		if insertID == "" {
+			insertID = randomIDFn()
+		}
+		m := make(map[string]bq.JsonValue)
+		for k, v := range row {
+			m[k] = bq.JsonValue(v)
+		}
+		req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
+			InsertId: insertID,
+			Json:     m,
+		})
+	}
+	return req, nil
+}
+
+func handleInsertErrors(ierrs []*bq.TableDataInsertAllResponseInsertErrors, rows []*bq.TableDataInsertAllRequestRows) error {
+	if len(ierrs) == 0 {
+		return nil
+	}
+	var errs PutMultiError
+	for _, e := range ierrs {
+		if int(e.Index) >= len(rows) {
+			return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
+		}
+		rie := RowInsertionError{
+			InsertID: rows[e.Index].InsertId,
+			RowIndex: int(e.Index),
+		}
+		for _, errp := range e.Errors {
+			rie.Errors = append(rie.Errors, bqToError(errp))
+		}
+		errs = append(errs, rie)
+	}
+	return errs
+}
+
+// Uploader is an obsolete name for Inserter.
+type Uploader = Inserter
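To control deduplication explicitly, rows can be wrapped in StructSavers so each carries a caller-chosen InsertID. A sketch, reusing the hypothetical Item2 struct from the package docs:

    savers := []*bigquery.StructSaver{
        {Struct: Item2{Name: "n1", Size: 32.6, Count: 7}, InsertID: "id-1"},
        {Struct: Item2{Name: "n2", Size: 4, Count: 2}, InsertID: "id-2"},
    }
    if err := table.Inserter().Put(ctx, savers); err != nil {
        // TODO: Handle error.
    }
    // With Schema left nil, toValueSaver infers the schema from the struct.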
diff --git a/vendor/cloud.google.com/go/bigquery/iterator.go b/vendor/cloud.google.com/go/bigquery/iterator.go
new file mode 100644
index 00000000..298143cb
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/iterator.go
@@ -0,0 +1,222 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+
+	bq "google.golang.org/api/bigquery/v2"
+	"google.golang.org/api/iterator"
+)
+
+// Construct a RowIterator.
+// If pf is nil, there are no rows in the result set.
+func newRowIterator(ctx context.Context, t *Table, pf pageFetcher) *RowIterator {
+	it := &RowIterator{
+		ctx:   ctx,
+		table: t,
+		pf:    pf,
+	}
+	if pf != nil {
+		it.pageInfo, it.nextFunc = iterator.NewPageInfo(
+			it.fetch,
+			func() int { return len(it.rows) },
+			func() interface{} { r := it.rows; it.rows = nil; return r })
+	}
+	return it
+}
+
+// A RowIterator provides access to the result of a BigQuery lookup.
+type RowIterator struct {
+	ctx      context.Context
+	table    *Table
+	pf       pageFetcher
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// StartIndex can be set before the first call to Next. If PageInfo().Token
+	// is also set, StartIndex is ignored.
+	StartIndex uint64
+
+	// The schema of the table. Available after the first call to Next.
+	Schema Schema
+
+	// The total number of rows in the result. Available after the first call to Next.
+	// May be zero just after rows were inserted.
+	TotalRows uint64
+
+	rows         [][]Value
+	structLoader structLoader // used to populate a pointer to a struct
+}
+
+// Next loads the next row into dst. Its return value is iterator.Done if there
+// are no more results. Once Next returns iterator.Done, all subsequent calls
+// will return iterator.Done.
+//
+// dst may implement ValueLoader, or may be a *[]Value, *map[string]Value, or struct pointer.
+//
+// If dst is a *[]Value, it will be set to new []Value whose i'th element
+// will be populated with the i'th column of the row.
+//
+// If dst is a *map[string]Value, a new map will be created if dst is nil. Then
+// for each schema column name, the map key of that name will be set to the column's
+// value. STRUCT types (RECORD types or nested schemas) become nested maps.
+//
+// If dst is pointer to a struct, each column in the schema will be matched
+// with an exported field of the struct that has the same name, ignoring case.
+// Unmatched schema columns and struct fields will be ignored.
+//
+// Each BigQuery column type corresponds to one or more Go types; a matching struct
+// field must be of the correct type. The correspondences are:
+//
+//   STRING      string
+//   BOOL        bool
+//   INTEGER     int, int8, int16, int32, int64, uint8, uint16, uint32
+//   FLOAT       float32, float64
+//   BYTES       []byte
+//   TIMESTAMP   time.Time
+//   DATE        civil.Date
+//   TIME        civil.Time
+//   DATETIME    civil.DateTime
+//
+// A repeated field corresponds to a slice or array of the element type. A STRUCT
+// type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
+// All calls to Next on the same iterator must use the same struct type.
+//
+// It is an error to attempt to read a BigQuery NULL value into a struct field,
+// unless the field is of type []byte or is one of the special Null types: NullInt64,
+// NullFloat64, NullBool, NullString, NullTimestamp, NullDate, NullTime or
+// NullDateTime. You can also use a *[]Value or *map[string]Value to read from a
+// table with NULLs.
+func (it *RowIterator) Next(dst interface{}) error {
+	if it.pf == nil { // There are no rows in the result set.
+		return iterator.Done
+	}
+	var vl ValueLoader
+	switch dst := dst.(type) {
+	case ValueLoader:
+		vl = dst
+	case *[]Value:
+		vl = (*valueList)(dst)
+	case *map[string]Value:
+		vl = (*valueMap)(dst)
+	default:
+		if !isStructPtr(dst) {
+			return fmt.Errorf("bigquery: cannot convert %T to ValueLoader (need pointer to []Value, map[string]Value, or struct)", dst)
+		}
+	}
+	if err := it.nextFunc(); err != nil {
+		return err
+	}
+	row := it.rows[0]
+	it.rows = it.rows[1:]
+
+	if vl == nil {
+		// This can only happen if dst is a pointer to a struct. We couldn't
+		// set vl above because we need the schema.
+		if err := it.structLoader.set(dst, it.Schema); err != nil {
+			return err
+		}
+		vl = &it.structLoader
+	}
+	return vl.Load(row, it.Schema)
+}
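+
+// exampleNext is a hedged usage sketch and not part of the upstream library:
+// it shows the canonical Next loop, loading each row into a struct until
+// iterator.Done is returned. The score type is a stand-in for a real row type.
+func exampleNext(it *RowIterator) error {
+	type score struct {
+		Name string
+		Num  int
+	}
+	for {
+		var s score
+		err := it.Next(&s) // matches columns to fields by name, ignoring case
+		if err == iterator.Done {
+			return nil // all rows consumed
+		}
+		if err != nil {
+			return err
+		}
+		fmt.Println(s.Name, s.Num)
+	}
+}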
+
+func isStructPtr(x interface{}) bool {
+	t := reflect.TypeOf(x)
+	return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
+
+func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
+	res, err := it.pf(it.ctx, it.table, it.Schema, it.StartIndex, int64(pageSize), pageToken)
+	if err != nil {
+		return "", err
+	}
+	it.rows = append(it.rows, res.rows...)
+	it.Schema = res.schema
+	it.TotalRows = res.totalRows
+	return res.pageToken, nil
+}
+
+// A pageFetcher returns a page of rows from a destination table.
+type pageFetcher func(ctx context.Context, _ *Table, _ Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error)
+
+type fetchPageResult struct {
+	pageToken string
+	rows      [][]Value
+	totalRows uint64
+	schema    Schema
+}
+
+// fetchPage gets a page of rows from t.
+func fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
+	// Fetch the table schema in the background, if necessary.
+	errc := make(chan error, 1)
+	if schema != nil {
+		errc <- nil
+	} else {
+		go func() {
+			var bqt *bq.Table
+			err := runWithRetry(ctx, func() (err error) {
+				bqt, err = t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).
+					Fields("schema").
+					Context(ctx).
+					Do()
+				return err
+			})
+			if err == nil && bqt.Schema != nil {
+				schema = bqToSchema(bqt.Schema)
+			}
+			errc <- err
+		}()
+	}
+	call := t.c.bqs.Tabledata.List(t.ProjectID, t.DatasetID, t.TableID)
+	setClientHeader(call.Header())
+	if pageToken != "" {
+		call.PageToken(pageToken)
+	} else {
+		call.StartIndex(startIndex)
+	}
+	if pageSize > 0 {
+		call.MaxResults(pageSize)
+	}
+	var res *bq.TableDataList
+	err := runWithRetry(ctx, func() (err error) {
+		res, err = call.Context(ctx).Do()
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+	err = <-errc
+	if err != nil {
+		return nil, err
+	}
+	rows, err := convertRows(res.Rows, schema)
+	if err != nil {
+		return nil, err
+	}
+	return &fetchPageResult{
+		pageToken: res.PageToken,
+		rows:      rows,
+		totalRows: uint64(res.TotalRows),
+		schema:    schema,
+	}, nil
+}
diff --git a/vendor/cloud.google.com/go/bigquery/job.go b/vendor/cloud.google.com/go/bigquery/job.go
new file mode 100644
index 00000000..d98211d9
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/job.go
@@ -0,0 +1,821 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"cloud.google.com/go/internal"
+	"cloud.google.com/go/internal/trace"
+	gax "github.com/googleapis/gax-go"
+	bq "google.golang.org/api/bigquery/v2"
+	"google.golang.org/api/googleapi"
+	"google.golang.org/api/iterator"
+)
+
+// A Job represents an operation which has been submitted to BigQuery for processing.
+type Job struct {
+	c          *Client
+	projectID  string
+	jobID      string
+	location   string
+	email      string
+	config     *bq.JobConfiguration
+	lastStatus *JobStatus
+}
+
+// JobFromID creates a Job which refers to an existing BigQuery job. The job
+// need not have been created by this package. For example, the job may have
+// been created in the BigQuery console.
+//
+// For jobs whose location is other than "US" or "EU", set Client.Location or use
+// JobFromIDLocation.
+func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
+	return c.JobFromIDLocation(ctx, id, c.Location)
+}
+
+// JobFromIDLocation creates a Job which refers to an existing BigQuery job. The job
+// need not have been created by this package (for example, it may have
+// been created in the BigQuery console), but it must exist in the specified location.
+func (c *Client) JobFromIDLocation(ctx context.Context, id, location string) (j *Job, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.JobFromIDLocation")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	bqjob, err := c.getJobInternal(ctx, id, location, "configuration", "jobReference", "status", "statistics")
+	if err != nil {
+		return nil, err
+	}
+	return bqToJob(bqjob, c)
+}
+
+// ID returns the job's ID.
+func (j *Job) ID() string {
+	return j.jobID
+}
+
+// Location returns the job's location.
+func (j *Job) Location() string {
+	return j.location
+}
+
+// Email returns the email of the job's creator.
+func (j *Job) Email() string {
+	return j.email
+}
+
+// State is one of a sequence of states that a Job progresses through as it is processed.
+type State int
+
+const (
+	// StateUnspecified is the default JobIterator state.
+	StateUnspecified State = iota
+	// Pending is a state that describes that the job is pending.
+	Pending
+	// Running is a state that describes that the job is running.
+	Running
+	// Done is a state that describes that the job is done.
+	Done
+)
+
+// JobStatus contains the current State of a job, and errors encountered while processing that job.
+type JobStatus struct {
+	State State
+
+	err error
+
+	// All errors encountered during the running of the job.
+	// Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful.
+	Errors []*Error
+
+	// Statistics about the job.
+	Statistics *JobStatistics
+}
+
+// JobConfig contains configuration information for a job. It is implemented by
+// *CopyConfig, *ExtractConfig, *LoadConfig and *QueryConfig.
+type JobConfig interface {
+	isJobConfig()
+}
+
+func (*CopyConfig) isJobConfig()    {}
+func (*ExtractConfig) isJobConfig() {}
+func (*LoadConfig) isJobConfig()    {}
+func (*QueryConfig) isJobConfig()   {}
+
+// Config returns the configuration information for j.
+func (j *Job) Config() (JobConfig, error) {
+	return bqToJobConfig(j.config, j.c)
+}
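+
+// exampleJobConfig is a hedged sketch and not part of the upstream library:
+// the JobConfig returned by Job.Config is decoded with a type switch over its
+// four concrete implementations.
+func exampleJobConfig(j *Job) (string, error) {
+	config, err := j.Config()
+	if err != nil {
+		return "", err
+	}
+	switch config.(type) {
+	case *QueryConfig:
+		return "query", nil
+	case *LoadConfig:
+		return "load", nil
+	case *CopyConfig:
+		return "copy", nil
+	case *ExtractConfig:
+		return "extract", nil
+	default:
+		return "", errors.New("bigquery: unknown job configuration")
+	}
+}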
+
+func bqToJobConfig(q *bq.JobConfiguration, c *Client) (JobConfig, error) {
+	switch {
+	case q == nil:
+		return nil, nil
+	case q.Copy != nil:
+		return bqToCopyConfig(q, c), nil
+	case q.Extract != nil:
+		return bqToExtractConfig(q, c), nil
+	case q.Load != nil:
+		return bqToLoadConfig(q, c), nil
+	case q.Query != nil:
+		return bqToQueryConfig(q, c)
+	default:
+		return nil, nil
+	}
+}
+
+// JobIDConfig describes how to create an ID for a job.
+type JobIDConfig struct {
+	// JobID is the ID to use for the job. If empty, a random job ID will be generated.
+	JobID string
+
+	// If AddJobIDSuffix is true, then a random string will be appended to JobID.
+	AddJobIDSuffix bool
+
+	// Location is the location for the job.
+	Location string
+}
+
+// createJobRef creates a JobReference.
+func (j *JobIDConfig) createJobRef(c *Client) *bq.JobReference {
+	// We don't check whether projectID is empty; the server will return an
+	// error when it encounters the resulting JobReference.
+	loc := j.Location
+	if loc == "" { // Use Client.Location as a default.
+		loc = c.Location
+	}
+	jr := &bq.JobReference{ProjectId: c.projectID, Location: loc}
+	if j.JobID == "" {
+		jr.JobId = randomIDFn()
+	} else if j.AddJobIDSuffix {
+		jr.JobId = j.JobID + "-" + randomIDFn()
+	} else {
+		jr.JobId = j.JobID
+	}
+	return jr
+}
+
+// Done reports whether the job has completed.
+// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
+func (s *JobStatus) Done() bool {
+	return s.State == Done
+}
+
+// Err returns the error that caused the job to complete unsuccessfully (if any).
+func (s *JobStatus) Err() error {
+	return s.err
+}
+
+// Status retrieves the current status of the job from BigQuery. It fails if the status could not be determined.
+func (j *Job) Status(ctx context.Context) (js *JobStatus, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Status")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	bqjob, err := j.c.getJobInternal(ctx, j.jobID, j.location, "status", "statistics")
+	if err != nil {
+		return nil, err
+	}
+	if err := j.setStatus(bqjob.Status); err != nil {
+		return nil, err
+	}
+	j.setStatistics(bqjob.Statistics, j.c)
+	return j.lastStatus, nil
+}
+
+// LastStatus returns the most recently retrieved status of the job. The status is
+// retrieved when a new job is created, or when JobFromID or Job.Status is called.
+// Call Job.Status to get the most up-to-date information about a job.
+func (j *Job) LastStatus() *JobStatus {
+	return j.lastStatus
+}
+
+// Cancel requests that a job be cancelled. This method returns without waiting for
+// cancellation to take effect. To check whether the job has terminated, use Job.Status.
+// Cancelled jobs may still incur costs.
+func (j *Job) Cancel(ctx context.Context) error {
+	// Jobs.Cancel returns a job entity, but the only relevant piece of
+	// data it may contain (the status of the job) is unreliable.  From the
+	// docs: "This call will return immediately, and the client will need
+	// to poll for the job status to see if the cancel completed
+	// successfully".  So it would be misleading to return a status.
+	call := j.c.bqs.Jobs.Cancel(j.projectID, j.jobID).
+		Location(j.location).
+		Fields(). // We don't need any of the response data.
+		Context(ctx)
+	setClientHeader(call.Header())
+	return runWithRetry(ctx, func() error {
+		_, err := call.Do()
+		return err
+	})
+}
+
+// Wait blocks until the job or the context is done. It returns the final status
+// of the job.
+// If an error occurs while retrieving the status, Wait returns that error. But
+// Wait returns nil if the status was retrieved successfully, even if
+// status.Err() != nil. So callers must check both errors. See the example.
+func (j *Job) Wait(ctx context.Context) (js *JobStatus, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Wait")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	if j.isQuery() {
+		// We can avoid polling for query jobs.
+		if _, _, err := j.waitForQuery(ctx, j.projectID); err != nil {
+			return nil, err
+		}
+		// Note: this costs an extra RPC even if the caller only wants to wait for the query to finish.
+		js, err := j.Status(ctx)
+		if err != nil {
+			return nil, err
+		}
+		return js, nil
+	}
+	// Non-query jobs must poll.
+	err = internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
+		js, err = j.Status(ctx)
+		if err != nil {
+			return true, err
+		}
+		if js.Done() {
+			return true, nil
+		}
+		return false, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return js, nil
+}
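+
+// exampleWait is a hedged sketch and not part of the upstream library. It
+// spells out the two-error contract documented on Wait: the error returned by
+// Wait reports a failure to retrieve the status, while status.Err() reports
+// whether the job itself completed unsuccessfully.
+func exampleWait(ctx context.Context, j *Job) error {
+	status, err := j.Wait(ctx)
+	if err != nil {
+		return err // the status could not be retrieved
+	}
+	if err := status.Err(); err != nil {
+		return err // the job completed, but unsuccessfully
+	}
+	return nil // the job succeeded
+}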
+
+// Read fetches the results of a query job.
+// If j is not a query job, Read returns an error.
+func (j *Job) Read(ctx context.Context) (ri *RowIterator, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Read")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	return j.read(ctx, j.waitForQuery, fetchPage)
+}
+
+func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, uint64, error), pf pageFetcher) (*RowIterator, error) {
+	if !j.isQuery() {
+		return nil, errors.New("bigquery: cannot read from a non-query job")
+	}
+	destTable := j.config.Query.DestinationTable
+	// The destination table should only be nil if there was a query error.
+	projectID := j.projectID
+	if destTable != nil && projectID != destTable.ProjectId {
+		return nil, fmt.Errorf("bigquery: job project ID is %q, but destination table's is %q", projectID, destTable.ProjectId)
+	}
+	schema, totalRows, err := waitForQuery(ctx, projectID)
+	if err != nil {
+		return nil, err
+	}
+	if destTable == nil {
+		return nil, errors.New("bigquery: query job missing destination table")
+	}
+	dt := bqToTable(destTable, j.c)
+	if totalRows == 0 {
+		pf = nil
+	}
+	it := newRowIterator(ctx, dt, pf)
+	it.Schema = schema
+	it.TotalRows = totalRows
+	return it, nil
+}
+
+// waitForQuery waits for the query job to complete and returns its schema. It also
+// returns the total number of rows in the result set.
+func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, uint64, error) {
+	// Use GetQueryResults only to wait for completion, not to read results.
+	call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Location(j.location).Context(ctx).MaxResults(0)
+	setClientHeader(call.Header())
+	backoff := gax.Backoff{
+		Initial:    1 * time.Second,
+		Multiplier: 2,
+		Max:        60 * time.Second,
+	}
+	var res *bq.GetQueryResultsResponse
+	err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
+		res, err = call.Do()
+		if err != nil {
+			return !retryableError(err), err
+		}
+		if !res.JobComplete { // GetQueryResults may return early without error; retry.
+			return false, nil
+		}
+		return true, nil
+	})
+	if err != nil {
+		return nil, 0, err
+	}
+	return bqToSchema(res.Schema), res.TotalRows, nil
+}
+
+// JobStatistics contains statistics about a job.
+type JobStatistics struct {
+	CreationTime        time.Time
+	StartTime           time.Time
+	EndTime             time.Time
+	TotalBytesProcessed int64
+
+	Details Statistics
+}
+
+// Statistics is one of ExtractStatistics, LoadStatistics or QueryStatistics.
+type Statistics interface {
+	implementsStatistics()
+}
+
+// ExtractStatistics contains statistics about an extract job.
+type ExtractStatistics struct {
+	// The number of files per destination URI or URI pattern specified in the
+	// extract configuration. These values will be in the same order as the
+	// URIs specified in the 'destinationUris' field.
+	DestinationURIFileCounts []int64
+}
+
+// LoadStatistics contains statistics about a load job.
+type LoadStatistics struct {
+	// The number of bytes of source data in a load job.
+	InputFileBytes int64
+
+	// The number of source files in a load job.
+	InputFiles int64
+
+	// Size of the loaded data in bytes. Note that while a load job is in the
+	// running state, this value may change.
+	OutputBytes int64
+
+	// The number of rows imported in a load job. Note that while an import job is
+	// in the running state, this value may change.
+	OutputRows int64
+}
+
+// QueryStatistics contains statistics about a query job.
+type QueryStatistics struct {
+	// Billing tier for the job.
+	BillingTier int64
+
+	// Whether the query result was fetched from the query cache.
+	CacheHit bool
+
+	// The type of query statement, if valid.
+	StatementType string
+
+	// Total bytes billed for the job.
+	TotalBytesBilled int64
+
+	// Total bytes processed for the job.
+	TotalBytesProcessed int64
+
+	// Describes execution plan for the query.
+	QueryPlan []*ExplainQueryStage
+
+	// The number of rows affected by a DML statement. Present only for DML
+	// statements INSERT, UPDATE or DELETE.
+	NumDMLAffectedRows int64
+
+	// Describes a timeline of job execution.
+	Timeline []*QueryTimelineSample
+
+	// ReferencedTables: [Output-only, Experimental] Referenced tables for
+	// the job. Queries that reference more than 50 tables will not have a
+	// complete list.
+	ReferencedTables []*Table
+
+	// The schema of the results. Present only for successful dry run of
+	// non-legacy SQL queries.
+	Schema Schema
+
+	// Slot-milliseconds consumed by this query job.
+	SlotMillis int64
+
+	// Standard SQL: list of undeclared query parameter names detected during a
+	// dry run validation.
+	UndeclaredQueryParameterNames []string
+
+	// DDL target table.
+	DDLTargetTable *Table
+
+	// DDL Operation performed on the target table.  Used to report how the
+	// query impacted the DDL target table.
+	DDLOperationPerformed string
+}
+
+// ExplainQueryStage describes one stage of a query.
+type ExplainQueryStage struct {
+	// CompletedParallelInputs: Number of parallel input segments completed.
+	CompletedParallelInputs int64
+
+	// ComputeAvg: Duration the average shard spent on CPU-bound tasks.
+	ComputeAvg time.Duration
+
+	// ComputeMax: Duration the slowest shard spent on CPU-bound tasks.
+	ComputeMax time.Duration
+
+	// Relative amount of the total time the average shard spent on CPU-bound tasks.
+	ComputeRatioAvg float64
+
+	// Relative amount of the total time the slowest shard spent on CPU-bound tasks.
+	ComputeRatioMax float64
+
+	// EndTime: Stage end time.
+	EndTime time.Time
+
+	// Unique ID for stage within plan.
+	ID int64
+
+	// InputStages: IDs for stages that are inputs to this stage.
+	InputStages []int64
+
+	// Human-readable name for stage.
+	Name string
+
+	// ParallelInputs: Number of parallel input segments to be processed.
+	ParallelInputs int64
+
+	// ReadAvg: Duration the average shard spent reading input.
+	ReadAvg time.Duration
+
+	// ReadMax: Duration the slowest shard spent reading input.
+	ReadMax time.Duration
+
+	// Relative amount of the total time the average shard spent reading input.
+	ReadRatioAvg float64
+
+	// Relative amount of the total time the slowest shard spent reading input.
+	ReadRatioMax float64
+
+	// Number of records read into the stage.
+	RecordsRead int64
+
+	// Number of records written by the stage.
+	RecordsWritten int64
+
+	// ShuffleOutputBytes: Total number of bytes written to shuffle.
+	ShuffleOutputBytes int64
+
+	// ShuffleOutputBytesSpilled: Total number of bytes written to shuffle
+	// and spilled to disk.
+	ShuffleOutputBytesSpilled int64
+
+	// StartTime: Stage start time.
+	StartTime time.Time
+
+	// Current status for the stage.
+	Status string
+
+	// List of operations within the stage in dependency order (approximately
+	// chronological).
+	Steps []*ExplainQueryStep
+
+	// WaitAvg: Duration the average shard spent waiting to be scheduled.
+	WaitAvg time.Duration
+
+	// WaitMax: Duration the slowest shard spent waiting to be scheduled.
+	WaitMax time.Duration
+
+	// Relative amount of the total time the average shard spent waiting to be scheduled.
+	WaitRatioAvg float64
+
+	// Relative amount of the total time the slowest shard spent waiting to be scheduled.
+	WaitRatioMax float64
+
+	// WriteAvg: Duration the average shard spent on writing output.
+	WriteAvg time.Duration
+
+	// WriteMax: Duration the slowest shard spent on writing output.
+	WriteMax time.Duration
+
+	// Relative amount of the total time the average shard spent on writing output.
+	WriteRatioAvg float64
+
+	// Relative amount of the total time the slowest shard spent on writing output.
+	WriteRatioMax float64
+}
+
+// ExplainQueryStep describes one step of a query stage.
+type ExplainQueryStep struct {
+	// Machine-readable operation type.
+	Kind string
+
+	// Human-readable stage descriptions.
+	Substeps []string
+}
+
+// QueryTimelineSample represents a sample of execution statistics at a point in time.
+type QueryTimelineSample struct {
+	// Total number of units currently being processed by workers, represented as largest value since last sample.
+	ActiveUnits int64
+
+	// Total parallel units of work completed by this query.
+	CompletedUnits int64
+
+	// Time elapsed since start of query execution.
+	Elapsed time.Duration
+
+	// Total parallel units of work remaining for the active stages.
+	PendingUnits int64
+
+	// Cumulative slot-milliseconds consumed by the query.
+	SlotMillis int64
+}
+
+func (*ExtractStatistics) implementsStatistics() {}
+func (*LoadStatistics) implementsStatistics()    {}
+func (*QueryStatistics) implementsStatistics()   {}
+
+// Jobs lists jobs within a project.
+func (c *Client) Jobs(ctx context.Context) *JobIterator {
+	it := &JobIterator{
+		ctx:       ctx,
+		c:         c,
+		ProjectID: c.projectID,
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
+		it.fetch,
+		func() int { return len(it.items) },
+		func() interface{} { b := it.items; it.items = nil; return b })
+	return it
+}
+
+// JobIterator iterates over jobs in a project.
+type JobIterator struct {
+	ProjectID       string    // Project ID of the jobs to list. Default is the client's project.
+	AllUsers        bool      // Whether to list jobs owned by all users in the project, or just the current caller.
+	State           State     // List only jobs in the given state. Defaults to all states.
+	MinCreationTime time.Time // List only jobs created after this time.
+	MaxCreationTime time.Time // List only jobs created before this time.
+
+	ctx      context.Context
+	c        *Client
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+	items    []*Job
+}
+
+// PageInfo is a getter for the JobIterator's PageInfo.
+func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
+
+// Next returns the next Job. Its second return value is iterator.Done if
+// there are no more results. Once Next returns Done, all subsequent calls will
+// return Done.
+func (it *JobIterator) Next() (*Job, error) {
+	if err := it.nextFunc(); err != nil {
+		return nil, err
+	}
+	item := it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
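+
+// exampleListJobs is a hedged sketch and not part of the upstream library: it
+// lists the pending jobs in the client's project using the standard
+// google.golang.org/api/iterator loop.
+func exampleListJobs(ctx context.Context, c *Client) error {
+	it := c.Jobs(ctx)
+	it.State = Pending // optional filter; the default lists all states
+	for {
+		job, err := it.Next()
+		if err == iterator.Done {
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+		fmt.Println(job.ID(), job.Location())
+	}
+}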
+
+func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) {
+	var st string
+	switch it.State {
+	case StateUnspecified:
+		st = ""
+	case Pending:
+		st = "pending"
+	case Running:
+		st = "running"
+	case Done:
+		st = "done"
+	default:
+		return "", fmt.Errorf("bigquery: invalid value for JobIterator.State: %d", it.State)
+	}
+
+	req := it.c.bqs.Jobs.List(it.ProjectID).
+		Context(it.ctx).
+		PageToken(pageToken).
+		Projection("full").
+		AllUsers(it.AllUsers)
+	if st != "" {
+		req.StateFilter(st)
+	}
+	if !it.MinCreationTime.IsZero() {
+		req.MinCreationTime(uint64(it.MinCreationTime.UnixNano() / 1e6))
+	}
+	if !it.MaxCreationTime.IsZero() {
+		req.MaxCreationTime(uint64(it.MaxCreationTime.UnixNano() / 1e6))
+	}
+	setClientHeader(req.Header())
+	if pageSize > 0 {
+		req.MaxResults(int64(pageSize))
+	}
+	res, err := req.Do()
+	if err != nil {
+		return "", err
+	}
+	for _, j := range res.Jobs {
+		job, err := convertListedJob(j, it.c)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, job)
+	}
+	return res.NextPageToken, nil
+}
+
+func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) {
+	return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, j.UserEmail, c)
+}
+
+func (c *Client) getJobInternal(ctx context.Context, jobID, location string, fields ...googleapi.Field) (*bq.Job, error) {
+	var job *bq.Job
+	call := c.bqs.Jobs.Get(c.projectID, jobID).Context(ctx)
+	if location != "" {
+		call = call.Location(location)
+	}
+	if len(fields) > 0 {
+		call = call.Fields(fields...)
+	}
+	setClientHeader(call.Header())
+	err := runWithRetry(ctx, func() (err error) {
+		job, err = call.Do()
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+	return job, nil
+}
+
+func bqToJob(q *bq.Job, c *Client) (*Job, error) {
+	return bqToJob2(q.JobReference, q.Configuration, q.Status, q.Statistics, q.UserEmail, c)
+}
+
+func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt *bq.JobStatistics, email string, c *Client) (*Job, error) {
+	j := &Job{
+		projectID: qr.ProjectId,
+		jobID:     qr.JobId,
+		location:  qr.Location,
+		c:         c,
+		email:     email,
+	}
+	j.setConfig(qc)
+	if err := j.setStatus(qs); err != nil {
+		return nil, err
+	}
+	j.setStatistics(qt, c)
+	return j, nil
+}
+
+func (j *Job) setConfig(config *bq.JobConfiguration) {
+	if config == nil {
+		return
+	}
+	j.config = config
+}
+
+func (j *Job) isQuery() bool {
+	return j.config != nil && j.config.Query != nil
+}
+
+var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}
+
+func (j *Job) setStatus(qs *bq.JobStatus) error {
+	if qs == nil {
+		return nil
+	}
+	state, ok := stateMap[qs.State]
+	if !ok {
+		return fmt.Errorf("unexpected job state: %v", qs.State)
+	}
+	j.lastStatus = &JobStatus{
+		State: state,
+		err:   nil,
+	}
+	if err := bqToError(qs.ErrorResult); state == Done && err != nil {
+		j.lastStatus.err = err
+	}
+	for _, ep := range qs.Errors {
+		j.lastStatus.Errors = append(j.lastStatus.Errors, bqToError(ep))
+	}
+	return nil
+}
+
+func (j *Job) setStatistics(s *bq.JobStatistics, c *Client) {
+	if s == nil || j.lastStatus == nil {
+		return
+	}
+	js := &JobStatistics{
+		CreationTime:        unixMillisToTime(s.CreationTime),
+		StartTime:           unixMillisToTime(s.StartTime),
+		EndTime:             unixMillisToTime(s.EndTime),
+		TotalBytesProcessed: s.TotalBytesProcessed,
+	}
+	switch {
+	case s.Extract != nil:
+		js.Details = &ExtractStatistics{
+			DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts),
+		}
+	case s.Load != nil:
+		js.Details = &LoadStatistics{
+			InputFileBytes: s.Load.InputFileBytes,
+			InputFiles:     s.Load.InputFiles,
+			OutputBytes:    s.Load.OutputBytes,
+			OutputRows:     s.Load.OutputRows,
+		}
+	case s.Query != nil:
+		var names []string
+		for _, qp := range s.Query.UndeclaredQueryParameters {
+			names = append(names, qp.Name)
+		}
+		var tables []*Table
+		for _, tr := range s.Query.ReferencedTables {
+			tables = append(tables, bqToTable(tr, c))
+		}
+		js.Details = &QueryStatistics{
+			BillingTier:                   s.Query.BillingTier,
+			CacheHit:                      s.Query.CacheHit,
+			DDLTargetTable:                bqToTable(s.Query.DdlTargetTable, c),
+			DDLOperationPerformed:         s.Query.DdlOperationPerformed,
+			StatementType:                 s.Query.StatementType,
+			TotalBytesBilled:              s.Query.TotalBytesBilled,
+			TotalBytesProcessed:           s.Query.TotalBytesProcessed,
+			NumDMLAffectedRows:            s.Query.NumDmlAffectedRows,
+			QueryPlan:                     queryPlanFromProto(s.Query.QueryPlan),
+			Schema:                        bqToSchema(s.Query.Schema),
+			SlotMillis:                    s.Query.TotalSlotMs,
+			Timeline:                      timelineFromProto(s.Query.Timeline),
+			ReferencedTables:              tables,
+			UndeclaredQueryParameterNames: names,
+		}
+	}
+	j.lastStatus.Statistics = js
+}
+
+func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
+	var res []*ExplainQueryStage
+	for _, s := range stages {
+		var steps []*ExplainQueryStep
+		for _, p := range s.Steps {
+			steps = append(steps, &ExplainQueryStep{
+				Kind:     p.Kind,
+				Substeps: p.Substeps,
+			})
+		}
+		res = append(res, &ExplainQueryStage{
+			CompletedParallelInputs:   s.CompletedParallelInputs,
+			ComputeAvg:                time.Duration(s.ComputeMsAvg) * time.Millisecond,
+			ComputeMax:                time.Duration(s.ComputeMsMax) * time.Millisecond,
+			ComputeRatioAvg:           s.ComputeRatioAvg,
+			ComputeRatioMax:           s.ComputeRatioMax,
+			EndTime:                   time.Unix(0, s.EndMs*1e6),
+			ID:                        s.Id,
+			InputStages:               s.InputStages,
+			Name:                      s.Name,
+			ParallelInputs:            s.ParallelInputs,
+			ReadAvg:                   time.Duration(s.ReadMsAvg) * time.Millisecond,
+			ReadMax:                   time.Duration(s.ReadMsMax) * time.Millisecond,
+			ReadRatioAvg:              s.ReadRatioAvg,
+			ReadRatioMax:              s.ReadRatioMax,
+			RecordsRead:               s.RecordsRead,
+			RecordsWritten:            s.RecordsWritten,
+			ShuffleOutputBytes:        s.ShuffleOutputBytes,
+			ShuffleOutputBytesSpilled: s.ShuffleOutputBytesSpilled,
+			StartTime:                 time.Unix(0, s.StartMs*1e6),
+			Status:                    s.Status,
+			Steps:                     steps,
+			WaitAvg:                   time.Duration(s.WaitMsAvg) * time.Millisecond,
+			WaitMax:                   time.Duration(s.WaitMsMax) * time.Millisecond,
+			WaitRatioAvg:              s.WaitRatioAvg,
+			WaitRatioMax:              s.WaitRatioMax,
+			WriteAvg:                  time.Duration(s.WriteMsAvg) * time.Millisecond,
+			WriteMax:                  time.Duration(s.WriteMsMax) * time.Millisecond,
+			WriteRatioAvg:             s.WriteRatioAvg,
+			WriteRatioMax:             s.WriteRatioMax,
+		})
+	}
+	return res
+}
+
+func timelineFromProto(timeline []*bq.QueryTimelineSample) []*QueryTimelineSample {
+	var res []*QueryTimelineSample
+	for _, s := range timeline {
+		res = append(res, &QueryTimelineSample{
+			ActiveUnits:    s.ActiveUnits,
+			CompletedUnits: s.CompletedUnits,
+			Elapsed:        time.Duration(s.ElapsedMs) * time.Millisecond,
+			PendingUnits:   s.PendingUnits,
+			SlotMillis:     s.TotalSlotMs,
+		})
+	}
+	return res
+}
diff --git a/vendor/cloud.google.com/go/bigquery/load.go b/vendor/cloud.google.com/go/bigquery/load.go
new file mode 100644
index 00000000..1cbcc84a
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/load.go
@@ -0,0 +1,146 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"context"
+	"io"
+
+	"cloud.google.com/go/internal/trace"
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// LoadConfig holds the configuration for a load job.
+type LoadConfig struct {
+	// Src is the source from which data will be loaded.
+	Src LoadSource
+
+	// Dst is the table into which the data will be loaded.
+	Dst *Table
+
+	// CreateDisposition specifies the circumstances under which the destination table will be created.
+	// The default is CreateIfNeeded.
+	CreateDisposition TableCreateDisposition
+
+	// WriteDisposition specifies how existing data in the destination table is treated.
+	// The default is WriteAppend.
+	WriteDisposition TableWriteDisposition
+
+	// The labels associated with this job.
+	Labels map[string]string
+
+	// If non-nil, the destination table is partitioned by time.
+	TimePartitioning *TimePartitioning
+
+	// Clustering specifies the data clustering configuration for the destination table.
+	Clustering *Clustering
+
+	// Custom encryption configuration (e.g., Cloud KMS keys).
+	DestinationEncryptionConfig *EncryptionConfig
+
+	// Allows the schema of the destination table to be updated as a side effect of
+	// the load job.
+	SchemaUpdateOptions []string
+}
+
+func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
+	config := &bq.JobConfiguration{
+		Labels: l.Labels,
+		Load: &bq.JobConfigurationLoad{
+			CreateDisposition:                  string(l.CreateDisposition),
+			WriteDisposition:                   string(l.WriteDisposition),
+			DestinationTable:                   l.Dst.toBQ(),
+			TimePartitioning:                   l.TimePartitioning.toBQ(),
+			Clustering:                         l.Clustering.toBQ(),
+			DestinationEncryptionConfiguration: l.DestinationEncryptionConfig.toBQ(),
+			SchemaUpdateOptions:                l.SchemaUpdateOptions,
+		},
+	}
+	media := l.Src.populateLoadConfig(config.Load)
+	return config, media
+}
+
+func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig {
+	lc := &LoadConfig{
+		Labels:                      q.Labels,
+		CreateDisposition:           TableCreateDisposition(q.Load.CreateDisposition),
+		WriteDisposition:            TableWriteDisposition(q.Load.WriteDisposition),
+		Dst:                         bqToTable(q.Load.DestinationTable, c),
+		TimePartitioning:            bqToTimePartitioning(q.Load.TimePartitioning),
+		Clustering:                  bqToClustering(q.Load.Clustering),
+		DestinationEncryptionConfig: bqToEncryptionConfig(q.Load.DestinationEncryptionConfiguration),
+		SchemaUpdateOptions:         q.Load.SchemaUpdateOptions,
+	}
+	var fc *FileConfig
+	if len(q.Load.SourceUris) == 0 {
+		s := NewReaderSource(nil)
+		fc = &s.FileConfig
+		lc.Src = s
+	} else {
+		s := NewGCSReference(q.Load.SourceUris...)
+		fc = &s.FileConfig
+		lc.Src = s
+	}
+	bqPopulateFileConfig(q.Load, fc)
+	return lc
+}
+
+// A Loader loads data from Google Cloud Storage into a BigQuery table.
+type Loader struct {
+	JobIDConfig
+	LoadConfig
+	c *Client
+}
+
+// A LoadSource represents a source of data that can be loaded into
+// a BigQuery table.
+//
+// This package defines two LoadSources: GCSReference, for Google Cloud Storage
+// objects, and ReaderSource, for data read from an io.Reader.
+type LoadSource interface {
+	// populateLoadConfig populates the given load config and returns the media to upload, if any.
+	populateLoadConfig(*bq.JobConfigurationLoad) io.Reader
+}
+
+// LoaderFrom returns a Loader which can be used to load data into a BigQuery table.
+// The returned Loader may optionally be further configured before its Run method is called.
+// See GCSReference and ReaderSource for additional configuration options that
+// affect loading.
+func (t *Table) LoaderFrom(src LoadSource) *Loader {
+	return &Loader{
+		c: t.c,
+		LoadConfig: LoadConfig{
+			Src: src,
+			Dst: t,
+		},
+	}
+}
+
+// Run initiates a load job.
+func (l *Loader) Run(ctx context.Context) (j *Job, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Load.Run")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	job, media := l.newJob()
+	return l.c.insertJob(ctx, job, media)
+}
+
+func (l *Loader) newJob() (*bq.Job, io.Reader) {
+	config, media := l.LoadConfig.toBQ()
+	return &bq.Job{
+		JobReference:  l.JobIDConfig.createJobRef(l.c),
+		Configuration: config,
+	}, media
+}
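+
+// exampleLoad is a hedged usage sketch and not part of the upstream library:
+// it loads data from a Cloud Storage object into dst and blocks until the job
+// finishes. The gs:// URI and the WriteTruncate disposition are placeholders.
+func exampleLoad(ctx context.Context, dst *Table) error {
+	gcsRef := NewGCSReference("gs://my-bucket/data.csv") // placeholder URI
+	loader := dst.LoaderFrom(gcsRef)
+	loader.WriteDisposition = WriteTruncate // replace any existing rows
+	job, err := loader.Run(ctx)
+	if err != nil {
+		return err
+	}
+	status, err := job.Wait(ctx)
+	if err != nil {
+		return err
+	}
+	return status.Err() // nil unless the load job itself failed
+}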
diff --git a/vendor/cloud.google.com/go/bigquery/nulls.go b/vendor/cloud.google.com/go/bigquery/nulls.go
new file mode 100644
index 00000000..5b4fbd96
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/nulls.go
@@ -0,0 +1,320 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+	"time"
+
+	"cloud.google.com/go/civil"
+)
+
+// NullInt64 represents a BigQuery INT64 that may be NULL.
+type NullInt64 struct {
+	Int64 int64
+	Valid bool // Valid is true if Int64 is not NULL.
+}
+
+func (n NullInt64) String() string { return nullstr(n.Valid, n.Int64) }
+
+// NullString represents a BigQuery STRING that may be NULL.
+type NullString struct {
+	StringVal string
+	Valid     bool // Valid is true if StringVal is not NULL.
+}
+
+func (n NullString) String() string { return nullstr(n.Valid, n.StringVal) }
+
+// NullFloat64 represents a BigQuery FLOAT64 that may be NULL.
+type NullFloat64 struct {
+	Float64 float64
+	Valid   bool // Valid is true if Float64 is not NULL.
+}
+
+func (n NullFloat64) String() string { return nullstr(n.Valid, n.Float64) }
+
+// NullBool represents a BigQuery BOOL that may be NULL.
+type NullBool struct {
+	Bool  bool
+	Valid bool // Valid is true if Bool is not NULL.
+}
+
+func (n NullBool) String() string { return nullstr(n.Valid, n.Bool) }
+
+// NullTimestamp represents a BigQuery TIMESTAMP that may be null.
+type NullTimestamp struct {
+	Timestamp time.Time
+	Valid     bool // Valid is true if Time is not NULL.
+}
+
+func (n NullTimestamp) String() string { return nullstr(n.Valid, n.Timestamp) }
+
+// NullDate represents a BigQuery DATE that may be null.
+type NullDate struct {
+	Date  civil.Date
+	Valid bool // Valid is true if Date is not NULL.
+}
+
+func (n NullDate) String() string { return nullstr(n.Valid, n.Date) }
+
+// NullTime represents a BigQuery TIME that may be null.
+type NullTime struct {
+	Time  civil.Time
+	Valid bool // Valid is true if Time is not NULL.
+}
+
+func (n NullTime) String() string {
+	if !n.Valid {
+		return "<null>"
+	}
+	return CivilTimeString(n.Time)
+}
+
+// NullDateTime represents a BigQuery DATETIME that may be null.
+type NullDateTime struct {
+	DateTime civil.DateTime
+	Valid    bool // Valid is true if DateTime is not NULL.
+}
+
+func (n NullDateTime) String() string {
+	if !n.Valid {
+		return "<null>"
+	}
+	return CivilDateTimeString(n.DateTime)
+}
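+
+// exampleNullable is a hedged sketch and not part of the upstream library: it
+// shows the Valid-flag pattern shared by all of the Null types. A struct
+// field of type NullString can absorb a BigQuery NULL that a plain string
+// field cannot.
+func exampleNullable(n NullString) string {
+	if !n.Valid {
+		return "NULL" // the column held a BigQuery NULL
+	}
+	return n.StringVal
+}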
+
+// MarshalJSON converts the NullInt64 to JSON.
+func (n NullInt64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Int64) }
+
+// MarshalJSON converts the NullFloat64 to JSON.
+func (n NullFloat64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Float64) }
+
+// MarshalJSON converts the NullBool to JSON.
+func (n NullBool) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Bool) }
+
+// MarshalJSON converts the NullString to JSON.
+func (n NullString) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.StringVal) }
+
+// MarshalJSON converts the NullTimestamp to JSON.
+func (n NullTimestamp) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Timestamp) }
+
+// MarshalJSON converts the NullDate to JSON.
+func (n NullDate) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Date) }
+
+// MarshalJSON converts the NullTime to JSON.
+func (n NullTime) MarshalJSON() ([]byte, error) {
+	if !n.Valid {
+		return jsonNull, nil
+	}
+	return []byte(`"` + CivilTimeString(n.Time) + `"`), nil
+}
+
+// MarshalJSON converts the NullDateTime to JSON.
+func (n NullDateTime) MarshalJSON() ([]byte, error) {
+	if !n.Valid {
+		return jsonNull, nil
+	}
+	return []byte(`"` + CivilDateTimeString(n.DateTime) + `"`), nil
+}
+
+func nullstr(valid bool, v interface{}) string {
+	if !valid {
+		return "NULL"
+	}
+	return fmt.Sprint(v)
+}
+
+var jsonNull = []byte("null")
+
+func nulljson(valid bool, v interface{}) ([]byte, error) {
+	if !valid {
+		return jsonNull, nil
+	}
+	return json.Marshal(v)
+}
+
+// UnmarshalJSON converts JSON into a NullInt64.
+func (n *NullInt64) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.Int64 = 0
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &n.Int64); err != nil {
+		return err
+	}
+	n.Valid = true
+	return nil
+}
+
+// UnmarshalJSON converts JSON into a NullFloat64.
+func (n *NullFloat64) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.Float64 = 0
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &n.Float64); err != nil {
+		return err
+	}
+	n.Valid = true
+	return nil
+}
+
+// UnmarshalJSON converts JSON into a NullBool.
+func (n *NullBool) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.Bool = false
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &n.Bool); err != nil {
+		return err
+	}
+	n.Valid = true
+	return nil
+}
+
+// UnmarshalJSON converts JSON into a NullString.
+func (n *NullString) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.StringVal = ""
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &n.StringVal); err != nil {
+		return err
+	}
+	n.Valid = true
+	return nil
+}
+
+// UnmarshalJSON converts JSON into a NullTimestamp.
+func (n *NullTimestamp) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.Timestamp = time.Time{}
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &n.Timestamp); err != nil {
+		return err
+	}
+	n.Valid = true
+	return nil
+}
+
+// UnmarshalJSON converts JSON into a NullDate.
+func (n *NullDate) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.Date = civil.Date{}
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &n.Date); err != nil {
+		return err
+	}
+	n.Valid = true
+	return nil
+}
+
+// UnmarshalJSON converts JSON into a NullTime.
+func (n *NullTime) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.Time = civil.Time{}
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+
+	s, err := strconv.Unquote(string(b))
+	if err != nil {
+		return err
+	}
+
+	t, err := civil.ParseTime(s)
+	if err != nil {
+		return err
+	}
+	n.Time = t
+
+	n.Valid = true
+	return nil
+}
+
+// UnmarshalJSON converts JSON into a NullDateTime.
+func (n *NullDateTime) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.DateTime = civil.DateTime{}
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+
+	s, err := strconv.Unquote(string(b))
+	if err != nil {
+		return err
+	}
+
+	dt, err := parseCivilDateTime(s)
+	if err != nil {
+		return err
+	}
+	n.DateTime = dt
+
+	n.Valid = true
+	return nil
+}
+
+var (
+	typeOfNullInt64     = reflect.TypeOf(NullInt64{})
+	typeOfNullFloat64   = reflect.TypeOf(NullFloat64{})
+	typeOfNullBool      = reflect.TypeOf(NullBool{})
+	typeOfNullString    = reflect.TypeOf(NullString{})
+	typeOfNullTimestamp = reflect.TypeOf(NullTimestamp{})
+	typeOfNullDate      = reflect.TypeOf(NullDate{})
+	typeOfNullTime      = reflect.TypeOf(NullTime{})
+	typeOfNullDateTime  = reflect.TypeOf(NullDateTime{})
+)
+
+func nullableFieldType(t reflect.Type) FieldType {
+	switch t {
+	case typeOfNullInt64:
+		return IntegerFieldType
+	case typeOfNullFloat64:
+		return FloatFieldType
+	case typeOfNullBool:
+		return BooleanFieldType
+	case typeOfNullString:
+		return StringFieldType
+	case typeOfNullTimestamp:
+		return TimestampFieldType
+	case typeOfNullDate:
+		return DateFieldType
+	case typeOfNullTime:
+		return TimeFieldType
+	case typeOfNullDateTime:
+		return DateTimeFieldType
+	default:
+		return ""
+	}
+}
diff --git a/vendor/cloud.google.com/go/bigquery/params.go b/vendor/cloud.google.com/go/bigquery/params.go
new file mode 100644
index 00000000..fb79947a
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/params.go
@@ -0,0 +1,356 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"math/big"
+	"reflect"
+	"regexp"
+	"time"
+
+	"cloud.google.com/go/civil"
+	"cloud.google.com/go/internal/fields"
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+var (
+	// See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type.
+	timestampFormat = "2006-01-02 15:04:05.999999-07:00"
+
+	// See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.name
+	validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$")
+)
+
+const nullableTagOption = "nullable"
+
+func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
+	name, keep, opts, err := fields.ParseStandardTag("bigquery", t)
+	if err != nil {
+		return "", false, nil, err
+	}
+	if name != "" && !validFieldName.MatchString(name) {
+		return "", false, nil, errInvalidFieldName
+	}
+	for _, opt := range opts {
+		if opt != nullableTagOption {
+			return "", false, nil, fmt.Errorf(
+				"bigquery: invalid tag option %q. The only valid option is %q",
+				opt, nullableTagOption)
+		}
+	}
+	return name, keep, opts, nil
+}
+
+var fieldCache = fields.NewCache(bqTagParser, nil, nil)
+
+var (
+	int64ParamType     = &bq.QueryParameterType{Type: "INT64"}
+	float64ParamType   = &bq.QueryParameterType{Type: "FLOAT64"}
+	boolParamType      = &bq.QueryParameterType{Type: "BOOL"}
+	stringParamType    = &bq.QueryParameterType{Type: "STRING"}
+	bytesParamType     = &bq.QueryParameterType{Type: "BYTES"}
+	dateParamType      = &bq.QueryParameterType{Type: "DATE"}
+	timeParamType      = &bq.QueryParameterType{Type: "TIME"}
+	dateTimeParamType  = &bq.QueryParameterType{Type: "DATETIME"}
+	timestampParamType = &bq.QueryParameterType{Type: "TIMESTAMP"}
+	numericParamType   = &bq.QueryParameterType{Type: "NUMERIC"}
+)
+
+var (
+	typeOfDate     = reflect.TypeOf(civil.Date{})
+	typeOfTime     = reflect.TypeOf(civil.Time{})
+	typeOfDateTime = reflect.TypeOf(civil.DateTime{})
+	typeOfGoTime   = reflect.TypeOf(time.Time{})
+	typeOfRat      = reflect.TypeOf(&big.Rat{})
+)
+
+// A QueryParameter is a parameter to a query.
+type QueryParameter struct {
+	// Name is used for named parameter mode.
+	// It must match the name in the query case-insensitively.
+	Name string
+
+	// Value is the value of the parameter.
+	//
+	// When you create a QueryParameter to send to BigQuery, the following Go types
+	// are supported, with their corresponding BigQuery types:
+	// int, int8, int16, int32, int64, uint8, uint16, uint32: INT64
+	//   Note that uint, uint64 and uintptr are not supported, because
+	//   they may contain values that cannot fit into a 64-bit signed integer.
+	// float32, float64: FLOAT64
+	// bool: BOOL
+	// string: STRING
+	// []byte: BYTES
+	// time.Time: TIMESTAMP
+	// *big.Rat: NUMERIC
+	// Arrays and slices of the above.
+	// Structs of the above. Only the exported fields are used.
+	//
+	// When a QueryParameter is returned inside a QueryConfig from a call to
+	// Job.Config:
+	// Integers are of type int64.
+	// Floating-point values are of type float64.
+	// Arrays are of type []interface{}, regardless of the array element type.
+	// Structs are of type map[string]interface{}.
+	Value interface{}
+}
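+
+// exampleParameters is a hedged sketch and not part of the upstream library:
+// it attaches named parameters to a query, relying on the Go-to-BigQuery type
+// correspondences listed above (int maps to INT64, time.Time to TIMESTAMP).
+func exampleParameters(q *Query) {
+	q.Parameters = []QueryParameter{
+		{Name: "min_num", Value: 10},        // sent as INT64
+		{Name: "cutoff", Value: time.Now()}, // sent as TIMESTAMP
+	}
+}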
+
+func (p QueryParameter) toBQ() (*bq.QueryParameter, error) {
+	pv, err := paramValue(reflect.ValueOf(p.Value))
+	if err != nil {
+		return nil, err
+	}
+	pt, err := paramType(reflect.TypeOf(p.Value))
+	if err != nil {
+		return nil, err
+	}
+	return &bq.QueryParameter{
+		Name:           p.Name,
+		ParameterValue: &pv,
+		ParameterType:  pt,
+	}, nil
+}
+
+func paramType(t reflect.Type) (*bq.QueryParameterType, error) {
+	if t == nil {
+		return nil, errors.New("bigquery: nil parameter")
+	}
+	switch t {
+	case typeOfDate:
+		return dateParamType, nil
+	case typeOfTime:
+		return timeParamType, nil
+	case typeOfDateTime:
+		return dateTimeParamType, nil
+	case typeOfGoTime:
+		return timestampParamType, nil
+	case typeOfRat:
+		return numericParamType, nil
+	}
+	switch t.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32:
+		return int64ParamType, nil
+
+	case reflect.Float32, reflect.Float64:
+		return float64ParamType, nil
+
+	case reflect.Bool:
+		return boolParamType, nil
+
+	case reflect.String:
+		return stringParamType, nil
+
+	case reflect.Slice:
+		if t.Elem().Kind() == reflect.Uint8 {
+			return bytesParamType, nil
+		}
+		fallthrough
+
+	case reflect.Array:
+		et, err := paramType(t.Elem())
+		if err != nil {
+			return nil, err
+		}
+		return &bq.QueryParameterType{Type: "ARRAY", ArrayType: et}, nil
+
+	case reflect.Ptr:
+		if t.Elem().Kind() != reflect.Struct {
+			break
+		}
+		t = t.Elem()
+		fallthrough
+
+	case reflect.Struct:
+		var fts []*bq.QueryParameterTypeStructTypes
+		fields, err := fieldCache.Fields(t)
+		if err != nil {
+			return nil, err
+		}
+		for _, f := range fields {
+			pt, err := paramType(f.Type)
+			if err != nil {
+				return nil, err
+			}
+			fts = append(fts, &bq.QueryParameterTypeStructTypes{
+				Name: f.Name,
+				Type: pt,
+			})
+		}
+		return &bq.QueryParameterType{Type: "STRUCT", StructTypes: fts}, nil
+	}
+	return nil, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter type", t)
+}
+
+func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
+	var res bq.QueryParameterValue
+	if !v.IsValid() {
+		return res, errors.New("bigquery: nil parameter")
+	}
+	t := v.Type()
+	switch t {
+	case typeOfDate:
+		res.Value = v.Interface().(civil.Date).String()
+		return res, nil
+
+	case typeOfTime:
+		// civil.Time has nanosecond resolution, but BigQuery TIME has only microsecond resolution.
+		// (If we send nanoseconds, then when we try to read the result we get "query job
+		// missing destination table").
+		res.Value = CivilTimeString(v.Interface().(civil.Time))
+		return res, nil
+
+	case typeOfDateTime:
+		res.Value = CivilDateTimeString(v.Interface().(civil.DateTime))
+		return res, nil
+
+	case typeOfGoTime:
+		res.Value = v.Interface().(time.Time).Format(timestampFormat)
+		return res, nil
+
+	case typeOfRat:
+		res.Value = NumericString(v.Interface().(*big.Rat))
+		return res, nil
+	}
+	switch t.Kind() {
+	case reflect.Slice:
+		if t.Elem().Kind() == reflect.Uint8 {
+			res.Value = base64.StdEncoding.EncodeToString(v.Interface().([]byte))
+			return res, nil
+		}
+		fallthrough
+
+	case reflect.Array:
+		var vals []*bq.QueryParameterValue
+		for i := 0; i < v.Len(); i++ {
+			val, err := paramValue(v.Index(i))
+			if err != nil {
+				return bq.QueryParameterValue{}, err
+			}
+			vals = append(vals, &val)
+		}
+		return bq.QueryParameterValue{ArrayValues: vals}, nil
+
+	case reflect.Ptr:
+		if t.Elem().Kind() != reflect.Struct {
+			return res, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter value", t)
+		}
+		t = t.Elem()
+		v = v.Elem()
+		if !v.IsValid() {
+			// nil pointer becomes empty value
+			return res, nil
+		}
+		fallthrough
+
+	case reflect.Struct:
+		fields, err := fieldCache.Fields(t)
+		if err != nil {
+			return bq.QueryParameterValue{}, err
+		}
+		res.StructValues = map[string]bq.QueryParameterValue{}
+		for _, f := range fields {
+			fv := v.FieldByIndex(f.Index)
+			fp, err := paramValue(fv)
+			if err != nil {
+				return bq.QueryParameterValue{}, err
+			}
+			res.StructValues[f.Name] = fp
+		}
+		return res, nil
+	}
+	// None of the above: assume a scalar type. (If it's not a valid type,
+	// paramType will catch the error.)
+	res.Value = fmt.Sprint(v.Interface())
+	return res, nil
+}
+
+func bqToQueryParameter(q *bq.QueryParameter) (QueryParameter, error) {
+	p := QueryParameter{Name: q.Name}
+	val, err := convertParamValue(q.ParameterValue, q.ParameterType)
+	if err != nil {
+		return QueryParameter{}, err
+	}
+	p.Value = val
+	return p, nil
+}
+
+var paramTypeToFieldType = map[string]FieldType{
+	int64ParamType.Type:   IntegerFieldType,
+	float64ParamType.Type: FloatFieldType,
+	boolParamType.Type:    BooleanFieldType,
+	stringParamType.Type:  StringFieldType,
+	bytesParamType.Type:   BytesFieldType,
+	dateParamType.Type:    DateFieldType,
+	timeParamType.Type:    TimeFieldType,
+	numericParamType.Type: NumericFieldType,
+}
+
+// Convert a parameter value from the service to a Go value. This is similar to, but
+// not quite the same as, converting data values.
+func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterType) (interface{}, error) {
+	switch qtype.Type {
+	case "ARRAY":
+		if qval == nil {
+			return []interface{}(nil), nil
+		}
+		return convertParamArray(qval.ArrayValues, qtype.ArrayType)
+	case "STRUCT":
+		if qval == nil {
+			return map[string]interface{}(nil), nil
+		}
+		return convertParamStruct(qval.StructValues, qtype.StructTypes)
+	case "TIMESTAMP":
+		return time.Parse(timestampFormat, qval.Value)
+	case "DATETIME":
+		return parseCivilDateTime(qval.Value)
+	default:
+		return convertBasicType(qval.Value, paramTypeToFieldType[qtype.Type])
+	}
+}
+
+// convertParamArray converts a query parameter array value to a Go value. It
+// always returns a []interface{}.
+func convertParamArray(elVals []*bq.QueryParameterValue, elType *bq.QueryParameterType) ([]interface{}, error) {
+	var vals []interface{}
+	for _, el := range elVals {
+		val, err := convertParamValue(el, elType)
+		if err != nil {
+			return nil, err
+		}
+		vals = append(vals, val)
+	}
+	return vals, nil
+}
+
+// convertParamStruct converts a query parameter struct value into a Go value. It
+// always returns a map[string]interface{}.
+func convertParamStruct(sVals map[string]bq.QueryParameterValue, sTypes []*bq.QueryParameterTypeStructTypes) (map[string]interface{}, error) {
+	vals := map[string]interface{}{}
+	for _, st := range sTypes {
+		if sv, ok := sVals[st.Name]; ok {
+			val, err := convertParamValue(&sv, st.Type)
+			if err != nil {
+				return nil, err
+			}
+			vals[st.Name] = val
+		} else {
+			vals[st.Name] = nil
+		}
+	}
+	return vals, nil
+}
diff --git a/vendor/cloud.google.com/go/bigquery/query.go b/vendor/cloud.google.com/go/bigquery/query.go
new file mode 100644
index 00000000..735a54c4
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/query.go
@@ -0,0 +1,328 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"context"
+	"errors"
+
+	"cloud.google.com/go/internal/trace"
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// QueryConfig holds the configuration for a query job.
+type QueryConfig struct {
+	// Dst is the table into which the results of the query will be written.
+	// If this field is nil, a temporary table will be created.
+	Dst *Table
+
+	// The query to execute. See https://cloud.google.com/bigquery/query-reference for details.
+	Q string
+
+	// DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query.
+	// If DefaultProjectID is set, DefaultDatasetID must also be set.
+	DefaultProjectID string
+	DefaultDatasetID string
+
+	// TableDefinitions describes data sources outside of BigQuery.
+	// The map keys may be used as table names in the query string.
+	//
+	// When a QueryConfig is returned from Job.Config, the map values
+	// are always of type *ExternalDataConfig.
+	TableDefinitions map[string]ExternalData
+
+	// CreateDisposition specifies the circumstances under which the destination table will be created.
+	// The default is CreateIfNeeded.
+	CreateDisposition TableCreateDisposition
+
+	// WriteDisposition specifies how existing data in the destination table is treated.
+	// The default is WriteEmpty.
+	WriteDisposition TableWriteDisposition
+
+	// DisableQueryCache prevents results being fetched from the query cache.
+	// If this field is false, results are fetched from the cache if they are available.
+	// The query cache is a best-effort cache that is flushed whenever tables in the query are modified.
+	// Cached results are only available when TableID is unspecified in the query's destination Table.
+	// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching
+	DisableQueryCache bool
+
+	// DisableFlattenedResults prevents results being flattened.
+	// If this field is false, results from nested and repeated fields are flattened.
+	// DisableFlattenedResults implies AllowLargeResults.
+	// For more information, see https://cloud.google.com/bigquery/docs/data#nested
+	DisableFlattenedResults bool
+
+	// AllowLargeResults allows the query to produce arbitrarily large result tables.
+	// The destination must be a table.
+	// When using this option, queries will take longer to execute, even if the result set is small.
+	// For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults
+	AllowLargeResults bool
+
+	// Priority specifies the priority with which to schedule the query.
+	// The default priority is InteractivePriority.
+	// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries
+	Priority QueryPriority
+
+	// MaxBillingTier sets the maximum billing tier for a Query.
+	// Queries that have resource usage beyond this tier will fail (without
+	// incurring a charge). If this field is zero, the project default will be used.
+	MaxBillingTier int
+
+	// MaxBytesBilled limits the number of bytes billed for
+	// this job.  Queries that would exceed this limit will fail (without incurring
+	// a charge).
+	// If this field is less than 1, the project default will be
+	// used.
+	MaxBytesBilled int64
+
+	// UseStandardSQL causes the query to use standard SQL. It is the default.
+	// Deprecated: use UseLegacySQL.
+	UseStandardSQL bool
+
+	// UseLegacySQL causes the query to use legacy SQL.
+	UseLegacySQL bool
+
+	// Parameters is a list of query parameters. The presence of parameters
+	// implies the use of standard SQL.
+	// If the query uses positional syntax ("?"), then no parameter may have a name.
+	// If the query uses named syntax ("@p"), then all parameters must have names.
+	// It is illegal to mix positional and named syntax.
+	Parameters []QueryParameter
+
+	// TimePartitioning specifies time-based partitioning
+	// for the destination table.
+	TimePartitioning *TimePartitioning
+
+	// Clustering specifies the data clustering configuration for the destination table.
+	Clustering *Clustering
+
+	// The labels associated with this job.
+	Labels map[string]string
+
+	// If true, don't actually run this job. A valid query will return a mostly
+	// empty response with some processing statistics, while an invalid query will
+	// return the same error it would if it wasn't a dry run.
+	//
+	// Query.Read will fail with dry-run queries. Call Query.Run instead, and then
+	// call LastStatus on the returned job to get statistics. Calling Status on a
+	// dry-run job will fail.
+	DryRun bool
+
+	// Custom encryption configuration (e.g., Cloud KMS keys).
+	DestinationEncryptionConfig *EncryptionConfig
+
+	// Allows the schema of the destination table to be updated as a side effect of
+	// the query job.
+	SchemaUpdateOptions []string
+}
+
+func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
+	qconf := &bq.JobConfigurationQuery{
+		Query:                              qc.Q,
+		CreateDisposition:                  string(qc.CreateDisposition),
+		WriteDisposition:                   string(qc.WriteDisposition),
+		AllowLargeResults:                  qc.AllowLargeResults,
+		Priority:                           string(qc.Priority),
+		MaximumBytesBilled:                 qc.MaxBytesBilled,
+		TimePartitioning:                   qc.TimePartitioning.toBQ(),
+		Clustering:                         qc.Clustering.toBQ(),
+		DestinationEncryptionConfiguration: qc.DestinationEncryptionConfig.toBQ(),
+		SchemaUpdateOptions:                qc.SchemaUpdateOptions,
+	}
+	if len(qc.TableDefinitions) > 0 {
+		qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
+	}
+	for name, data := range qc.TableDefinitions {
+		qconf.TableDefinitions[name] = data.toBQ()
+	}
+	if qc.DefaultProjectID != "" || qc.DefaultDatasetID != "" {
+		qconf.DefaultDataset = &bq.DatasetReference{
+			DatasetId: qc.DefaultDatasetID,
+			ProjectId: qc.DefaultProjectID,
+		}
+	}
+	if tier := int64(qc.MaxBillingTier); tier > 0 {
+		qconf.MaximumBillingTier = &tier
+	}
+	f := false
+	if qc.DisableQueryCache {
+		qconf.UseQueryCache = &f
+	}
+	if qc.DisableFlattenedResults {
+		qconf.FlattenResults = &f
+		// DisableFlattenedResults implies AllowLargeResults.
+		qconf.AllowLargeResults = true
+	}
+	if qc.UseStandardSQL && qc.UseLegacySQL {
+		return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
+	}
+	if len(qc.Parameters) > 0 && qc.UseLegacySQL {
+		return nil, errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL")
+	}
+	ptrue := true
+	pfalse := false
+	if qc.UseLegacySQL {
+		qconf.UseLegacySql = &ptrue
+	} else {
+		qconf.UseLegacySql = &pfalse
+	}
+	if qc.Dst != nil && !qc.Dst.implicitTable() {
+		qconf.DestinationTable = qc.Dst.toBQ()
+	}
+	for _, p := range qc.Parameters {
+		qp, err := p.toBQ()
+		if err != nil {
+			return nil, err
+		}
+		qconf.QueryParameters = append(qconf.QueryParameters, qp)
+	}
+	return &bq.JobConfiguration{
+		Labels: qc.Labels,
+		DryRun: qc.DryRun,
+		Query:  qconf,
+	}, nil
+}
+
+func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) {
+	qq := q.Query
+	qc := &QueryConfig{
+		Labels:                      q.Labels,
+		DryRun:                      q.DryRun,
+		Q:                           qq.Query,
+		CreateDisposition:           TableCreateDisposition(qq.CreateDisposition),
+		WriteDisposition:            TableWriteDisposition(qq.WriteDisposition),
+		AllowLargeResults:           qq.AllowLargeResults,
+		Priority:                    QueryPriority(qq.Priority),
+		MaxBytesBilled:              qq.MaximumBytesBilled,
+		UseLegacySQL:                qq.UseLegacySql == nil || *qq.UseLegacySql,
+		TimePartitioning:            bqToTimePartitioning(qq.TimePartitioning),
+		Clustering:                  bqToClustering(qq.Clustering),
+		DestinationEncryptionConfig: bqToEncryptionConfig(qq.DestinationEncryptionConfiguration),
+		SchemaUpdateOptions:         qq.SchemaUpdateOptions,
+	}
+	qc.UseStandardSQL = !qc.UseLegacySQL
+
+	if len(qq.TableDefinitions) > 0 {
+		qc.TableDefinitions = make(map[string]ExternalData)
+	}
+	for name, qedc := range qq.TableDefinitions {
+		edc, err := bqToExternalDataConfig(&qedc)
+		if err != nil {
+			return nil, err
+		}
+		qc.TableDefinitions[name] = edc
+	}
+	if qq.DefaultDataset != nil {
+		qc.DefaultProjectID = qq.DefaultDataset.ProjectId
+		qc.DefaultDatasetID = qq.DefaultDataset.DatasetId
+	}
+	if qq.MaximumBillingTier != nil {
+		qc.MaxBillingTier = int(*qq.MaximumBillingTier)
+	}
+	if qq.UseQueryCache != nil && !*qq.UseQueryCache {
+		qc.DisableQueryCache = true
+	}
+	if qq.FlattenResults != nil && !*qq.FlattenResults {
+		qc.DisableFlattenedResults = true
+	}
+	if qq.DestinationTable != nil {
+		qc.Dst = bqToTable(qq.DestinationTable, c)
+	}
+	for _, qp := range qq.QueryParameters {
+		p, err := bqToQueryParameter(qp)
+		if err != nil {
+			return nil, err
+		}
+		qc.Parameters = append(qc.Parameters, p)
+	}
+	return qc, nil
+}
+
+// QueryPriority specifies a priority with which a query is to be executed.
+type QueryPriority string
+
+const (
+	// BatchPriority specifies that the query should be scheduled with the
+	// batch priority.  BigQuery queues each batch query on your behalf, and
+	// starts the query as soon as idle resources are available, usually within
+	// a few minutes. If BigQuery hasn't started the query within 24 hours,
+	// BigQuery changes the job priority to interactive. Batch queries don't
+	// count towards your concurrent rate limit, which can make it easier to
+	// start many queries at once.
+	//
+	// More information can be found at https://cloud.google.com/bigquery/docs/running-queries#batchqueries.
+	BatchPriority QueryPriority = "BATCH"
+	// InteractivePriority specifies that the query should be scheduled with
+	// interactive priority, which means that the query is executed as soon as
+	// possible. Interactive queries count towards your concurrent rate limit
+	// and your daily limit. It is the default priority with which queries get
+	// executed.
+	//
+	// More information can be found at https://cloud.google.com/bigquery/docs/running-queries#queries.
+	InteractivePriority QueryPriority = "INTERACTIVE"
+)
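+
+// Illustrative sketch (not part of the upstream file): from a caller's
+// perspective, batch scheduling just means setting the embedded config
+// field before calling Run, e.g.
+//
+//   q := client.Query("SELECT 17")
+//   q.Priority = bigquery.BatchPriority
+//   job, err := q.Run(ctx)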
+
+// A Query queries data from a BigQuery table. Use Client.Query to create a Query.
+type Query struct {
+	JobIDConfig
+	QueryConfig
+	client *Client
+}
+
+// Query creates a query with string q.
+// The returned Query may optionally be further configured before its Run method is called.
+func (c *Client) Query(q string) *Query {
+	return &Query{
+		client:      c,
+		QueryConfig: QueryConfig{Q: q},
+	}
+}
+
+// Run initiates a query job.
+func (q *Query) Run(ctx context.Context) (j *Job, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Query.Run")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	job, err := q.newJob()
+	if err != nil {
+		return nil, err
+	}
+	j, err = q.client.insertJob(ctx, job, nil)
+	if err != nil {
+		return nil, err
+	}
+	return j, nil
+}
+
+func (q *Query) newJob() (*bq.Job, error) {
+	config, err := q.QueryConfig.toBQ()
+	if err != nil {
+		return nil, err
+	}
+	return &bq.Job{
+		JobReference:  q.JobIDConfig.createJobRef(q.client),
+		Configuration: config,
+	}, nil
+}
+
+// Read submits a query for execution and returns the results via a RowIterator.
+// It is a shorthand for Query.Run followed by Job.Read.
+func (q *Query) Read(ctx context.Context) (*RowIterator, error) {
+	job, err := q.Run(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return job.Read(ctx)
+}
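
For reference, a minimal sketch of how a caller typically drives the Query API added in this file. The project ID "my-project", the public sample table, and the parameter value are placeholders, and the snippet assumes the usual imports (context, fmt, log, cloud.google.com/go/bigquery, google.golang.org/api/iterator):

    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "my-project")
    if err != nil {
        log.Fatal(err)
    }
    // Named parameters ("@corpus") imply standard SQL, per the Parameters doc.
    q := client.Query(
        "SELECT word FROM `bigquery-public-data.samples.shakespeare`" +
            " WHERE corpus = @corpus LIMIT 5")
    q.Parameters = []bigquery.QueryParameter{{Name: "corpus", Value: "hamlet"}}
    it, err := q.Read(ctx) // shorthand for Run followed by Job.Read
    if err != nil {
        log.Fatal(err)
    }
    for {
        var row []bigquery.Value
        err := it.Next(&row)
        if err == iterator.Done {
            break
        }
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(row)
    }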
diff --git a/vendor/cloud.google.com/go/bigquery/random.go b/vendor/cloud.google.com/go/bigquery/random.go
new file mode 100644
index 00000000..65f93843
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/random.go
@@ -0,0 +1,56 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"math/rand"
+	"os"
+	"sync"
+	"time"
+)
+
+// Support for random values (typically job IDs and insert IDs).
+
+const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+
+var (
+	rngMu sync.Mutex
+	rng   = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid())))
+)
+
+// For testing.
+var randomIDFn = randomID
+
+// As of August 2017, the BigQuery service uses 27 alphanumeric characters for
+// suffixes.
+const randomIDLen = 27
+
+func randomID() string {
+	// This is used for both job IDs and insert IDs.
+	var b [randomIDLen]byte
+	rngMu.Lock()
+	for i := 0; i < len(b); i++ {
+		b[i] = alphanum[rng.Intn(len(alphanum))]
+	}
+	rngMu.Unlock()
+	return string(b[:])
+}
+
+// Seed seeds this package's random number generator, used for generating job and
+// insert IDs. Use Seed to obtain repeatable, deterministic behavior from bigquery
+// clients. Seed should be called before any clients are created.
+func Seed(s int64) {
+	rng = rand.New(rand.NewSource(s))
+}
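
A quick sketch of the repeatable-ID behavior described above; the seed value and project ID are arbitrary placeholders, and note that Seed must run before any client is created:

    package main

    import (
        "context"
        "log"

        "cloud.google.com/go/bigquery"
    )

    func main() {
        bigquery.Seed(42) // fixed seed => deterministic job and insert IDs
        ctx := context.Background()
        client, err := bigquery.NewClient(ctx, "my-project")
        if err != nil {
            log.Fatal(err)
        }
        _ = client // jobs created from here on get repeatable auto-generated IDs
    }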
diff --git a/vendor/cloud.google.com/go/bigquery/schema.go b/vendor/cloud.google.com/go/bigquery/schema.go
new file mode 100644
index 00000000..e8454767
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/schema.go
@@ -0,0 +1,488 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// Schema describes the fields in a table or query result.
+type Schema []*FieldSchema
+
+// FieldSchema describes a single field.
+type FieldSchema struct {
+	// The field name.
+	// Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_),
+	// and must start with a letter or underscore.
+	// The maximum length is 128 characters.
+	Name string
+
+	// A description of the field. The maximum length is 16,384 characters.
+	Description string
+
+	// Whether the field may contain multiple values.
+	Repeated bool
+	// Whether the field is required.  Ignored if Repeated is true.
+	Required bool
+
+	// The field data type.  If Type is Record, then this field contains a nested schema,
+	// which is described by Schema.
+	Type FieldType
+	// Describes the nested schema if Type is set to Record.
+	Schema Schema
+}
+
+func (fs *FieldSchema) toBQ() *bq.TableFieldSchema {
+	tfs := &bq.TableFieldSchema{
+		Description: fs.Description,
+		Name:        fs.Name,
+		Type:        string(fs.Type),
+	}
+
+	if fs.Repeated {
+		tfs.Mode = "REPEATED"
+	} else if fs.Required {
+		tfs.Mode = "REQUIRED"
+	} // else leave as default, which is interpreted as NULLABLE.
+
+	for _, f := range fs.Schema {
+		tfs.Fields = append(tfs.Fields, f.toBQ())
+	}
+
+	return tfs
+}
+
+func (s Schema) toBQ() *bq.TableSchema {
+	var fields []*bq.TableFieldSchema
+	for _, f := range s {
+		fields = append(fields, f.toBQ())
+	}
+	return &bq.TableSchema{Fields: fields}
+}
+
+func bqToFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
+	fs := &FieldSchema{
+		Description: tfs.Description,
+		Name:        tfs.Name,
+		Repeated:    tfs.Mode == "REPEATED",
+		Required:    tfs.Mode == "REQUIRED",
+		Type:        FieldType(tfs.Type),
+	}
+
+	for _, f := range tfs.Fields {
+		fs.Schema = append(fs.Schema, bqToFieldSchema(f))
+	}
+	return fs
+}
+
+func bqToSchema(ts *bq.TableSchema) Schema {
+	if ts == nil {
+		return nil
+	}
+	var s Schema
+	for _, f := range ts.Fields {
+		s = append(s, bqToFieldSchema(f))
+	}
+	return s
+}
+
+// FieldType is the type of field.
+type FieldType string
+
+const (
+	// StringFieldType is a string field type.
+	StringFieldType FieldType = "STRING"
+	// BytesFieldType is a bytes field type.
+	BytesFieldType FieldType = "BYTES"
+	// IntegerFieldType is an integer field type.
+	IntegerFieldType FieldType = "INTEGER"
+	// FloatFieldType is a float field type.
+	FloatFieldType FieldType = "FLOAT"
+	// BooleanFieldType is a boolean field type.
+	BooleanFieldType FieldType = "BOOLEAN"
+	// TimestampFieldType is a timestamp field type.
+	TimestampFieldType FieldType = "TIMESTAMP"
+	// RecordFieldType is a record field type. It is typically used to create columns with repeated or nested data.
+	RecordFieldType FieldType = "RECORD"
+	// DateFieldType is a date field type.
+	DateFieldType FieldType = "DATE"
+	// TimeFieldType is a time field type.
+	TimeFieldType FieldType = "TIME"
+	// DateTimeFieldType is a datetime field type.
+	DateTimeFieldType FieldType = "DATETIME"
+	// NumericFieldType is a numeric field type. Numeric types include integer types, floating point types and the
+	// NUMERIC data type.
+	NumericFieldType FieldType = "NUMERIC"
+)
+
+var (
+	errNoStruct             = errors.New("bigquery: can only infer schema from struct or pointer to struct")
+	errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct")
+	errInvalidFieldName     = errors.New("bigquery: invalid name of field in struct")
+	errBadNullable          = errors.New(`bigquery: use "nullable" only for []byte and struct pointers; for all other types, use a NullXXX type`)
+	errEmptyJSONSchema      = errors.New("bigquery: empty JSON schema")
+	fieldTypes              = map[FieldType]bool{
+		StringFieldType:    true,
+		BytesFieldType:     true,
+		IntegerFieldType:   true,
+		FloatFieldType:     true,
+		BooleanFieldType:   true,
+		TimestampFieldType: true,
+		RecordFieldType:    true,
+		DateFieldType:      true,
+		TimeFieldType:      true,
+		DateTimeFieldType:  true,
+		NumericFieldType:   true,
+	}
+)
+
+var typeOfByteSlice = reflect.TypeOf([]byte{})
+
+// InferSchema tries to derive a BigQuery schema from the supplied struct value.
+// Each exported struct field is mapped to a field in the schema.
+//
+// The following BigQuery types are inferred from the corresponding Go types.
+// (This is the same mapping as that used for RowIterator.Next.) Fields inferred
+// from these types are marked required (non-nullable).
+//
+//   STRING      string
+//   BOOL        bool
+//   INTEGER     int, int8, int16, int32, int64, uint8, uint16, uint32
+//   FLOAT       float32, float64
+//   BYTES       []byte
+//   TIMESTAMP   time.Time
+//   DATE        civil.Date
+//   TIME        civil.Time
+//   DATETIME    civil.DateTime
+//   NUMERIC     *big.Rat
+//
+// The big.Rat type supports numbers of arbitrary size and precision. Values
+// will be rounded to 9 digits after the decimal point before being transmitted
+// to BigQuery. See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type
+// for more on NUMERIC.
+//
+// A Go slice or array type is inferred to be a BigQuery repeated field of the
+// element type. The element type must be one of the above listed types.
+//
+// Nullable fields are inferred from the NullXXX types, declared in this package:
+//
+//   STRING      NullString
+//   BOOL        NullBool
+//   INTEGER     NullInt64
+//   FLOAT       NullFloat64
+//   TIMESTAMP   NullTimestamp
+//   DATE        NullDate
+//   TIME        NullTime
+//   DATETIME    NullDateTime
+//
+// For a nullable BYTES field, use the type []byte and tag the field "nullable" (see below).
+// For a nullable NUMERIC field, use the type *big.Rat and tag the field "nullable".
+//
+// A struct field that is of struct type is inferred to be a required field of type
+// RECORD with a schema inferred recursively. For backwards compatibility, a field of
+// type pointer to struct is also inferred to be required. To get a nullable RECORD
+// field, use the "nullable" tag (see below).
+//
+// InferSchema returns an error if any of the examined fields is of type uint,
+// uint64, uintptr, map, interface, complex64, complex128, func, or chan. Future
+// versions may handle these cases without error.
+//
+// Recursively defined structs are also disallowed.
+//
+// Struct fields may be tagged in a way similar to the encoding/json package.
+// A tag of the form
+//     bigquery:"name"
+// uses "name" instead of the struct field name as the BigQuery field name.
+// A tag of the form
+//     bigquery:"-"
+// omits the field from the inferred schema.
+// The "nullable" option marks the field as nullable (not required). It is only
+// needed for []byte, *big.Rat and pointer-to-struct fields, and cannot appear on other
+// fields. In this example, the Go name of the field is retained:
+//     bigquery:",nullable"
+func InferSchema(st interface{}) (Schema, error) {
+	return inferSchemaReflectCached(reflect.TypeOf(st))
+}
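+
+// Illustrative sketch (not part of the upstream file): with the tagging
+// rules described above, a struct such as
+//
+//   type Item struct {
+//       Name   string                        // inferred as required STRING
+//       Count  NullInt64                     // inferred as nullable INTEGER
+//       Blob   []byte `bigquery:",nullable"` // nullable BYTES via the tag
+//       Hidden string `bigquery:"-"`         // omitted from the schema
+//   }
+//
+// can be turned into a Schema with InferSchema(Item{}).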
+
+var schemaCache sync.Map
+
+type cacheVal struct {
+	schema Schema
+	err    error
+}
+
+func inferSchemaReflectCached(t reflect.Type) (Schema, error) {
+	var cv cacheVal
+	v, ok := schemaCache.Load(t)
+	if ok {
+		cv = v.(cacheVal)
+	} else {
+		s, err := inferSchemaReflect(t)
+		cv = cacheVal{s, err}
+		schemaCache.Store(t, cv)
+	}
+	return cv.schema, cv.err
+}
+
+func inferSchemaReflect(t reflect.Type) (Schema, error) {
+	rec, err := hasRecursiveType(t, nil)
+	if err != nil {
+		return nil, err
+	}
+	if rec {
+		return nil, fmt.Errorf("bigquery: schema inference for recursive type %s", t)
+	}
+	return inferStruct(t)
+}
+
+func inferStruct(t reflect.Type) (Schema, error) {
+	switch t.Kind() {
+	case reflect.Ptr:
+		if t.Elem().Kind() != reflect.Struct {
+			return nil, errNoStruct
+		}
+		t = t.Elem()
+		fallthrough
+
+	case reflect.Struct:
+		return inferFields(t)
+	default:
+		return nil, errNoStruct
+	}
+}
+
+// inferFieldSchema infers the FieldSchema for a Go type.
+func inferFieldSchema(rt reflect.Type, nullable bool) (*FieldSchema, error) {
+	// Only []byte and struct pointers can be tagged nullable.
+	if nullable && !(rt == typeOfByteSlice || rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Struct) {
+		return nil, errBadNullable
+	}
+	switch rt {
+	case typeOfByteSlice:
+		return &FieldSchema{Required: !nullable, Type: BytesFieldType}, nil
+	case typeOfGoTime:
+		return &FieldSchema{Required: true, Type: TimestampFieldType}, nil
+	case typeOfDate:
+		return &FieldSchema{Required: true, Type: DateFieldType}, nil
+	case typeOfTime:
+		return &FieldSchema{Required: true, Type: TimeFieldType}, nil
+	case typeOfDateTime:
+		return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil
+	case typeOfRat:
+		return &FieldSchema{Required: !nullable, Type: NumericFieldType}, nil
+	}
+	if ft := nullableFieldType(rt); ft != "" {
+		return &FieldSchema{Required: false, Type: ft}, nil
+	}
+	if isSupportedIntType(rt) || isSupportedUintType(rt) {
+		return &FieldSchema{Required: true, Type: IntegerFieldType}, nil
+	}
+	switch rt.Kind() {
+	case reflect.Slice, reflect.Array:
+		et := rt.Elem()
+		if et != typeOfByteSlice && (et.Kind() == reflect.Slice || et.Kind() == reflect.Array) {
+			// Multi-dimensional slices/arrays are not supported by BigQuery.
+			return nil, errUnsupportedFieldType
+		}
+		if nullableFieldType(et) != "" {
+			// Repeated nullable types are not supported by BigQuery.
+			return nil, errUnsupportedFieldType
+		}
+		f, err := inferFieldSchema(et, false)
+		if err != nil {
+			return nil, err
+		}
+		f.Repeated = true
+		f.Required = false
+		return f, nil
+	case reflect.Ptr:
+		if rt.Elem().Kind() != reflect.Struct {
+			return nil, errUnsupportedFieldType
+		}
+		fallthrough
+	case reflect.Struct:
+		nested, err := inferStruct(rt)
+		if err != nil {
+			return nil, err
+		}
+		return &FieldSchema{Required: !nullable, Type: RecordFieldType, Schema: nested}, nil
+	case reflect.String:
+		return &FieldSchema{Required: !nullable, Type: StringFieldType}, nil
+	case reflect.Bool:
+		return &FieldSchema{Required: !nullable, Type: BooleanFieldType}, nil
+	case reflect.Float32, reflect.Float64:
+		return &FieldSchema{Required: !nullable, Type: FloatFieldType}, nil
+	default:
+		return nil, errUnsupportedFieldType
+	}
+}
+
+// inferFields extracts all exported field types from a struct type.
+func inferFields(rt reflect.Type) (Schema, error) {
+	var s Schema
+	fields, err := fieldCache.Fields(rt)
+	if err != nil {
+		return nil, err

  (This diff was longer than 20,000 lines, and has been truncated...)


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services