Posted to commits@camel.apache.org by lb...@apache.org on 2018/11/09 06:52:57 UTC

[camel-k] 01/03: Added profiles and Knative serving

This is an automated email from the ASF dual-hosted git repository.

lburgazzoli pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel-k.git

commit 3c8da8882c416b3db65835cd6f8a2a10fe8ac79f
Author: nferraro <ni...@gmail.com>
AuthorDate: Thu Nov 8 01:04:40 2018 +0100

    Added profiles and Knative serving
---
 Gopkg.lock                                         |  72 ++-
 cmd/camel-k/main.go                                |   3 +
 cmd/kamel/main.go                                  |   3 +
 deploy/operator-role-binding-knative.yaml          |  13 +
 deploy/operator-role-knative.yaml                  |  20 +
 deploy/resources.go                                |  41 ++
 pkg/apis/camel/v1alpha1/types.go                   |  14 +
 pkg/build/assemble/maven_assembler.go              |   2 -
 pkg/install/operator.go                            |  49 +-
 pkg/platform/get.go                                |  14 +
 pkg/stub/action/platform/initialize.go             |   4 +
 pkg/trait/catalog.go                               |  21 +-
 pkg/trait/knative.go                               | 108 ++++
 cmd/kamel/main.go => pkg/util/knative/knative.go   |  40 +-
 cmd/kamel/main.go => pkg/util/knative/register.go  |  36 +-
 pkg/util/kubernetes/collection.go                  |  33 +-
 .../camel/k/groovy/GroovyRoutesLoader.groovy       |   4 +-
 .../java/org/apache/camel/k/jvm/Constants.java     |   1 +
 .../java/org/apache/camel/k/jvm/RoutesLoaders.java |  38 +-
 .../org/apache/camel/k/jvm/RuntimeSupport.java     |  18 +-
 .../java/org/apache/camel/k/jvm/URIResolver.java   |  51 ++
 .../apache/camel/k/kotlin/KotlinRoutesLoader.kt    |   4 +-
 vendor/github.com/evanphx/json-patch/LICENSE       |  25 +
 vendor/github.com/evanphx/json-patch/merge.go      | 383 ++++++++++++
 vendor/github.com/evanphx/json-patch/patch.go      | 682 +++++++++++++++++++++
 vendor/github.com/google/go-cmp/LICENSE            |  27 +
 vendor/github.com/google/go-cmp/cmp/compare.go     | 553 +++++++++++++++++
 .../go-cmp/cmp/internal/diff/debug_disable.go      |  17 +
 .../go-cmp/cmp/internal/diff/debug_enable.go       | 122 ++++
 .../google/go-cmp/cmp/internal/diff/diff.go        | 363 +++++++++++
 .../google/go-cmp/cmp/internal/function/func.go    |  49 ++
 .../google/go-cmp/cmp/internal/value/format.go     | 277 +++++++++
 .../google/go-cmp/cmp/internal/value/sort.go       | 111 ++++
 vendor/github.com/google/go-cmp/cmp/options.go     | 453 ++++++++++++++
 vendor/github.com/google/go-cmp/cmp/path.go        | 309 ++++++++++
 vendor/github.com/google/go-cmp/cmp/reporter.go    |  53 ++
 .../github.com/google/go-cmp/cmp/unsafe_panic.go   |  15 +
 .../github.com/google/go-cmp/cmp/unsafe_reflect.go |  23 +
 vendor/github.com/knative/build/AUTHORS            |   8 +
 vendor/github.com/knative/build/LICENSE            | 202 ++++++
 .../knative/build/cmd/controller/kodata/LICENSE    |   1 +
 .../build/cmd/controller/kodata/VENDOR-LICENSE     |   1 +
 .../knative/build/cmd/creds-init/kodata/LICENSE    |   1 +
 .../build/cmd/creds-init/kodata/VENDOR-LICENSE     |   1 +
 .../knative/build/cmd/git-init/kodata/LICENSE      |   1 +
 .../build/cmd/git-init/kodata/VENDOR-LICENSE       |   1 +
 .../knative/build/cmd/logs/kodata/LICENSE          |   1 +
 .../knative/build/cmd/logs/kodata/VENDOR-LICENSE   |   1 +
 .../knative/build/cmd/nop/kodata/LICENSE           |   1 +
 .../knative/build/cmd/nop/kodata/VENDOR-LICENSE    |   1 +
 .../knative/build/cmd/webhook/kodata/LICENSE       |   1 +
 .../build/cmd/webhook/kodata/VENDOR-LICENSE        |   1 +
 .../knative/build/config/300-imagecache.yaml       |   1 +
 .../knative/build/pkg/apis/build/register.go       |  20 +
 .../pkg/apis/build/v1alpha1/build_defaults.go      |  42 ++
 .../build/v1alpha1/build_template_interface.go     |  23 +
 .../apis/build/v1alpha1/build_template_types.go    | 116 ++++
 .../build/v1alpha1/build_template_validation.go    |  86 +++
 .../build/pkg/apis/build/v1alpha1/build_types.go   | 287 +++++++++
 .../pkg/apis/build/v1alpha1/build_validation.go    |  94 +++
 .../build/v1alpha1/cluster_build_template_types.go |  74 +++
 .../v1alpha1/cluster_build_template_validation.go  |  24 +
 .../knative/build/pkg/apis/build/v1alpha1/doc.go   |  21 +
 .../pkg/apis/build/v1alpha1/metadata_validation.go |  47 ++
 .../build/pkg/apis/build/v1alpha1/register.go      |  59 ++
 .../apis/build/v1alpha1/zz_generated.deepcopy.go   | 550 +++++++++++++++++
 .../knative/build/test/panic/kodata/LICENSE        |   1 +
 .../knative/build/test/panic/kodata/VENDOR-LICENSE |   1 +
 .../knative/build/test/workingdir/kodata/LICENSE   |   1 +
 .../build/test/workingdir/kodata/VENDOR-LICENSE    |   1 +
 vendor/github.com/knative/pkg/LICENSE              | 201 ++++++
 vendor/github.com/knative/pkg/apis/doc.go          |  18 +
 vendor/github.com/knative/pkg/apis/duck/cached.go  |  72 +++
 vendor/github.com/knative/pkg/apis/duck/doc.go     |  23 +
 vendor/github.com/knative/pkg/apis/duck/enqueue.go |  44 ++
 .../github.com/knative/pkg/apis/duck/interface.go  |  28 +
 vendor/github.com/knative/pkg/apis/duck/patch.go   |  60 ++
 vendor/github.com/knative/pkg/apis/duck/proxy.go   |  74 +++
 .../github.com/knative/pkg/apis/duck/register.go   |  21 +
 vendor/github.com/knative/pkg/apis/duck/typed.go   | 141 +++++
 .../knative/pkg/apis/duck/unstructured.go          |  37 ++
 .../pkg/apis/duck/v1alpha1/addressable_types.go    |  96 +++
 .../pkg/apis/duck/v1alpha1/condition_set.go        | 348 +++++++++++
 .../pkg/apis/duck/v1alpha1/conditions_types.go     | 167 +++++
 .../knative/pkg/apis/duck/v1alpha1/doc.go          |  23 +
 .../pkg/apis/duck/v1alpha1/generational_types.go   |  83 +++
 .../apis/duck/v1alpha1/legacy_targetable_types.go  |  96 +++
 .../knative/pkg/apis/duck/v1alpha1/register.go     |  61 ++
 .../apis/duck/v1alpha1/retired_targetable_types.go |  98 +++
 .../apis/duck/v1alpha1/zz_generated.deepcopy.go    | 493 +++++++++++++++
 vendor/github.com/knative/pkg/apis/duck/verify.go  |  85 +++
 vendor/github.com/knative/pkg/apis/field_error.go  | 337 ++++++++++
 vendor/github.com/knative/pkg/apis/interfaces.go   |  49 ++
 .../github.com/knative/pkg/apis/kind2resource.go   |  47 ++
 .../github.com/knative/pkg/apis/volatile_time.go   |  46 ++
 .../knative/pkg/apis/zz_generated.deepcopy.go      |  66 ++
 vendor/github.com/knative/pkg/kmeta/accessor.go    |  94 +++
 vendor/github.com/knative/pkg/kmeta/doc.go         |  19 +
 vendor/github.com/knative/pkg/kmeta/labels.go      | 114 ++++
 .../knative/pkg/kmeta/owner_references.go          |  38 ++
 vendor/github.com/knative/serving/AUTHORS          |  10 +
 vendor/github.com/knative/serving/LICENSE          | 202 ++++++
 .../knative/serving/cmd/activator/kodata/LICENSE   |   1 +
 .../serving/cmd/activator/kodata/VENDOR-LICENSE    |   1 +
 .../knative/serving/cmd/autoscaler/kodata/LICENSE  |   1 +
 .../serving/cmd/autoscaler/kodata/VENDOR-LICENSE   |   1 +
 .../knative/serving/cmd/controller/kodata/LICENSE  |   1 +
 .../serving/cmd/controller/kodata/VENDOR-LICENSE   |   1 +
 .../knative/serving/cmd/queue/kodata/LICENSE       |   1 +
 .../serving/cmd/queue/kodata/VENDOR-LICENSE        |   1 +
 .../knative/serving/cmd/webhook/kodata/LICENSE     |   1 +
 .../serving/cmd/webhook/kodata/VENDOR-LICENSE      |   1 +
 .../knative/serving/config/300-imagecache.yaml     |   1 +
 .../serving/pkg/apis/autoscaling/register.go       |  36 ++
 .../serving/pkg/apis/networking/register.go        |  40 ++
 .../networking/v1alpha1/clusteringress_defaults.go |  85 +++
 .../networking/v1alpha1/clusteringress_types.go    | 346 +++++++++++
 .../v1alpha1/clusteringress_validation.go          | 168 +++++
 .../serving/pkg/apis/networking/v1alpha1/doc.go    |  24 +
 .../pkg/apis/networking/v1alpha1/register.go       |  53 ++
 .../networking/v1alpha1/zz_generated.deepcopy.go   | 368 +++++++++++
 .../knative/serving/pkg/apis/serving/register.go   |  59 ++
 .../pkg/apis/serving/v1alpha1/build_compat.go      | 101 +++
 .../serving/v1alpha1/configuration_defaults.go     |  25 +
 .../apis/serving/v1alpha1/configuration_types.go   | 197 ++++++
 .../serving/v1alpha1/configuration_validation.go   |  50 ++
 .../serving/pkg/apis/serving/v1alpha1/doc.go       |  23 +
 .../apis/serving/v1alpha1/metadata_validation.go   |  90 +++
 .../serving/pkg/apis/serving/v1alpha1/register.go  |  59 ++
 .../pkg/apis/serving/v1alpha1/revision_defaults.go |  29 +
 .../pkg/apis/serving/v1alpha1/revision_types.go    | 463 ++++++++++++++
 .../apis/serving/v1alpha1/revision_validation.go   | 200 ++++++
 .../pkg/apis/serving/v1alpha1/route_defaults.go    |  24 +
 .../pkg/apis/serving/v1alpha1/route_types.go       | 249 ++++++++
 .../pkg/apis/serving/v1alpha1/route_validation.go  | 107 ++++
 .../pkg/apis/serving/v1alpha1/service_defaults.go  |  31 +
 .../pkg/apis/serving/v1alpha1/service_types.go     | 306 +++++++++
 .../apis/serving/v1alpha1/service_validation.go    | 114 ++++
 .../apis/serving/v1alpha1/zz_generated.deepcopy.go | 666 ++++++++++++++++++++
 .../pkg/autoscaler/testdata/config-autoscaler.yaml |   1 +
 .../knative/serving/pkg/gc/testdata/config-gc.yaml |   1 +
 .../pkg/logging/testdata/config-logging.yaml       |   1 +
 .../configuration/config/testdata/config-gc.yaml   |   1 +
 .../config/testdata/config-autoscaler.yaml         |   1 +
 .../config/testdata/config-controller.yaml         |   1 +
 .../revision/config/testdata/config-logging.yaml   |   1 +
 .../revision/config/testdata/config-network.yaml   |   1 +
 .../config/testdata/config-observability.yaml      |   1 +
 .../route/config/testdata/config-domain.yaml       |   1 +
 .../v1alpha1/route/config/testdata/config-gc.yaml  |   1 +
 .../serving/third_party/config/build/LICENSE       | 202 ++++++
 .../monitoring/logging/elasticsearch/LICENSE       | 201 ++++++
 .../monitoring/metrics/prometheus/istio/LICENSE    | 202 ++++++
 .../metrics/prometheus/kubernetes/LICENSE          | 201 ++++++
 .../metrics/prometheus/prometheus-operator/LICENSE | 202 ++++++
 .../metrics/prometheus/prometheus-operator/NOTICE  |   5 +
 vendor/github.com/mattbaird/jsonpatch/LICENSE      | 202 ++++++
 vendor/github.com/mattbaird/jsonpatch/jsonpatch.go | 257 ++++++++
 .../apimachinery/pkg/api/equality/semantic.go      |  49 ++
 159 files changed, 14361 insertions(+), 107 deletions(-)

diff --git a/Gopkg.lock b/Gopkg.lock
index b96c088..37c62b0 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -53,6 +53,14 @@
   version = "v2.8.0"
 
 [[projects]]
+  digest = "1:32598368f409bbee79deb9d43569fcd92b9fb27f39155f5e166b3371217f051f"
+  name = "github.com/evanphx/json-patch"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5"
+  version = "v4.1.0"
+
+[[projects]]
   digest = "1:aa3ed0a71c4e66e4ae6486bf97a3f4cab28edc78df2e50c5ad01dc7d91604b88"
   name = "github.com/fatih/structs"
   packages = ["."]
@@ -142,6 +150,19 @@
   revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
 
 [[projects]]
+  digest = "1:2e3c336fc7fde5c984d2841455a658a6d626450b1754a854b3b32e7a8f49a07a"
+  name = "github.com/google/go-cmp"
+  packages = [
+    "cmp",
+    "cmp/internal/diff",
+    "cmp/internal/function",
+    "cmp/internal/value",
+  ]
+  pruneopts = "NUT"
+  revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
+  version = "v0.2.0"
+
+[[projects]]
   branch = "master"
   digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"
   name = "github.com/google/gofuzz"
@@ -208,6 +229,44 @@
   version = "v1.1.5"
 
 [[projects]]
+  digest = "1:345bbba667abadd6263391c915251ede8d9fa6f6852839c60bb6738b6122b89c"
+  name = "github.com/knative/build"
+  packages = [
+    "pkg/apis/build",
+    "pkg/apis/build/v1alpha1",
+  ]
+  pruneopts = "NUT"
+  revision = "94859753e2c6724df2be86f6a254f810895fa3eb"
+  version = "v0.2.0"
+
+[[projects]]
+  branch = "master"
+  digest = "1:96faa335e445f205f2e3ce5a1077b4b9351047d7f0d08447ed4fc97c6470ef74"
+  name = "github.com/knative/pkg"
+  packages = [
+    "apis",
+    "apis/duck",
+    "apis/duck/v1alpha1",
+    "kmeta",
+  ]
+  pruneopts = "NUT"
+  revision = "af2c4bc84ed90694967a799bde0f6a29cb713d4c"
+
+[[projects]]
+  digest = "1:39d60b103c12246d5c192a7ba0805079f3206e57f9f5f3af6ccafa980f6d0ebd"
+  name = "github.com/knative/serving"
+  packages = [
+    "pkg/apis/autoscaling",
+    "pkg/apis/networking",
+    "pkg/apis/networking/v1alpha1",
+    "pkg/apis/serving",
+    "pkg/apis/serving/v1alpha1",
+  ]
+  pruneopts = "NUT"
+  revision = "5ec3b89b9ac9313dee514683229aefc3c8056577"
+  version = "v0.2.0"
+
+[[projects]]
   digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed"
   name = "github.com/konsorten/go-windows-terminal-sequences"
   packages = ["."]
@@ -228,6 +287,14 @@
   revision = "60711f1a8329503b04e1c88535f419d0bb440bff"
 
 [[projects]]
+  branch = "master"
+  digest = "1:0e9bfc47ab9941ecc3344e580baca5deb4091177e84dd9773b48b38ec26b93d5"
+  name = "github.com/mattbaird/jsonpatch"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "81af80346b1a01caae0cbc27fd3c1ba5b11e189f"
+
+[[projects]]
   digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
   name = "github.com/matttproud/golang_protobuf_extensions"
   packages = ["pbutil"]
@@ -592,9 +659,10 @@
   version = "kubernetes-1.11.2"
 
 [[projects]]
-  digest = "1:69b102c3ee60ab3704fac6e46bac1e8894e20b11498ec832846a229f21946200"
+  digest = "1:a26c8e664af7122535f2b13fd4977a0d25894a545e7ce679502e49372812f6e6"
   name = "k8s.io/apimachinery"
   packages = [
+    "pkg/api/equality",
     "pkg/api/errors",
     "pkg/api/meta",
     "pkg/api/resource",
@@ -779,6 +847,7 @@
   analyzer-version = 1
   input-imports = [
     "github.com/fatih/structs",
+    "github.com/knative/serving/pkg/apis/serving/v1alpha1",
     "github.com/mitchellh/mapstructure",
     "github.com/openshift/api/apps/v1",
     "github.com/openshift/api/authorization/v1",
@@ -800,6 +869,7 @@
     "gopkg.in/yaml.v2",
     "k8s.io/api/apps/v1",
     "k8s.io/api/core/v1",
+    "k8s.io/api/extensions/v1beta1",
     "k8s.io/api/rbac/v1",
     "k8s.io/apimachinery/pkg/api/errors",
     "k8s.io/apimachinery/pkg/apis/meta/v1",
diff --git a/cmd/camel-k/main.go b/cmd/camel-k/main.go
index 79ae793..e5d71f3 100644
--- a/cmd/camel-k/main.go
+++ b/cmd/camel-k/main.go
@@ -28,6 +28,9 @@ import (
 	"github.com/operator-framework/operator-sdk/pkg/util/k8sutil"
 	sdkVersion "github.com/operator-framework/operator-sdk/version"
 
+	_ "github.com/apache/camel-k/pkg/util/knative"
+	_ "github.com/apache/camel-k/pkg/util/openshift"
+
 	"github.com/sirupsen/logrus"
 	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
 )
diff --git a/cmd/kamel/main.go b/cmd/kamel/main.go
index 502a978..5f95ea9 100644
--- a/cmd/kamel/main.go
+++ b/cmd/kamel/main.go
@@ -25,6 +25,9 @@ import (
 	"time"
 
 	"github.com/apache/camel-k/pkg/client/cmd"
+	
+	_ "github.com/apache/camel-k/pkg/util/knative"
+	_ "github.com/apache/camel-k/pkg/util/openshift"
 	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
 )
 
diff --git a/deploy/operator-role-binding-knative.yaml b/deploy/operator-role-binding-knative.yaml
new file mode 100644
index 0000000..ac3ad27
--- /dev/null
+++ b/deploy/operator-role-binding-knative.yaml
@@ -0,0 +1,13 @@
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: camel-k-operator-knative
+  labels:
+    app: "camel-k"
+subjects:
+- kind: ServiceAccount
+  name: camel-k-operator
+roleRef:
+  kind: Role
+  name: camel-k-operator-knative
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/operator-role-knative.yaml b/deploy/operator-role-knative.yaml
new file mode 100644
index 0000000..e362e7a
--- /dev/null
+++ b/deploy/operator-role-knative.yaml
@@ -0,0 +1,20 @@
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: camel-k-operator-knative
+  labels:
+    app: "camel-k"
+rules:
+- apiGroups:
+  - serving.knative.dev
+  resources:
+  - "*"
+  verbs:
+  - create
+  - delete
+  - deletecollection
+  - get
+  - list
+  - patch
+  - update
+  - watch
diff --git a/deploy/resources.go b/deploy/resources.go
index de96acf..09a831e 100644
--- a/deploy/resources.go
+++ b/deploy/resources.go
@@ -2289,6 +2289,23 @@ spec:
               value: "camel-k"
 
 `
+	Resources["operator-role-binding-knative.yaml"] =
+		`
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: camel-k-operator-knative
+  labels:
+    app: "camel-k"
+subjects:
+- kind: ServiceAccount
+  name: camel-k-operator
+roleRef:
+  kind: Role
+  name: camel-k-operator-knative
+  apiGroup: rbac.authorization.k8s.io
+
+`
 	Resources["operator-role-binding.yaml"] =
 		`
 kind: RoleBinding
@@ -2306,6 +2323,30 @@ roleRef:
   apiGroup: rbac.authorization.k8s.io
 
 `
+	Resources["operator-role-knative.yaml"] =
+		`
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: camel-k-operator-knative
+  labels:
+    app: "camel-k"
+rules:
+- apiGroups:
+  - serving.knative.dev
+  resources:
+  - "*"
+  verbs:
+  - create
+  - delete
+  - deletecollection
+  - get
+  - list
+  - patch
+  - update
+  - watch
+
+`
 	Resources["operator-role-kubernetes.yaml"] =
 		`
 kind: Role
diff --git a/pkg/apis/camel/v1alpha1/types.go b/pkg/apis/camel/v1alpha1/types.go
index b79a0c5..adb8cdc 100644
--- a/pkg/apis/camel/v1alpha1/types.go
+++ b/pkg/apis/camel/v1alpha1/types.go
@@ -52,6 +52,7 @@ type IntegrationSpec struct {
 	Replicas      *int32                          `json:"replicas,omitempty"`
 	Source        SourceSpec                      `json:"source,omitempty"`
 	Context       string                          `json:"context,omitempty"`
+	Profile       TraitProfile                    `json:"profile,omitempty"`
 	Dependencies  []string                        `json:"dependencies,omitempty"`
 	Traits        map[string]IntegrationTraitSpec `json:"traits,omitempty"`
 	Configuration []ConfigurationSpec             `json:"configuration,omitempty"`
@@ -201,6 +202,7 @@ type IntegrationPlatform struct {
 // IntegrationPlatformSpec --
 type IntegrationPlatformSpec struct {
 	Cluster IntegrationPlatformCluster   `json:"cluster,omitempty"`
+	Profile TraitProfile                 `json:"profile,omitempty"`
 	Build   IntegrationPlatformBuildSpec `json:"build,omitempty"`
 }
 
@@ -214,6 +216,18 @@ const (
 	IntegrationPlatformClusterKubernetes = "Kubernetes"
 )
 
+// TraitProfile identifies a predefined set of traits enabled for a specific installation/integration
+type TraitProfile string
+
+const (
+	// TraitProfileOpenShift is used by default on OpenShift clusters
+	TraitProfileOpenShift = "OpenShift"
+	// TraitProfileKubernetes is used by default on Kubernetes clusters
+	TraitProfileKubernetes = "Kubernetes"
+	// TraitProfileKnative is used by default on OpenShift/Kubernetes clusters powered by Knative
+	TraitProfileKnative = "Knative"
+)
+
 // IntegrationPlatformBuildSpec contains platform related build information
 type IntegrationPlatformBuildSpec struct {
 	PublishStrategy IntegrationPlatformBuildPublishStrategy `json:"publishStrategy,omitempty"`
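
For illustration only (not part of this commit), a minimal Go sketch of how the new Profile field and TraitProfile constants might be used, assuming the camel-k v1alpha1 package is importable as a module dependency:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
    )

    func main() {
    	// Pin the Knative trait profile on an integration spec.
    	spec := v1alpha1.IntegrationSpec{
    		Profile: v1alpha1.TraitProfileKnative,
    	}

    	// The profile is serialized under the new "profile" JSON key.
    	out, err := json.Marshal(spec)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(out))
    }
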
diff --git a/pkg/build/assemble/maven_assembler.go b/pkg/build/assemble/maven_assembler.go
index 9b2674e..c19f576 100644
--- a/pkg/build/assemble/maven_assembler.go
+++ b/pkg/build/assemble/maven_assembler.go
@@ -28,8 +28,6 @@ import (
 	"github.com/apache/camel-k/pkg/util/maven"
 	"github.com/sirupsen/logrus"
 
-	// import openshift utilities
-	_ "github.com/apache/camel-k/pkg/util/openshift"
 	"github.com/apache/camel-k/version"
 )
 
diff --git a/pkg/install/operator.go b/pkg/install/operator.go
index c4856e2..8aa7844 100644
--- a/pkg/install/operator.go
+++ b/pkg/install/operator.go
@@ -21,6 +21,7 @@ import (
 	"errors"
 	"github.com/apache/camel-k/deploy"
 	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+	"github.com/apache/camel-k/pkg/util/knative"
 	"github.com/apache/camel-k/pkg/util/kubernetes"
 	"github.com/apache/camel-k/pkg/util/minishift"
 	"github.com/apache/camel-k/pkg/util/openshift"
@@ -36,9 +37,23 @@ func Operator(namespace string) error {
 		return err
 	}
 	if isOpenshift {
-		return installOpenshift(namespace)
+		if err := installOpenshift(namespace); err != nil {
+			return err
+		}
+	} else {
+		if err := installKubernetes(namespace); err != nil {
+			return err
+		}
+	}
+	// Additionally, install Knative resources (roles and bindings)
+	isKnative, err := knative.IsInstalled()
+	if err != nil {
+		return err
+	}
+	if isKnative {
+		return installKnative(namespace)
 	}
-	return installKubernetes(namespace)
+	return nil
 }
 
 func installOpenshift(namespace string) error {
@@ -62,6 +77,13 @@ func installKubernetes(namespace string) error {
 	)
 }
 
+func installKnative(namespace string) error {
+	return Resources(namespace,
+		"operator-role-knative.yaml",
+		"operator-role-binding-knative.yaml",
+	)
+}
+
 // Platform installs the platform custom resource
 func Platform(namespace string, registry string) error {
 	if err := waitForPlatformCRDAvailable(namespace, 15*time.Second); err != nil {
@@ -71,16 +93,14 @@ func Platform(namespace string, registry string) error {
 	if err != nil {
 		return err
 	}
-	if isOpenshift {
-		return Resource(namespace, "platform-cr.yaml")
-	}
-	platform, err := kubernetes.LoadResourceFromYaml(deploy.Resources["platform-cr.yaml"])
+	platformObject, err := kubernetes.LoadResourceFromYaml(deploy.Resources["platform-cr.yaml"])
 	if err != nil {
 		return err
 	}
-	if pl, ok := platform.(*v1alpha1.IntegrationPlatform); !ok {
-		panic("cannot find integration platform template")
-	} else {
+	pl := platformObject.(*v1alpha1.IntegrationPlatform)
+
+	if !isOpenshift {
+		// Kubernetes only (Minikube)
 		if registry == "" {
 			// This operation should be done here in the installer
 			// because the operator is not allowed to look into the "kube-system" namespace
@@ -94,8 +114,17 @@ func Platform(namespace string, registry string) error {
 			registry = *minishiftRegistry
 		}
 		pl.Spec.Build.Registry = registry
-		return RuntimeObject(namespace, pl)
 	}
+
+	var knativeInstalled bool
+	if knativeInstalled, err = knative.IsInstalled(); err != nil {
+		return err
+	}
+	if knativeInstalled {
+		pl.Spec.Profile = v1alpha1.TraitProfileKnative
+	}
+
+	return RuntimeObject(namespace, pl)
 }
 
 func waitForPlatformCRDAvailable(namespace string, timeout time.Duration) error {
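
As a usage sketch (the namespace below is a placeholder, and pkg/install is assumed to keep the package name "install"), the updated installer entry point can be driven as follows; when Knative serving is detected, the extra Knative role and role binding are applied on top of the OpenShift or Kubernetes resources:

    package main

    import (
    	"log"

    	"github.com/apache/camel-k/pkg/install"
    )

    func main() {
    	// Installs the operator RBAC resources into the given namespace;
    	// installKnative() runs internally when knative.IsInstalled() reports true.
    	if err := install.Operator("camel-k-test"); err != nil { // placeholder namespace
    		log.Fatal(err)
    	}
    }
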
diff --git a/pkg/platform/get.go b/pkg/platform/get.go
index 0da7163..c894818 100644
--- a/pkg/platform/get.go
+++ b/pkg/platform/get.go
@@ -51,3 +51,17 @@ func ListPlatforms(namespace string) (*v1alpha1.IntegrationPlatformList, error)
 func IsActive(p *v1alpha1.IntegrationPlatform) bool {
 	return p.Status.Phase != "" && p.Status.Phase != v1alpha1.IntegrationPlatformPhaseDuplicate
 }
+
+// GetProfile returns the current profile of the platform (if present) or computes it
+func GetProfile(p *v1alpha1.IntegrationPlatform) v1alpha1.TraitProfile {
+	if p.Spec.Profile != "" {
+		return p.Spec.Profile
+	}
+	switch p.Spec.Cluster {
+	case v1alpha1.IntegrationPlatformClusterKubernetes:
+		return v1alpha1.TraitProfileKubernetes
+	case v1alpha1.IntegrationPlatformClusterOpenShift:
+		return v1alpha1.TraitProfileOpenShift
+	}
+	return ""
+}
\ No newline at end of file
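
A small illustrative sketch of the GetProfile fallback (assuming pkg/platform keeps the package name "platform"): with no explicit profile the result is derived from the cluster type, and an explicit profile always wins:

    package main

    import (
    	"fmt"

    	"github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
    	"github.com/apache/camel-k/pkg/platform"
    )

    func main() {
    	// No Spec.Profile set: the profile is computed from the cluster flavour.
    	p := v1alpha1.IntegrationPlatform{}
    	p.Spec.Cluster = v1alpha1.IntegrationPlatformClusterKubernetes
    	fmt.Println(platform.GetProfile(&p)) // "Kubernetes"

    	// An explicit profile takes precedence over the cluster-based default.
    	p.Spec.Profile = v1alpha1.TraitProfileKnative
    	fmt.Println(platform.GetProfile(&p)) // "Knative"
    }
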
diff --git a/pkg/stub/action/platform/initialize.go b/pkg/stub/action/platform/initialize.go
index 07ea31f..f4eacde 100644
--- a/pkg/stub/action/platform/initialize.go
+++ b/pkg/stub/action/platform/initialize.go
@@ -84,6 +84,10 @@ func (action *initializeAction) Handle(platform *v1alpha1.IntegrationPlatform) e
 		return errors.New("no registry specified for publishing images")
 	}
 
+	if target.Spec.Profile == "" {
+		target.Spec.Profile = platformutils.GetProfile(target)
+	}
+
 	// next status
 	logrus.Info("Platform ", target.Name, " transitioning to state ", v1alpha1.IntegrationPlatformPhaseCreating)
 	target.Status.Phase = v1alpha1.IntegrationPlatformPhaseCreating
diff --git a/pkg/trait/catalog.go b/pkg/trait/catalog.go
index 956ed7e..d834aca 100644
--- a/pkg/trait/catalog.go
+++ b/pkg/trait/catalog.go
@@ -29,6 +29,7 @@ import (
 type Catalog struct {
 	tDependencies Trait
 	tDeployment   Trait
+	tKnative      Trait
 	tService      Trait
 	tRoute        Trait
 	tIngress      Trait
@@ -40,6 +41,7 @@ func NewCatalog() *Catalog {
 	return &Catalog{
 		tDependencies: newDependenciesTrait(),
 		tDeployment:   newDeploymentTrait(),
+		tKnative:      newKnativeTrait(),
 		tService:      newServiceTrait(),
 		tRoute:        newRouteTrait(),
 		tIngress:      newIngressTrait(),
@@ -51,6 +53,7 @@ func (c *Catalog) allTraits() []Trait {
 	return []Trait{
 		c.tDependencies,
 		c.tDeployment,
+		c.tKnative,
 		c.tService,
 		c.tRoute,
 		c.tIngress,
@@ -59,8 +62,12 @@ func (c *Catalog) allTraits() []Trait {
 }
 
 func (c *Catalog) traitsFor(environment *environment) []Trait {
-	switch environment.Platform.Spec.Cluster {
-	case v1alpha1.IntegrationPlatformClusterOpenShift:
+	profile := environment.Platform.Spec.Profile
+	if environment.Integration.Spec.Profile != "" {
+		profile = environment.Integration.Spec.Profile
+	}
+	switch profile {
+	case v1alpha1.TraitProfileOpenShift:
 		return []Trait{
 			c.tDependencies,
 			c.tDeployment,
@@ -68,7 +75,7 @@ func (c *Catalog) traitsFor(environment *environment) []Trait {
 			c.tRoute,
 			c.tOwner,
 		}
-	case v1alpha1.IntegrationPlatformClusterKubernetes:
+	case v1alpha1.TraitProfileKubernetes:
 		return []Trait{
 			c.tDependencies,
 			c.tDeployment,
@@ -76,8 +83,14 @@ func (c *Catalog) traitsFor(environment *environment) []Trait {
 			c.tIngress,
 			c.tOwner,
 		}
-		// case Knative: ...
+	case v1alpha1.TraitProfileKnative:
+		return []Trait{
+			c.tDependencies,
+			c.tKnative,
+			c.tOwner,
+		}
 	}
+
 	return nil
 }
 
diff --git a/pkg/trait/knative.go b/pkg/trait/knative.go
new file mode 100644
index 0000000..0d4e23c
--- /dev/null
+++ b/pkg/trait/knative.go
@@ -0,0 +1,108 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trait
+
+import (
+	"github.com/apache/camel-k/pkg/util/kubernetes"
+	knative "github.com/knative/serving/pkg/apis/serving/v1alpha1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+type knativeTrait struct {
+	BaseTrait `property:",squash"`
+}
+
+func newKnativeTrait() *knativeTrait {
+	return &knativeTrait{
+		BaseTrait: newBaseTrait("knative"),
+	}
+}
+
+func (t *knativeTrait) autoconfigure(environment *environment, resources *kubernetes.Collection) error {
+	if t.Enabled == nil {
+		// disable by default
+		status := false
+		t.Enabled = &status
+	}
+	return nil
+}
+
+func (t *knativeTrait) beforeDeploy(environment *environment, resources *kubernetes.Collection) error {
+	resources.Add(t.getServiceFor(environment))
+	return nil
+}
+
+func (*knativeTrait) getServiceFor(e *environment) *knative.Service {
+	// combine properties of integration with context, integration
+	// properties have the priority
+	properties := CombineConfigurationAsMap("property", e.Context, e.Integration)
+
+	// combine environment of integration with context, integration
+	// environment has the priority
+	environment := CombineConfigurationAsMap("env", e.Context, e.Integration)
+
+	// set env vars needed by the runtime
+	environment["JAVA_MAIN_CLASS"] = "org.apache.camel.k.jvm.Application"
+
+	// camel-k runtime
+	environment["CAMEL_K_ROUTES_URI"] = "inline:" + e.Integration.Spec.Source.Content
+	environment["CAMEL_K_ROUTES_LANGUAGE"] = string(e.Integration.Spec.Source.Language)
+	environment["CAMEL_K_CONF"] = "inline:" + PropertiesString(properties)
+	environment["CAMEL_K_CONF_D"] = "/etc/camel/conf.d"
+
+	// add a dummy env var to trigger deployment if everything but the code
+	// has been changed
+	environment["CAMEL_K_DIGEST"] = e.Integration.Status.Digest
+
+	// optimizations
+	environment["AB_JOLOKIA_OFF"] = "true"
+
+	labels := map[string]string{
+		"camel.apache.org/integration": e.Integration.Name,
+	}
+
+	svc := knative.Service{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Service",
+			APIVersion: knative.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        e.Integration.Name,
+			Namespace:   e.Integration.Namespace,
+			Labels:      labels,
+			Annotations: e.Integration.Annotations,
+		},
+		Spec: knative.ServiceSpec{
+			RunLatest: &knative.RunLatestType{
+				Configuration: knative.ConfigurationSpec{
+					RevisionTemplate: knative.RevisionTemplateSpec{
+						Spec: knative.RevisionSpec{
+							Container: corev1.Container{
+								Image: e.Integration.Status.Image,
+								Env:   EnvironmentAsEnvVarSlice(environment),
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	return &svc
+}
diff --git a/cmd/kamel/main.go b/pkg/util/knative/knative.go
similarity index 60%
copy from cmd/kamel/main.go
copy to pkg/util/knative/knative.go
index 502a978..c17d7c0 100644
--- a/cmd/kamel/main.go
+++ b/pkg/util/knative/knative.go
@@ -15,38 +15,20 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package main
+package knative
 
 import (
-	"context"
-	"fmt"
-	"math/rand"
-	"os"
-	"time"
-
-	"github.com/apache/camel-k/pkg/client/cmd"
-	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+	"github.com/operator-framework/operator-sdk/pkg/k8sclient"
+	"k8s.io/apimachinery/pkg/api/errors"
 )
 
-func main() {
-	rand.Seed(time.Now().UTC().UnixNano())
-
-	ctx, cancel := context.WithCancel(context.Background())
-
-	// Cancel ctx as soon as main returns
-	defer cancel()
-
-	rootCmd, err := cmd.NewKamelCommand(ctx)
-	exitOnError(err)
-
-	err = rootCmd.Execute()
-	exitOnError(err)
-}
-
-func exitOnError(err error) {
-	if err != nil {
-		fmt.Println("Error:", err)
-
-		os.Exit(1)
+// IsInstalled returns true if we are connected to a cluster with Knative installed
+func IsInstalled() (bool, error) {
+	_, err := k8sclient.GetKubeClient().Discovery().ServerResourcesForGroupVersion("serving.knative.dev/v1alpha1")
+	if err != nil && errors.IsNotFound(err) {
+		return false, nil
+	} else if err != nil {
+		return false, err
 	}
+	return true, nil
 }
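
For reference, a minimal sketch (not part of this commit) of calling the new detection helper; it assumes a reachable cluster configuration, since IsInstalled queries the discovery API for the serving.knative.dev/v1alpha1 group/version:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/apache/camel-k/pkg/util/knative"
    )

    func main() {
    	// Returns false without error when the Knative serving API group is absent.
    	installed, err := knative.IsInstalled()
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("Knative serving available:", installed)
    }
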
diff --git a/cmd/kamel/main.go b/pkg/util/knative/register.go
similarity index 59%
copy from cmd/kamel/main.go
copy to pkg/util/knative/register.go
index 502a978..2e90b5d 100644
--- a/cmd/kamel/main.go
+++ b/pkg/util/knative/register.go
@@ -15,38 +15,14 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package main
+package knative
 
 import (
-	"context"
-	"fmt"
-	"math/rand"
-	"os"
-	"time"
-
-	"github.com/apache/camel-k/pkg/client/cmd"
-	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+	knative "github.com/knative/serving/pkg/apis/serving/v1alpha1"
+	"github.com/operator-framework/operator-sdk/pkg/util/k8sutil"
 )
 
-func main() {
-	rand.Seed(time.Now().UTC().UnixNano())
-
-	ctx, cancel := context.WithCancel(context.Background())
-
-	// Cancel ctx as soon as main returns
-	defer cancel()
-
-	rootCmd, err := cmd.NewKamelCommand(ctx)
-	exitOnError(err)
-
-	err = rootCmd.Execute()
-	exitOnError(err)
-}
-
-func exitOnError(err error) {
-	if err != nil {
-		fmt.Println("Error:", err)
-
-		os.Exit(1)
-	}
+// Register all Knative types that we want to manage.
+func init() {
+	k8sutil.AddToSDKScheme(knative.AddToScheme)
 }
diff --git a/pkg/util/kubernetes/collection.go b/pkg/util/kubernetes/collection.go
index 8f099c3..54b6ba7 100644
--- a/pkg/util/kubernetes/collection.go
+++ b/pkg/util/kubernetes/collection.go
@@ -57,7 +57,7 @@ func (c *Collection) VisitDeployment(visitor func(*appsv1.Deployment)) {
 }
 
 // GetDeployment returns a Deployment that matches the given function
-func (c *Collection) GetDeployment(filter func(*appsv1.Deployment)bool) *appsv1.Deployment {
+func (c *Collection) GetDeployment(filter func(*appsv1.Deployment) bool) *appsv1.Deployment {
 	var retValue *appsv1.Deployment
 	c.VisitDeployment(func(re *appsv1.Deployment) {
 		if filter(re) {
@@ -67,6 +67,20 @@ func (c *Collection) GetDeployment(filter func(*appsv1.Deployment)bool) *appsv1.
 	return retValue
 }
 
+// RemoveDeployment removes and returns a Deployment that matches the given function
+func (c *Collection) RemoveDeployment(filter func(*appsv1.Deployment) bool) *appsv1.Deployment {
+	res := c.Remove(func(res runtime.Object) bool {
+		if conv, ok := res.(*appsv1.Deployment); ok {
+			return filter(conv)
+		}
+		return false
+	})
+	if res == nil {
+		return nil
+	}
+	return res.(*appsv1.Deployment)
+}
+
 // VisitConfigMap executes the visitor function on all ConfigMap resources
 func (c *Collection) VisitConfigMap(visitor func(*corev1.ConfigMap)) {
 	c.Visit(func(res runtime.Object) {
@@ -77,7 +91,7 @@ func (c *Collection) VisitConfigMap(visitor func(*corev1.ConfigMap)) {
 }
 
 // GetConfigMap returns a ConfigMap that matches the given function
-func (c *Collection) GetConfigMap(filter func(*corev1.ConfigMap)bool) *corev1.ConfigMap {
+func (c *Collection) GetConfigMap(filter func(*corev1.ConfigMap) bool) *corev1.ConfigMap {
 	var retValue *corev1.ConfigMap
 	c.VisitConfigMap(func(re *corev1.ConfigMap) {
 		if filter(re) {
@@ -97,7 +111,7 @@ func (c *Collection) VisitService(visitor func(*corev1.Service)) {
 }
 
 // GetService returns a Service that matches the given function
-func (c *Collection) GetService(filter func(*corev1.Service)bool) *corev1.Service {
+func (c *Collection) GetService(filter func(*corev1.Service) bool) *corev1.Service {
 	var retValue *corev1.Service
 	c.VisitService(func(re *corev1.Service) {
 		if filter(re) {
@@ -117,7 +131,7 @@ func (c *Collection) VisitRoute(visitor func(*routev1.Route)) {
 }
 
 // GetRoute returns a Route that matches the given function
-func (c *Collection) GetRoute(filter func(*routev1.Route)bool) *routev1.Route {
+func (c *Collection) GetRoute(filter func(*routev1.Route) bool) *routev1.Route {
 	var retValue *routev1.Route
 	c.VisitRoute(func(re *routev1.Route) {
 		if filter(re) {
@@ -142,3 +156,14 @@ func (c *Collection) Visit(visitor func(runtime.Object)) {
 		visitor(res)
 	}
 }
+
+// Remove removes the given element from the collection and returns it
+func (c *Collection) Remove(selector func(runtime.Object) bool) runtime.Object {
+	for idx, res := range c.items {
+		if selector(res) {
+			c.items = append(c.items[0:idx], c.items[idx+1:]...)
+			return res
+		}
+	}
+	return nil
+}
diff --git a/runtime/groovy/src/main/groovy/org/apache/camel/k/groovy/GroovyRoutesLoader.groovy b/runtime/groovy/src/main/groovy/org/apache/camel/k/groovy/GroovyRoutesLoader.groovy
index 1f61b10..86cb7fe 100644
--- a/runtime/groovy/src/main/groovy/org/apache/camel/k/groovy/GroovyRoutesLoader.groovy
+++ b/runtime/groovy/src/main/groovy/org/apache/camel/k/groovy/GroovyRoutesLoader.groovy
@@ -22,7 +22,7 @@ import org.apache.camel.k.groovy.dsl.IntegrationConfiguration
 import org.apache.camel.k.jvm.Language
 import org.apache.camel.k.jvm.RoutesLoader
 import org.apache.camel.k.jvm.RuntimeRegistry
-import org.apache.camel.util.ResourceHelper
+import org.apache.camel.k.jvm.URIResolver
 import org.codehaus.groovy.control.CompilerConfiguration
 
 class GroovyRoutesLoader implements RoutesLoader {
@@ -41,7 +41,7 @@ class GroovyRoutesLoader implements RoutesLoader {
 
                 def cl = Thread.currentThread().getContextClassLoader()
                 def sh = new GroovyShell(cl, new Binding(), cc)
-                def is = ResourceHelper.resolveMandatoryResourceAsInputStream(context, resource)
+                def is = URIResolver.resolve(context, resource)
 
                 is.withCloseable {
                     def reader = new InputStreamReader(is)
diff --git a/runtime/jvm/src/main/java/org/apache/camel/k/jvm/Constants.java b/runtime/jvm/src/main/java/org/apache/camel/k/jvm/Constants.java
index d123d7a..e5a09ed 100644
--- a/runtime/jvm/src/main/java/org/apache/camel/k/jvm/Constants.java
+++ b/runtime/jvm/src/main/java/org/apache/camel/k/jvm/Constants.java
@@ -23,6 +23,7 @@ public final class Constants {
     public static final String ENV_CAMEL_K_CONF_D = "CAMEL_K_CONF_D";
     public static final String SCHEME_CLASSPATH = "classpath:";
     public static final String SCHEME_FILE = "file:";
+    public static final String SCHEME_INLINE = "inline:";
     public static final String LOGGING_LEVEL_PREFIX = "logging.level.";
 
     private Constants() {
diff --git a/runtime/jvm/src/main/java/org/apache/camel/k/jvm/RoutesLoaders.java b/runtime/jvm/src/main/java/org/apache/camel/k/jvm/RoutesLoaders.java
index a9ecf60..35cb47d 100644
--- a/runtime/jvm/src/main/java/org/apache/camel/k/jvm/RoutesLoaders.java
+++ b/runtime/jvm/src/main/java/org/apache/camel/k/jvm/RoutesLoaders.java
@@ -16,20 +16,6 @@
  */
 package org.apache.camel.k.jvm;
 
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.nio.charset.StandardCharsets;
-import java.util.Collections;
-import java.util.List;
-import java.util.ServiceLoader;
-import java.util.function.Function;
-import java.util.function.Supplier;
-import javax.script.Bindings;
-import javax.script.ScriptEngine;
-import javax.script.ScriptEngineManager;
-import javax.script.SimpleBindings;
-import javax.xml.bind.UnmarshalException;
-
 import org.apache.camel.CamelContext;
 import org.apache.camel.builder.RouteBuilder;
 import org.apache.camel.k.jvm.dsl.Components;
@@ -37,15 +23,29 @@ import org.apache.camel.model.RouteDefinition;
 import org.apache.camel.model.rest.RestConfigurationDefinition;
 import org.apache.camel.model.rest.RestDefinition;
 import org.apache.camel.util.ObjectHelper;
-import org.apache.camel.util.ResourceHelper;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.joor.Reflect;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.script.Bindings;
+import javax.script.ScriptEngine;
+import javax.script.ScriptEngineManager;
+import javax.script.SimpleBindings;
+import javax.xml.bind.UnmarshalException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.nio.charset.StandardCharsets;
+import java.util.Collections;
+import java.util.List;
+import java.util.ServiceLoader;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
 import static org.apache.camel.k.jvm.Constants.SCHEME_CLASSPATH;
 import static org.apache.camel.k.jvm.Constants.SCHEME_FILE;
+import static org.apache.camel.k.jvm.Constants.SCHEME_INLINE;
 
 public final class RoutesLoaders {
     private static final Logger LOGGER = LoggerFactory.getLogger(RoutesLoaders.class);
@@ -86,7 +86,7 @@ public final class RoutesLoaders {
             return new RouteBuilder() {
                 @Override
                 public void configure() throws Exception {
-                    try (InputStream is = ResourceHelper.resolveMandatoryResourceAsInputStream(getContext(), resource)) {
+                    try (InputStream is = URIResolver.resolve(getContext(), resource)) {
                         String name = StringUtils.substringAfter(resource, ":");
                         name = StringUtils.removeEnd(name, ".java");
 
@@ -130,7 +130,7 @@ public final class RoutesLoaders {
                     bindings.put("rest", (Supplier<RestDefinition>) () -> rest());
                     bindings.put("restConfiguration", (Supplier<RestConfigurationDefinition>) () -> restConfiguration());
 
-                    try (InputStream is = ResourceHelper.resolveMandatoryResourceAsInputStream(context, resource)) {
+                    try (InputStream is = URIResolver.resolve(context, resource)) {
                         engine.eval(new InputStreamReader(is), bindings);
                     }
                 }
@@ -149,7 +149,7 @@ public final class RoutesLoaders {
             return new RouteBuilder() {
                 @Override
                 public void configure() throws Exception {
-                    try (InputStream is = ResourceHelper.resolveMandatoryResourceAsInputStream(getContext(), resource)) {
+                    try (InputStream is = URIResolver.resolve(getContext(), resource)) {
                         try {
                             setRouteCollection(
                                 getContext().loadRoutesDefinition(is)
@@ -173,7 +173,7 @@ public final class RoutesLoaders {
 
 
     public static RoutesLoader loaderFor(String resource, String languageName) {
-        if (!resource.startsWith(SCHEME_CLASSPATH) && !resource.startsWith(SCHEME_FILE)) {
+        if (!resource.startsWith(SCHEME_CLASSPATH) && !resource.startsWith(SCHEME_FILE) && !resource.startsWith(SCHEME_INLINE)) {
             throw new IllegalArgumentException("No valid resource format, expected scheme:path, found " + resource);
         }
 
diff --git a/runtime/jvm/src/main/java/org/apache/camel/k/jvm/RuntimeSupport.java b/runtime/jvm/src/main/java/org/apache/camel/k/jvm/RuntimeSupport.java
index 5826204..44dc3a1 100644
--- a/runtime/jvm/src/main/java/org/apache/camel/k/jvm/RuntimeSupport.java
+++ b/runtime/jvm/src/main/java/org/apache/camel/k/jvm/RuntimeSupport.java
@@ -36,6 +36,8 @@ import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.core.LoggerContext;
 import org.apache.logging.log4j.core.config.LoggerConfig;
 
+import static org.apache.camel.k.jvm.Constants.SCHEME_INLINE;
+
 public final class RuntimeSupport {
     private RuntimeSupport() {
     }
@@ -47,10 +49,18 @@ public final class RuntimeSupport {
 
         // Main location
         if (ObjectHelper.isNotEmpty(conf)) {
-            try (Reader reader = Files.newBufferedReader(Paths.get(conf))) {
-                properties.load(reader);
-            } catch (IOException e) {
-                throw new RuntimeException(e);
+            if (conf.startsWith(SCHEME_INLINE)) {
+                try (Reader reader = URIResolver.resolveInline(conf)) {
+                    properties.load(reader);
+                } catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            } else {
+                try (Reader reader = Files.newBufferedReader(Paths.get(conf))) {
+                    properties.load(reader);
+                } catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
             }
         }
 
diff --git a/runtime/jvm/src/main/java/org/apache/camel/k/jvm/URIResolver.java b/runtime/jvm/src/main/java/org/apache/camel/k/jvm/URIResolver.java
new file mode 100644
index 0000000..b52e81c
--- /dev/null
+++ b/runtime/jvm/src/main/java/org/apache/camel/k/jvm/URIResolver.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.k.jvm;
+
+import org.apache.camel.CamelContext;
+import org.apache.camel.util.ResourceHelper;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.Reader;
+import java.io.StringReader;
+
+import static org.apache.camel.k.jvm.Constants.SCHEME_INLINE;
+
+public class URIResolver {
+
+    public static InputStream resolve(CamelContext ctx, String uri) throws IOException {
+        if (uri == null) {
+            throw new IllegalArgumentException("Cannot resolve null URI");
+        }
+        if (uri.startsWith(SCHEME_INLINE)) {
+            // Using platform encoding on purpose
+            return new ByteArrayInputStream(uri.substring(SCHEME_INLINE.length()).getBytes());
+        }
+
+        return ResourceHelper.resolveMandatoryResourceAsInputStream(ctx, uri);
+    }
+
+    public static Reader resolveInline(String uri) {
+        if (!uri.startsWith(SCHEME_INLINE)) {
+            throw new IllegalArgumentException("The provided content is not inline: " + uri);
+        }
+        return new StringReader(uri.substring(SCHEME_INLINE.length()));
+    }
+
+}
diff --git a/runtime/kotlin/src/main/kotlin/org/apache/camel/k/kotlin/KotlinRoutesLoader.kt b/runtime/kotlin/src/main/kotlin/org/apache/camel/k/kotlin/KotlinRoutesLoader.kt
index 9551e95..8602709 100644
--- a/runtime/kotlin/src/main/kotlin/org/apache/camel/k/kotlin/KotlinRoutesLoader.kt
+++ b/runtime/kotlin/src/main/kotlin/org/apache/camel/k/kotlin/KotlinRoutesLoader.kt
@@ -20,8 +20,8 @@ import org.apache.camel.builder.RouteBuilder
 import org.apache.camel.k.jvm.Language
 import org.apache.camel.k.jvm.RoutesLoader
 import org.apache.camel.k.jvm.RuntimeRegistry
+import org.apache.camel.k.jvm.URIResolver
 import org.apache.camel.k.kotlin.dsl.IntegrationConfiguration
-import org.apache.camel.util.ResourceHelper
 import org.slf4j.Logger
 import org.slf4j.LoggerFactory
 import java.io.File
@@ -57,7 +57,7 @@ class KotlinRoutesLoader : RoutesLoader {
 
                 LOGGER.info("JAVA_HOME is set to {}", javaHome)
 
-                ResourceHelper.resolveMandatoryResourceAsInputStream(context, resource).use { `is` ->
+                URIResolver.resolve(context, resource).use { `is` ->
                     val result = host.eval(
                         InputStreamReader(`is`).readText().toScriptSource(),
                         ScriptCompilationConfiguration {
diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE
new file mode 100644
index 0000000..0eb9b72
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2014, Evan Phoenix
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without 
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+* Neither the name of the Evan Phoenix nor the names of its contributors 
+  may be used to endorse or promote products derived from this software 
+  without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE 
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go
new file mode 100644
index 0000000..6806c4c
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/merge.go
@@ -0,0 +1,383 @@
+package jsonpatch
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+)
+
+func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
+	curDoc, err := cur.intoDoc()
+
+	if err != nil {
+		pruneNulls(patch)
+		return patch
+	}
+
+	patchDoc, err := patch.intoDoc()
+
+	if err != nil {
+		return patch
+	}
+
+	mergeDocs(curDoc, patchDoc, mergeMerge)
+
+	return cur
+}
+
+func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
+	for k, v := range *patch {
+		if v == nil {
+			if mergeMerge {
+				(*doc)[k] = nil
+			} else {
+				delete(*doc, k)
+			}
+		} else {
+			cur, ok := (*doc)[k]
+
+			if !ok || cur == nil {
+				pruneNulls(v)
+				(*doc)[k] = v
+			} else {
+				(*doc)[k] = merge(cur, v, mergeMerge)
+			}
+		}
+	}
+}
+
+func pruneNulls(n *lazyNode) {
+	sub, err := n.intoDoc()
+
+	if err == nil {
+		pruneDocNulls(sub)
+	} else {
+		ary, err := n.intoAry()
+
+		if err == nil {
+			pruneAryNulls(ary)
+		}
+	}
+}
+
+func pruneDocNulls(doc *partialDoc) *partialDoc {
+	for k, v := range *doc {
+		if v == nil {
+			delete(*doc, k)
+		} else {
+			pruneNulls(v)
+		}
+	}
+
+	return doc
+}
+
+func pruneAryNulls(ary *partialArray) *partialArray {
+	newAry := []*lazyNode{}
+
+	for _, v := range *ary {
+		if v != nil {
+			pruneNulls(v)
+			newAry = append(newAry, v)
+		}
+	}
+
+	*ary = newAry
+
+	return ary
+}
+
+var errBadJSONDoc = fmt.Errorf("Invalid JSON Document")
+var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
+var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
+
+// MergeMergePatches merges two merge patches together, such that
+// applying this resulting merged merge patch to a document yields the same
+// as merging each merge patch to the document in succession.
+func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
+	return doMergePatch(patch1Data, patch2Data, true)
+}
+
+// MergePatch merges the patchData into the docData.
+func MergePatch(docData, patchData []byte) ([]byte, error) {
+	return doMergePatch(docData, patchData, false)
+}
+
+func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
+	doc := &partialDoc{}
+
+	docErr := json.Unmarshal(docData, doc)
+
+	patch := &partialDoc{}
+
+	patchErr := json.Unmarshal(patchData, patch)
+
+	if _, ok := docErr.(*json.SyntaxError); ok {
+		return nil, errBadJSONDoc
+	}
+
+	if _, ok := patchErr.(*json.SyntaxError); ok {
+		return nil, errBadJSONPatch
+	}
+
+	if docErr == nil && *doc == nil {
+		return nil, errBadJSONDoc
+	}
+
+	if patchErr == nil && *patch == nil {
+		return nil, errBadJSONPatch
+	}
+
+	if docErr != nil || patchErr != nil {
+		// Not an error, just not a doc, so we turn straight into the patch
+		if patchErr == nil {
+			if mergeMerge {
+				doc = patch
+			} else {
+				doc = pruneDocNulls(patch)
+			}
+		} else {
+			patchAry := &partialArray{}
+			patchErr = json.Unmarshal(patchData, patchAry)
+
+			if patchErr != nil {
+				return nil, errBadJSONPatch
+			}
+
+			pruneAryNulls(patchAry)
+
+			out, patchErr := json.Marshal(patchAry)
+
+			if patchErr != nil {
+				return nil, errBadJSONPatch
+			}
+
+			return out, nil
+		}
+	} else {
+		mergeDocs(doc, patch, mergeMerge)
+	}
+
+	return json.Marshal(doc)
+}
+
+// resemblesJSONArray indicates whether the byte-slice "appears" to be
+// a JSON array or not.
+// False-positives are possible, as this function does not check the internal
+// structure of the array. It only checks that the outer syntax is present and
+// correct.
+func resemblesJSONArray(input []byte) bool {
+	input = bytes.TrimSpace(input)
+
+	hasPrefix := bytes.HasPrefix(input, []byte("["))
+	hasSuffix := bytes.HasSuffix(input, []byte("]"))
+
+	return hasPrefix && hasSuffix
+}
+
+// CreateMergePatch will return a merge patch document capable of converting
+// the original document(s) to the modified document(s).
+// The parameters can be bytes of either two JSON Documents, or two arrays of
+// JSON documents.
+// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
+func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+	originalResemblesArray := resemblesJSONArray(originalJSON)
+	modifiedResemblesArray := resemblesJSONArray(modifiedJSON)
+
+	// Do both byte-slices seem like JSON arrays?
+	if originalResemblesArray && modifiedResemblesArray {
+		return createArrayMergePatch(originalJSON, modifiedJSON)
+	}
+
+	// Are both byte-slices are not arrays? Then they are likely JSON objects...
+	if !originalResemblesArray && !modifiedResemblesArray {
+		return createObjectMergePatch(originalJSON, modifiedJSON)
+	}
+
+	// None of the above? Then return an error because of mismatched types.
+	return nil, errBadMergeTypes
+}
+
+// createObjectMergePatch will return a merge-patch document capable of
+// converting the original document to the modified document.
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+	originalDoc := map[string]interface{}{}
+	modifiedDoc := map[string]interface{}{}
+
+	err := json.Unmarshal(originalJSON, &originalDoc)
+	if err != nil {
+		return nil, errBadJSONDoc
+	}
+
+	err = json.Unmarshal(modifiedJSON, &modifiedDoc)
+	if err != nil {
+		return nil, errBadJSONDoc
+	}
+
+	dest, err := getDiff(originalDoc, modifiedDoc)
+	if err != nil {
+		return nil, err
+	}
+
+	return json.Marshal(dest)
+}
+
+// createArrayMergePatch will return an array of merge-patch documents capable
+// of converting the original document to the modified document for each
+// pair of JSON documents provided in the arrays.
+// Arrays of mismatched sizes will result in an error.
+func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+	originalDocs := []json.RawMessage{}
+	modifiedDocs := []json.RawMessage{}
+
+	err := json.Unmarshal(originalJSON, &originalDocs)
+	if err != nil {
+		return nil, errBadJSONDoc
+	}
+
+	err = json.Unmarshal(modifiedJSON, &modifiedDocs)
+	if err != nil {
+		return nil, errBadJSONDoc
+	}
+
+	total := len(originalDocs)
+	if len(modifiedDocs) != total {
+		return nil, errBadJSONDoc
+	}
+
+	result := []json.RawMessage{}
+	for i := 0; i < len(originalDocs); i++ {
+		original := originalDocs[i]
+		modified := modifiedDocs[i]
+
+		patch, err := createObjectMergePatch(original, modified)
+		if err != nil {
+			return nil, err
+		}
+
+		result = append(result, json.RawMessage(patch))
+	}
+
+	return json.Marshal(result)
+}
+
+// Returns true if the array matches (must be json types).
+// As is idiomatic for go, an empty array is not the same as a nil array.
+func matchesArray(a, b []interface{}) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	if (a == nil && b != nil) || (a != nil && b == nil) {
+		return false
+	}
+	for i := range a {
+		if !matchesValue(a[i], b[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// Returns true if the values matches (must be json types)
+// The types of the values must match, otherwise it will always return false
+// If two map[string]interface{} are given, all elements must match.
+func matchesValue(av, bv interface{}) bool {
+	if reflect.TypeOf(av) != reflect.TypeOf(bv) {
+		return false
+	}
+	switch at := av.(type) {
+	case string:
+		bt := bv.(string)
+		if bt == at {
+			return true
+		}
+	case float64:
+		bt := bv.(float64)
+		if bt == at {
+			return true
+		}
+	case bool:
+		bt := bv.(bool)
+		if bt == at {
+			return true
+		}
+	case nil:
+		// Both nil, fine.
+		return true
+	case map[string]interface{}:
+		bt := bv.(map[string]interface{})
+		for key := range at {
+			if !matchesValue(at[key], bt[key]) {
+				return false
+			}
+		}
+		for key := range bt {
+			if !matchesValue(at[key], bt[key]) {
+				return false
+			}
+		}
+		return true
+	case []interface{}:
+		bt := bv.([]interface{})
+		return matchesArray(at, bt)
+	}
+	return false
+}
+
+// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
+	into := map[string]interface{}{}
+	for key, bv := range b {
+		av, ok := a[key]
+		// value was added
+		if !ok {
+			into[key] = bv
+			continue
+		}
+		// If types have changed, replace completely
+		if reflect.TypeOf(av) != reflect.TypeOf(bv) {
+			into[key] = bv
+			continue
+		}
+		// Types are the same, compare values
+		switch at := av.(type) {
+		case map[string]interface{}:
+			bt := bv.(map[string]interface{})
+			dst := make(map[string]interface{}, len(bt))
+			dst, err := getDiff(at, bt)
+			if err != nil {
+				return nil, err
+			}
+			if len(dst) > 0 {
+				into[key] = dst
+			}
+		case string, float64, bool:
+			if !matchesValue(av, bv) {
+				into[key] = bv
+			}
+		case []interface{}:
+			bt := bv.([]interface{})
+			if !matchesArray(at, bt) {
+				into[key] = bv
+			}
+		case nil:
+			switch bv.(type) {
+			case nil:
+				// Both nil, fine.
+			default:
+				into[key] = bv
+			}
+		default:
+			panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
+		}
+	}
+	// Now add all deleted values as nil
+	for key := range a {
+		_, found := b[key]
+		if !found {
+			into[key] = nil
+		}
+	}
+	return into, nil
+}
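
For reference, a minimal sketch of how the merge-patch half of this vendored
library is typically consumed. It assumes the exported CreateMergePatch and
MergePatch wrappers from the earlier (not shown) part of merge.go, and the
sample documents are purely illustrative:

    package main

    import (
    	"fmt"

    	jsonpatch "github.com/evanphx/json-patch"
    )

    func main() {
    	original := []byte(`{"name":"camel-k","replicas":1,"debug":true}`)
    	modified := []byte(`{"name":"camel-k","replicas":3}`)

    	// Diff the two documents into an RFC 7386 merge patch: changed keys keep
    	// their new value, deleted keys become null, unchanged keys are omitted.
    	patch, err := jsonpatch.CreateMergePatch(original, modified)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(patch)) // {"debug":null,"replicas":3}

    	// Applying the patch to the original reproduces the modified document.
    	result, err := jsonpatch.MergePatch(original, patch)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(result))
    }
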
diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go
new file mode 100644
index 0000000..f26b682
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/patch.go
@@ -0,0 +1,682 @@
+package jsonpatch
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+const (
+	eRaw = iota
+	eDoc
+	eAry
+)
+
+var SupportNegativeIndices bool = true
+
+type lazyNode struct {
+	raw   *json.RawMessage
+	doc   partialDoc
+	ary   partialArray
+	which int
+}
+
+type operation map[string]*json.RawMessage
+
+// Patch is an ordered collection of operations.
+type Patch []operation
+
+type partialDoc map[string]*lazyNode
+type partialArray []*lazyNode
+
+type container interface {
+	get(key string) (*lazyNode, error)
+	set(key string, val *lazyNode) error
+	add(key string, val *lazyNode) error
+	remove(key string) error
+}
+
+func newLazyNode(raw *json.RawMessage) *lazyNode {
+	return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw}
+}
+
+func (n *lazyNode) MarshalJSON() ([]byte, error) {
+	switch n.which {
+	case eRaw:
+		return json.Marshal(n.raw)
+	case eDoc:
+		return json.Marshal(n.doc)
+	case eAry:
+		return json.Marshal(n.ary)
+	default:
+		return nil, fmt.Errorf("Unknown type")
+	}
+}
+
+func (n *lazyNode) UnmarshalJSON(data []byte) error {
+	dest := make(json.RawMessage, len(data))
+	copy(dest, data)
+	n.raw = &dest
+	n.which = eRaw
+	return nil
+}
+
+func (n *lazyNode) intoDoc() (*partialDoc, error) {
+	if n.which == eDoc {
+		return &n.doc, nil
+	}
+
+	if n.raw == nil {
+		return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial document")
+	}
+
+	err := json.Unmarshal(*n.raw, &n.doc)
+
+	if err != nil {
+		return nil, err
+	}
+
+	n.which = eDoc
+	return &n.doc, nil
+}
+
+func (n *lazyNode) intoAry() (*partialArray, error) {
+	if n.which == eAry {
+		return &n.ary, nil
+	}
+
+	if n.raw == nil {
+		return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial array")
+	}
+
+	err := json.Unmarshal(*n.raw, &n.ary)
+
+	if err != nil {
+		return nil, err
+	}
+
+	n.which = eAry
+	return &n.ary, nil
+}
+
+func (n *lazyNode) compact() []byte {
+	buf := &bytes.Buffer{}
+
+	if n.raw == nil {
+		return nil
+	}
+
+	err := json.Compact(buf, *n.raw)
+
+	if err != nil {
+		return *n.raw
+	}
+
+	return buf.Bytes()
+}
+
+func (n *lazyNode) tryDoc() bool {
+	if n.raw == nil {
+		return false
+	}
+
+	err := json.Unmarshal(*n.raw, &n.doc)
+
+	if err != nil {
+		return false
+	}
+
+	n.which = eDoc
+	return true
+}
+
+func (n *lazyNode) tryAry() bool {
+	if n.raw == nil {
+		return false
+	}
+
+	err := json.Unmarshal(*n.raw, &n.ary)
+
+	if err != nil {
+		return false
+	}
+
+	n.which = eAry
+	return true
+}
+
+func (n *lazyNode) equal(o *lazyNode) bool {
+	if n.which == eRaw {
+		if !n.tryDoc() && !n.tryAry() {
+			if o.which != eRaw {
+				return false
+			}
+
+			return bytes.Equal(n.compact(), o.compact())
+		}
+	}
+
+	if n.which == eDoc {
+		if o.which == eRaw {
+			if !o.tryDoc() {
+				return false
+			}
+		}
+
+		if o.which != eDoc {
+			return false
+		}
+
+		for k, v := range n.doc {
+			ov, ok := o.doc[k]
+
+			if !ok {
+				return false
+			}
+
+			if v == nil && ov == nil {
+				continue
+			}
+
+			if !v.equal(ov) {
+				return false
+			}
+		}
+
+		return true
+	}
+
+	if o.which != eAry && !o.tryAry() {
+		return false
+	}
+
+	if len(n.ary) != len(o.ary) {
+		return false
+	}
+
+	for idx, val := range n.ary {
+		if !val.equal(o.ary[idx]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (o operation) kind() string {
+	if obj, ok := o["op"]; ok && obj != nil {
+		var op string
+
+		err := json.Unmarshal(*obj, &op)
+
+		if err != nil {
+			return "unknown"
+		}
+
+		return op
+	}
+
+	return "unknown"
+}
+
+func (o operation) path() string {
+	if obj, ok := o["path"]; ok && obj != nil {
+		var op string
+
+		err := json.Unmarshal(*obj, &op)
+
+		if err != nil {
+			return "unknown"
+		}
+
+		return op
+	}
+
+	return "unknown"
+}
+
+func (o operation) from() string {
+	if obj, ok := o["from"]; ok && obj != nil {
+		var op string
+
+		err := json.Unmarshal(*obj, &op)
+
+		if err != nil {
+			return "unknown"
+		}
+
+		return op
+	}
+
+	return "unknown"
+}
+
+func (o operation) value() *lazyNode {
+	if obj, ok := o["value"]; ok {
+		return newLazyNode(obj)
+	}
+
+	return nil
+}
+
+func isArray(buf []byte) bool {
+Loop:
+	for _, c := range buf {
+		switch c {
+		case ' ':
+		case '\n':
+		case '\t':
+			continue
+		case '[':
+			return true
+		default:
+			break Loop
+		}
+	}
+
+	return false
+}
+
+func findObject(pd *container, path string) (container, string) {
+	doc := *pd
+
+	split := strings.Split(path, "/")
+
+	if len(split) < 2 {
+		return nil, ""
+	}
+
+	parts := split[1 : len(split)-1]
+
+	key := split[len(split)-1]
+
+	var err error
+
+	for _, part := range parts {
+
+		next, ok := doc.get(decodePatchKey(part))
+
+		if next == nil || ok != nil {
+			return nil, ""
+		}
+
+		if isArray(*next.raw) {
+			doc, err = next.intoAry()
+
+			if err != nil {
+				return nil, ""
+			}
+		} else {
+			doc, err = next.intoDoc()
+
+			if err != nil {
+				return nil, ""
+			}
+		}
+	}
+
+	return doc, decodePatchKey(key)
+}
+
+func (d *partialDoc) set(key string, val *lazyNode) error {
+	(*d)[key] = val
+	return nil
+}
+
+func (d *partialDoc) add(key string, val *lazyNode) error {
+	(*d)[key] = val
+	return nil
+}
+
+func (d *partialDoc) get(key string) (*lazyNode, error) {
+	return (*d)[key], nil
+}
+
+func (d *partialDoc) remove(key string) error {
+	_, ok := (*d)[key]
+	if !ok {
+		return fmt.Errorf("Unable to remove nonexistent key: %s", key)
+	}
+
+	delete(*d, key)
+	return nil
+}
+
+func (d *partialArray) set(key string, val *lazyNode) error {
+	if key == "-" {
+		*d = append(*d, val)
+		return nil
+	}
+
+	idx, err := strconv.Atoi(key)
+	if err != nil {
+		return err
+	}
+
+	sz := len(*d)
+	if idx+1 > sz {
+		sz = idx + 1
+	}
+
+	ary := make([]*lazyNode, sz)
+
+	cur := *d
+
+	copy(ary, cur)
+
+	if idx >= len(ary) {
+		return fmt.Errorf("Unable to access invalid index: %d", idx)
+	}
+
+	ary[idx] = val
+
+	*d = ary
+	return nil
+}
+
+func (d *partialArray) add(key string, val *lazyNode) error {
+	if key == "-" {
+		*d = append(*d, val)
+		return nil
+	}
+
+	idx, err := strconv.Atoi(key)
+	if err != nil {
+		return err
+	}
+
+	ary := make([]*lazyNode, len(*d)+1)
+
+	cur := *d
+
+	if idx >= len(ary) {
+		return fmt.Errorf("Unable to access invalid index: %d", idx)
+	}
+
+	if SupportNegativeIndices {
+		if idx < -len(ary) {
+			return fmt.Errorf("Unable to access invalid index: %d", idx)
+		}
+
+		if idx < 0 {
+			idx += len(ary)
+		}
+	}
+
+	copy(ary[0:idx], cur[0:idx])
+	ary[idx] = val
+	copy(ary[idx+1:], cur[idx:])
+
+	*d = ary
+	return nil
+}
+
+func (d *partialArray) get(key string) (*lazyNode, error) {
+	idx, err := strconv.Atoi(key)
+
+	if err != nil {
+		return nil, err
+	}
+
+	if idx >= len(*d) {
+		return nil, fmt.Errorf("Unable to access invalid index: %d", idx)
+	}
+
+	return (*d)[idx], nil
+}
+
+func (d *partialArray) remove(key string) error {
+	idx, err := strconv.Atoi(key)
+	if err != nil {
+		return err
+	}
+
+	cur := *d
+
+	if idx >= len(cur) {
+		return fmt.Errorf("Unable to access invalid index: %d", idx)
+	}
+
+	if SupportNegativeIndices {
+		if idx < -len(cur) {
+			return fmt.Errorf("Unable to access invalid index: %d", idx)
+		}
+
+		if idx < 0 {
+			idx += len(cur)
+		}
+	}
+
+	ary := make([]*lazyNode, len(cur)-1)
+
+	copy(ary[0:idx], cur[0:idx])
+	copy(ary[idx:], cur[idx+1:])
+
+	*d = ary
+	return nil
+
+}
+
+func (p Patch) add(doc *container, op operation) error {
+	path := op.path()
+
+	con, key := findObject(doc, path)
+
+	if con == nil {
+		return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: \"%s\"", path)
+	}
+
+	return con.add(key, op.value())
+}
+
+func (p Patch) remove(doc *container, op operation) error {
+	path := op.path()
+
+	con, key := findObject(doc, path)
+
+	if con == nil {
+		return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: \"%s\"", path)
+	}
+
+	return con.remove(key)
+}
+
+func (p Patch) replace(doc *container, op operation) error {
+	path := op.path()
+
+	con, key := findObject(doc, path)
+
+	if con == nil {
+		return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path)
+	}
+
+	_, ok := con.get(key)
+	if ok != nil {
+		return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", path)
+	}
+
+	return con.set(key, op.value())
+}
+
+func (p Patch) move(doc *container, op operation) error {
+	from := op.from()
+
+	con, key := findObject(doc, from)
+
+	if con == nil {
+		return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from)
+	}
+
+	val, err := con.get(key)
+	if err != nil {
+		return err
+	}
+
+	err = con.remove(key)
+	if err != nil {
+		return err
+	}
+
+	path := op.path()
+
+	con, key = findObject(doc, path)
+
+	if con == nil {
+		return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path)
+	}
+
+	return con.set(key, val)
+}
+
+func (p Patch) test(doc *container, op operation) error {
+	path := op.path()
+
+	con, key := findObject(doc, path)
+
+	if con == nil {
+		return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path)
+	}
+
+	val, err := con.get(key)
+
+	if err != nil {
+		return err
+	}
+
+	if val == nil {
+		if op.value().raw == nil {
+			return nil
+		}
+		return fmt.Errorf("Testing value %s failed", path)
+	} else if op.value() == nil {
+		return fmt.Errorf("Testing value %s failed", path)
+	}
+
+	if val.equal(op.value()) {
+		return nil
+	}
+
+	return fmt.Errorf("Testing value %s failed", path)
+}
+
+func (p Patch) copy(doc *container, op operation) error {
+	from := op.from()
+
+	con, key := findObject(doc, from)
+
+	if con == nil {
+		return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing from path: %s", from)
+	}
+
+	val, err := con.get(key)
+	if err != nil {
+		return err
+	}
+
+	path := op.path()
+
+	con, key = findObject(doc, path)
+
+	if con == nil {
+		return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path)
+	}
+
+	return con.set(key, val)
+}
+
+// Equal indicates if the 2 JSON documents are structurally equal.
+func Equal(a, b []byte) bool {
+	ra := make(json.RawMessage, len(a))
+	copy(ra, a)
+	la := newLazyNode(&ra)
+
+	rb := make(json.RawMessage, len(b))
+	copy(rb, b)
+	lb := newLazyNode(&rb)
+
+	return la.equal(lb)
+}
+
+// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
+func DecodePatch(buf []byte) (Patch, error) {
+	var p Patch
+
+	err := json.Unmarshal(buf, &p)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return p, nil
+}
+
+// Apply mutates a JSON document according to the patch, and returns the new
+// document.
+func (p Patch) Apply(doc []byte) ([]byte, error) {
+	return p.ApplyIndent(doc, "")
+}
+
+// ApplyIndent mutates a JSON document according to the patch, and returns the new
+// document indented.
+func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
+	var pd container
+	if doc[0] == '[' {
+		pd = &partialArray{}
+	} else {
+		pd = &partialDoc{}
+	}
+
+	err := json.Unmarshal(doc, pd)
+
+	if err != nil {
+		return nil, err
+	}
+
+	err = nil
+
+	for _, op := range p {
+		switch op.kind() {
+		case "add":
+			err = p.add(&pd, op)
+		case "remove":
+			err = p.remove(&pd, op)
+		case "replace":
+			err = p.replace(&pd, op)
+		case "move":
+			err = p.move(&pd, op)
+		case "test":
+			err = p.test(&pd, op)
+		case "copy":
+			err = p.copy(&pd, op)
+		default:
+			err = fmt.Errorf("Unexpected kind: %s", op.kind())
+		}
+
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if indent != "" {
+		return json.MarshalIndent(pd, "", indent)
+	}
+
+	return json.Marshal(pd)
+}
+
+// From http://tools.ietf.org/html/rfc6901#section-4 :
+//
+// Evaluation of each reference token begins by decoding any escaped
+// character sequence.  This is performed by first transforming any
+// occurrence of the sequence '~1' to '/', and then transforming any
+// occurrence of the sequence '~0' to '~'.
+
+var (
+	rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
+)
+
+func decodePatchKey(k string) string {
+	return rfc6901Decoder.Replace(k)
+}
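
A minimal sketch of the RFC 6902 side defined above (DecodePatch plus
Apply/ApplyIndent). The document and operations are hypothetical; note how the
"~1" escape in the pointer is resolved by decodePatchKey:

    package main

    import (
    	"fmt"

    	jsonpatch "github.com/evanphx/json-patch"
    )

    func main() {
    	doc := []byte(`{"spec":{"traits":{"knative/enabled":false}},"labels":["camel"]}`)

    	// "~1" decodes to "/" inside a reference token (RFC 6901).
    	ops := []byte(`[
    		{"op":"replace","path":"/spec/traits/knative~1enabled","value":true},
    		{"op":"add","path":"/labels/-","value":"knative"}
    	]`)

    	patch, err := jsonpatch.DecodePatch(ops)
    	if err != nil {
    		panic(err)
    	}

    	out, err := patch.ApplyIndent(doc, "  ")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(out))
    }
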
diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE
new file mode 100644
index 0000000..32017f8
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2017 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go
new file mode 100644
index 0000000..7e215f2
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/compare.go
@@ -0,0 +1,553 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// Package cmp determines equality of values.
+//
+// This package is intended to be a more powerful and safer alternative to
+// reflect.DeepEqual for comparing whether two values are semantically equal.
+//
+// The primary features of cmp are:
+//
+// • When the default behavior of equality does not suit the needs of the test,
+// custom equality functions can override the equality operation.
+// For example, an equality function may report floats as equal so long as they
+// are within some tolerance of each other.
+//
+// • Types that have an Equal method may use that method to determine equality.
+// This allows package authors to determine the equality operation for the types
+// that they define.
+//
+// • If no custom equality functions are used and no Equal method is defined,
+// equality is determined by recursively comparing the primitive kinds on both
+// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
+// fields are not compared by default; they result in panics unless suppressed
+// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared
+// using the AllowUnexported option.
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/google/go-cmp/cmp/internal/diff"
+	"github.com/google/go-cmp/cmp/internal/function"
+	"github.com/google/go-cmp/cmp/internal/value"
+)
+
+// BUG(dsnet): Maps with keys containing NaN values cannot be properly compared due to
+// the reflection package's inability to retrieve such entries. Equal will panic
+// anytime it comes across a NaN key, but this behavior may change.
+//
+// See https://golang.org/issue/11104 for more details.
+
+var nothing = reflect.Value{}
+
+// Equal reports whether x and y are equal by recursively applying the
+// following rules in the given order to x and y and all of their sub-values:
+//
+// • If two values are not of the same type, then they are never equal
+// and the overall result is false.
+//
+// • Let S be the set of all Ignore, Transformer, and Comparer options that
+// remain after applying all path filters, value filters, and type filters.
+// If at least one Ignore exists in S, then the comparison is ignored.
+// If the number of Transformer and Comparer options in S is greater than one,
+// then Equal panics because it is ambiguous which option to use.
+// If S contains a single Transformer, then use that to transform the current
+// values and recursively call Equal on the output values.
+// If S contains a single Comparer, then use that to compare the current values.
+// Otherwise, evaluation proceeds to the next rule.
+//
+// • If the values have an Equal method of the form "(T) Equal(T) bool" or
+// "(T) Equal(I) bool" where T is assignable to I, then use the result of
+// x.Equal(y) even if x or y is nil.
+// Otherwise, no such method exists and evaluation proceeds to the next rule.
+//
+// • Lastly, try to compare x and y based on their basic kinds.
+// Simple kinds like booleans, integers, floats, complex numbers, strings, and
+// channels are compared using the equivalent of the == operator in Go.
+// Functions are only equal if they are both nil, otherwise they are unequal.
+// Pointers are equal if the underlying values they point to are also equal.
+// Interfaces are equal if their underlying concrete values are also equal.
+//
+// Structs are equal if all of their fields are equal. If a struct contains
+// unexported fields, Equal panics unless the AllowUnexported option is used or
+// an Ignore option (e.g., cmpopts.IgnoreUnexported) ignores that field.
+//
+// Arrays, slices, and maps are equal if they are both nil or both non-nil
+// with the same length and the elements at each index or key are equal.
+// Note that a non-nil empty slice and a nil slice are not equal.
+// To equate empty slices and maps, consider using cmpopts.EquateEmpty.
+// Map keys are equal according to the == operator.
+// To use custom comparisons for map keys, consider using cmpopts.SortMaps.
+func Equal(x, y interface{}, opts ...Option) bool {
+	s := newState(opts)
+	s.compareAny(reflect.ValueOf(x), reflect.ValueOf(y))
+	return s.result.Equal()
+}
+
+// Diff returns a human-readable report of the differences between two values.
+// It returns an empty string if and only if Equal returns true for the same
+// input values and options. The output string will use the "-" symbol to
+// indicate elements removed from x, and the "+" symbol to indicate elements
+// added to y.
+//
+// Do not depend on this output being stable.
+func Diff(x, y interface{}, opts ...Option) string {
+	r := new(defaultReporter)
+	opts = Options{Options(opts), r}
+	eq := Equal(x, y, opts...)
+	d := r.String()
+	if (d == "") != eq {
+		panic("inconsistent difference and equality results")
+	}
+	return d
+}
+
+type state struct {
+	// These fields represent the "comparison state".
+	// Calling statelessCompare must not result in observable changes to these.
+	result   diff.Result // The current result of comparison
+	curPath  Path        // The current path in the value tree
+	reporter reporter    // Optional reporter used for difference formatting
+
+	// dynChecker triggers pseudo-random checks for option correctness.
+	// It is safe for statelessCompare to mutate this value.
+	dynChecker dynChecker
+
+	// These fields, once set by processOption, will not change.
+	exporters map[reflect.Type]bool // Set of structs with unexported field visibility
+	opts      Options               // List of all fundamental and filter options
+}
+
+func newState(opts []Option) *state {
+	s := new(state)
+	for _, opt := range opts {
+		s.processOption(opt)
+	}
+	return s
+}
+
+func (s *state) processOption(opt Option) {
+	switch opt := opt.(type) {
+	case nil:
+	case Options:
+		for _, o := range opt {
+			s.processOption(o)
+		}
+	case coreOption:
+		type filtered interface {
+			isFiltered() bool
+		}
+		if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
+			panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
+		}
+		s.opts = append(s.opts, opt)
+	case visibleStructs:
+		if s.exporters == nil {
+			s.exporters = make(map[reflect.Type]bool)
+		}
+		for t := range opt {
+			s.exporters[t] = true
+		}
+	case reporter:
+		if s.reporter != nil {
+			panic("difference reporter already registered")
+		}
+		s.reporter = opt
+	default:
+		panic(fmt.Sprintf("unknown option %T", opt))
+	}
+}
+
+// statelessCompare compares two values and returns the result.
+// This function is stateless in that it does not alter the current result,
+// or output to any registered reporters.
+func (s *state) statelessCompare(vx, vy reflect.Value) diff.Result {
+	// We do not save and restore the curPath because all of the compareX
+	// methods should properly push and pop from the path.
+	// It is an implementation bug if the contents of curPath differs from
+	// when calling this function to when returning from it.
+
+	oldResult, oldReporter := s.result, s.reporter
+	s.result = diff.Result{} // Reset result
+	s.reporter = nil         // Remove reporter to avoid spurious printouts
+	s.compareAny(vx, vy)
+	res := s.result
+	s.result, s.reporter = oldResult, oldReporter
+	return res
+}
+
+func (s *state) compareAny(vx, vy reflect.Value) {
+	// TODO: Support cyclic data structures.
+
+	// Rule 0: Differing types are never equal.
+	if !vx.IsValid() || !vy.IsValid() {
+		s.report(vx.IsValid() == vy.IsValid(), vx, vy)
+		return
+	}
+	if vx.Type() != vy.Type() {
+		s.report(false, vx, vy) // Possible for path to be empty
+		return
+	}
+	t := vx.Type()
+	if len(s.curPath) == 0 {
+		s.curPath.push(&pathStep{typ: t})
+		defer s.curPath.pop()
+	}
+	vx, vy = s.tryExporting(vx, vy)
+
+	// Rule 1: Check whether an option applies on this node in the value tree.
+	if s.tryOptions(vx, vy, t) {
+		return
+	}
+
+	// Rule 2: Check whether the type has a valid Equal method.
+	if s.tryMethod(vx, vy, t) {
+		return
+	}
+
+	// Rule 3: Recursively descend into each value's underlying kind.
+	switch t.Kind() {
+	case reflect.Bool:
+		s.report(vx.Bool() == vy.Bool(), vx, vy)
+		return
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		s.report(vx.Int() == vy.Int(), vx, vy)
+		return
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		s.report(vx.Uint() == vy.Uint(), vx, vy)
+		return
+	case reflect.Float32, reflect.Float64:
+		s.report(vx.Float() == vy.Float(), vx, vy)
+		return
+	case reflect.Complex64, reflect.Complex128:
+		s.report(vx.Complex() == vy.Complex(), vx, vy)
+		return
+	case reflect.String:
+		s.report(vx.String() == vy.String(), vx, vy)
+		return
+	case reflect.Chan, reflect.UnsafePointer:
+		s.report(vx.Pointer() == vy.Pointer(), vx, vy)
+		return
+	case reflect.Func:
+		s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+		return
+	case reflect.Ptr:
+		if vx.IsNil() || vy.IsNil() {
+			s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+			return
+		}
+		s.curPath.push(&indirect{pathStep{t.Elem()}})
+		defer s.curPath.pop()
+		s.compareAny(vx.Elem(), vy.Elem())
+		return
+	case reflect.Interface:
+		if vx.IsNil() || vy.IsNil() {
+			s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+			return
+		}
+		if vx.Elem().Type() != vy.Elem().Type() {
+			s.report(false, vx.Elem(), vy.Elem())
+			return
+		}
+		s.curPath.push(&typeAssertion{pathStep{vx.Elem().Type()}})
+		defer s.curPath.pop()
+		s.compareAny(vx.Elem(), vy.Elem())
+		return
+	case reflect.Slice:
+		if vx.IsNil() || vy.IsNil() {
+			s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+			return
+		}
+		fallthrough
+	case reflect.Array:
+		s.compareArray(vx, vy, t)
+		return
+	case reflect.Map:
+		s.compareMap(vx, vy, t)
+		return
+	case reflect.Struct:
+		s.compareStruct(vx, vy, t)
+		return
+	default:
+		panic(fmt.Sprintf("%v kind not handled", t.Kind()))
+	}
+}
+
+func (s *state) tryExporting(vx, vy reflect.Value) (reflect.Value, reflect.Value) {
+	if sf, ok := s.curPath[len(s.curPath)-1].(*structField); ok && sf.unexported {
+		if sf.force {
+			// Use unsafe pointer arithmetic to get read-write access to an
+			// unexported field in the struct.
+			vx = unsafeRetrieveField(sf.pvx, sf.field)
+			vy = unsafeRetrieveField(sf.pvy, sf.field)
+		} else {
+			// We are not allowed to export the value, so invalidate them
+			// so that tryOptions can panic later if not explicitly ignored.
+			vx = nothing
+			vy = nothing
+		}
+	}
+	return vx, vy
+}
+
+func (s *state) tryOptions(vx, vy reflect.Value, t reflect.Type) bool {
+	// If there were no FilterValues, we will not detect invalid inputs,
+	// so manually check for them and append invalid if necessary.
+	// We still evaluate the options since an ignore can override invalid.
+	opts := s.opts
+	if !vx.IsValid() || !vy.IsValid() {
+		opts = Options{opts, invalid{}}
+	}
+
+	// Evaluate all filters and apply the remaining options.
+	if opt := opts.filter(s, vx, vy, t); opt != nil {
+		opt.apply(s, vx, vy)
+		return true
+	}
+	return false
+}
+
+func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool {
+	// Check if this type even has an Equal method.
+	m, ok := t.MethodByName("Equal")
+	if !ok || !function.IsType(m.Type, function.EqualAssignable) {
+		return false
+	}
+
+	eq := s.callTTBFunc(m.Func, vx, vy)
+	s.report(eq, vx, vy)
+	return true
+}
+
+func (s *state) callTRFunc(f, v reflect.Value) reflect.Value {
+	v = sanitizeValue(v, f.Type().In(0))
+	if !s.dynChecker.Next() {
+		return f.Call([]reflect.Value{v})[0]
+	}
+
+	// Run the function twice and ensure that we get the same results back.
+	// We run in goroutines so that the race detector (if enabled) can detect
+	// unsafe mutations to the input.
+	c := make(chan reflect.Value)
+	go detectRaces(c, f, v)
+	want := f.Call([]reflect.Value{v})[0]
+	if got := <-c; !s.statelessCompare(got, want).Equal() {
+		// To avoid false-positives with non-reflexive equality operations,
+		// we sanity check whether a value is equal to itself.
+		if !s.statelessCompare(want, want).Equal() {
+			return want
+		}
+		fn := getFuncName(f.Pointer())
+		panic(fmt.Sprintf("non-deterministic function detected: %s", fn))
+	}
+	return want
+}
+
+func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
+	x = sanitizeValue(x, f.Type().In(0))
+	y = sanitizeValue(y, f.Type().In(1))
+	if !s.dynChecker.Next() {
+		return f.Call([]reflect.Value{x, y})[0].Bool()
+	}
+
+	// Swapping the input arguments is sufficient to check that
+	// f is symmetric and deterministic.
+	// We run in goroutines so that the race detector (if enabled) can detect
+	// unsafe mutations to the input.
+	c := make(chan reflect.Value)
+	go detectRaces(c, f, y, x)
+	want := f.Call([]reflect.Value{x, y})[0].Bool()
+	if got := <-c; !got.IsValid() || got.Bool() != want {
+		fn := getFuncName(f.Pointer())
+		panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", fn))
+	}
+	return want
+}
+
+func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
+	var ret reflect.Value
+	defer func() {
+		recover() // Ignore panics, let the other call to f panic instead
+		c <- ret
+	}()
+	ret = f.Call(vs)[0]
+}
+
+// sanitizeValue converts nil interfaces of type T to those of type R,
+// assuming that T is assignable to R.
+// Otherwise, it returns the input value as is.
+func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
+	// TODO(dsnet): Remove this hacky workaround.
+	// See https://golang.org/issue/22143
+	if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
+		return reflect.New(t).Elem()
+	}
+	return v
+}
+
+func (s *state) compareArray(vx, vy reflect.Value, t reflect.Type) {
+	step := &sliceIndex{pathStep{t.Elem()}, 0, 0}
+	s.curPath.push(step)
+
+	// Compute an edit-script for slices vx and vy.
+	es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
+		step.xkey, step.ykey = ix, iy
+		return s.statelessCompare(vx.Index(ix), vy.Index(iy))
+	})
+
+	// Report the entire slice as is if the arrays are of primitive kind,
+	// and the arrays are different enough.
+	isPrimitive := false
+	switch t.Elem().Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+		reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+		isPrimitive = true
+	}
+	if isPrimitive && es.Dist() > (vx.Len()+vy.Len())/4 {
+		s.curPath.pop() // Pop first since we are reporting the whole slice
+		s.report(false, vx, vy)
+		return
+	}
+
+	// Replay the edit-script.
+	var ix, iy int
+	for _, e := range es {
+		switch e {
+		case diff.UniqueX:
+			step.xkey, step.ykey = ix, -1
+			s.report(false, vx.Index(ix), nothing)
+			ix++
+		case diff.UniqueY:
+			step.xkey, step.ykey = -1, iy
+			s.report(false, nothing, vy.Index(iy))
+			iy++
+		default:
+			step.xkey, step.ykey = ix, iy
+			if e == diff.Identity {
+				s.report(true, vx.Index(ix), vy.Index(iy))
+			} else {
+				s.compareAny(vx.Index(ix), vy.Index(iy))
+			}
+			ix++
+			iy++
+		}
+	}
+	s.curPath.pop()
+	return
+}
+
+func (s *state) compareMap(vx, vy reflect.Value, t reflect.Type) {
+	if vx.IsNil() || vy.IsNil() {
+		s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+		return
+	}
+
+	// We combine and sort the two map keys so that we can perform the
+	// comparisons in a deterministic order.
+	step := &mapIndex{pathStep: pathStep{t.Elem()}}
+	s.curPath.push(step)
+	defer s.curPath.pop()
+	for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
+		step.key = k
+		vvx := vx.MapIndex(k)
+		vvy := vy.MapIndex(k)
+		switch {
+		case vvx.IsValid() && vvy.IsValid():
+			s.compareAny(vvx, vvy)
+		case vvx.IsValid() && !vvy.IsValid():
+			s.report(false, vvx, nothing)
+		case !vvx.IsValid() && vvy.IsValid():
+			s.report(false, nothing, vvy)
+		default:
+			// It is possible for both vvx and vvy to be invalid if the
+			// key contained a NaN value in it. There is no way in
+			// reflection to be able to retrieve these values.
+			// See https://golang.org/issue/11104
+			panic(fmt.Sprintf("%#v has map key with NaNs", s.curPath))
+		}
+	}
+}
+
+func (s *state) compareStruct(vx, vy reflect.Value, t reflect.Type) {
+	var vax, vay reflect.Value // Addressable versions of vx and vy
+
+	step := &structField{}
+	s.curPath.push(step)
+	defer s.curPath.pop()
+	for i := 0; i < t.NumField(); i++ {
+		vvx := vx.Field(i)
+		vvy := vy.Field(i)
+		step.typ = t.Field(i).Type
+		step.name = t.Field(i).Name
+		step.idx = i
+		step.unexported = !isExported(step.name)
+		if step.unexported {
+			// Defer checking of unexported fields until later to give an
+			// Ignore a chance to ignore the field.
+			if !vax.IsValid() || !vay.IsValid() {
+				// For unsafeRetrieveField to work, the parent struct must
+				// be addressable. Create a new copy of the values if
+				// necessary to make them addressable.
+				vax = makeAddressable(vx)
+				vay = makeAddressable(vy)
+			}
+			step.force = s.exporters[t]
+			step.pvx = vax
+			step.pvy = vay
+			step.field = t.Field(i)
+		}
+		s.compareAny(vvx, vvy)
+	}
+}
+
+// report records the result of a single comparison.
+// It also calls Report if any reporter is registered.
+func (s *state) report(eq bool, vx, vy reflect.Value) {
+	if eq {
+		s.result.NSame++
+	} else {
+		s.result.NDiff++
+	}
+	if s.reporter != nil {
+		s.reporter.Report(vx, vy, eq, s.curPath)
+	}
+}
+
+// dynChecker tracks the state needed to periodically perform checks that
+// user provided functions are symmetric and deterministic.
+// The zero value is safe for immediate use.
+type dynChecker struct{ curr, next int }
+
+// Next increments the state and reports whether a check should be performed.
+//
+// Checks occur every Nth function call, where N is a triangular number:
+//	0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
+// See https://en.wikipedia.org/wiki/Triangular_number
+//
+// This sequence ensures that the cost of checks drops significantly as
+// the number of function calls grows larger.
+func (dc *dynChecker) Next() bool {
+	ok := dc.curr == dc.next
+	if ok {
+		dc.curr = 0
+		dc.next++
+	}
+	dc.curr++
+	return ok
+}
+
+// makeAddressable returns a value that is always addressable.
+// It returns the input verbatim if it is already addressable,
+// otherwise it creates a new value and returns an addressable copy.
+func makeAddressable(v reflect.Value) reflect.Value {
+	if v.CanAddr() {
+		return v
+	}
+	vc := reflect.New(v.Type()).Elem()
+	vc.Set(v)
+	return vc
+}
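
A minimal sketch of the exported entry points defined above (cmp.Equal for a
boolean answer, cmp.Diff for a human-readable report). The IntegrationSpec
struct is purely illustrative:

    package main

    import (
    	"fmt"

    	"github.com/google/go-cmp/cmp"
    )

    type IntegrationSpec struct {
    	Replicas int
    	Traits   map[string]string
    }

    func main() {
    	x := IntegrationSpec{Replicas: 1, Traits: map[string]string{"knative": "enabled"}}
    	y := IntegrationSpec{Replicas: 3, Traits: map[string]string{"knative": "enabled"}}

    	// Only exported fields are involved here, so no AllowUnexported option is needed.
    	fmt.Println(cmp.Equal(x, y)) // false
    	fmt.Println(cmp.Diff(x, y))  // "-" marks values only in x, "+" values only in y
    }
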
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
new file mode 100644
index 0000000..42afa49
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
@@ -0,0 +1,17 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build !debug
+
+package diff
+
+var debug debugger
+
+type debugger struct{}
+
+func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
+	return f
+}
+func (debugger) Update() {}
+func (debugger) Finish() {}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
new file mode 100644
index 0000000..fd9f7f1
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
@@ -0,0 +1,122 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build debug
+
+package diff
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+)
+
+// The algorithm can be seen running in real-time by enabling debugging:
+//	go test -tags=debug -v
+//
+// Example output:
+//	=== RUN   TestDifference/#34
+//	┌───────────────────────────────┐
+//	│ \ · · · · · · · · · · · · · · │
+//	│ · # · · · · · · · · · · · · · │
+//	│ · \ · · · · · · · · · · · · · │
+//	│ · · \ · · · · · · · · · · · · │
+//	│ · · · X # · · · · · · · · · · │
+//	│ · · · # \ · · · · · · · · · · │
+//	│ · · · · · # # · · · · · · · · │
+//	│ · · · · · # \ · · · · · · · · │
+//	│ · · · · · · · \ · · · · · · · │
+//	│ · · · · · · · · \ · · · · · · │
+//	│ · · · · · · · · · \ · · · · · │
+//	│ · · · · · · · · · · \ · · # · │
+//	│ · · · · · · · · · · · \ # # · │
+//	│ · · · · · · · · · · · # # # · │
+//	│ · · · · · · · · · · # # # # · │
+//	│ · · · · · · · · · # # # # # · │
+//	│ · · · · · · · · · · · · · · \ │
+//	└───────────────────────────────┘
+//	[.Y..M.XY......YXYXY.|]
+//
+// The grid represents the edit-graph where the horizontal axis represents
+// list X and the vertical axis represents list Y. The start of the two lists
+// is the top-left, while the ends are the bottom-right. The '·' represents
+// an unexplored node in the graph. The '\' indicates that the two symbols
+// from list X and Y are equal. The 'X' indicates that two symbols are similar
+// (but not exactly equal) to each other. The '#' indicates that the two symbols
+// are different (and not similar). The algorithm traverses this graph trying to
+// make the paths starting in the top-left and the bottom-right connect.
+//
+// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
+// the currently established path from the forward and reverse searches,
+// separated by a '|' character.
+
+const (
+	updateDelay  = 100 * time.Millisecond
+	finishDelay  = 500 * time.Millisecond
+	ansiTerminal = true // ANSI escape codes used to move terminal cursor
+)
+
+var debug debugger
+
+type debugger struct {
+	sync.Mutex
+	p1, p2           EditScript
+	fwdPath, revPath *EditScript
+	grid             []byte
+	lines            int
+}
+
+func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
+	dbg.Lock()
+	dbg.fwdPath, dbg.revPath = p1, p2
+	top := "┌─" + strings.Repeat("──", nx) + "┐\n"
+	row := "│ " + strings.Repeat("· ", nx) + "│\n"
+	btm := "└─" + strings.Repeat("──", nx) + "┘\n"
+	dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
+	dbg.lines = strings.Count(dbg.String(), "\n")
+	fmt.Print(dbg)
+
+	// Wrap the EqualFunc so that we can intercept each result.
+	return func(ix, iy int) (r Result) {
+		cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
+		for i := range cell {
+			cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
+		}
+		switch r = f(ix, iy); {
+		case r.Equal():
+			cell[0] = '\\'
+		case r.Similar():
+			cell[0] = 'X'
+		default:
+			cell[0] = '#'
+		}
+		return
+	}
+}
+
+func (dbg *debugger) Update() {
+	dbg.print(updateDelay)
+}
+
+func (dbg *debugger) Finish() {
+	dbg.print(finishDelay)
+	dbg.Unlock()
+}
+
+func (dbg *debugger) String() string {
+	dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
+	for i := len(*dbg.revPath) - 1; i >= 0; i-- {
+		dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
+	}
+	return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
+}
+
+func (dbg *debugger) print(d time.Duration) {
+	if ansiTerminal {
+		fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
+	}
+	fmt.Print(dbg)
+	time.Sleep(d)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
new file mode 100644
index 0000000..260befe
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
@@ -0,0 +1,363 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// Package diff implements an algorithm for producing edit-scripts.
+// The edit-script is a sequence of operations needed to transform one list
+// of symbols into another (or vice-versa). The edits allowed are insertions,
+// deletions, and modifications. The summation of all edits is called the
+// Levenshtein distance as this problem is well-known in computer science.
+//
+// This package prioritizes performance over accuracy. That is, the run time
+// is more important than obtaining a minimal Levenshtein distance.
+package diff
+
+// EditType represents a single operation within an edit-script.
+type EditType uint8
+
+const (
+	// Identity indicates that a symbol pair is identical in both list X and Y.
+	Identity EditType = iota
+	// UniqueX indicates that a symbol only exists in X and not Y.
+	UniqueX
+	// UniqueY indicates that a symbol only exists in Y and not X.
+	UniqueY
+	// Modified indicates that a symbol pair is a modification of each other.
+	Modified
+)
+
+// EditScript represents the series of differences between two lists.
+type EditScript []EditType
+
+// String returns a human-readable string representing the edit-script where
+// Identity, UniqueX, UniqueY, and Modified are represented by the
+// '.', 'X', 'Y', and 'M' characters, respectively.
+func (es EditScript) String() string {
+	b := make([]byte, len(es))
+	for i, e := range es {
+		switch e {
+		case Identity:
+			b[i] = '.'
+		case UniqueX:
+			b[i] = 'X'
+		case UniqueY:
+			b[i] = 'Y'
+		case Modified:
+			b[i] = 'M'
+		default:
+			panic("invalid edit-type")
+		}
+	}
+	return string(b)
+}
+
+// stats returns a histogram of the number of each type of edit operation.
+func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
+	for _, e := range es {
+		switch e {
+		case Identity:
+			s.NI++
+		case UniqueX:
+			s.NX++
+		case UniqueY:
+			s.NY++
+		case Modified:
+			s.NM++
+		default:
+			panic("invalid edit-type")
+		}
+	}
+	return
+}
+
+// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
+// lists X and Y are equal.
+func (es EditScript) Dist() int { return len(es) - es.stats().NI }
+
+// LenX is the length of the X list.
+func (es EditScript) LenX() int { return len(es) - es.stats().NY }
+
+// LenY is the length of the Y list.
+func (es EditScript) LenY() int { return len(es) - es.stats().NX }
+
+// EqualFunc reports whether the symbols at indexes ix and iy are equal.
+// When called by Difference, the index is guaranteed to be within nx and ny.
+type EqualFunc func(ix int, iy int) Result
+
+// Result is the result of comparison.
+// NSame is the number of sub-elements that are equal.
+// NDiff is the number of sub-elements that are not equal.
+type Result struct{ NSame, NDiff int }
+
+// Equal indicates whether the symbols are equal. Two symbols are equal
+// if and only if NDiff == 0. If Equal, then they are also Similar.
+func (r Result) Equal() bool { return r.NDiff == 0 }
+
+// Similar indicates whether two symbols are similar and may be represented
+// by using the Modified type. As a special case, we consider binary comparisons
+// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
+//
+// The exact ratio of NSame to NDiff to determine similarity may change.
+func (r Result) Similar() bool {
+	// Use NSame+1 to offset NSame so that binary comparisons are similar.
+	return r.NSame+1 >= r.NDiff
+}
+
+// Difference reports whether two lists of lengths nx and ny are equal
+// given the definition of equality provided as f.
+//
+// This function returns an edit-script, which is a sequence of operations
+// needed to convert one list into the other. The following invariants for
+// the edit-script are maintained:
+//	• eq == (es.Dist()==0)
+//	• nx == es.LenX()
+//	• ny == es.LenY()
+//
+// This algorithm is not guaranteed to be an optimal solution (i.e., one that
+// produces an edit-script with a minimal Levenshtein distance). This algorithm
+// favors performance over optimality. The exact output is not guaranteed to
+// be stable and may change over time.
+func Difference(nx, ny int, f EqualFunc) (es EditScript) {
+	// This algorithm is based on traversing what is known as an "edit-graph".
+	// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
+	// by Eugene W. Myers. Since D can be as large as N itself, this is
+	// effectively O(N^2). Unlike the algorithm from that paper, we are not
+	// interested in the optimal path, but at least some "decent" path.
+	//
+	// For example, let X and Y be lists of symbols:
+	//	X = [A B C A B B A]
+	//	Y = [C B A B A C]
+	//
+	// The edit-graph can be drawn as the following:
+	//	   A B C A B B A
+	//	  ┌─────────────┐
+	//	C │_|_|\|_|_|_|_│ 0
+	//	B │_|\|_|_|\|\|_│ 1
+	//	A │\|_|_|\|_|_|\│ 2
+	//	B │_|\|_|_|\|\|_│ 3
+	//	A │\|_|_|\|_|_|\│ 4
+	//	C │ | |\| | | | │ 5
+	//	  └─────────────┘ 6
+	//	   0 1 2 3 4 5 6 7
+	//
+	// List X is written along the horizontal axis, while list Y is written
+	// along the vertical axis. At any point on this grid, if the symbol in
+	// list X matches the corresponding symbol in list Y, then a '\' is drawn.
+	// The goal of any minimal edit-script algorithm is to find a path from the
+	// top-left corner to the bottom-right corner, while traveling through the
+	// fewest horizontal or vertical edges.
+	// A horizontal edge is equivalent to inserting a symbol from list X.
+	// A vertical edge is equivalent to inserting a symbol from list Y.
+	// A diagonal edge is equivalent to a matching symbol between both X and Y.
+
+	// Invariants:
+	//	• 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
+	//	• 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
+	//
+	// In general:
+	//	• fwdFrontier.X < revFrontier.X
+	//	• fwdFrontier.Y < revFrontier.Y
+	// Unless, it is time for the algorithm to terminate.
+	fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
+	revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
+	fwdFrontier := fwdPath.point // Forward search frontier
+	revFrontier := revPath.point // Reverse search frontier
+
+	// Search budget bounds the cost of searching for better paths.
+	// The longest sequence of non-matching symbols that can be tolerated is
+	// approximately the square-root of the search budget.
+	searchBudget := 4 * (nx + ny) // O(n)
+
+	// The algorithm below is a greedy, meet-in-the-middle algorithm for
+	// computing sub-optimal edit-scripts between two lists.
+	//
+	// The algorithm is approximately as follows:
+	//	• Searching for differences switches back-and-forth between
+	//	a search that starts at the beginning (the top-left corner), and
+	//	a search that starts at the end (the bottom-right corner). The goal of
+	//	the search is to connect with the search from the opposite corner.
+	//	• As we search, we build a path in a greedy manner, where the first
+	//	match seen is added to the path (this is sub-optimal, but provides a
+	//	decent result in practice). When matches are found, we try the next pair
+	//	of symbols in the lists and follow all matches as far as possible.
+	//	• When searching for matches, we search along a diagonal going through
+	//	the "frontier" point. If no matches are found, we advance the
+	//	frontier towards the opposite corner.
+	//	• This algorithm terminates when either the X coordinates or the
+	//	Y coordinates of the forward and reverse frontier points ever intersect.
+	//
+	// This algorithm is correct even if searching only in the forward direction
+	// or in the reverse direction. We do both because it is commonly observed
+	//	that two lists differ because elements were added to the front
+	// or end of the other list.
+	//
+	// Running the tests with the "debug" build tag prints a visualization of
+	// the algorithm running in real-time. This is educational for understanding
+	// how the algorithm works. See debug_enable.go.
+	f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
+	for {
+		// Forward search from the beginning.
+		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+			break
+		}
+		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+			// Search in a diagonal pattern for a match.
+			z := zigzag(i)
+			p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
+			switch {
+			case p.X >= revPath.X || p.Y < fwdPath.Y:
+				stop1 = true // Hit top-right corner
+			case p.Y >= revPath.Y || p.X < fwdPath.X:
+				stop2 = true // Hit bottom-left corner
+			case f(p.X, p.Y).Equal():
+				// Match found, so connect the path to this point.
+				fwdPath.connect(p, f)
+				fwdPath.append(Identity)
+				// Follow sequence of matches as far as possible.
+				for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+					if !f(fwdPath.X, fwdPath.Y).Equal() {
+						break
+					}
+					fwdPath.append(Identity)
+				}
+				fwdFrontier = fwdPath.point
+				stop1, stop2 = true, true
+			default:
+				searchBudget-- // Match not found
+			}
+			debug.Update()
+		}
+		// Advance the frontier towards reverse point.
+		if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
+			fwdFrontier.X++
+		} else {
+			fwdFrontier.Y++
+		}
+
+		// Reverse search from the end.
+		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+			break
+		}
+		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+			// Search in a diagonal pattern for a match.
+			z := zigzag(i)
+			p := point{revFrontier.X - z, revFrontier.Y + z}
+			switch {
+			case fwdPath.X >= p.X || revPath.Y < p.Y:
+				stop1 = true // Hit bottom-left corner
+			case fwdPath.Y >= p.Y || revPath.X < p.X:
+				stop2 = true // Hit top-right corner
+			case f(p.X-1, p.Y-1).Equal():
+				// Match found, so connect the path to this point.
+				revPath.connect(p, f)
+				revPath.append(Identity)
+				// Follow sequence of matches as far as possible.
+				for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+					if !f(revPath.X-1, revPath.Y-1).Equal() {
+						break
+					}
+					revPath.append(Identity)
+				}
+				revFrontier = revPath.point
+				stop1, stop2 = true, true
+			default:
+				searchBudget-- // Match not found
+			}
+			debug.Update()
+		}
+		// Advance the frontier towards forward point.
+		if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
+			revFrontier.X--
+		} else {
+			revFrontier.Y--
+		}
+	}
+
+	// Join the forward and reverse paths and then append the reverse path.
+	fwdPath.connect(revPath.point, f)
+	for i := len(revPath.es) - 1; i >= 0; i-- {
+		t := revPath.es[i]
+		revPath.es = revPath.es[:i]
+		fwdPath.append(t)
+	}
+	debug.Finish()
+	return fwdPath.es
+}
+
+type path struct {
+	dir   int // +1 if forward, -1 if reverse
+	point     // Leading point of the EditScript path
+	es    EditScript
+}
+
+// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
+// to the edit-script to connect p.point to dst.
+func (p *path) connect(dst point, f EqualFunc) {
+	if p.dir > 0 {
+		// Connect in forward direction.
+		for dst.X > p.X && dst.Y > p.Y {
+			switch r := f(p.X, p.Y); {
+			case r.Equal():
+				p.append(Identity)
+			case r.Similar():
+				p.append(Modified)
+			case dst.X-p.X >= dst.Y-p.Y:
+				p.append(UniqueX)
+			default:
+				p.append(UniqueY)
+			}
+		}
+		for dst.X > p.X {
+			p.append(UniqueX)
+		}
+		for dst.Y > p.Y {
+			p.append(UniqueY)
+		}
+	} else {
+		// Connect in reverse direction.
+		for p.X > dst.X && p.Y > dst.Y {
+			switch r := f(p.X-1, p.Y-1); {
+			case r.Equal():
+				p.append(Identity)
+			case r.Similar():
+				p.append(Modified)
+			case p.Y-dst.Y >= p.X-dst.X:
+				p.append(UniqueY)
+			default:
+				p.append(UniqueX)
+			}
+		}
+		for p.X > dst.X {
+			p.append(UniqueX)
+		}
+		for p.Y > dst.Y {
+			p.append(UniqueY)
+		}
+	}
+}
+
+func (p *path) append(t EditType) {
+	p.es = append(p.es, t)
+	switch t {
+	case Identity, Modified:
+		p.add(p.dir, p.dir)
+	case UniqueX:
+		p.add(p.dir, 0)
+	case UniqueY:
+		p.add(0, p.dir)
+	}
+	debug.Update()
+}
+
+type point struct{ X, Y int }
+
+func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
+
+// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
+//	[0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
+func zigzag(x int) int {
+	if x&1 != 0 {
+		x = ^x
+	}
+	return x >> 1
+}
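
A minimal sketch of the contract between cmp and diff.Difference above. Since
this is an internal package it can only be compiled from inside the go-cmp
module; the snippet is shown purely to make the EqualFunc/Result interplay
concrete:

    package diff_test

    import (
    	"fmt"

    	"github.com/google/go-cmp/cmp/internal/diff"
    )

    func Example() {
    	x := []string{"A", "B", "C", "A"}
    	y := []string{"C", "B", "A"}

    	es := diff.Difference(len(x), len(y), func(ix, iy int) diff.Result {
    		if x[ix] == y[iy] {
    			return diff.Result{NSame: 1} // identical symbols
    		}
    		return diff.Result{NDiff: 1} // different symbols
    	})

    	// '.' identical, 'X' only in x, 'Y' only in y, 'M' modified.
    	fmt.Println(es.String(), "dist:", es.Dist())
    }
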
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
new file mode 100644
index 0000000..4c35ff1
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
@@ -0,0 +1,49 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// Package function identifies function types.
+package function
+
+import "reflect"
+
+type funcType int
+
+const (
+	_ funcType = iota
+
+	ttbFunc // func(T, T) bool
+	tibFunc // func(T, I) bool
+	trFunc  // func(T) R
+
+	Equal           = ttbFunc // func(T, T) bool
+	EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
+	Transformer     = trFunc  // func(T) R
+	ValueFilter     = ttbFunc // func(T, T) bool
+	Less            = ttbFunc // func(T, T) bool
+)
+
+var boolType = reflect.TypeOf(true)
+
+// IsType reports whether the reflect.Type is of the specified function type.
+func IsType(t reflect.Type, ft funcType) bool {
+	if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
+		return false
+	}
+	ni, no := t.NumIn(), t.NumOut()
+	switch ft {
+	case ttbFunc: // func(T, T) bool
+		if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
+			return true
+		}
+	case tibFunc: // func(T, I) bool
+		if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
+			return true
+		}
+	case trFunc: // func(T) R
+		if ni == 1 && no == 1 {
+			return true
+		}
+	}
+	return false
+}
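
A minimal sketch of the shape checks performed by function.IsType (again an
internal package, shown only for illustration):

    package function_test

    import (
    	"fmt"
    	"reflect"

    	"github.com/google/go-cmp/cmp/internal/function"
    )

    func Example() {
    	equalInts := func(a, b int) bool { return a == b } // func(T, T) bool
    	toLen := func(s string) int { return len(s) }      // func(T) R

    	fmt.Println(function.IsType(reflect.TypeOf(equalInts), function.Equal))   // true
    	fmt.Println(function.IsType(reflect.TypeOf(toLen), function.Transformer)) // true
    	fmt.Println(function.IsType(reflect.TypeOf(toLen), function.Equal))       // false
    }
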
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go b/vendor/github.com/google/go-cmp/cmp/internal/value/format.go
new file mode 100644
index 0000000..657e508
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/format.go
@@ -0,0 +1,277 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// Package value provides functionality for reflect.Value types.
+package value
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+var stringerIface = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+
+// Format formats the value v as a string.
+//
+// This is similar to fmt.Sprintf("%+v", v) except this:
+//	* Prints the type unless it can be elided
+//	* Avoids printing struct fields that are zero
+//	* Prints a nil-slice as being nil, not empty
+//	* Prints map entries in deterministic order
+func Format(v reflect.Value, conf FormatConfig) string {
+	conf.printType = true
+	conf.followPointers = true
+	conf.realPointers = true
+	return formatAny(v, conf, nil)
+}
+
+type FormatConfig struct {
+	UseStringer        bool // Should the String method be used if available?
+	printType          bool // Should we print the type before the value?
+	PrintPrimitiveType bool // Should we print the type of primitives?
+	followPointers     bool // Should we recursively follow pointers?
+	realPointers       bool // Should we print the real address of pointers?
+}
+
+func formatAny(v reflect.Value, conf FormatConfig, visited map[uintptr]bool) string {
+	// TODO: Should this be a multi-line printout in certain situations?
+
+	if !v.IsValid() {
+		return "<non-existent>"
+	}
+	if conf.UseStringer && v.Type().Implements(stringerIface) && v.CanInterface() {
+		if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface) && v.IsNil() {
+			return "<nil>"
+		}
+
+		const stringerPrefix = "s" // Indicates that the String method was used
+		s := v.Interface().(fmt.Stringer).String()
+		return stringerPrefix + formatString(s)
+	}
+
+	switch v.Kind() {
+	case reflect.Bool:
+		return formatPrimitive(v.Type(), v.Bool(), conf)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return formatPrimitive(v.Type(), v.Int(), conf)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		if v.Type().PkgPath() == "" || v.Kind() == reflect.Uintptr {
+			// Unnamed uints are usually bytes or words, so use hexadecimal.
+			return formatPrimitive(v.Type(), formatHex(v.Uint()), conf)
+		}
+		return formatPrimitive(v.Type(), v.Uint(), conf)
+	case reflect.Float32, reflect.Float64:
+		return formatPrimitive(v.Type(), v.Float(), conf)
+	case reflect.Complex64, reflect.Complex128:
+		return formatPrimitive(v.Type(), v.Complex(), conf)
+	case reflect.String:
+		return formatPrimitive(v.Type(), formatString(v.String()), conf)
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		return formatPointer(v, conf)
+	case reflect.Ptr:
+		if v.IsNil() {
+			if conf.printType {
+				return fmt.Sprintf("(%v)(nil)", v.Type())
+			}
+			return "<nil>"
+		}
+		if visited[v.Pointer()] || !conf.followPointers {
+			return formatPointer(v, conf)
+		}
+		visited = insertPointer(visited, v.Pointer())
+		return "&" + formatAny(v.Elem(), conf, visited)
+	case reflect.Interface:
+		if v.IsNil() {
+			if conf.printType {
+				return fmt.Sprintf("%v(nil)", v.Type())
+			}
+			return "<nil>"
+		}
+		return formatAny(v.Elem(), conf, visited)
+	case reflect.Slice:
+		if v.IsNil() {
+			if conf.printType {
+				return fmt.Sprintf("%v(nil)", v.Type())
+			}
+			return "<nil>"
+		}
+		if visited[v.Pointer()] {
+			return formatPointer(v, conf)
+		}
+		visited = insertPointer(visited, v.Pointer())
+		fallthrough
+	case reflect.Array:
+		var ss []string
+		subConf := conf
+		subConf.printType = v.Type().Elem().Kind() == reflect.Interface
+		for i := 0; i < v.Len(); i++ {
+			s := formatAny(v.Index(i), subConf, visited)
+			ss = append(ss, s)
+		}
+		s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
+		if conf.printType {
+			return v.Type().String() + s
+		}
+		return s
+	case reflect.Map:
+		if v.IsNil() {
+			if conf.printType {
+				return fmt.Sprintf("%v(nil)", v.Type())
+			}
+			return "<nil>"
+		}
+		if visited[v.Pointer()] {
+			return formatPointer(v, conf)
+		}
+		visited = insertPointer(visited, v.Pointer())
+
+		var ss []string
+		keyConf, valConf := conf, conf
+		keyConf.printType = v.Type().Key().Kind() == reflect.Interface
+		keyConf.followPointers = false
+		valConf.printType = v.Type().Elem().Kind() == reflect.Interface
+		for _, k := range SortKeys(v.MapKeys()) {
+			sk := formatAny(k, keyConf, visited)
+			sv := formatAny(v.MapIndex(k), valConf, visited)
+			ss = append(ss, fmt.Sprintf("%s: %s", sk, sv))
+		}
+		s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
+		if conf.printType {
+			return v.Type().String() + s
+		}
+		return s
+	case reflect.Struct:
+		var ss []string
+		subConf := conf
+		subConf.printType = true
+		for i := 0; i < v.NumField(); i++ {
+			vv := v.Field(i)
+			if isZero(vv) {
+				continue // Elide zero value fields
+			}
+			name := v.Type().Field(i).Name
+			subConf.UseStringer = conf.UseStringer
+			s := formatAny(vv, subConf, visited)
+			ss = append(ss, fmt.Sprintf("%s: %s", name, s))
+		}
+		s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
+		if conf.printType {
+			return v.Type().String() + s
+		}
+		return s
+	default:
+		panic(fmt.Sprintf("%v kind not handled", v.Kind()))
+	}
+}
+
+func formatString(s string) string {
+	// Use the quoted string if it is the same length as a raw string literal.
+	// Otherwise, attempt to use the raw string form.
+	qs := strconv.Quote(s)
+	if len(qs) == 1+len(s)+1 {
+		return qs
+	}
+
+	// Disallow newlines to ensure output is a single line.
+	// Only allow printable runes for readability purposes.
+	rawInvalid := func(r rune) bool {
+		return r == '`' || r == '\n' || !unicode.IsPrint(r)
+	}
+	if strings.IndexFunc(s, rawInvalid) < 0 {
+		return "`" + s + "`"
+	}
+	return qs
+}
+
+func formatPrimitive(t reflect.Type, v interface{}, conf FormatConfig) string {
+	if conf.printType && (conf.PrintPrimitiveType || t.PkgPath() != "") {
+		return fmt.Sprintf("%v(%v)", t, v)
+	}
+	return fmt.Sprintf("%v", v)
+}
+
+func formatPointer(v reflect.Value, conf FormatConfig) string {
+	p := v.Pointer()
+	if !conf.realPointers {
+		p = 0 // For deterministic printing purposes
+	}
+	s := formatHex(uint64(p))
+	if conf.printType {
+		return fmt.Sprintf("(%v)(%s)", v.Type(), s)
+	}
+	return s
+}
+
+func formatHex(u uint64) string {
+	var f string
+	switch {
+	case u <= 0xff:
+		f = "0x%02x"
+	case u <= 0xffff:
+		f = "0x%04x"
+	case u <= 0xffffff:
+		f = "0x%06x"
+	case u <= 0xffffffff:
+		f = "0x%08x"
+	case u <= 0xffffffffff:
+		f = "0x%010x"
+	case u <= 0xffffffffffff:
+		f = "0x%012x"
+	case u <= 0xffffffffffffff:
+		f = "0x%014x"
+	case u <= 0xffffffffffffffff:
+		f = "0x%016x"
+	}
+	return fmt.Sprintf(f, u)
+}
+
+// insertPointer inserts p into m, allocating m if necessary.
+func insertPointer(m map[uintptr]bool, p uintptr) map[uintptr]bool {
+	if m == nil {
+		m = make(map[uintptr]bool)
+	}
+	m[p] = true
+	return m
+}
+
+// isZero reports whether v is the zero value.
+// This does not rely on Interface and so can be used on unexported fields.
+func isZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return v.Bool() == false
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() == 0
+	case reflect.String:
+		return v.String() == ""
+	case reflect.UnsafePointer:
+		return v.Pointer() == 0
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+		return v.IsNil()
+	case reflect.Array:
+		for i := 0; i < v.Len(); i++ {
+			if !isZero(v.Index(i)) {
+				return false
+			}
+		}
+		return true
+	case reflect.Struct:
+		for i := 0; i < v.NumField(); i++ {
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
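
The Format helper above is internal to go-cmp, but its behavior is easy to probe from inside the package. A minimal, hypothetical in-package test sketch (the file name and values are illustrative and not part of this commit) showing that map entries come out in deterministic order:

    // format_sketch_test.go (hypothetical, would live next to format.go)
    package value

    import (
    	"fmt"
    	"reflect"
    	"testing"
    )

    func TestFormatSketch(t *testing.T) {
    	m := map[string]int{"b": 2, "a": 1}
    	got := Format(reflect.ValueOf(m), FormatConfig{})
    	// Keys are ordered via SortKeys, so the output should resemble:
    	//   map[string]int{"a": 1, "b": 2}
    	fmt.Println(got)
    }
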
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
new file mode 100644
index 0000000..fe8aa27
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
@@ -0,0 +1,111 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package value
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+)
+
+// SortKeys sorts a list of map keys, deduplicating keys if necessary.
+// The type of each value must be comparable.
+func SortKeys(vs []reflect.Value) []reflect.Value {
+	if len(vs) == 0 {
+		return vs
+	}
+
+	// Sort the map keys.
+	sort.Sort(valueSorter(vs))
+
+	// Deduplicate keys (fails for NaNs).
+	vs2 := vs[:1]
+	for _, v := range vs[1:] {
+		if isLess(vs2[len(vs2)-1], v) {
+			vs2 = append(vs2, v)
+		}
+	}
+	return vs2
+}
+
+// TODO: Use sort.Slice once Google AppEngine is on Go1.8 or above.
+type valueSorter []reflect.Value
+
+func (vs valueSorter) Len() int           { return len(vs) }
+func (vs valueSorter) Less(i, j int) bool { return isLess(vs[i], vs[j]) }
+func (vs valueSorter) Swap(i, j int)      { vs[i], vs[j] = vs[j], vs[i] }
+
+// isLess is a generic function for sorting arbitrary map keys.
+// The inputs must be of the same type and must be comparable.
+func isLess(x, y reflect.Value) bool {
+	switch x.Type().Kind() {
+	case reflect.Bool:
+		return !x.Bool() && y.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return x.Int() < y.Int()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return x.Uint() < y.Uint()
+	case reflect.Float32, reflect.Float64:
+		fx, fy := x.Float(), y.Float()
+		return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
+	case reflect.Complex64, reflect.Complex128:
+		cx, cy := x.Complex(), y.Complex()
+		rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
+		if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
+			return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
+		}
+		return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
+	case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
+		return x.Pointer() < y.Pointer()
+	case reflect.String:
+		return x.String() < y.String()
+	case reflect.Array:
+		for i := 0; i < x.Len(); i++ {
+			if isLess(x.Index(i), y.Index(i)) {
+				return true
+			}
+			if isLess(y.Index(i), x.Index(i)) {
+				return false
+			}
+		}
+		return false
+	case reflect.Struct:
+		for i := 0; i < x.NumField(); i++ {
+			if isLess(x.Field(i), y.Field(i)) {
+				return true
+			}
+			if isLess(y.Field(i), x.Field(i)) {
+				return false
+			}
+		}
+		return false
+	case reflect.Interface:
+		vx, vy := x.Elem(), y.Elem()
+		if !vx.IsValid() || !vy.IsValid() {
+			return !vx.IsValid() && vy.IsValid()
+		}
+		tx, ty := vx.Type(), vy.Type()
+		if tx == ty {
+			return isLess(x.Elem(), y.Elem())
+		}
+		if tx.Kind() != ty.Kind() {
+			return vx.Kind() < vy.Kind()
+		}
+		if tx.String() != ty.String() {
+			return tx.String() < ty.String()
+		}
+		if tx.PkgPath() != ty.PkgPath() {
+			return tx.PkgPath() < ty.PkgPath()
+		}
+		// This can happen in rare situations, so we fallback to just comparing
+		// the unique pointer for a reflect.Type. This guarantees deterministic
+		// ordering within a program, but it is obviously not stable.
+		return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
+	default:
+		// Must be Func, Map, or Slice; which are not comparable.
+		panic(fmt.Sprintf("%T is not comparable", x.Type()))
+	}
+}
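
A quick in-package sketch of SortKeys (again a hypothetical test file with illustrative values): reflect MapKeys come back in unspecified order, and SortKeys gives them a stable ordering.

    // sort_sketch_test.go (hypothetical)
    package value

    import (
    	"fmt"
    	"reflect"
    	"testing"
    )

    func TestSortKeysSketch(t *testing.T) {
    	m := map[int]string{3: "c", 1: "a", 2: "b"}
    	for _, k := range SortKeys(reflect.ValueOf(m).MapKeys()) {
    		fmt.Print(k.Int(), " ") // expected order: 1 2 3
    	}
    }
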
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
new file mode 100644
index 0000000..91d4b06
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/options.go
@@ -0,0 +1,453 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+	"strings"
+
+	"github.com/google/go-cmp/cmp/internal/function"
+)
+
+// Option configures for specific behavior of Equal and Diff. In particular,
+// the fundamental Option functions (Ignore, Transformer, and Comparer),
+// configure how equality is determined.
+//
+// The fundamental options may be composed with filters (FilterPath and
+// FilterValues) to control the scope over which they are applied.
+//
+// The cmp/cmpopts package provides helper functions for creating options that
+// may be used with Equal and Diff.
+type Option interface {
+	// filter applies all filters and returns the option that remains.
+	// Each option may only read s.curPath and call s.callTTBFunc.
+	//
+	// An Options is returned only if multiple comparers or transformers
+	// can apply simultaneously and will only contain values of those types
+	// or sub-Options containing values of those types.
+	filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption
+}
+
+// applicableOption represents the following types:
+//	Fundamental: ignore | invalid | *comparer | *transformer
+//	Grouping:    Options
+type applicableOption interface {
+	Option
+
+	// apply executes the option, which may mutate s or panic.
+	apply(s *state, vx, vy reflect.Value)
+}
+
+// coreOption represents the following types:
+//	Fundamental: ignore | invalid | *comparer | *transformer
+//	Filters:     *pathFilter | *valuesFilter
+type coreOption interface {
+	Option
+	isCore()
+}
+
+type core struct{}
+
+func (core) isCore() {}
+
+// Options is a list of Option values that also satisfies the Option interface.
+// Helper comparison packages may return an Options value when packing multiple
+// Option values into a single Option. When this package processes an Options,
+// it will be implicitly expanded into a flat list.
+//
+// Applying a filter on an Options is equivalent to applying that same filter
+// on all individual options held within.
+type Options []Option
+
+func (opts Options) filter(s *state, vx, vy reflect.Value, t reflect.Type) (out applicableOption) {
+	for _, opt := range opts {
+		switch opt := opt.filter(s, vx, vy, t); opt.(type) {
+		case ignore:
+			return ignore{} // Only ignore can short-circuit evaluation
+		case invalid:
+			out = invalid{} // Takes precedence over comparer or transformer
+		case *comparer, *transformer, Options:
+			switch out.(type) {
+			case nil:
+				out = opt
+			case invalid:
+				// Keep invalid
+			case *comparer, *transformer, Options:
+				out = Options{out, opt} // Conflicting comparers or transformers
+			}
+		}
+	}
+	return out
+}
+
+func (opts Options) apply(s *state, _, _ reflect.Value) {
+	const warning = "ambiguous set of applicable options"
+	const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
+	var ss []string
+	for _, opt := range flattenOptions(nil, opts) {
+		ss = append(ss, fmt.Sprint(opt))
+	}
+	set := strings.Join(ss, "\n\t")
+	panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
+}
+
+func (opts Options) String() string {
+	var ss []string
+	for _, opt := range opts {
+		ss = append(ss, fmt.Sprint(opt))
+	}
+	return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
+}
+
+// FilterPath returns a new Option where opt is only evaluated if filter f
+// returns true for the current Path in the value tree.
+//
+// The option passed in may be an Ignore, Transformer, Comparer, Options, or
+// a previously filtered Option.
+func FilterPath(f func(Path) bool, opt Option) Option {
+	if f == nil {
+		panic("invalid path filter function")
+	}
+	if opt := normalizeOption(opt); opt != nil {
+		return &pathFilter{fnc: f, opt: opt}
+	}
+	return nil
+}
+
+type pathFilter struct {
+	core
+	fnc func(Path) bool
+	opt Option
+}
+
+func (f pathFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption {
+	if f.fnc(s.curPath) {
+		return f.opt.filter(s, vx, vy, t)
+	}
+	return nil
+}
+
+func (f pathFilter) String() string {
+	fn := getFuncName(reflect.ValueOf(f.fnc).Pointer())
+	return fmt.Sprintf("FilterPath(%s, %v)", fn, f.opt)
+}
+
+// FilterValues returns a new Option where opt is only evaluated if filter f,
+// which is a function of the form "func(T, T) bool", returns true for the
+// current pair of values being compared. If the type of the values is not
+// assignable to T, then this filter implicitly returns false.
+//
+// The filter function must be
+// symmetric (i.e., agnostic to the order of the inputs) and
+// deterministic (i.e., produces the same result when given the same inputs).
+// If T is an interface, it is possible that f is called with two values with
+// different concrete types that both implement T.
+//
+// The option passed in may be an Ignore, Transformer, Comparer, Options, or
+// a previously filtered Option.
+func FilterValues(f interface{}, opt Option) Option {
+	v := reflect.ValueOf(f)
+	if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
+		panic(fmt.Sprintf("invalid values filter function: %T", f))
+	}
+	if opt := normalizeOption(opt); opt != nil {
+		vf := &valuesFilter{fnc: v, opt: opt}
+		if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+			vf.typ = ti
+		}
+		return vf
+	}
+	return nil
+}
+
+type valuesFilter struct {
+	core
+	typ reflect.Type  // T
+	fnc reflect.Value // func(T, T) bool
+	opt Option
+}
+
+func (f valuesFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption {
+	if !vx.IsValid() || !vy.IsValid() {
+		return invalid{}
+	}
+	if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
+		return f.opt.filter(s, vx, vy, t)
+	}
+	return nil
+}
+
+func (f valuesFilter) String() string {
+	fn := getFuncName(f.fnc.Pointer())
+	return fmt.Sprintf("FilterValues(%s, %v)", fn, f.opt)
+}
+
+// Ignore is an Option that causes all comparisons to be ignored.
+// This value is intended to be combined with FilterPath or FilterValues.
+// It is an error to pass an unfiltered Ignore option to Equal.
+func Ignore() Option { return ignore{} }
+
+type ignore struct{ core }
+
+func (ignore) isFiltered() bool                                                     { return false }
+func (ignore) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return ignore{} }
+func (ignore) apply(_ *state, _, _ reflect.Value)                                   { return }
+func (ignore) String() string                                                       { return "Ignore()" }
+
+// invalid is a sentinel Option type to indicate that some options could not
+// be evaluated due to unexported fields.
+type invalid struct{ core }
+
+func (invalid) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return invalid{} }
+func (invalid) apply(s *state, _, _ reflect.Value) {
+	const help = "consider using AllowUnexported or cmpopts.IgnoreUnexported"
+	panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help))
+}
+
+// Transformer returns an Option that applies a transformation function that
+// converts values of a certain type into that of another.
+//
+// The transformer f must be a function "func(T) R" that converts values of
+// type T to those of type R and is implicitly filtered to input values
+// assignable to T. The transformer must not mutate T in any way.
+//
+// To help prevent some cases of infinite recursive cycles applying the
+// same transform to the output of itself (e.g., in the case where the
+// input and output types are the same), an implicit filter is added such that
+// a transformer is applicable only if that exact transformer is not already
+// in the tail of the Path since the last non-Transform step.
+//
+// The name is a user provided label that is used as the Transform.Name in the
+// transformation PathStep. If empty, an arbitrary name is used.
+func Transformer(name string, f interface{}) Option {
+	v := reflect.ValueOf(f)
+	if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
+		panic(fmt.Sprintf("invalid transformer function: %T", f))
+	}
+	if name == "" {
+		name = "λ" // Lambda-symbol as place-holder for anonymous transformer
+	}
+	if !isValid(name) {
+		panic(fmt.Sprintf("invalid name: %q", name))
+	}
+	tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
+	if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+		tr.typ = ti
+	}
+	return tr
+}
+
+type transformer struct {
+	core
+	name string
+	typ  reflect.Type  // T
+	fnc  reflect.Value // func(T) R
+}
+
+func (tr *transformer) isFiltered() bool { return tr.typ != nil }
+
+func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) applicableOption {
+	for i := len(s.curPath) - 1; i >= 0; i-- {
+		if t, ok := s.curPath[i].(*transform); !ok {
+			break // Hit most recent non-Transform step
+		} else if tr == t.trans {
+			return nil // Cannot directly use same Transform
+		}
+	}
+	if tr.typ == nil || t.AssignableTo(tr.typ) {
+		return tr
+	}
+	return nil
+}
+
+func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
+	// Update path before calling the Transformer so that dynamic checks
+	// will use the updated path.
+	s.curPath.push(&transform{pathStep{tr.fnc.Type().Out(0)}, tr})
+	defer s.curPath.pop()
+
+	vx = s.callTRFunc(tr.fnc, vx)
+	vy = s.callTRFunc(tr.fnc, vy)
+	s.compareAny(vx, vy)
+}
+
+func (tr transformer) String() string {
+	return fmt.Sprintf("Transformer(%s, %s)", tr.name, getFuncName(tr.fnc.Pointer()))
+}
+
+// Comparer returns an Option that determines whether two values are equal
+// to each other.
+//
+// The comparer f must be a function "func(T, T) bool" and is implicitly
+// filtered to input values assignable to T. If T is an interface, it is
+// possible that f is called with two values of different concrete types that
+// both implement T.
+//
+// The equality function must be:
+//	• Symmetric: equal(x, y) == equal(y, x)
+//	• Deterministic: equal(x, y) == equal(x, y)
+//	• Pure: equal(x, y) does not modify x or y
+func Comparer(f interface{}) Option {
+	v := reflect.ValueOf(f)
+	if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
+		panic(fmt.Sprintf("invalid comparer function: %T", f))
+	}
+	cm := &comparer{fnc: v}
+	if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+		cm.typ = ti
+	}
+	return cm
+}
+
+type comparer struct {
+	core
+	typ reflect.Type  // T
+	fnc reflect.Value // func(T, T) bool
+}
+
+func (cm *comparer) isFiltered() bool { return cm.typ != nil }
+
+func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applicableOption {
+	if cm.typ == nil || t.AssignableTo(cm.typ) {
+		return cm
+	}
+	return nil
+}
+
+func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
+	eq := s.callTTBFunc(cm.fnc, vx, vy)
+	s.report(eq, vx, vy)
+}
+
+func (cm comparer) String() string {
+	return fmt.Sprintf("Comparer(%s)", getFuncName(cm.fnc.Pointer()))
+}
+
+// AllowUnexported returns an Option that forcibly allows operations on
+// unexported fields in certain structs, which are specified by passing in a
+// value of each struct type.
+//
+// Users of this option must understand that comparing on unexported fields
+// from external packages is not safe since changes in the internal
+// implementation of some external package may cause the result of Equal
+// to unexpectedly change. However, it may be valid to use this option on types
+// defined in an internal package where the semantic meaning of an unexported
+// field is in the control of the user.
+//
+// For some cases, a custom Comparer should be used instead that defines
+// equality as a function of the public API of a type rather than the underlying
+// unexported implementation.
+//
+// For example, the reflect.Type documentation defines equality to be determined
+// by the == operator on the interface (essentially performing a shallow pointer
+// comparison) and most attempts to compare *regexp.Regexp types are interested
+// in only checking that the regular expression strings are equal.
+// Both of these are accomplished using Comparers:
+//
+//	Comparer(func(x, y reflect.Type) bool { return x == y })
+//	Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
+//
+// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
+// all unexported fields on specified struct types.
+func AllowUnexported(types ...interface{}) Option {
+	if !supportAllowUnexported {
+		panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS")
+	}
+	m := make(map[reflect.Type]bool)
+	for _, typ := range types {
+		t := reflect.TypeOf(typ)
+		if t.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("invalid struct type: %T", typ))
+		}
+		m[t] = true
+	}
+	return visibleStructs(m)
+}
+
+type visibleStructs map[reflect.Type]bool
+
+func (visibleStructs) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption {
+	panic("not implemented")
+}
+
+// reporter is an Option that configures how differences are reported.
+type reporter interface {
+	// TODO: Not exported yet.
+	//
+	// Perhaps add PushStep and PopStep and change Report to only accept
+	// a PathStep instead of the full-path? Adding a PushStep and PopStep makes
+	// it clear that we are traversing the value tree in a depth-first-search
+	// manner, which has an effect on how values are printed.
+
+	Option
+
+	// Report is called for every comparison made and will be provided with
+	// the two values being compared, the equality result, and the
+	// current path in the value tree. It is possible for x or y to be an
+	// invalid reflect.Value if one of the values is non-existent;
+	// which is possible with maps and slices.
+	Report(x, y reflect.Value, eq bool, p Path)
+}
+
+// normalizeOption normalizes the input options such that all Options groups
+// are flattened and groups with a single element are reduced to that element.
+// Only coreOptions and Options containing coreOptions are allowed.
+func normalizeOption(src Option) Option {
+	switch opts := flattenOptions(nil, Options{src}); len(opts) {
+	case 0:
+		return nil
+	case 1:
+		return opts[0]
+	default:
+		return opts
+	}
+}
+
+// flattenOptions copies all options in src to dst as a flat list.
+// Only coreOptions and Options containing coreOptions are allowed.
+func flattenOptions(dst, src Options) Options {
+	for _, opt := range src {
+		switch opt := opt.(type) {
+		case nil:
+			continue
+		case Options:
+			dst = flattenOptions(dst, opt)
+		case coreOption:
+			dst = append(dst, opt)
+		default:
+			panic(fmt.Sprintf("invalid option type: %T", opt))
+		}
+	}
+	return dst
+}
+
+// getFuncName returns a short function name from the pointer.
+// The string parsing logic works up until Go1.9.
+func getFuncName(p uintptr) string {
+	fnc := runtime.FuncForPC(p)
+	if fnc == nil {
+		return "<unknown>"
+	}
+	name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm"
+	if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") {
+		// Strip the package name from method name.
+		name = strings.TrimSuffix(name, ")-fm")
+		name = strings.TrimSuffix(name, ")·fm")
+		if i := strings.LastIndexByte(name, '('); i >= 0 {
+			methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc"
+			if j := strings.LastIndexByte(methodName, '.'); j >= 0 {
+				methodName = methodName[j+1:] // E.g., "myfunc"
+			}
+			name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc"
+		}
+	}
+	if i := strings.LastIndexByte(name, '/'); i >= 0 {
+		// Strip the package name.
+		name = name[i+1:] // E.g., "mypkg.(mytype).myfunc"
+	}
+	return name
+}
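
To illustrate how the fundamental options compose, here is a small stand-alone sketch against the public go-cmp API (cmp.Equal lives in compare.go of this same vendored package; the example values are arbitrary):

    package main

    import (
    	"fmt"
    	"math"
    	"regexp"

    	"github.com/google/go-cmp/cmp"
    )

    func main() {
    	// Comparer: compare regexps by their source text instead of by pointer.
    	reCmp := cmp.Comparer(func(x, y *regexp.Regexp) bool {
    		return x.String() == y.String()
    	})

    	// FilterValues + Comparer: approximate float equality, applied only when
    	// both values are finite, so the comparer never sees +/-Inf.
    	approx := cmp.FilterValues(func(x, y float64) bool {
    		return !math.IsInf(x, 0) && !math.IsInf(y, 0)
    	}, cmp.Comparer(func(x, y float64) bool {
    		return math.Abs(x-y) < 1e-9
    	}))

    	fmt.Println(cmp.Equal(regexp.MustCompile(`a+`), regexp.MustCompile(`a+`), reCmp)) // true
    	fmt.Println(cmp.Equal(0.1+0.2, 0.3, approx))                                      // true
    }
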
diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go
new file mode 100644
index 0000000..c08a3cf
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/path.go
@@ -0,0 +1,309 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+type (
+	// Path is a list of PathSteps describing the sequence of operations to get
+	// from some root type to the current position in the value tree.
+	// The first Path element is always an operation-less PathStep that exists
+	// simply to identify the initial type.
+	//
+	// When traversing structs with embedded structs, the embedded struct will
+	// always be accessed as a field before traversing the fields of the
+	// embedded struct themselves. That is, an exported field from the
+	// embedded struct will never be accessed directly from the parent struct.
+	Path []PathStep
+
+	// PathStep is a union-type for specific operations to traverse
+	// a value's tree structure. Users of this package never need to implement
+	// these types as values of this type will be returned by this package.
+	PathStep interface {
+		String() string
+		Type() reflect.Type // Resulting type after performing the path step
+		isPathStep()
+	}
+
+	// SliceIndex is an index operation on a slice or array at some index Key.
+	SliceIndex interface {
+		PathStep
+		Key() int // May return -1 if in a split state
+
+		// SplitKeys returns the indexes for indexing into slices in the
+		// x and y values, respectively. These indexes may differ due to the
+		// insertion or removal of an element in one of the slices, causing
+		// all of the indexes to be shifted. If an index is -1, then that
+		// indicates that the element does not exist in the associated slice.
+		//
+		// Key is guaranteed to return -1 if and only if the indexes returned
+		// by SplitKeys are not the same. SplitKeys will never return -1 for
+		// both indexes.
+		SplitKeys() (x int, y int)
+
+		isSliceIndex()
+	}
+	// MapIndex is an index operation on a map at some index Key.
+	MapIndex interface {
+		PathStep
+		Key() reflect.Value
+		isMapIndex()
+	}
+	// TypeAssertion represents a type assertion on an interface.
+	TypeAssertion interface {
+		PathStep
+		isTypeAssertion()
+	}
+	// StructField represents a struct field access on a field called Name.
+	StructField interface {
+		PathStep
+		Name() string
+		Index() int
+		isStructField()
+	}
+	// Indirect represents pointer indirection on the parent type.
+	Indirect interface {
+		PathStep
+		isIndirect()
+	}
+	// Transform is a transformation from the parent type to the current type.
+	Transform interface {
+		PathStep
+		Name() string
+		Func() reflect.Value
+
+		// Option returns the originally constructed Transformer option.
+		// The == operator can be used to detect the exact option used.
+		Option() Option
+
+		isTransform()
+	}
+)
+
+func (pa *Path) push(s PathStep) {
+	*pa = append(*pa, s)
+}
+
+func (pa *Path) pop() {
+	*pa = (*pa)[:len(*pa)-1]
+}
+
+// Last returns the last PathStep in the Path.
+// If the path is empty, this returns a non-nil PathStep that reports a nil Type.
+func (pa Path) Last() PathStep {
+	return pa.Index(-1)
+}
+
+// Index returns the ith step in the Path and supports negative indexing.
+// A negative index starts counting from the tail of the Path such that -1
+// refers to the last step, -2 refers to the second-to-last step, and so on.
+// If index is invalid, this returns a non-nil PathStep that reports a nil Type.
+func (pa Path) Index(i int) PathStep {
+	if i < 0 {
+		i = len(pa) + i
+	}
+	if i < 0 || i >= len(pa) {
+		return pathStep{}
+	}
+	return pa[i]
+}
+
+// String returns the simplified path to a node.
+// The simplified path only contains struct field accesses.
+//
+// For example:
+//	MyMap.MySlices.MyField
+func (pa Path) String() string {
+	var ss []string
+	for _, s := range pa {
+		if _, ok := s.(*structField); ok {
+			ss = append(ss, s.String())
+		}
+	}
+	return strings.TrimPrefix(strings.Join(ss, ""), ".")
+}
+
+// GoString returns the path to a specific node using Go syntax.
+//
+// For example:
+//	(*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
+func (pa Path) GoString() string {
+	var ssPre, ssPost []string
+	var numIndirect int
+	for i, s := range pa {
+		var nextStep PathStep
+		if i+1 < len(pa) {
+			nextStep = pa[i+1]
+		}
+		switch s := s.(type) {
+		case *indirect:
+			numIndirect++
+			pPre, pPost := "(", ")"
+			switch nextStep.(type) {
+			case *indirect:
+				continue // Next step is indirection, so let them batch up
+			case *structField:
+				numIndirect-- // Automatic indirection on struct fields
+			case nil:
+				pPre, pPost = "", "" // Last step; no need for parenthesis
+			}
+			if numIndirect > 0 {
+				ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
+				ssPost = append(ssPost, pPost)
+			}
+			numIndirect = 0
+			continue
+		case *transform:
+			ssPre = append(ssPre, s.trans.name+"(")
+			ssPost = append(ssPost, ")")
+			continue
+		case *typeAssertion:
+			// As a special-case, elide type assertions on anonymous types
+			// since they are typically generated dynamically and can be very
+			// verbose. For example, some transforms return interface{} because
+			// of Go's lack of generics, but typically take in and return the
+			// exact same concrete type.
+			if s.Type().PkgPath() == "" {
+				continue
+			}
+		}
+		ssPost = append(ssPost, s.String())
+	}
+	for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
+		ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
+	}
+	return strings.Join(ssPre, "") + strings.Join(ssPost, "")
+}
+
+type (
+	pathStep struct {
+		typ reflect.Type
+	}
+
+	sliceIndex struct {
+		pathStep
+		xkey, ykey int
+	}
+	mapIndex struct {
+		pathStep
+		key reflect.Value
+	}
+	typeAssertion struct {
+		pathStep
+	}
+	structField struct {
+		pathStep
+		name string
+		idx  int
+
+		// These fields are used for forcibly accessing an unexported field.
+		// pvx, pvy, and field are only valid if unexported is true.
+		unexported bool
+		force      bool                // Forcibly allow visibility
+		pvx, pvy   reflect.Value       // Parent values
+		field      reflect.StructField // Field information
+	}
+	indirect struct {
+		pathStep
+	}
+	transform struct {
+		pathStep
+		trans *transformer
+	}
+)
+
+func (ps pathStep) Type() reflect.Type { return ps.typ }
+func (ps pathStep) String() string {
+	if ps.typ == nil {
+		return "<nil>"
+	}
+	s := ps.typ.String()
+	if s == "" || strings.ContainsAny(s, "{}\n") {
+		return "root" // Type too simple or complex to print
+	}
+	return fmt.Sprintf("{%s}", s)
+}
+
+func (si sliceIndex) String() string {
+	switch {
+	case si.xkey == si.ykey:
+		return fmt.Sprintf("[%d]", si.xkey)
+	case si.ykey == -1:
+		// [5->?] means "I don't know where X[5] went"
+		return fmt.Sprintf("[%d->?]", si.xkey)
+	case si.xkey == -1:
+		// [?->3] means "I don't know where Y[3] came from"
+		return fmt.Sprintf("[?->%d]", si.ykey)
+	default:
+		// [5->3] means "X[5] moved to Y[3]"
+		return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
+	}
+}
+func (mi mapIndex) String() string      { return fmt.Sprintf("[%#v]", mi.key) }
+func (ta typeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
+func (sf structField) String() string   { return fmt.Sprintf(".%s", sf.name) }
+func (in indirect) String() string      { return "*" }
+func (tf transform) String() string     { return fmt.Sprintf("%s()", tf.trans.name) }
+
+func (si sliceIndex) Key() int {
+	if si.xkey != si.ykey {
+		return -1
+	}
+	return si.xkey
+}
+func (si sliceIndex) SplitKeys() (x, y int) { return si.xkey, si.ykey }
+func (mi mapIndex) Key() reflect.Value      { return mi.key }
+func (sf structField) Name() string         { return sf.name }
+func (sf structField) Index() int           { return sf.idx }
+func (tf transform) Name() string           { return tf.trans.name }
+func (tf transform) Func() reflect.Value    { return tf.trans.fnc }
+func (tf transform) Option() Option         { return tf.trans }
+
+func (pathStep) isPathStep()           {}
+func (sliceIndex) isSliceIndex()       {}
+func (mapIndex) isMapIndex()           {}
+func (typeAssertion) isTypeAssertion() {}
+func (structField) isStructField()     {}
+func (indirect) isIndirect()           {}
+func (transform) isTransform()         {}
+
+var (
+	_ SliceIndex    = sliceIndex{}
+	_ MapIndex      = mapIndex{}
+	_ TypeAssertion = typeAssertion{}
+	_ StructField   = structField{}
+	_ Indirect      = indirect{}
+	_ Transform     = transform{}
+
+	_ PathStep = sliceIndex{}
+	_ PathStep = mapIndex{}
+	_ PathStep = typeAssertion{}
+	_ PathStep = structField{}
+	_ PathStep = indirect{}
+	_ PathStep = transform{}
+)
+
+// isExported reports whether the identifier is exported.
+func isExported(id string) bool {
+	r, _ := utf8.DecodeRuneInString(id)
+	return unicode.IsUpper(r)
+}
+
+// isValid reports whether the identifier is valid.
+// Empty and underscore-only strings are not valid.
+func isValid(id string) bool {
+	ok := id != "" && id != "_"
+	for j, c := range id {
+		ok = ok && (j > 0 || !unicode.IsDigit(c))
+		ok = ok && (c == '_' || unicode.IsLetter(c) || unicode.IsDigit(c))
+	}
+	return ok
+}
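
Path values are mostly consumed through FilterPath. A small sketch (the Config type and its fields are made up for illustration) that ignores a single struct field by its simplified path:

    package main

    import (
    	"fmt"

    	"github.com/google/go-cmp/cmp"
    )

    type Config struct {
    	Name    string
    	Updated int64 // e.g. a timestamp we do not care about
    }

    func main() {
    	ignoreUpdated := cmp.FilterPath(func(p cmp.Path) bool {
    		// Path.String() keeps only struct field accesses, e.g. "Updated".
    		return p.String() == "Updated"
    	}, cmp.Ignore())

    	x := Config{Name: "a", Updated: 1}
    	y := Config{Name: "a", Updated: 2}
    	fmt.Println(cmp.Equal(x, y, ignoreUpdated)) // true
    }
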
diff --git a/vendor/github.com/google/go-cmp/cmp/reporter.go b/vendor/github.com/google/go-cmp/cmp/reporter.go
new file mode 100644
index 0000000..20e9f18
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/reporter.go
@@ -0,0 +1,53 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/google/go-cmp/cmp/internal/value"
+)
+
+type defaultReporter struct {
+	Option
+	diffs  []string // List of differences, possibly truncated
+	ndiffs int      // Total number of differences
+	nbytes int      // Number of bytes in diffs
+	nlines int      // Number of lines in diffs
+}
+
+var _ reporter = (*defaultReporter)(nil)
+
+func (r *defaultReporter) Report(x, y reflect.Value, eq bool, p Path) {
+	if eq {
+		return // Ignore equal results
+	}
+	const maxBytes = 4096
+	const maxLines = 256
+	r.ndiffs++
+	if r.nbytes < maxBytes && r.nlines < maxLines {
+		sx := value.Format(x, value.FormatConfig{UseStringer: true})
+		sy := value.Format(y, value.FormatConfig{UseStringer: true})
+		if sx == sy {
+			// Unhelpful output, so use more exact formatting.
+			sx = value.Format(x, value.FormatConfig{PrintPrimitiveType: true})
+			sy = value.Format(y, value.FormatConfig{PrintPrimitiveType: true})
+		}
+		s := fmt.Sprintf("%#v:\n\t-: %s\n\t+: %s\n", p, sx, sy)
+		r.diffs = append(r.diffs, s)
+		r.nbytes += len(s)
+		r.nlines += strings.Count(s, "\n")
+	}
+}
+
+func (r *defaultReporter) String() string {
+	s := strings.Join(r.diffs, "")
+	if r.ndiffs == len(r.diffs) {
+		return s
+	}
+	return fmt.Sprintf("%s... %d more differences ...", s, r.ndiffs-len(r.diffs))
+}
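
The defaultReporter above is what backs the output of cmp.Diff. A sketch of the kind of report it produces (the Point type and values are illustrative):

    package main

    import (
    	"fmt"

    	"github.com/google/go-cmp/cmp"
    )

    type Point struct{ X, Y int }

    func main() {
    	// The report should resemble:
    	//   {main.Point}.Y:
    	//   	-: 2
    	//   	+: 3
    	fmt.Print(cmp.Diff(Point{1, 2}, Point{1, 3}))
    }
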
diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go b/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go
new file mode 100644
index 0000000..d1518eb
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go
@@ -0,0 +1,15 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build purego appengine js
+
+package cmp
+
+import "reflect"
+
+const supportAllowUnexported = false
+
+func unsafeRetrieveField(reflect.Value, reflect.StructField) reflect.Value {
+	panic("unsafeRetrieveField is not implemented")
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go b/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go
new file mode 100644
index 0000000..579b655
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go
@@ -0,0 +1,23 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build !purego,!appengine,!js
+
+package cmp
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const supportAllowUnexported = true
+
+// unsafeRetrieveField uses unsafe to forcibly retrieve any field from a struct
+// such that the value has read-write permissions.
+//
+// The parent struct, v, must be addressable, while f must be a StructField
+// describing the field to retrieve.
+func unsafeRetrieveField(v reflect.Value, f reflect.StructField) reflect.Value {
+	return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem()
+}
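
unsafeRetrieveField is what makes AllowUnexported work on regular (non-purego) builds. A minimal usage sketch, assuming a local struct type with an unexported field:

    package main

    import (
    	"fmt"

    	"github.com/google/go-cmp/cmp"
    )

    type counter struct{ n int } // unexported field

    func main() {
    	// Without AllowUnexported, cmp.Equal would panic on the unexported field.
    	fmt.Println(cmp.Equal(counter{1}, counter{1}, cmp.AllowUnexported(counter{}))) // true
    }
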
diff --git a/vendor/github.com/knative/build/AUTHORS b/vendor/github.com/knative/build/AUTHORS
new file mode 100644
index 0000000..9c2b57e
--- /dev/null
+++ b/vendor/github.com/knative/build/AUTHORS
@@ -0,0 +1,8 @@
+# This is the list of Knative authors for copyright purposes.
+#
+# This does not necessarily list everyone who has contributed code, since in
+# some cases, their employer may be the copyright holder.  To see the full list
+# of contributors, see the revision history in source control.
+Google LLC
+Pivotal Software, Inc.
+Red Hat, Inc.
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/LICENSE b/vendor/github.com/knative/build/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/knative/build/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/knative/build/cmd/controller/kodata/LICENSE b/vendor/github.com/knative/build/cmd/controller/kodata/LICENSE
new file mode 120000
index 0000000..5853aae
--- /dev/null
+++ b/vendor/github.com/knative/build/cmd/controller/kodata/LICENSE
@@ -0,0 +1 @@
+../../../LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/controller/kodata/VENDOR-LICENSE b/vendor/github.com/knative/build/cmd/controller/kodata/VENDOR-LICENSE
new file mode 120000
index 0000000..3cc8976
--- /dev/null
+++ b/vendor/github.com/knative/build/cmd/controller/kodata/VENDOR-LICENSE
@@ -0,0 +1 @@
+../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/creds-init/kodata/LICENSE b/vendor/github.com/knative/build/cmd/creds-init/kodata/LICENSE
new file mode 120000
index 0000000..5853aae
--- /dev/null
+++ b/vendor/github.com/knative/build/cmd/creds-init/kodata/LICENSE
@@ -0,0 +1 @@
+../../../LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/creds-init/kodata/VENDOR-LICENSE b/vendor/github.com/knative/build/cmd/creds-init/kodata/VENDOR-LICENSE
new file mode 120000
index 0000000..3cc8976
--- /dev/null
+++ b/vendor/github.com/knative/build/cmd/creds-init/kodata/VENDOR-LICENSE
@@ -0,0 +1 @@
+../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/git-init/kodata/LICENSE b/vendor/github.com/knative/build/cmd/git-init/kodata/LICENSE
new file mode 120000
index 0000000..5853aae
--- /dev/null
+++ b/vendor/github.com/knative/build/cmd/git-init/kodata/LICENSE
@@ -0,0 +1 @@
+../../../LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/git-init/kodata/VENDOR-LICENSE b/vendor/github.com/knative/build/cmd/git-init/kodata/VENDOR-LICENSE
new file mode 120000
index 0000000..3cc8976
--- /dev/null
+++ b/vendor/github.com/knative/build/cmd/git-init/kodata/VENDOR-LICENSE
@@ -0,0 +1 @@
+../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/logs/kodata/LICENSE b/vendor/github.com/knative/build/cmd/logs/kodata/LICENSE
new file mode 120000
index 0000000..5853aae
--- /dev/null
+++ b/vendor/github.com/knative/build/cmd/logs/kodata/LICENSE
@@ -0,0 +1 @@
+../../../LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/logs/kodata/VENDOR-LICENSE b/vendor/github.com/knative/build/cmd/logs/kodata/VENDOR-LICENSE
new file mode 120000
index 0000000..3cc8976
--- /dev/null
+++ b/vendor/github.com/knative/build/cmd/logs/kodata/VENDOR-LICENSE
@@ -0,0 +1 @@
+../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/nop/kodata/LICENSE b/vendor/github.com/knative/build/cmd/nop/kodata/LICENSE
new file mode 120000
index 0000000..5853aae
--- /dev/null
+++ b/vendor/github.com/knative/build/cmd/nop/kodata/LICENSE
@@ -0,0 +1 @@
+../../../LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/nop/kodata/VENDOR-LICENSE b/vendor/github.com/knative/build/cmd/nop/kodata/VENDOR-LICENSE
new file mode 120000
index 0000000..3cc8976
--- /dev/null
+++ b/vendor/github.com/knative/build/cmd/nop/kodata/VENDOR-LICENSE
@@ -0,0 +1 @@
+../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/webhook/kodata/LICENSE b/vendor/github.com/knative/build/cmd/webhook/kodata/LICENSE
new file mode 120000
index 0000000..5853aae
--- /dev/null
+++ b/vendor/github.com/knative/build/cmd/webhook/kodata/LICENSE
@@ -0,0 +1 @@
+../../../LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/webhook/kodata/VENDOR-LICENSE b/vendor/github.com/knative/build/cmd/webhook/kodata/VENDOR-LICENSE
new file mode 120000
index 0000000..3cc8976
--- /dev/null
+++ b/vendor/github.com/knative/build/cmd/webhook/kodata/VENDOR-LICENSE
@@ -0,0 +1 @@
+../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/config/300-imagecache.yaml b/vendor/github.com/knative/build/config/300-imagecache.yaml
new file mode 120000
index 0000000..f10d6da
--- /dev/null
+++ b/vendor/github.com/knative/build/config/300-imagecache.yaml
@@ -0,0 +1 @@
+../vendor/github.com/knative/caching/config/image.yaml
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/pkg/apis/build/register.go b/vendor/github.com/knative/build/pkg/apis/build/register.go
new file mode 100644
index 0000000..379817b
--- /dev/null
+++ b/vendor/github.com/knative/build/pkg/apis/build/register.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package build
+
+// GroupName is the Kubernetes resource group name for Build types.
+const GroupName = "build.knative.dev"
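
GroupName is typically combined with a version to build GroupVersionKind values. A hedged sketch using the standard apimachinery schema helpers (the "Build" kind comes from this package's other type files, not this hunk):

    package main

    import (
    	"fmt"

    	"github.com/knative/build/pkg/apis/build"
    	"k8s.io/apimachinery/pkg/runtime/schema"
    )

    func main() {
    	gvk := schema.GroupVersionKind{Group: build.GroupName, Version: "v1alpha1", Kind: "Build"}
    	fmt.Println(gvk) // prints something like: build.knative.dev/v1alpha1, Kind=Build
    }
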
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_defaults.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_defaults.go
new file mode 100644
index 0000000..fafe219
--- /dev/null
+++ b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_defaults.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// DefaultTimeout is 10min
+const DefaultTimeout = 10 * time.Minute
+
+// SetDefaults for build
+func (b *Build) SetDefaults() {
+	if b == nil {
+		return
+	}
+	if b.Spec.ServiceAccountName == "" {
+		b.Spec.ServiceAccountName = "default"
+	}
+	if b.Spec.Timeout == nil {
+		b.Spec.Timeout = &metav1.Duration{Duration: DefaultTimeout}
+	}
+	if b.Spec.Template != nil && b.Spec.Template.Kind == "" {
+		b.Spec.Template.Kind = BuildTemplateKind
+	}
+}
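
A small sketch of the defaulting behavior above, assuming the Build type from this same package (defined in build_types.go, which is not shown in this hunk):

    package main

    import (
    	"fmt"

    	buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1"
    )

    func main() {
    	b := &buildv1alpha1.Build{}
    	b.SetDefaults()
    	// Expect the defaults applied above: "default" service account, 10m timeout.
    	fmt.Println(b.Spec.ServiceAccountName, b.Spec.Timeout.Duration)
    }
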
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_interface.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_interface.go
new file mode 100644
index 0000000..a14dba3
--- /dev/null
+++ b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_interface.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+// BuildTemplateInterface is implemented by BuildTemplate and ClusterBuildTemplate
+type BuildTemplateInterface interface {
+	TemplateSpec() BuildTemplateSpec
+	Copy() BuildTemplateInterface
+}
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_types.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_types.go
new file mode 100644
index 0000000..20d78c0
--- /dev/null
+++ b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_types.go
@@ -0,0 +1,116 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"github.com/knative/pkg/apis"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	"github.com/knative/pkg/kmeta"
+)
+
+// Template is an interface for accessing the BuildTemplateSpec
+// from various forms of template (namespace-/cluster-scoped).
+type Template interface {
+	TemplateSpec() BuildTemplateSpec
+}
+
+// +genclient
+// +genclient:noStatus
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildTemplate is a template that can be used to easily create Builds.
+type BuildTemplate struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec BuildTemplateSpec `json:"spec"`
+}
+
+// Check that our resource implements several interfaces.
+var _ kmeta.OwnerRefable = (*BuildTemplate)(nil)
+var _ Template = (*BuildTemplate)(nil)
+var _ BuildTemplateInterface = (*BuildTemplate)(nil)
+
+// Check that BuildTemplate may be validated and defaulted.
+var _ apis.Validatable = (*BuildTemplate)(nil)
+var _ apis.Defaultable = (*BuildTemplate)(nil)
+
+// BuildTemplateSpec is the spec for a BuildTemplate.
+type BuildTemplateSpec struct {
+	// TODO: Generation does not work correctly with CRD. They are scrubbed
+	// by the APIserver (https://github.com/kubernetes/kubernetes/issues/58778)
+	// So, we add Generation here. Once that gets fixed, remove this and use
+	// ObjectMeta.Generation instead.
+	// +optional
+	Generation int64 `json:"generation,omitempty"`
+
+	// Parameters defines the parameters that can be populated in a template.
+	Parameters []ParameterSpec `json:"parameters,omitempty"`
+
+	// Steps are the steps of the build; each step is run sequentially with the
+	// source mounted into /workspace.
+	Steps []corev1.Container `json:"steps"`
+
+	// Volumes is a collection of volumes that are available to mount into the
+	// steps of the build.
+	Volumes []corev1.Volume `json:"volumes"`
+}
+
+// ParameterSpec defines the possible parameters that can be populated in a
+// template.
+type ParameterSpec struct {
+	// Name is the unique name of this template parameter.
+	Name string `json:"name"`
+
+	// Description is a human-readable explanation of this template parameter.
+	Description string `json:"description,omitempty"`
+
+	// Default, if specified, defines the default value that should be applied if
+	// the build does not specify the value for this parameter.
+	Default *string `json:"default,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildTemplateList is a list of BuildTemplate resources.
+type BuildTemplateList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []BuildTemplate `json:"items"`
+}
+
+// TemplateSpec returns the Spec used by the template
+func (bt *BuildTemplate) TemplateSpec() BuildTemplateSpec {
+	return bt.Spec
+}
+
+// Copy performs a deep copy
+func (bt *BuildTemplate) Copy() BuildTemplateInterface {
+	return bt.DeepCopy()
+}
+
+// GetGroupVersionKind gives kind
+func (bt *BuildTemplate) GetGroupVersionKind() schema.GroupVersionKind {
+	return SchemeGroupVersion.WithKind("BuildTemplate")
+}
+
+// SetDefaults for build template
+func (bt *BuildTemplate) SetDefaults() {}
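
    As a hedged sketch (not part of the commit) of how these types compose, here is a hypothetical
    namespace-scoped BuildTemplate with one parameter and one step; the image, parameter name and
    ${TAG} placeholder are illustrative assumptions.

    package main

    import (
        "fmt"

        buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1"
        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        defaultTag := "latest" // hypothetical default for the TAG parameter
        tpl := &buildv1alpha1.BuildTemplate{
            ObjectMeta: metav1.ObjectMeta{Name: "example-template"},
            Spec: buildv1alpha1.BuildTemplateSpec{
                Parameters: []buildv1alpha1.ParameterSpec{
                    {Name: "TAG", Description: "Image tag to build", Default: &defaultTag},
                },
                Steps: []corev1.Container{
                    {Name: "build", Image: "gcr.io/example/builder", Args: []string{"--tag=${TAG}"}},
                },
            },
        }
        fmt.Println(tpl.TemplateSpec().Steps[0].Name) // "build"
    }
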
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_validation.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_validation.go
new file mode 100644
index 0000000..cd3464c
--- /dev/null
+++ b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_validation.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"github.com/knative/pkg/apis"
+	corev1 "k8s.io/api/core/v1"
+)
+
+// Validate build template
+func (b *BuildTemplate) Validate() *apis.FieldError {
+	return validateObjectMetadata(b.GetObjectMeta()).ViaField("metadata").Also(b.Spec.Validate().ViaField("spec"))
+}
+
+// Validate checks the build template spec.
+func (b *BuildTemplateSpec) Validate() *apis.FieldError {
+	if err := validateSteps(b.Steps); err != nil {
+		return err
+	}
+	if err := ValidateVolumes(b.Volumes); err != nil {
+		return err
+	}
+	if err := validateParameters(b.Parameters); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ValidateVolumes validates the collection of volumes that are available to mount into the
+// steps of the build or build template.
+func ValidateVolumes(volumes []corev1.Volume) *apis.FieldError {
+	// Build must not duplicate volume names.
+	vols := map[string]struct{}{}
+	for _, v := range volumes {
+		if _, ok := vols[v.Name]; ok {
+			return apis.ErrMultipleOneOf("volumeName")
+		}
+		vols[v.Name] = struct{}{}
+	}
+	return nil
+}
+
+func validateSteps(steps []corev1.Container) *apis.FieldError {
+	// Build must not duplicate step names.
+	names := map[string]struct{}{}
+	for _, s := range steps {
+		if s.Image == "" {
+			return apis.ErrMissingField("Image")
+		}
+
+		if s.Name == "" {
+			continue
+		}
+		if _, ok := names[s.Name]; ok {
+			return apis.ErrMultipleOneOf("stepName")
+		}
+		names[s.Name] = struct{}{}
+	}
+	return nil
+}
+
+func validateParameters(params []ParameterSpec) *apis.FieldError {
+	// Template must not duplicate parameter names.
+	seen := map[string]struct{}{}
+	for _, p := range params {
+		if _, ok := seen[p.Name]; ok {
+			return apis.ErrInvalidKeyName("ParamName", "b.spec.params")
+		}
+		seen[p.Name] = struct{}{}
+	}
+	return nil
+}
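
    A small sketch, assuming the same import alias, of how this validation rejects duplicate step
    names; the duplicate below is deliberate and the expected result is a multiple-one-of field error
    on stepName.

    package main

    import (
        "fmt"

        buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1"
        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        spec := buildv1alpha1.BuildTemplateSpec{
            Steps: []corev1.Container{
                {Name: "step", Image: "busybox"},
                {Name: "step", Image: "busybox"}, // duplicate step name, should fail validation
            },
        }
        if err := spec.Validate(); err != nil {
            fmt.Println(err.Error())
        }
    }
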
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_types.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_types.go
new file mode 100644
index 0000000..2d484b8
--- /dev/null
+++ b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_types.go
@@ -0,0 +1,287 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	"github.com/knative/pkg/apis"
+	duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+	"github.com/knative/pkg/kmeta"
+)
+
+// +genclient
+// +genclient:noStatus
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Build represents a build of a container image. A Build is made up of a
+// source, and a set of steps. Steps can mount volumes to share data between
+// themselves. A build may be created by instantiating a BuildTemplate.
+type Build struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   BuildSpec   `json:"spec"`
+	Status BuildStatus `json:"status"`
+}
+
+// Check that our resource implements several interfaces.
+var _ kmeta.OwnerRefable = (*Build)(nil)
+
+// Check that Build may be validated and defaulted.
+var _ apis.Validatable = (*Build)(nil)
+var _ apis.Defaultable = (*Build)(nil)
+
+// BuildSpec is the spec for a Build resource.
+type BuildSpec struct {
+	// TODO: Generation does not work correctly with CRD. They are scrubbed
+	// by the APIserver (https://github.com/kubernetes/kubernetes/issues/58778)
+	// So, we add Generation here. Once that gets fixed, remove this and use
+	// ObjectMeta.Generation instead.
+	// +optional
+	Generation int64 `json:"generation,omitempty"`
+
+	// Source specifies the input to the build.
+	Source *SourceSpec `json:"source,omitempty"`
+
+	// Steps are the steps of the build; each step is run sequentially with the
+	// source mounted into /workspace.
+	Steps []corev1.Container `json:"steps,omitempty"`
+
+	// Volumes is a collection of volumes that are available to mount into the
+	// steps of the build.
+	Volumes []corev1.Volume `json:"volumes,omitempty"`
+
+	// The name of the service account as which to run this build.
+	ServiceAccountName string `json:"serviceAccountName,omitempty"`
+
+	// Template, if specified, references a BuildTemplate resource to use to
+	// populate fields in the build, and optional Arguments to pass to the
+	// template. The default Kind of the template is BuildTemplate.
+	Template *TemplateInstantiationSpec `json:"template,omitempty"`
+
+	// NodeSelector is a selector which must be true for the pod to fit on a node.
+	// Selector which must match a node's labels for the pod to be scheduled on that node.
+	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// Time after which the build times out. Defaults to 10 minutes.
+	// Specified build timeout should be less than 24h.
+	// Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration
+	// +optional
+	Timeout *metav1.Duration `json:"timeout,omitempty"`
+
+	// If specified, the pod's scheduling constraints
+	// +optional
+	Affinity *corev1.Affinity `json:"affinity,omitempty"`
+}
+
+// TemplateKind defines the type of BuildTemplate used by the build.
+type TemplateKind string
+
+const (
+	// BuildTemplateKind indicates that the template type has a namespace scope.
+	BuildTemplateKind TemplateKind = "BuildTemplate"
+	// ClusterBuildTemplateKind indicates that the template type has a cluster scope.
+	ClusterBuildTemplateKind TemplateKind = "ClusterBuildTemplate"
+)
+
+// TemplateInstantiationSpec specifies how a BuildTemplate is instantiated into
+// a Build.
+type TemplateInstantiationSpec struct {
+	// Name references the BuildTemplate resource to use.
+	//
+	// The template is assumed to exist in the Build's namespace.
+	Name string `json:"name"`
+
+	// The Kind of the template to be used; possible values are BuildTemplate
+	// or ClusterBuildTemplate. If nothing is specified, the default is BuildTemplate.
+	Kind TemplateKind `json:"kind,omitempty"`
+
+	// Arguments, if specified, lists values that should be applied to the
+	// parameters specified by the template.
+	Arguments []ArgumentSpec `json:"arguments,omitempty"`
+
+	// Env, if specified will provide variables to all build template steps.
+	// This will override any of the template's steps environment variables.
+	Env []corev1.EnvVar `json:"env,omitempty"`
+}
+
+// ArgumentSpec defines the actual values to use to populate a template's
+// parameters.
+type ArgumentSpec struct {
+	// Name is the name of the argument.
+	Name string `json:"name"`
+	// Value is the value of the argument.
+	Value string `json:"value"`
+	// TODO(jasonhall): ValueFrom?
+}
+
+// SourceSpec defines the input to the Build
+type SourceSpec struct {
+	// Git represents source in a Git repository.
+	Git *GitSourceSpec `json:"git,omitempty"`
+
+	// GCS represents source in Google Cloud Storage.
+	GCS *GCSSourceSpec `json:"gcs,omitempty"`
+
+	// Custom indicates that source should be retrieved using a custom
+	// process defined in a container invocation.
+	Custom *corev1.Container `json:"custom,omitempty"`
+
+	// SubPath specifies a path within the fetched source which should be
+	// built. This option makes parent directories *inaccessible* to the
+	// build steps. (The specific source type may, in fact, not even fetch
+	// files not in the SubPath.)
+	SubPath string `json:"subPath,omitempty"`
+}
+
+// GitSourceSpec describes a Git repo source input to the Build.
+type GitSourceSpec struct {
+	// URL of the Git repository to clone from.
+	Url string `json:"url"`
+
+	// Git revision (branch, tag, commit SHA or ref) to clone.  See
+	// https://git-scm.com/docs/gitrevisions#_specifying_revisions for more
+	// information.
+	Revision string `json:"revision"`
+}
+
+// GCSSourceSpec describes source input to the Build in the form of an archive,
+// or a source manifest describing files to fetch.
+type GCSSourceSpec struct {
+	// Type declares the style of source to fetch.
+	Type GCSSourceType `json:"type,omitempty"`
+
+	// Location specifies the location of the source archive or manifest file.
+	Location string `json:"location,omitempty"`
+}
+
+// GCSSourceType defines a type of GCS source fetch.
+type GCSSourceType string
+
+const (
+	// GCSArchive indicates that source should be fetched from a typical archive file.
+	GCSArchive GCSSourceType = "Archive"
+
+	// GCSManifest indicates that source should be fetched using a
+	// manifest-based protocol which enables incremental source upload.
+	GCSManifest GCSSourceType = "Manifest"
+)
+
+// BuildProvider defines a build execution implementation.
+type BuildProvider string
+
+const (
+	// GoogleBuildProvider indicates that this build was performed with Google Cloud Build.
+	GoogleBuildProvider BuildProvider = "Google"
+	// ClusterBuildProvider indicates that this build was performed on-cluster.
+	ClusterBuildProvider BuildProvider = "Cluster"
+)
+
+// BuildStatus is the status for a Build resource
+type BuildStatus struct {
+	Builder BuildProvider `json:"builder,omitempty"`
+
+	// Cluster provides additional information if the builder is Cluster.
+	Cluster *ClusterSpec `json:"cluster,omitempty"`
+	// Google provides additional information if the builder is Google.
+	Google *GoogleSpec `json:"google,omitempty"`
+
+	// StartTime is the time the build is actually started.
+	StartTime metav1.Time `json:"startTime,omitempty"`
+	// CompletionTime is the time the build completed.
+	CompletionTime metav1.Time `json:"completionTime,omitempty"`
+
+	// StepStates describes the state of each build step container.
+	StepStates []corev1.ContainerState `json:"stepStates,omitempty"`
+
+	// StepsCompleted lists the name of build steps completed.
+	StepsCompleted []string `json:"stepsCompleted"`
+
+	// Conditions describes the set of conditions of this build.
+	Conditions duckv1alpha1.Conditions `json:"conditions,omitempty"`
+}
+
+// Check that BuildStatus may have its conditions managed.
+var _ duckv1alpha1.ConditionsAccessor = (*BuildStatus)(nil)
+
+// ClusterSpec provides information about the on-cluster build, if applicable.
+type ClusterSpec struct {
+	// Namespace is the namespace in which the pod is running.
+	Namespace string `json:"namespace"`
+	// PodName is the name of the pod responsible for executing this build's steps.
+	PodName string `json:"podName"`
+}
+
+// GoogleSpec provides information about the GCB build, if applicable.
+type GoogleSpec struct {
+	// Operation is the unique name of the GCB API Operation for the build.
+	Operation string `json:"operation"`
+}
+
+// BuildSucceeded is set when the build is running, and becomes True when the
+// build finishes successfully.
+//
+// If the build is ongoing, its status will be Unknown. If it fails, its status
+// will be False.
+const BuildSucceeded = duckv1alpha1.ConditionSucceeded
+
+var buildCondSet = duckv1alpha1.NewBatchConditionSet()
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildList is a list of Build resources
+type BuildList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	// Items is the list of Build items in this list.
+	Items []Build `json:"items"`
+}
+
+// GetCondition returns the Condition matching the given type.
+func (bs *BuildStatus) GetCondition(t duckv1alpha1.ConditionType) *duckv1alpha1.Condition {
+	return buildCondSet.Manage(bs).GetCondition(t)
+}
+
+// SetCondition sets the condition, unsetting previous conditions with the same
+// type as necessary.
+func (bs *BuildStatus) SetCondition(newCond *duckv1alpha1.Condition) {
+	if newCond != nil {
+		buildCondSet.Manage(bs).SetCondition(*newCond)
+	}
+}
+
+// GetConditions returns the Conditions array. This enables generic handling of
+// conditions by implementing the duckv1alpha1.Conditions interface.
+func (bs *BuildStatus) GetConditions() duckv1alpha1.Conditions {
+	return bs.Conditions
+}
+
+// SetConditions sets the Conditions array. This enables generic handling of
+// conditions by implementing the duckv1alpha1.Conditions interface.
+func (bs *BuildStatus) SetConditions(conditions duckv1alpha1.Conditions) {
+	bs.Conditions = conditions
+}
+
+func (b *Build) GetGroupVersionKind() schema.GroupVersionKind {
+	return SchemeGroupVersion.WithKind("Build")
+}
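
    For orientation (illustrative, not part of the diff): a hypothetical Build that inlines its steps
    and fetches source from Git; the repository URL and image are placeholders. Spec.Template could be
    used instead of Steps to instantiate a BuildTemplate.

    package main

    import (
        "fmt"

        buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1"
        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        b := &buildv1alpha1.Build{
            ObjectMeta: metav1.ObjectMeta{Name: "example-build", Namespace: "default"},
            Spec: buildv1alpha1.BuildSpec{
                Source: &buildv1alpha1.SourceSpec{
                    Git: &buildv1alpha1.GitSourceSpec{
                        Url:      "https://github.com/example/repo.git", // placeholder repository
                        Revision: "master",
                    },
                },
                Steps: []corev1.Container{
                    {Name: "build", Image: "maven:3-jdk-8", Args: []string{"mvn", "package"}},
                },
            },
        }
        fmt.Println(b.GetGroupVersionKind()) // build.knative.dev/v1alpha1, Kind=Build
    }
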
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_validation.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_validation.go
new file mode 100644
index 0000000..04792a4
--- /dev/null
+++ b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_validation.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/knative/pkg/apis"
+)
+
+// Validate Build
+func (b *Build) Validate() *apis.FieldError {
+	return validateObjectMetadata(b.GetObjectMeta()).ViaField("metadata").Also(b.Spec.Validate().ViaField("spec"))
+}
+
+// Validate checks the build spec.
+func (bs *BuildSpec) Validate() *apis.FieldError {
+	if bs.Template == nil && len(bs.Steps) == 0 {
+		return apis.ErrMissingField("b.spec.template").Also(apis.ErrMissingField("b.spec.steps"))
+	}
+	if bs.Template != nil && len(bs.Steps) > 0 {
+		return apis.ErrMissingField("b.spec.template").Also(apis.ErrMissingField("b.spec.steps"))
+	}
+
+	if bs.Template != nil && bs.Template.Name == "" {
+		return apis.ErrMissingField("build.spec.template.name")
+	}
+
+	// If a build specifies a template, all the template's parameters without
+	// defaults must be satisfied by the build's parameters.
+	if bs.Template != nil {
+		return bs.Template.Validate()
+	}
+	if err := ValidateVolumes(bs.Volumes); err != nil {
+		return err
+	}
+	if err := bs.validateTimeout(); err != nil {
+		return err
+	}
+
+	if err := validateSteps(bs.Steps); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Validate checks the template instantiation spec, including its Kind.
+func (b *TemplateInstantiationSpec) Validate() *apis.FieldError {
+	if b == nil {
+		return nil
+	}
+	if b.Name == "" {
+		return apis.ErrMissingField("build.spec.template.name")
+	}
+	if b.Kind != "" {
+		switch b.Kind {
+		case ClusterBuildTemplateKind,
+			BuildTemplateKind:
+			return nil
+		default:
+			return apis.ErrInvalidValue(string(b.Kind), apis.CurrentField)
+		}
+	}
+	return nil
+}
+
+func (bt *BuildSpec) validateTimeout() *apis.FieldError {
+	if bt.Timeout == nil {
+		return nil
+	}
+	maxTimeout := time.Duration(24 * time.Hour)
+
+	if bt.Timeout.Duration > maxTimeout {
+		return apis.ErrInvalidValue(fmt.Sprintf("%s should be < 24h", bt.Timeout), "b.spec.timeout")
+	} else if bt.Timeout.Duration < 0 {
+		return apis.ErrInvalidValue(fmt.Sprintf("%s should be > 0", bt.Timeout), "b.spec.timeout")
+	}
+	return nil
+}
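
    A minimal sketch, under the assumption that the package is imported as below, of two cases this
    validation rejects: a spec with neither steps nor a template, and a timeout above the 24h ceiling.

    package main

    import (
        "fmt"
        "time"

        buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1"
        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        // Neither Steps nor Template: Validate should return a field error.
        empty := buildv1alpha1.BuildSpec{}
        fmt.Println(empty.Validate() != nil) // true

        // A 25h timeout exceeds the maximum enforced by validateTimeout.
        tooLong := buildv1alpha1.BuildSpec{
            Steps:   []corev1.Container{{Image: "busybox"}},
            Timeout: &metav1.Duration{Duration: 25 * time.Hour},
        }
        fmt.Println(tooLong.Validate() != nil) // true
    }
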
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/cluster_build_template_types.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/cluster_build_template_types.go
new file mode 100644
index 0000000..604b133
--- /dev/null
+++ b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/cluster_build_template_types.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	"github.com/knative/pkg/apis"
+	"github.com/knative/pkg/kmeta"
+)
+
+// +genclient
+// +genclient:noStatus
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterBuildTemplate is a template that can be used to easily create Builds.
+type ClusterBuildTemplate struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec BuildTemplateSpec `json:"spec"`
+}
+
+// Check that our resource implements several interfaces.
+var _ kmeta.OwnerRefable = (*ClusterBuildTemplate)(nil)
+var _ Template = (*ClusterBuildTemplate)(nil)
+var _ BuildTemplateInterface = (*ClusterBuildTemplate)(nil)
+
+// Check that ClusterBuildTemplate may be validated and defaulted.
+var _ apis.Validatable = (*ClusterBuildTemplate)(nil)
+var _ apis.Defaultable = (*ClusterBuildTemplate)(nil)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterBuildTemplateList is a list of BuildTemplate resources.
+type ClusterBuildTemplateList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []ClusterBuildTemplate `json:"items"`
+}
+
+// TemplateSpec returns the Spec used by the template
+func (bt *ClusterBuildTemplate) TemplateSpec() BuildTemplateSpec {
+	return bt.Spec
+}
+
+// Copy performs a deep copy
+func (bt *ClusterBuildTemplate) Copy() BuildTemplateInterface {
+	return bt.DeepCopy()
+}
+
+func (bt *ClusterBuildTemplate) GetGroupVersionKind() schema.GroupVersionKind {
+	return SchemeGroupVersion.WithKind("ClusterBuildTemplate")
+}
+
+// SetDefaults applies defaults (currently a no-op for ClusterBuildTemplate).
+func (b *ClusterBuildTemplate) SetDefaults() {}
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/cluster_build_template_validation.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/cluster_build_template_validation.go
new file mode 100644
index 0000000..111c8d5
--- /dev/null
+++ b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/cluster_build_template_validation.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import "github.com/knative/pkg/apis"
+
+// Validate ClusterBuildTemplate
+func (b *ClusterBuildTemplate) Validate() *apis.FieldError {
+	return validateObjectMetadata(b.GetObjectMeta()).ViaField("metadata").Also(b.Spec.Validate().ViaField("spec"))
+}
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/doc.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/doc.go
new file mode 100644
index 0000000..df55159
--- /dev/null
+++ b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+
+// Package v1alpha1 is the v1alpha1 version of the API.
+// +groupName=build.knative.dev
+package v1alpha1
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/metadata_validation.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/metadata_validation.go
new file mode 100644
index 0000000..8801c3f
--- /dev/null
+++ b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/metadata_validation.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"strings"
+
+	"github.com/knative/pkg/apis"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	maxLength = 63
+)
+
+func validateObjectMetadata(meta metav1.Object) *apis.FieldError {
+	name := meta.GetName()
+
+	if strings.Contains(name, ".") {
+		return &apis.FieldError{
+			Message: "Invalid resource name: special character . must not be present",
+			Paths:   []string{"name"},
+		}
+	}
+
+	if len(name) > maxLength {
+		return &apis.FieldError{
+			Message: "Invalid resource name: length must be no more than 63 characters",
+			Paths:   []string{"name"},
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/register.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/register.go
new file mode 100644
index 0000000..c2b2c65
--- /dev/null
+++ b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/register.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	"github.com/knative/build/pkg/apis/build"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: build.GroupName, Version: "v1alpha1"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+
+	// AddToScheme adds Build types to the scheme.
+	AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Build{},
+		&BuildList{},
+		&BuildTemplate{},
+		&BuildTemplateList{},
+		&ClusterBuildTemplate{},
+		&ClusterBuildTemplateList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
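
    By way of illustration (not part of the vendored source), registering these types into a
    runtime.Scheme, e.g. before building a decoder or client that must recognise Build kinds:

    package main

    import (
        "fmt"

        buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1"
        "k8s.io/apimachinery/pkg/runtime"
    )

    func main() {
        scheme := runtime.NewScheme()
        if err := buildv1alpha1.AddToScheme(scheme); err != nil {
            panic(err)
        }
        // The scheme now recognises build.knative.dev/v1alpha1 kinds.
        fmt.Println(scheme.Recognizes(buildv1alpha1.SchemeGroupVersion.WithKind("Build"))) // true
    }
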
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..0880074
--- /dev/null
+++ b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,550 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package v1alpha1
+
+import (
+	duck_v1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+	v1 "k8s.io/api/core/v1"
+	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ArgumentSpec) DeepCopyInto(out *ArgumentSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArgumentSpec.
+func (in *ArgumentSpec) DeepCopy() *ArgumentSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ArgumentSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Build) DeepCopyInto(out *Build) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build.
+func (in *Build) DeepCopy() *Build {
+	if in == nil {
+		return nil
+	}
+	out := new(Build)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Build) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildList) DeepCopyInto(out *BuildList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Build, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList.
+func (in *BuildList) DeepCopy() *BuildList {
+	if in == nil {
+		return nil
+	}
+	out := new(BuildList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BuildList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildSpec) DeepCopyInto(out *BuildSpec) {
+	*out = *in
+	if in.Source != nil {
+		in, out := &in.Source, &out.Source
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(SourceSpec)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.Steps != nil {
+		in, out := &in.Steps, &out.Steps
+		*out = make([]v1.Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Volumes != nil {
+		in, out := &in.Volumes, &out.Volumes
+		*out = make([]v1.Volume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Template != nil {
+		in, out := &in.Template, &out.Template
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(TemplateInstantiationSpec)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Timeout != nil {
+		in, out := &in.Timeout, &out.Timeout
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(meta_v1.Duration)
+			**out = **in
+		}
+	}
+	if in.Affinity != nil {
+		in, out := &in.Affinity, &out.Affinity
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(v1.Affinity)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec.
+func (in *BuildSpec) DeepCopy() *BuildSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(BuildSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildStatus) DeepCopyInto(out *BuildStatus) {
+	*out = *in
+	if in.Cluster != nil {
+		in, out := &in.Cluster, &out.Cluster
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(ClusterSpec)
+			**out = **in
+		}
+	}
+	if in.Google != nil {
+		in, out := &in.Google, &out.Google
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(GoogleSpec)
+			**out = **in
+		}
+	}
+	in.StartTime.DeepCopyInto(&out.StartTime)
+	in.CompletionTime.DeepCopyInto(&out.CompletionTime)
+	if in.StepStates != nil {
+		in, out := &in.StepStates, &out.StepStates
+		*out = make([]v1.ContainerState, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.StepsCompleted != nil {
+		in, out := &in.StepsCompleted, &out.StepsCompleted
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make(duck_v1alpha1.Conditions, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatus.
+func (in *BuildStatus) DeepCopy() *BuildStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(BuildStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildTemplate) DeepCopyInto(out *BuildTemplate) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTemplate.
+func (in *BuildTemplate) DeepCopy() *BuildTemplate {
+	if in == nil {
+		return nil
+	}
+	out := new(BuildTemplate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BuildTemplate) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildTemplateList) DeepCopyInto(out *BuildTemplateList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]BuildTemplate, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTemplateList.
+func (in *BuildTemplateList) DeepCopy() *BuildTemplateList {
+	if in == nil {
+		return nil
+	}
+	out := new(BuildTemplateList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BuildTemplateList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildTemplateSpec) DeepCopyInto(out *BuildTemplateSpec) {
+	*out = *in
+	if in.Parameters != nil {
+		in, out := &in.Parameters, &out.Parameters
+		*out = make([]ParameterSpec, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Steps != nil {
+		in, out := &in.Steps, &out.Steps
+		*out = make([]v1.Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Volumes != nil {
+		in, out := &in.Volumes, &out.Volumes
+		*out = make([]v1.Volume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTemplateSpec.
+func (in *BuildTemplateSpec) DeepCopy() *BuildTemplateSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(BuildTemplateSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterBuildTemplate) DeepCopyInto(out *ClusterBuildTemplate) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterBuildTemplate.
+func (in *ClusterBuildTemplate) DeepCopy() *ClusterBuildTemplate {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterBuildTemplate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterBuildTemplate) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterBuildTemplateList) DeepCopyInto(out *ClusterBuildTemplateList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ClusterBuildTemplate, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterBuildTemplateList.
+func (in *ClusterBuildTemplateList) DeepCopy() *ClusterBuildTemplateList {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterBuildTemplateList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterBuildTemplateList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
+func (in *ClusterSpec) DeepCopy() *ClusterSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCSSourceSpec) DeepCopyInto(out *GCSSourceSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCSSourceSpec.
+func (in *GCSSourceSpec) DeepCopy() *GCSSourceSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(GCSSourceSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitSourceSpec) DeepCopyInto(out *GitSourceSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitSourceSpec.
+func (in *GitSourceSpec) DeepCopy() *GitSourceSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(GitSourceSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GoogleSpec) DeepCopyInto(out *GoogleSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleSpec.
+func (in *GoogleSpec) DeepCopy() *GoogleSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(GoogleSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParameterSpec) DeepCopyInto(out *ParameterSpec) {
+	*out = *in
+	if in.Default != nil {
+		in, out := &in.Default, &out.Default
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(string)
+			**out = **in
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterSpec.
+func (in *ParameterSpec) DeepCopy() *ParameterSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ParameterSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceSpec) DeepCopyInto(out *SourceSpec) {
+	*out = *in
+	if in.Git != nil {
+		in, out := &in.Git, &out.Git
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(GitSourceSpec)
+			**out = **in
+		}
+	}
+	if in.GCS != nil {
+		in, out := &in.GCS, &out.GCS
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(GCSSourceSpec)
+			**out = **in
+		}
+	}
+	if in.Custom != nil {
+		in, out := &in.Custom, &out.Custom
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(v1.Container)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSpec.
+func (in *SourceSpec) DeepCopy() *SourceSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(SourceSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TemplateInstantiationSpec) DeepCopyInto(out *TemplateInstantiationSpec) {
+	*out = *in
+	if in.Arguments != nil {
+		in, out := &in.Arguments, &out.Arguments
+		*out = make([]ArgumentSpec, len(*in))
+		copy(*out, *in)
+	}
+	if in.Env != nil {
+		in, out := &in.Env, &out.Env
+		*out = make([]v1.EnvVar, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstantiationSpec.
+func (in *TemplateInstantiationSpec) DeepCopy() *TemplateInstantiationSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(TemplateInstantiationSpec)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/knative/build/test/panic/kodata/LICENSE b/vendor/github.com/knative/build/test/panic/kodata/LICENSE
new file mode 120000
index 0000000..5853aae
--- /dev/null
+++ b/vendor/github.com/knative/build/test/panic/kodata/LICENSE
@@ -0,0 +1 @@
+../../../LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/test/panic/kodata/VENDOR-LICENSE b/vendor/github.com/knative/build/test/panic/kodata/VENDOR-LICENSE
new file mode 120000
index 0000000..3cc8976
--- /dev/null
+++ b/vendor/github.com/knative/build/test/panic/kodata/VENDOR-LICENSE
@@ -0,0 +1 @@
+../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/test/workingdir/kodata/LICENSE b/vendor/github.com/knative/build/test/workingdir/kodata/LICENSE
new file mode 120000
index 0000000..5853aae
--- /dev/null
+++ b/vendor/github.com/knative/build/test/workingdir/kodata/LICENSE
@@ -0,0 +1 @@
+../../../LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/test/workingdir/kodata/VENDOR-LICENSE b/vendor/github.com/knative/build/test/workingdir/kodata/VENDOR-LICENSE
new file mode 120000
index 0000000..3cc8976
--- /dev/null
+++ b/vendor/github.com/knative/build/test/workingdir/kodata/VENDOR-LICENSE
@@ -0,0 +1 @@
+../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/pkg/LICENSE b/vendor/github.com/knative/pkg/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/knative/pkg/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/knative/pkg/apis/doc.go b/vendor/github.com/knative/pkg/apis/doc.go
new file mode 100644
index 0000000..73ae032
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+package apis
diff --git a/vendor/github.com/knative/pkg/apis/duck/cached.go b/vendor/github.com/knative/pkg/apis/duck/cached.go
new file mode 100644
index 0000000..6696bd0
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/cached.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package duck
+
+import (
+	"sync"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/tools/cache"
+)
+
+// CachedInformerFactory implements InformerFactory by delegating to another
+// InformerFactory, but memoizing the results.
+type CachedInformerFactory struct {
+	Delegate InformerFactory
+
+	m     sync.Mutex
+	cache map[schema.GroupVersionResource]*result
+}
+
+// Check that CachedInformerFactory implements InformerFactory.
+var _ InformerFactory = (*CachedInformerFactory)(nil)
+
+// Get implements InformerFactory.
+func (cif *CachedInformerFactory) Get(gvr schema.GroupVersionResource) (cache.SharedIndexInformer, cache.GenericLister, error) {
+	cif.m.Lock()
+	if cif.cache == nil {
+		cif.cache = make(map[schema.GroupVersionResource]*result)
+	}
+	elt, ok := cif.cache[gvr]
+	if !ok {
+		elt = &result{}
+		elt.init = func() {
+			elt.inf, elt.lister, elt.err = cif.Delegate.Get(gvr)
+		}
+		cif.cache[gvr] = elt
+	}
+	// If this were done via "defer", then TestDifferentGVRs will fail.
+	cif.m.Unlock()
+
+	// The call to the delegate could be slow because it syncs informers, so do
+	// this outside of the main lock.
+	return elt.Get()
+}
+
+type result struct {
+	sync.Once
+	init func()
+
+	inf    cache.SharedIndexInformer
+	lister cache.GenericLister
+	err    error
+}
+
+func (t *result) Get() (cache.SharedIndexInformer, cache.GenericLister, error) {
+	t.Do(t.init)
+	return t.inf, t.lister, t.err
+}
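
For context, a minimal sketch of how CachedInformerFactory can be layered over the TypedInformerFactory added later in this commit, so repeated lookups of the same GroupVersionResource reuse one informer. The helper names, the example GVR and the resync period are illustrative and not part of the commit:

package example

import (
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"

	"github.com/knative/pkg/apis/duck"
	duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
)

// newAddressableFactory memoizes informer creation: repeated Get calls for
// the same GroupVersionResource return the informer started on the first call.
func newAddressableFactory(client dynamic.Interface, stopCh <-chan struct{}) duck.InformerFactory {
	return &duck.CachedInformerFactory{
		Delegate: &duck.TypedInformerFactory{
			Client:       client,
			Type:         &duckv1alpha1.AddressableType{},
			ResyncPeriod: 10 * time.Minute,
			StopChannel:  stopCh,
		},
	}
}

func lookupTwice(factory duck.InformerFactory) error {
	gvr := schema.GroupVersionResource{Group: "serving.knative.dev", Version: "v1alpha1", Resource: "services"}
	// Both calls return the same cached informer/lister pair; only the first syncs.
	if _, _, err := factory.Get(gvr); err != nil {
		return err
	}
	_, _, err := factory.Get(gvr)
	return err
}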
diff --git a/vendor/github.com/knative/pkg/apis/duck/doc.go b/vendor/github.com/knative/pkg/apis/duck/doc.go
new file mode 100644
index 0000000..9188bd2
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package duck defines logic for defining and consuming "duck typed"
+// Kubernetes resources.  Producers define partial resource definitions
+// that resource authors may choose to implement to interoperate with
+// consumers of these "duck typed" interfaces.
+// For more information see:
+// https://docs.google.com/document/d/16j8C91jML4fQRQPhnHihNJUJDcbvW0RM1YAX2REHgyY/edit#
+package duck
diff --git a/vendor/github.com/knative/pkg/apis/duck/enqueue.go b/vendor/github.com/knative/pkg/apis/duck/enqueue.go
new file mode 100644
index 0000000..1ef966e
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/enqueue.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package duck
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/tools/cache"
+)
+
+// EnqueueInformerFactory implements InformerFactory by delegating to another
+// InformerFactory, but attaching a ResourceEventHandler to the informer.
+type EnqueueInformerFactory struct {
+	Delegate InformerFactory
+
+	EventHandler cache.ResourceEventHandler
+}
+
+// Check that EnqueueInformerFactory implements InformerFactory.
+var _ InformerFactory = (*EnqueueInformerFactory)(nil)
+
+// Get implements InformerFactory.
+func (cif *EnqueueInformerFactory) Get(gvr schema.GroupVersionResource) (cache.SharedIndexInformer, cache.GenericLister, error) {
+	inf, lister, err := cif.Delegate.Get(gvr)
+	if err != nil {
+		return nil, nil, err
+	}
+	// If there is an informer, attach our event handler.
+	inf.AddEventHandler(cif.EventHandler)
+	return inf, lister, nil
+}
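
A hedged sketch of pairing EnqueueInformerFactory with a client-go workqueue so every informer event enqueues an object key; the withEnqueue helper is invented for the example:

package example

import (
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"

	"github.com/knative/pkg/apis/duck"
)

// withEnqueue wraps a delegate factory so every informer it returns feeds
// object keys into the given workqueue.
func withEnqueue(delegate duck.InformerFactory, queue workqueue.Interface) duck.InformerFactory {
	enqueue := func(obj interface{}) {
		if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err == nil {
			queue.Add(key)
		}
	}
	return &duck.EnqueueInformerFactory{
		Delegate: delegate,
		EventHandler: cache.ResourceEventHandlerFuncs{
			AddFunc:    enqueue,
			UpdateFunc: func(old, new interface{}) { enqueue(new) },
			DeleteFunc: enqueue,
		},
	}
}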
diff --git a/vendor/github.com/knative/pkg/apis/duck/interface.go b/vendor/github.com/knative/pkg/apis/duck/interface.go
new file mode 100644
index 0000000..f99a636
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/interface.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package duck
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/tools/cache"
+)
+
+// InformerFactory is used to create Informer/Lister pairs for a schema.GroupVersionResource
+type InformerFactory interface {
+	// Get returns a synced Informer/Lister pair for the provided schema.GroupVersionResource.
+	Get(schema.GroupVersionResource) (cache.SharedIndexInformer, cache.GenericLister, error)
+}
diff --git a/vendor/github.com/knative/pkg/apis/duck/patch.go b/vendor/github.com/knative/pkg/apis/duck/patch.go
new file mode 100644
index 0000000..386aa1f
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/patch.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package duck
+
+import (
+	"encoding/json"
+
+	jsonmergepatch "github.com/evanphx/json-patch"
+	"github.com/mattbaird/jsonpatch"
+)
+
+func marshallBeforeAfter(before, after interface{}) ([]byte, []byte, error) {
+	rawBefore, err := json.Marshal(before)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	rawAfter, err := json.Marshal(after)
+	if err != nil {
+		return rawBefore, nil, err
+	}
+
+	return rawBefore, rawAfter, nil
+}
+
+func CreateMergePatch(before, after interface{}) ([]byte, error) {
+	rawBefore, rawAfter, err := marshallBeforeAfter(before, after)
+	if err != nil {
+		return nil, err
+	}
+	return jsonmergepatch.CreateMergePatch(rawBefore, rawAfter)
+}
+
+func CreatePatch(before, after interface{}) (JSONPatch, error) {
+	rawBefore, rawAfter, err := marshallBeforeAfter(before, after)
+	if err != nil {
+		return nil, err
+	}
+	return jsonpatch.CreatePatch(rawBefore, rawAfter)
+}
+
+type JSONPatch []jsonpatch.JsonPatchOperation
+
+func (p JSONPatch) MarshalJSON() ([]byte, error) {
+	return json.Marshal([]jsonpatch.JsonPatchOperation(p))
+}
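
To illustrate the two helpers above, the sketch below diffs two copies of a made-up Widget struct and prints both patch flavours; the expected outputs in the comments are examples only:

package example

import (
	"encoding/json"
	"fmt"

	"github.com/knative/pkg/apis/duck"
)

type Widget struct {
	Name     string `json:"name"`
	Replicas int    `json:"replicas"`
}

func showPatches() error {
	before := Widget{Name: "demo", Replicas: 1}
	after := Widget{Name: "demo", Replicas: 3}

	// RFC 7386 merge patch, e.g. {"replicas":3}
	merge, err := duck.CreateMergePatch(before, after)
	if err != nil {
		return err
	}

	// RFC 6902 JSON patch, e.g. [{"op":"replace","path":"/replicas","value":3}]
	ops, err := duck.CreatePatch(before, after)
	if err != nil {
		return err
	}
	raw, err := json.Marshal(ops)
	if err != nil {
		return err
	}
	fmt.Printf("merge patch: %s\njson patch: %s\n", merge, raw)
	return nil
}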
diff --git a/vendor/github.com/knative/pkg/apis/duck/proxy.go b/vendor/github.com/knative/pkg/apis/duck/proxy.go
new file mode 100644
index 0000000..85a795c
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/proxy.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package duck
+
+import (
+	"sync"
+
+	"k8s.io/apimachinery/pkg/watch"
+)
+
+// NewProxyWatcher is based on the same concept from Kubernetes apimachinery in 1.12 here:
+//    https://github.com/kubernetes/apimachinery/blob/c6dd271be/pkg/watch/watch.go#L272
+// Replace this copy once we've updated our client libraries.
+
+// proxyWatcher lets you wrap your channel in watch.Interface. Threadsafe.
+type proxyWatcher struct {
+	result chan watch.Event
+	stopCh chan struct{}
+
+	mutex   sync.Mutex
+	stopped bool
+}
+
+var _ watch.Interface = (*proxyWatcher)(nil)
+
+// NewProxyWatcher creates new proxyWatcher by wrapping a channel
+func NewProxyWatcher(ch chan watch.Event) watch.Interface {
+	return &proxyWatcher{
+		result:  ch,
+		stopCh:  make(chan struct{}),
+		stopped: false,
+	}
+}
+
+// Stop implements Interface
+func (pw *proxyWatcher) Stop() {
+	pw.mutex.Lock()
+	defer pw.mutex.Unlock()
+	if !pw.stopped {
+		pw.stopped = true
+		close(pw.stopCh)
+	}
+}
+
+// Stopping returns true if Stop() has been called
+func (pw *proxyWatcher) Stopping() bool {
+	pw.mutex.Lock()
+	defer pw.mutex.Unlock()
+	return pw.stopped
+}
+
+// ResultChan implements watch.Interface
+func (pw *proxyWatcher) ResultChan() <-chan watch.Event {
+	return pw.result
+}
+
+// StopChan returns stop channel
+func (pw *proxyWatcher) StopChan() <-chan struct{} {
+	return pw.stopCh
+}
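
A small self-contained sketch of the proxy watcher: events written to the wrapped channel surface on ResultChan, and Stop is safe to call repeatedly. The payload object is arbitrary:

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"

	"github.com/knative/pkg/apis/duck"
)

func relayOneEvent() {
	events := make(chan watch.Event, 1)
	w := duck.NewProxyWatcher(events)

	// Anything sent on the channel shows up on the watch interface.
	events <- watch.Event{Type: watch.Added, Object: &metav1.Status{Message: "hello"}}
	ev := <-w.ResultChan()
	fmt.Println(ev.Type) // ADDED

	// Stop only closes the stop channel once, so calling it twice is safe.
	w.Stop()
	w.Stop()
}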
diff --git a/vendor/github.com/knative/pkg/apis/duck/register.go b/vendor/github.com/knative/pkg/apis/duck/register.go
new file mode 100644
index 0000000..d10adc2
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/register.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package duck
+
+const (
+	GroupName = "duck.knative.dev"
+)
diff --git a/vendor/github.com/knative/pkg/apis/duck/typed.go b/vendor/github.com/knative/pkg/apis/duck/typed.go
new file mode 100644
index 0000000..9d29c1e
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/typed.go
@@ -0,0 +1,141 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package duck
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/tools/cache"
+
+	"github.com/knative/pkg/apis"
+)
+
+// TypedInformerFactory implements InformerFactory such that the elements
+// tracked by the informer/lister have the type of the canonical "obj".
+type TypedInformerFactory struct {
+	Client       dynamic.Interface
+	Type         apis.Listable
+	ResyncPeriod time.Duration
+	StopChannel  <-chan struct{}
+}
+
+// Check that TypedInformerFactory implements InformerFactory.
+var _ InformerFactory = (*TypedInformerFactory)(nil)
+
+// Get implements InformerFactory.
+func (dif *TypedInformerFactory) Get(gvr schema.GroupVersionResource) (cache.SharedIndexInformer, cache.GenericLister, error) {
+	listObj := dif.Type.GetListType()
+	lw := &cache.ListWatch{
+		ListFunc:  asStructuredLister(dif.Client.Resource(gvr).List, listObj),
+		WatchFunc: AsStructuredWatcher(dif.Client.Resource(gvr).Watch, dif.Type),
+	}
+	inf := cache.NewSharedIndexInformer(lw, dif.Type, dif.ResyncPeriod, cache.Indexers{
+		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
+	})
+
+	lister := cache.NewGenericLister(inf.GetIndexer(), gvr.GroupResource())
+
+	go inf.Run(dif.StopChannel)
+
+	if ok := cache.WaitForCacheSync(dif.StopChannel, inf.HasSynced); !ok {
+		return nil, nil, fmt.Errorf("Failed starting shared index informer for %v with type %T", gvr, dif.Type)
+	}
+
+	return inf, lister, nil
+}
+
+type unstructuredLister func(metav1.ListOptions) (*unstructured.UnstructuredList, error)
+
+func asStructuredLister(ulist unstructuredLister, listObj runtime.Object) cache.ListFunc {
+	return func(opts metav1.ListOptions) (runtime.Object, error) {
+		ul, err := ulist(opts)
+		if err != nil {
+			return nil, err
+		}
+		res := listObj.DeepCopyObject()
+		if err := FromUnstructured(ul, res); err != nil {
+			return nil, err
+		}
+		return res, nil
+	}
+}
+
+// AsStructuredWatcher is public for testing only.
+// TODO(mattmoor): Move tests for this to `package duck` and make private.
+func AsStructuredWatcher(wf cache.WatchFunc, obj runtime.Object) cache.WatchFunc {
+	return func(lo metav1.ListOptions) (watch.Interface, error) {
+		uw, err := wf(lo)
+		if err != nil {
+			return nil, err
+		}
+		structuredCh := make(chan watch.Event)
+		go func() {
+			defer close(structuredCh)
+			unstructuredCh := uw.ResultChan()
+			for {
+				select {
+				case ue, ok := <-unstructuredCh:
+					if !ok {
+						// Channel is closed.
+						return
+					}
+
+					unstructuredObj, ok := ue.Object.(*unstructured.Unstructured)
+					if !ok {
+						// If it isn't an unstructured object, then forward the
+						// event as-is.  This is likely to happen when the event's
+						// Type is an Error.
+						structuredCh <- ue
+						continue
+					}
+					structuredObj := obj.DeepCopyObject()
+
+					err := FromUnstructured(unstructuredObj, structuredObj)
+					if err != nil {
+						// Pass back an error indicating that the object we got
+						// was invalid.
+						structuredCh <- watch.Event{
+							Type: watch.Error,
+							Object: &metav1.Status{
+								Status:  metav1.StatusFailure,
+								Code:    http.StatusUnprocessableEntity,
+								Reason:  metav1.StatusReasonInvalid,
+								Message: err.Error(),
+							},
+						}
+						continue
+					}
+					// Send the structured event.
+					structuredCh <- watch.Event{
+						Type:   ue.Type,
+						Object: structuredObj,
+					}
+				}
+			}
+		}()
+
+		return NewProxyWatcher(structuredCh), nil
+	}
+}
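
A sketch of consuming the informer/lister pair produced above to list arbitrary resources as the duck-typed AddressableType; the listAddressables helper, the namespace and the resync period are assumptions made for the example:

package example

import (
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"

	"github.com/knative/pkg/apis/duck"
	duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
)

// listAddressables lists every object of the given resource as the duck-typed
// AddressableType, so callers can read status.address without the full schema.
func listAddressables(client dynamic.Interface, gvr schema.GroupVersionResource, stopCh <-chan struct{}) ([]*duckv1alpha1.AddressableType, error) {
	factory := &duck.TypedInformerFactory{
		Client:       client,
		Type:         &duckv1alpha1.AddressableType{},
		ResyncPeriod: 30 * time.Second,
		StopChannel:  stopCh,
	}
	_, lister, err := factory.Get(gvr)
	if err != nil {
		return nil, err
	}
	objs, err := lister.ByNamespace("default").List(labels.Everything())
	if err != nil {
		return nil, err
	}
	out := make([]*duckv1alpha1.AddressableType, 0, len(objs))
	for _, obj := range objs {
		if at, ok := obj.(*duckv1alpha1.AddressableType); ok {
			out = append(out, at)
		}
	}
	return out, nil
}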
diff --git a/vendor/github.com/knative/pkg/apis/duck/unstructured.go b/vendor/github.com/knative/pkg/apis/duck/unstructured.go
new file mode 100644
index 0000000..98b3cef
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/unstructured.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package duck
+
+import (
+	"encoding/json"
+)
+
+// Marshalable is implemented by the Unstructured K8s types.
+type Marshalable interface {
+	MarshalJSON() ([]byte, error)
+}
+
+// FromUnstructured takes an unstructured object (say, from client-go/dynamic) and
+// converts it into our duck types.
+func FromUnstructured(obj Marshalable, target interface{}) error {
+	// Use the unstructured marshaller to ensure it's proper JSON
+	raw, err := obj.MarshalJSON()
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(raw, &target)
+}
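
A sketch of the conversion above: only the duck-typed portion of an arbitrary object is decoded, everything else is ignored. The unstructured payload here is invented:

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/knative/pkg/apis/duck"
	duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
)

func readHostname() (string, error) {
	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "serving.knative.dev/v1alpha1",
		"kind":       "Service",
		"metadata":   map[string]interface{}{"name": "greeter"},
		"status": map[string]interface{}{
			"address": map[string]interface{}{"hostname": "greeter.default.svc.cluster.local"},
		},
	}}

	at := &duckv1alpha1.AddressableType{}
	if err := duck.FromUnstructured(u, at); err != nil {
		return "", err
	}
	// Fields outside the duck type are simply dropped during unmarshalling.
	fmt.Println(at.Status.Address.Hostname)
	return at.Status.Address.Hostname, nil
}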
diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/addressable_types.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/addressable_types.go
new file mode 100644
index 0000000..0bf1f98
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/addressable_types.go
@@ -0,0 +1,96 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/knative/pkg/apis"
+	"github.com/knative/pkg/apis/duck"
+)
+
+// Addressable provides a generic mechanism for a custom resource
+// definition to indicate a destination for message delivery.
+// (Currently, only hostname is supported, and HTTP is implied. In the
+// future, additional schemes may be supported, and path components
+// a la URI may also be supported.)
+
+// Addressable is the schema for the destination information. This is
+// typically stored in the object's `status`, as this information may
+// be generated by the controller.
+type Addressable struct {
+	Hostname string `json:"hostname,omitempty"`
+}
+
+
+// Addressable is an Implementable "duck type".
+var _ duck.Implementable = (*Addressable)(nil)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AddressableType is a skeleton type wrapping Addressable in the manner we expect
+// resource writers defining compatible resources to embed it.  We will
+// typically use this type to deserialize Addressable ObjectReferences and
+// access the Addressable data.  This is not a real resource.
+type AddressableType struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Status AddressStatus `json:"status"`
+}
+
+// AddressStatus shows how we expect folks to embed Addressable in
+// their Status field.
+type AddressStatus struct {
+	Address *Addressable `json:"address,omitempty"`
+}
+
+// Verify AddressableType resources meet duck contracts.
+var _ duck.Populatable = (*AddressableType)(nil)
+var _ apis.Listable = (*AddressableType)(nil)
+
+// GetFullType implements duck.Implementable
+func (_ *Addressable) GetFullType() duck.Populatable {
+	return &AddressableType{}
+}
+
+// Populate implements duck.Populatable
+func (t *AddressableType) Populate() {
+	t.Status = AddressStatus{
+		&Addressable{
+			// Populate ALL fields
+			Hostname: "this is not empty",
+		},
+	}
+}
+
+// GetListType implements apis.Listable
+func (r *AddressableType) GetListType() runtime.Object {
+	return &AddressableTypeList{}
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AddressableTypeList is a list of AddressableType resources
+type AddressableTypeList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []AddressableType `json:"items"`
+}
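
To show how a resource author would opt in to this duck type, here is a hypothetical status struct that embeds AddressStatus; GreeterStatus and its extra field are not part of this commit:

package example

import (
	duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
)

// GreeterStatus is a hypothetical CRD status that satisfies the Addressable
// contract simply by embedding AddressStatus alongside its own fields.
type GreeterStatus struct {
	duckv1alpha1.AddressStatus `json:",inline"`

	// ObservedGeneration is an example of a resource-specific field that
	// coexists with the duck-typed address.
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
}

func markAddressable(s *GreeterStatus, hostname string) {
	s.Address = &duckv1alpha1.Addressable{Hostname: hostname}
}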
diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/condition_set.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/condition_set.go
new file mode 100644
index 0000000..30b76a5
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/condition_set.go
@@ -0,0 +1,348 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"reflect"
+	"sort"
+	"time"
+
+	"fmt"
+
+	"github.com/knative/pkg/apis"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ConditionsAccessor is the interface for a Resource that implements the getter and
+// setter for accessing a Condition collection.
+// +k8s:deepcopy-gen=true
+type ConditionsAccessor interface {
+	GetConditions() Conditions
+	SetConditions(Conditions)
+}
+
+// ConditionSet is an abstract collection of the possible ConditionType values
+// that a particular resource might expose.  It also holds the "happy condition"
+// for that resource, which we define to be one of Ready or Succeeded depending
+// on whether it is a Living or Batch process respectively.
+// +k8s:deepcopy-gen=false
+type ConditionSet struct {
+	happy      ConditionType
+	dependents []ConditionType
+}
+
+// ConditionManager allows a resource to operate on its Conditions using higher
+// order operations.
+type ConditionManager interface {
+	// IsHappy looks at the happy condition and returns true if that condition is
+	// set to true.
+	IsHappy() bool
+
+	// GetCondition finds and returns the Condition that matches the ConditionType
+	// previously set on Conditions.
+	GetCondition(t ConditionType) *Condition
+
+	// SetCondition sets or updates the Condition on Conditions for Condition.Type.
+	// If there is an update, Conditions are stored back sorted.
+	SetCondition(new Condition)
+
+	// MarkTrue sets the status of t to true, and then marks the happy condition to
+	// true if all other dependents are also true.
+	MarkTrue(t ConditionType)
+
+	// MarkUnknown sets the status of t to Unknown and also sets the happy condition
+	// to Unknown if no other dependent condition is in an error state.
+	MarkUnknown(t ConditionType, reason, messageFormat string, messageA ...interface{})
+
+	// MarkFalse sets the status of t and the happy condition to False.
+	MarkFalse(t ConditionType, reason, messageFormat string, messageA ...interface{})
+
+	// InitializeConditions updates all Conditions in the ConditionSet to Unknown
+	// if not set.
+	InitializeConditions()
+
+	// InitializeCondition updates a Condition to Unknown if not set.
+	InitializeCondition(t ConditionType)
+}
+
+// NewLivingConditionSet returns a ConditionSet to hold the conditions for the
+// living resource. ConditionReady is used as the happy condition.
+func NewLivingConditionSet(d ...ConditionType) ConditionSet {
+	return newConditionSet(ConditionReady, d...)
+}
+
+// NewBatchConditionSet returns a ConditionSet to hold the conditions for the
+// batch resource. ConditionSucceeded is used as the happy condition.
+func NewBatchConditionSet(d ...ConditionType) ConditionSet {
+	return newConditionSet(ConditionSucceeded, d...)
+}
+
+// newConditionSet returns a ConditionSet to hold the conditions that are
+// important for the caller. The first ConditionType is the overarching status
+// that will be used to signal that the resource's status is Ready or Succeeded.
+func newConditionSet(happy ConditionType, dependents ...ConditionType) ConditionSet {
+	var deps []ConditionType
+	for _, d := range dependents {
+		// Skip duplicates
+		if d == happy || contains(deps, d) {
+			continue
+		}
+		deps = append(deps, d)
+	}
+	return ConditionSet{
+		happy:      happy,
+		dependents: deps,
+	}
+}
+
+func contains(ct []ConditionType, t ConditionType) bool {
+	for _, c := range ct {
+		if c == t {
+			return true
+		}
+	}
+	return false
+}
+
+// Check that conditionsImpl implements ConditionManager.
+var _ ConditionManager = (*conditionsImpl)(nil)
+
+// conditionsImpl implements the helper methods for evaluating Conditions.
+// +k8s:deepcopy-gen=false
+type conditionsImpl struct {
+	ConditionSet
+	accessor ConditionsAccessor
+}
+
+// Manage creates a ConditionManager from an accessor object using the original
+// ConditionSet as a reference. Status must be or point to a struct.
+func (r ConditionSet) Manage(status interface{}) ConditionManager {
+
+	// First try to see if status implements ConditionsAccessor
+	ca, ok := status.(ConditionsAccessor)
+	if ok {
+		return conditionsImpl{
+			accessor:     ca,
+			ConditionSet: r,
+		}
+	}
+
+	// Next see if we can use reflection to gain access to Conditions
+	ca = NewReflectedConditionsAccessor(status)
+	if ca != nil {
+		return conditionsImpl{
+			accessor:     ca,
+			ConditionSet: r,
+		}
+	}
+
+	// We tried. This object is not understood by the condition manager.
+	//panic(fmt.Sprintf("Error converting %T into a ConditionsAccessor", status))
+	// TODO: not sure which way. using panic above means passing nil status panics the system.
+	return conditionsImpl{
+		ConditionSet: r,
+	}
+}
+
+// IsHappy looks at the happy condition and returns true if that condition is
+// set to true.
+func (r conditionsImpl) IsHappy() bool {
+	if c := r.GetCondition(r.happy); c == nil || !c.IsTrue() {
+		return false
+	}
+	return true
+}
+
+// GetCondition finds and returns the Condition that matches the ConditionType
+// previously set on Conditions.
+func (r conditionsImpl) GetCondition(t ConditionType) *Condition {
+	if r.accessor == nil {
+		return nil
+	}
+
+	for _, c := range r.accessor.GetConditions() {
+		if c.Type == t {
+			return &c
+		}
+	}
+	return nil
+}
+
+// SetCondition sets or updates the Condition on Conditions for Condition.Type.
+// If there is an update, Conditions are stored back sorted.
+func (r conditionsImpl) SetCondition(new Condition) {
+	if r.accessor == nil {
+		return
+	}
+	t := new.Type
+	var conditions Conditions
+	for _, c := range r.accessor.GetConditions() {
+		if c.Type != t {
+			conditions = append(conditions, c)
+		} else {
+			// If we'd only update the LastTransitionTime, then return.
+			new.LastTransitionTime = c.LastTransitionTime
+			if reflect.DeepEqual(&new, &c) {
+				return
+			}
+		}
+	}
+	new.LastTransitionTime = apis.VolatileTime{Inner: metav1.NewTime(time.Now())}
+	conditions = append(conditions, new)
+	// Sorted for convenience of the consumer, i.e. kubectl.
+	sort.Slice(conditions, func(i, j int) bool { return conditions[i].Type < conditions[j].Type })
+	r.accessor.SetConditions(conditions)
+}
+
+// MarkTrue sets the status of t to true, and then marks the happy condition to
+// true if all other dependents are also true.
+func (r conditionsImpl) MarkTrue(t ConditionType) {
+	// set the specified condition
+	r.SetCondition(Condition{
+		Type:   t,
+		Status: corev1.ConditionTrue,
+	})
+
+	// check the dependents.
+	for _, cond := range r.dependents {
+		c := r.GetCondition(cond)
+		// Failed or Unknown conditions trump true conditions
+		if !c.IsTrue() {
+			return
+		}
+	}
+
+	// set the happy condition
+	r.SetCondition(Condition{
+		Type:   r.happy,
+		Status: corev1.ConditionTrue,
+	})
+}
+
+// MarkUnknown sets the status of t to Unknown and also sets the happy condition
+// to Unknown if no other dependent condition is in an error state.
+func (r conditionsImpl) MarkUnknown(t ConditionType, reason, messageFormat string, messageA ...interface{}) {
+	// set the specified condition
+	r.SetCondition(Condition{
+		Type:    t,
+		Status:  corev1.ConditionUnknown,
+		Reason:  reason,
+		Message: fmt.Sprintf(messageFormat, messageA...),
+	})
+
+	// check the dependents.
+	for _, cond := range r.dependents {
+		c := r.GetCondition(cond)
+		// Failed conditions trump Unknown conditions
+		if c.IsFalse() {
+			// Double check that the happy condition is also false.
+			happy := r.GetCondition(r.happy)
+			if !happy.IsFalse() {
+				r.MarkFalse(r.happy, reason, messageFormat, messageA...)
+			}
+			return
+		}
+	}
+
+	// set the happy condition
+	r.SetCondition(Condition{
+		Type:    r.happy,
+		Status:  corev1.ConditionUnknown,
+		Reason:  reason,
+		Message: fmt.Sprintf(messageFormat, messageA...),
+	})
+}
+
+// MarkFalse sets the status of t and the happy condition to False.
+func (r conditionsImpl) MarkFalse(t ConditionType, reason, messageFormat string, messageA ...interface{}) {
+	for _, t := range []ConditionType{
+		t,
+		r.happy,
+	} {
+		r.SetCondition(Condition{
+			Type:    t,
+			Status:  corev1.ConditionFalse,
+			Reason:  reason,
+			Message: fmt.Sprintf(messageFormat, messageA...),
+		})
+	}
+}
+
+// InitializeConditions updates all Conditions in the ConditionSet to Unknown
+// if not set.
+func (r conditionsImpl) InitializeConditions() {
+	for _, t := range append(r.dependents, r.happy) {
+		r.InitializeCondition(t)
+	}
+}
+
+// InitializeCondition updates a Condition to Unknown if not set.
+func (r conditionsImpl) InitializeCondition(t ConditionType) {
+	if c := r.GetCondition(t); c == nil {
+		r.SetCondition(Condition{
+			Type:   t,
+			Status: corev1.ConditionUnknown,
+		})
+	}
+}
+
+// NewReflectedConditionsAccessor uses reflection to return a ConditionsAccessor
+// to access the field called "Conditions".
+func NewReflectedConditionsAccessor(status interface{}) ConditionsAccessor {
+	statusValue := reflect.Indirect(reflect.ValueOf(status))
+
+	// If status is not a struct, don't even try to use it.
+	if statusValue.Kind() != reflect.Struct {
+		return nil
+	}
+
+	conditionsField := statusValue.FieldByName("Conditions")
+
+	if conditionsField.IsValid() && conditionsField.CanInterface() && conditionsField.CanSet() {
+		if _, ok := conditionsField.Interface().(Conditions); ok {
+			return &reflectedConditionsAccessor{
+				conditions: conditionsField,
+			}
+		}
+	}
+	return nil
+}
+
+// reflectedConditionsAccessor is an internal wrapper object to act as the
+// ConditionsAccessor for status objects that do not implement ConditionsAccessor
+// directly, but do expose the field using the "Conditions" field name.
+type reflectedConditionsAccessor struct {
+	conditions reflect.Value
+}
+
+// GetConditions uses reflection to return Conditions from the held status object.
+func (r *reflectedConditionsAccessor) GetConditions() Conditions {
+	if r != nil && r.conditions.IsValid() && r.conditions.CanInterface() {
+		if conditions, ok := r.conditions.Interface().(Conditions); ok {
+			return conditions
+		}
+	}
+	return Conditions(nil)
+}
+
+// SetConditions uses reflection to set Conditions on the held status object.
+func (r *reflectedConditionsAccessor) SetConditions(conditions Conditions) {
+	if r != nil && r.conditions.IsValid() && r.conditions.CanSet() {
+		r.conditions.Set(reflect.ValueOf(conditions))
+	}
+}
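
The condition helpers above are easiest to see against a concrete status. A minimal sketch, assuming a made-up resource whose happy condition is Ready with a single DeploymentReady dependent:

package example

import (
	duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
)

// exampleCondSet declares the conditions a hypothetical resource exposes:
// Ready is the happy condition, DeploymentReady is its only dependent.
var exampleCondSet = duckv1alpha1.NewLivingConditionSet("DeploymentReady")

// ExampleStatus implements ConditionsAccessor so Manage() can use it directly.
type ExampleStatus struct {
	Conditions duckv1alpha1.Conditions `json:"conditions,omitempty"`
}

func (s *ExampleStatus) GetConditions() duckv1alpha1.Conditions  { return s.Conditions }
func (s *ExampleStatus) SetConditions(c duckv1alpha1.Conditions) { s.Conditions = c }

func reconcileStatus(s *ExampleStatus, deploymentReady bool) bool {
	m := exampleCondSet.Manage(s)
	m.InitializeConditions()
	if deploymentReady {
		// Marking the last dependent true also flips Ready to true.
		m.MarkTrue("DeploymentReady")
	} else {
		m.MarkFalse("DeploymentReady", "DeploymentUnavailable", "waiting for %d replicas", 1)
	}
	return m.IsHappy()
}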
diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/conditions_types.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/conditions_types.go
new file mode 100644
index 0000000..44b9992
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/conditions_types.go
@@ -0,0 +1,167 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/knative/pkg/apis"
+	"github.com/knative/pkg/apis/duck"
+)
+
+// Conditions is the schema for the conditions portion of the payload
+type Conditions []Condition
+
+// ConditionType is a camel-cased condition type.
+type ConditionType string
+
+const (
+	// ConditionReady specifies that the resource is ready.
+	// For long-running resources.
+	ConditionReady ConditionType = "Ready"
+	// ConditionSucceeded specifies that the resource has finished.
+	// For resource which run to completion.
+	ConditionSucceeded ConditionType = "Succeeded"
+)
+
+// Condition defines a readiness condition for a Knative resource.
+// See: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#typical-status-properties
+// +k8s:deepcopy-gen=true
+type Condition struct {
+	// Type of condition.
+	// +required
+	Type ConditionType `json:"type" description:"type of status condition"`
+
+	// Status of the condition, one of True, False, Unknown.
+	// +required
+	Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"`
+
+	// LastTransitionTime is the last time the condition transitioned from one status to another.
+	// We use VolatileTime in place of metav1.Time to exclude this from creating equality.Semantic
+	// differences (all other things held constant).
+	// +optional
+	LastTransitionTime apis.VolatileTime `json:"lastTransitionTime,omitempty" description:"last time the condition transitioned from one status to another"`
+
+	// The reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"`
+
+	// A human readable message indicating details about the transition.
+	// +optional
+	Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"`
+}
+
+// IsTrue is true if the condition is True
+func (c *Condition) IsTrue() bool {
+	if c == nil {
+		return false
+	}
+	return c.Status == corev1.ConditionTrue
+}
+
+// IsFalse is true if the condition is False
+func (c *Condition) IsFalse() bool {
+	if c == nil {
+		return false
+	}
+	return c.Status == corev1.ConditionFalse
+}
+
+// IsUnknown is true if the condition is Unknown
+func (c *Condition) IsUnknown() bool {
+	if c == nil {
+		return true
+	}
+	return c.Status == corev1.ConditionUnknown
+}
+
+
+// Conditions is an Implementable "duck type".
+var _ duck.Implementable = (*Conditions)(nil)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KResource is a skeleton type wrapping Conditions in the manner we expect
+// resource writers defining compatible resources to embed it.  We will
+// typically use this type to deserialize Conditions ObjectReferences and
+// access the Conditions data.  This is not a real resource.
+type KResource struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Status KResourceStatus `json:"status"`
+}
+
+// KResourceStatus shows how we expect folks to embed Conditions in
+// their Status field.
+type KResourceStatus struct {
+	Conditions Conditions `json:"conditions,omitempty"`
+}
+
+func (krs *KResourceStatus) GetConditions() Conditions {
+	return krs.Conditions
+}
+
+func (krs *KResourceStatus) SetConditions(conditions Conditions) {
+	krs.Conditions = conditions
+}
+
+// Ensure KResourceStatus satisfies ConditionsAccessor
+var _ ConditionsAccessor = (*KResourceStatus)(nil)
+
+// In order for Conditions to be Implementable, KResource must be Populatable.
+var _ duck.Populatable = (*KResource)(nil)
+
+// Ensure KResource satisfies apis.Listable
+var _ apis.Listable = (*KResource)(nil)
+
+// GetFullType implements duck.Implementable
+func (_ *Conditions) GetFullType() duck.Populatable {
+	return &KResource{}
+}
+
+// Populate implements duck.Populatable
+func (t *KResource) Populate() {
+	t.Status.Conditions = Conditions{{
+		// Populate ALL fields
+		Type:               "Birthday",
+		Status:             corev1.ConditionTrue,
+		LastTransitionTime: apis.VolatileTime{Inner: metav1.NewTime(time.Date(1984, 02, 28, 18, 52, 00, 00, time.UTC))},
+		Reason:             "Celebrate",
+		Message:            "n3wScott, find your party hat :tada:",
+	}}
+}
+
+// GetListType implements apis.Listable
+func (r *KResource) GetListType() runtime.Object {
+	return &KResourceList{}
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KResourceList is a list of KResource resources
+type KResourceList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []KResource `json:"items"`
+}
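
A short sketch of the nil-safe accessors above: looking up a condition that may be absent and reading its state without extra nil checks. The KResourceStatus value would normally come from a decoded resource:

package example

import (
	"fmt"

	duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
)

// inspectReady shows the nil-safe condition helpers: a missing Ready
// condition reads as unknown rather than panicking.
func inspectReady(status *duckv1alpha1.KResourceStatus) {
	var ready *duckv1alpha1.Condition
	for i := range status.Conditions {
		if status.Conditions[i].Type == duckv1alpha1.ConditionReady {
			ready = &status.Conditions[i]
		}
	}
	fmt.Printf("true=%v false=%v unknown=%v\n", ready.IsTrue(), ready.IsFalse(), ready.IsUnknown())
}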
diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/doc.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/doc.go
new file mode 100644
index 0000000..3638eb7
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// API versions allow the API contract for a resource to be changed while keeping
+// backward compatibility by supporting multiple concurrent versions
+// of the same resource.
+
+// +k8s:deepcopy-gen=package
+// +groupName=duck.knative.dev
+package v1alpha1
diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/generational_types.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/generational_types.go
new file mode 100644
index 0000000..c8197f6
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/generational_types.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/knative/pkg/apis"
+	"github.com/knative/pkg/apis/duck"
+)
+
+// Generation is the schema for the generational portion of the payload
+type Generation int64
+
+
+// Generation is an Implementable "duck type".
+var _ duck.Implementable = (*Generation)(nil)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Generational is a skeleton type wrapping Generation in the manner we expect
+// resource writers defining compatible resources to embed it.  We will
+// typically use this type to deserialize Generation ObjectReferences and
+// access the Generation data.  This is not a real resource.
+type Generational struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec GenerationalSpec `json:"spec"`
+}
+
+// GenerationalSpec shows how we expect folks to embed Generation in
+// their Spec field.
+type GenerationalSpec struct {
+	Generation Generation `json:"generation,omitempty"`
+}
+
+// In order for Generation to be Implementable, Generational must be Populatable.
+var _ duck.Populatable = (*Generational)(nil)
+
+// Ensure Generational satisfies apis.Listable
+var _ apis.Listable = (*Generational)(nil)
+
+// GetFullType implements duck.Implementable
+func (_ *Generation) GetFullType() duck.Populatable {
+	return &Generational{}
+}
+
+// Populate implements duck.Populatable
+func (t *Generational) Populate() {
+	t.Spec.Generation = 1234
+}
+
+// GetListType implements apis.Listable
+func (r *Generational) GetListType() runtime.Object {
+	return &GenerationalList{}
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// GenerationalList is a list of Generational resources
+type GenerationalList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []Generational `json:"items"`
+}
diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/legacy_targetable_types.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/legacy_targetable_types.go
new file mode 100644
index 0000000..ee50201
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/legacy_targetable_types.go
@@ -0,0 +1,96 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/knative/pkg/apis"
+	"github.com/knative/pkg/apis/duck"
+)
+
+// LegacyTargetable left around until we migrate to Addressable in the
+// dependent resources. Addressable has more structure in the way it
+// defines the fields. LegacyTargetable only assumed a single string
+// in the Status field and we're moving towards defining proper structs
+// under Status rather than strings.
+// This is to support existing resources until they migrate.
+//
+// Do not use this for anything new, use Addressable
+
+// LegacyTargetable is the old schema for the addressable portion
+// of the payload
+//
+// For new resources use Addressable.
+type LegacyTargetable struct {
+	DomainInternal string `json:"domainInternal,omitempty"`
+}
+
+
+// LegacyTargetable is an Implementable "duck type".
+var _ duck.Implementable = (*LegacyTargetable)(nil)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// LegacyTarget is a skeleton type wrapping LegacyTargetable in the manner we
+// want to support unless they get migrated into supporting Legacy.
+// We will typically use this type to deserialize LegacyTargetable
+// ObjectReferences and access the LegacyTargetable data.  This is not a
+// real resource.
+// ** Do not use this for any new resources **
+type LegacyTarget struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Status LegacyTargetable `json:"status"`
+}
+
+// In order for LegacyTargetable to be Implementable, LegacyTarget must be Populatable.
+var _ duck.Populatable = (*LegacyTarget)(nil)
+
+// Ensure LegacyTarget satisfies apis.Listable
+var _ apis.Listable = (*LegacyTarget)(nil)
+
+// GetFullType implements duck.Implementable
+func (_ *LegacyTargetable) GetFullType() duck.Populatable {
+	return &LegacyTarget{}
+}
+
+// Populate implements duck.Populatable
+func (t *LegacyTarget) Populate() {
+	t.Status = LegacyTargetable{
+		// Populate ALL fields
+		DomainInternal: "this is not empty",
+	}
+}
+
+// GetListType implements apis.Listable
+func (r *LegacyTarget) GetListType() runtime.Object {
+	return &LegacyTargetList{}
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// LegacyTargetList is a list of LegacyTarget resources
+type LegacyTargetList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []LegacyTarget `json:"items"`
+}
diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/register.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/register.go
new file mode 100644
index 0000000..a0264e5
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/register.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"github.com/knative/pkg/apis/duck"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: duck.GroupName, Version: "v1alpha1"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+// addKnownTypes adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(
+		SchemeGroupVersion,
+		&KResource{},
+		(&KResource{}).GetListType(),
+		&Generational{},
+		(&Generational{}).GetListType(),
+		&AddressableType{},
+		(&AddressableType{}).GetListType(),
+		&Target{},
+		(&Target{}).GetListType(),
+		&LegacyTarget{},
+		(&LegacyTarget{}).GetListType(),
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
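
For completeness, a sketch of how a consumer (for example an operator) could register these duck types with a runtime.Scheme; the newDuckScheme helper is illustrative:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"

	duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
)

func newDuckScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	// Registers KResource, Generational, AddressableType, Target, LegacyTarget
	// and their list types under duck.knative.dev/v1alpha1.
	if err := duckv1alpha1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}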
diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/retired_targetable_types.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/retired_targetable_types.go
new file mode 100644
index 0000000..695e11c
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/retired_targetable_types.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/knative/pkg/apis"
+	"github.com/knative/pkg/apis/duck"
+)
+
+// Targetable is an earlier version of the Callable interface.
+// Callable is a higher-level interface which implements Addressable
+// but further promises that the destination may synchronously return
+// response messages in reply to a message.
+//
+// Targetable implementations should instead implement Addressable and
+// include an `eventing.knative.dev/returns=any` annotation.
+
+// Targetable is retired; implement Addressable for now.
+type Targetable struct {
+	DomainInternal string `json:"domainInternal,omitempty"`
+}
+
+
+// Targetable is an Implementable "duck type".
+var _ duck.Implementable = (*Targetable)(nil)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Target is a skeleton type wrapping Targetable in the manner we expect
+// resource writers defining compatible resources to embed it.  We will
+// typically use this type to deserialize Targetable ObjectReferences and
+// access the Targetable data.  This is not a real resource.
+type Target struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Status TargetStatus `json:"status"`
+}
+
+// TargetStatus shows how we expect folks to embed Targetable in
+// their Status field.
+type TargetStatus struct {
+	Targetable *Targetable `json:"targetable,omitempty"`
+}
+
+// In order for Targetable to be Implementable, Target must be Populatable.
+var _ duck.Populatable = (*Target)(nil)
+
+// Ensure Target satisfies apis.Listable
+var _ apis.Listable = (*Target)(nil)
+
+// GetFullType implements duck.Implementable
+func (_ *Targetable) GetFullType() duck.Populatable {
+	return &Target{}
+}
+
+// Populate implements duck.Populatable
+func (t *Target) Populate() {
+	t.Status = TargetStatus{
+		&Targetable{
+			// Populate ALL fields
+			DomainInternal: "this is not empty",
+		},
+	}
+}
+
+// GetListType implements apis.Listable
+func (r *Target) GetListType() runtime.Object {
+	return &TargetList{}
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TargetList is a list of Target resources
+type TargetList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []Target `json:"items"`
+}
diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..731c705
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,493 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AddressStatus) DeepCopyInto(out *AddressStatus) {
+	*out = *in
+	if in.Address != nil {
+		in, out := &in.Address, &out.Address
+		*out = new(Addressable)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressStatus.
+func (in *AddressStatus) DeepCopy() *AddressStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(AddressStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Addressable) DeepCopyInto(out *Addressable) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addressable.
+func (in *Addressable) DeepCopy() *Addressable {
+	if in == nil {
+		return nil
+	}
+	out := new(Addressable)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AddressableType) DeepCopyInto(out *AddressableType) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressableType.
+func (in *AddressableType) DeepCopy() *AddressableType {
+	if in == nil {
+		return nil
+	}
+	out := new(AddressableType)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AddressableType) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AddressableTypeList) DeepCopyInto(out *AddressableTypeList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]AddressableType, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressableTypeList.
+func (in *AddressableTypeList) DeepCopy() *AddressableTypeList {
+	if in == nil {
+		return nil
+	}
+	out := new(AddressableTypeList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AddressableTypeList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Condition) DeepCopyInto(out *Condition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
+func (in *Condition) DeepCopy() *Condition {
+	if in == nil {
+		return nil
+	}
+	out := new(Condition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in Conditions) DeepCopyInto(out *Conditions) {
+	{
+		in := &in
+		*out = make(Conditions, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions.
+func (in Conditions) DeepCopy() Conditions {
+	if in == nil {
+		return nil
+	}
+	out := new(Conditions)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Generational) DeepCopyInto(out *Generational) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Spec = in.Spec
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Generational.
+func (in *Generational) DeepCopy() *Generational {
+	if in == nil {
+		return nil
+	}
+	out := new(Generational)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Generational) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GenerationalList) DeepCopyInto(out *GenerationalList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Generational, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerationalList.
+func (in *GenerationalList) DeepCopy() *GenerationalList {
+	if in == nil {
+		return nil
+	}
+	out := new(GenerationalList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GenerationalList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GenerationalSpec) DeepCopyInto(out *GenerationalSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerationalSpec.
+func (in *GenerationalSpec) DeepCopy() *GenerationalSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(GenerationalSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KResource) DeepCopyInto(out *KResource) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KResource.
+func (in *KResource) DeepCopy() *KResource {
+	if in == nil {
+		return nil
+	}
+	out := new(KResource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KResource) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KResourceList) DeepCopyInto(out *KResourceList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KResource, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KResourceList.
+func (in *KResourceList) DeepCopy() *KResourceList {
+	if in == nil {
+		return nil
+	}
+	out := new(KResourceList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KResourceList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KResourceStatus) DeepCopyInto(out *KResourceStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make(Conditions, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KResourceStatus.
+func (in *KResourceStatus) DeepCopy() *KResourceStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(KResourceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LegacyTarget) DeepCopyInto(out *LegacyTarget) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LegacyTarget.
+func (in *LegacyTarget) DeepCopy() *LegacyTarget {
+	if in == nil {
+		return nil
+	}
+	out := new(LegacyTarget)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LegacyTarget) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LegacyTargetList) DeepCopyInto(out *LegacyTargetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]LegacyTarget, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LegacyTargetList.
+func (in *LegacyTargetList) DeepCopy() *LegacyTargetList {
+	if in == nil {
+		return nil
+	}
+	out := new(LegacyTargetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LegacyTargetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LegacyTargetable) DeepCopyInto(out *LegacyTargetable) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LegacyTargetable.
+func (in *LegacyTargetable) DeepCopy() *LegacyTargetable {
+	if in == nil {
+		return nil
+	}
+	out := new(LegacyTargetable)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Target) DeepCopyInto(out *Target) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Target.
+func (in *Target) DeepCopy() *Target {
+	if in == nil {
+		return nil
+	}
+	out := new(Target)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Target) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetList) DeepCopyInto(out *TargetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Target, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetList.
+func (in *TargetList) DeepCopy() *TargetList {
+	if in == nil {
+		return nil
+	}
+	out := new(TargetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TargetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetStatus) DeepCopyInto(out *TargetStatus) {
+	*out = *in
+	if in.Targetable != nil {
+		in, out := &in.Targetable, &out.Targetable
+		*out = new(Targetable)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetStatus.
+func (in *TargetStatus) DeepCopy() *TargetStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(TargetStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Targetable) DeepCopyInto(out *Targetable) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Targetable.
+func (in *Targetable) DeepCopy() *Targetable {
+	if in == nil {
+		return nil
+	}
+	out := new(Targetable)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/knative/pkg/apis/duck/verify.go b/vendor/github.com/knative/pkg/apis/duck/verify.go
new file mode 100644
index 0000000..d53421b
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/duck/verify.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package duck
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/google/go-cmp/cmp"
+)
+
+// Implementable is implemented by the Fooable duck type that consumers
+// are expected to embed as a `.status.fooable` field.
+type Implementable interface {
+	// GetFullType returns an instance of a full resource wrapping
+	// an instance of this Implementable that can populate its fields
+	// to verify json roundtripping.
+	GetFullType() Populatable
+}
+
+// Populatable is implemented by a skeleton resource wrapping an Implementable
+// duck type.  It will generally have TypeMeta, ObjectMeta, and a Status field
+// wrapping a Fooable field.
+type Populatable interface {
+	// Populate fills in all possible fields, so that we can verify that
+	// they roundtrip properly through JSON.
+	Populate()
+}
+
+// VerifyType verifies that a particular concrete resource properly implements
+// the provided Implementable duck type.  It is expected that, under the
+// resource definition implementing a particular "Fooable", one would write:
+//
+//   type ConcreteResource struct { ... }
+//
+//   // Check that ConcreteResource properly implement Fooable.
+//   err := duck.VerifyType(&ConcreteResource{}, &something.Fooable{})
+//
+// This will return an error if the duck typing is not satisfied.
+func VerifyType(instance interface{}, iface Implementable) error {
+	// Create instances of the full resource for our input and ultimate result
+	// that we will compare at the end.
+	input, output := iface.GetFullType(), iface.GetFullType()
+
+	// Populate our input resource with values we will roundtrip.
+	input.Populate()
+
+	// Serialize the input to JSON and deserialize that into the provided instance
+	// of the type that we are checking.
+	if before, err := json.Marshal(input); err != nil {
+		return fmt.Errorf("error serializing duck type %T", input)
+	} else if err := json.Unmarshal(before, instance); err != nil {
+		return fmt.Errorf("error deserializing duck type %T into %T", input, instance)
+	}
+
+	// Serialize the instance we are checking to JSON and deserialize that into the
+	// output resource.
+	if after, err := json.Marshal(instance); err != nil {
+		return fmt.Errorf("error serializing %T", instance)
+	} else if err := json.Unmarshal(after, output); err != nil {
+		return fmt.Errorf("error deserializing %T into dock type %T", instance, output)
+	}
+
+	// Now verify that we were able to roundtrip all of our fields through the type
+	// we are checking.
+	if diff := cmp.Diff(input, output); diff != "" {
+		return fmt.Errorf("%T does not implement the duck type %T, the following fields were lost: %s",
+			instance, iface, diff)
+	}
+	return nil
+}
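
For reference, here is a minimal sketch (hypothetical Fooable/ConcreteResource names, not part of this diff) of how duck.VerifyType above can be used to check that a concrete resource round-trips all fields of a duck type:

    package example

    import "github.com/knative/pkg/apis/duck"

    // Fooable is the duck-typed fragment that consumers embed under .status.
    type Fooable struct {
        Field string `json:"field,omitempty"`
    }

    // FooableType is the skeleton wrapper around Fooable; it implements Populatable.
    type FooableType struct {
        Status Fooable `json:"status"`
    }

    // Populate fills in every Fooable field so a lossy round-trip is detected.
    func (t *FooableType) Populate() { t.Status.Field = "sentinel" }

    // GetFullType makes *Fooable satisfy the Implementable interface.
    func (*Fooable) GetFullType() duck.Populatable { return &FooableType{} }

    // ConcreteResource is the type being checked against the Fooable contract.
    type ConcreteResource struct {
        Status Fooable `json:"status"`
    }

    // checkDuckType returns nil only if ConcreteResource preserves all Fooable fields.
    func checkDuckType() error {
        return duck.VerifyType(&ConcreteResource{}, &Fooable{})
    }
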
diff --git a/vendor/github.com/knative/pkg/apis/field_error.go b/vendor/github.com/knative/pkg/apis/field_error.go
new file mode 100644
index 0000000..023db78
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/field_error.go
@@ -0,0 +1,337 @@
+/*
+Copyright 2017 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apis
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// CurrentField is a constant to supply as a fieldPath for when there is
+// a problem with the current field itself.
+const CurrentField = ""
+
+// FieldError is used to propagate the context of errors pertaining to
+// specific fields in a manner suitable for use in a recursive walk, so
+// that errors contain the appropriate field context.
+// FieldError methods are non-mutating.
+// +k8s:deepcopy-gen=true
+type FieldError struct {
+	Message string
+	Paths   []string
+	// Details contains an optional longer payload.
+	// +optional
+	Details string
+	errors  []FieldError
+}
+
+// FieldError implements error
+var _ error = (*FieldError)(nil)
+
+// ViaField is used to propagate a validation error along a field access.
+// For example, if a type recursively validates its "spec" via:
+//   if err := foo.Spec.Validate(); err != nil {
+//     // Augment any field paths with the context that they were accessed
+//     // via "spec".
+//     return err.ViaField("spec")
+//   }
+func (fe *FieldError) ViaField(prefix ...string) *FieldError {
+	if fe == nil {
+		return nil
+	}
+	// Copy over message and details, paths will be updated and errors come
+	// along using .Also().
+	newErr := &FieldError{
+		Message: fe.Message,
+		Details: fe.Details,
+	}
+
+	// Prepend the Prefix to existing errors.
+	newPaths := make([]string, 0, len(fe.Paths))
+	for _, oldPath := range fe.Paths {
+		newPaths = append(newPaths, flatten(append(prefix, oldPath)))
+	}
+	newErr.Paths = newPaths
+	for _, e := range fe.errors {
+		newErr = newErr.Also(e.ViaField(prefix...))
+	}
+	return newErr
+}
+
+// ViaIndex is used to attach an index to the next ViaField provided.
+// For example, if a type recursively validates a parameter that has a collection:
+//  for i, c := range spec.Collection {
+//    if err := doValidation(c); err != nil {
+//      return err.ViaIndex(i).ViaField("collection")
+//    }
+//  }
+func (fe *FieldError) ViaIndex(index int) *FieldError {
+	return fe.ViaField(asIndex(index))
+}
+
+// ViaFieldIndex is the short way to chain: err.ViaIndex(bar).ViaField(foo)
+func (fe *FieldError) ViaFieldIndex(field string, index int) *FieldError {
+	return fe.ViaIndex(index).ViaField(field)
+}
+
+// ViaKey is used to attach a key to the next ViaField provided.
+// For example, if a type recursively validates a parameter that has a collection:
+//  for k, v := range spec.Bag {
+//    if err := doValidation(v); err != nil {
+//      return err.ViaKey(k).ViaField("bag")
+//    }
+//  }
+func (fe *FieldError) ViaKey(key string) *FieldError {
+	return fe.ViaField(asKey(key))
+}
+
+// ViaFieldKey is the short way to chain: err.ViaKey(bar).ViaField(foo)
+func (fe *FieldError) ViaFieldKey(field string, key string) *FieldError {
+	return fe.ViaKey(key).ViaField(field)
+}
+
+// Also collects errors, returns a new collection of existing errors and new errors.
+func (fe *FieldError) Also(errs ...*FieldError) *FieldError {
+	var newErr *FieldError
+	// collect the current object's errors, if it has any
+	if !fe.isEmpty() {
+		newErr = fe.DeepCopy()
+	} else {
+		newErr = &FieldError{}
+	}
+	// and then collect the passed in errors
+	for _, e := range errs {
+		if !e.isEmpty() {
+			newErr.errors = append(newErr.errors, *e)
+		}
+	}
+	if newErr.isEmpty() {
+		return nil
+	}
+	return newErr
+}
+
+func (fe *FieldError) isEmpty() bool {
+	if fe == nil {
+		return true
+	}
+	return fe.Message == "" && fe.Details == "" && len(fe.errors) == 0 && len(fe.Paths) == 0
+}
+
+func (fe *FieldError) getNormalizedErrors() []FieldError {
+	// in case we call getNormalizedErrors on a nil object, return just an empty
+	// list. This can happen when .Error() is called on a nil object.
+	if fe == nil {
+		return []FieldError(nil)
+	}
+	var errors []FieldError
+	// if this FieldError is a leaf,
+	if fe.Message != "" {
+		err := FieldError{
+			Message: fe.Message,
+			Paths:   fe.Paths,
+			Details: fe.Details,
+		}
+		errors = append(errors, err)
+	}
+	// and then collect all other errors recursively.
+	for _, e := range fe.errors {
+		errors = append(errors, e.getNormalizedErrors()...)
+	}
+	return errors
+}
+
+// Error implements error
+func (fe *FieldError) Error() string {
+	var errs []string
+	// Get the list of errors as a flat merged list.
+	normedErrors := merge(fe.getNormalizedErrors())
+	for _, e := range normedErrors {
+		if e.Details == "" {
+			errs = append(errs, fmt.Sprintf("%v: %v", e.Message, strings.Join(e.Paths, ", ")))
+		} else {
+			errs = append(errs, fmt.Sprintf("%v: %v\n%v", e.Message, strings.Join(e.Paths, ", "), e.Details))
+		}
+	}
+	return strings.Join(errs, "\n")
+}
+
+// Helpers ---
+
+func asIndex(index int) string {
+	return fmt.Sprintf("[%d]", index)
+}
+
+func isIndex(part string) bool {
+	return strings.HasPrefix(part, "[") && strings.HasSuffix(part, "]")
+}
+
+func asKey(key string) string {
+	return fmt.Sprintf("[%s]", key)
+}
+
+// flatten takes in an array of path components and looks for chances to flatten
+// objects that have index prefixes, examples:
+//   err([0]).ViaField(bar).ViaField(foo) -> foo.bar.[0] converts to foo.bar[0]
+//   err(bar).ViaIndex(0).ViaField(foo) -> foo.[0].bar converts to foo[0].bar
+//   err(bar).ViaField(foo).ViaIndex(0) -> [0].foo.bar converts to [0].foo.bar
+//   err(bar).ViaIndex(0).ViaIndex(1).ViaField(foo) -> foo.[1].[0].bar converts to foo[1][0].bar
+func flatten(path []string) string {
+	var newPath []string
+	for _, part := range path {
+		for _, p := range strings.Split(part, ".") {
+			if p == CurrentField {
+				continue
+			} else if len(newPath) > 0 && isIndex(p) {
+				newPath[len(newPath)-1] = fmt.Sprintf("%s%s", newPath[len(newPath)-1], p)
+			} else {
+				newPath = append(newPath, p)
+			}
+		}
+	}
+	return strings.Join(newPath, ".")
+}
+
+// mergePaths takes in two string slices and returns the combination of them
+// without any duplicate entries.
+func mergePaths(a, b []string) []string {
+	newPaths := make([]string, 0, len(a)+len(b))
+	newPaths = append(newPaths, a...)
+	for _, bi := range b {
+		if !containsString(newPaths, bi) {
+			newPaths = append(newPaths, bi)
+		}
+	}
+	return newPaths
+}
+
+// containsString takes in a string slice and looks for the provided string
+// within the slice.
+func containsString(slice []string, s string) bool {
+	for _, item := range slice {
+		if item == s {
+			return true
+		}
+	}
+	return false
+}
+
+// merge takes in a flat list of FieldErrors and returns a merged list of
+// FieldErrors. FieldErrors have their Paths combined (and de-duplicated) if
+// their Message and Details are the same. Merge will not inspect FieldError.errors.
+// Merge will also sort the .Paths slice and the errors slice before returning.
+func merge(errs []FieldError) []FieldError {
+	// make a map big enough for all the errors.
+	m := make(map[string]FieldError, len(errs))
+
+	// Convert errs to a map where the key is <message>-<details> and the value
+	// is the error. If an error already exists in the map with the same key,
+	// then the paths will be merged.
+	for _, e := range errs {
+		k := key(&e)
+		if v, ok := m[k]; ok {
+			// Found a match, merge the paths.
+			v.Paths = mergePaths(v.Paths, e.Paths)
+			m[k] = v
+		} else {
+			// Does not exist in the map, save the error.
+			m[k] = e
+		}
+	}
+
+	// Take the map made previously and flatten it back out again.
+	newErrs := make([]FieldError, 0, len(m))
+	for _, v := range m {
+		// While we have access to the merged paths, sort them too.
+		sort.Slice(v.Paths, func(i, j int) bool { return v.Paths[i] < v.Paths[j] })
+		newErrs = append(newErrs, v)
+	}
+
+	// Sort the flattened map.
+	sort.Slice(newErrs, func(i, j int) bool {
+		if newErrs[i].Message == newErrs[j].Message {
+			return newErrs[i].Details < newErrs[j].Details
+		}
+		return newErrs[i].Message < newErrs[j].Message
+	})
+
+	// return back the merged list of sorted errors.
+	return newErrs
+}
+
+// key returns the key using the fields .Message and .Details.
+func key(err *FieldError) string {
+	return fmt.Sprintf("%s-%s", err.Message, err.Details)
+}
+
+// Public helpers ---
+
+// ErrMissingField is a variadic helper method for constructing a FieldError for
+// a set of missing fields.
+func ErrMissingField(fieldPaths ...string) *FieldError {
+	return &FieldError{
+		Message: "missing field(s)",
+		Paths:   fieldPaths,
+	}
+}
+
+// ErrDisallowedFields is a variadic helper method for constructing a FieldError
+// for a set of disallowed fields.
+func ErrDisallowedFields(fieldPaths ...string) *FieldError {
+	return &FieldError{
+		Message: "must not set the field(s)",
+		Paths:   fieldPaths,
+	}
+}
+
+// ErrInvalidValue constructs a FieldError for a field that has received an
+// invalid string value.
+func ErrInvalidValue(value, fieldPath string) *FieldError {
+	return &FieldError{
+		Message: fmt.Sprintf("invalid value %q", value),
+		Paths:   []string{fieldPath},
+	}
+}
+
+// ErrMissingOneOf is a variadic helper method for constructing a FieldError for
+// not having at least one field in a mutually exclusive field group.
+func ErrMissingOneOf(fieldPaths ...string) *FieldError {
+	return &FieldError{
+		Message: "expected exactly one, got neither",
+		Paths:   fieldPaths,
+	}
+}
+
+// ErrMultipleOneOf is a variadic helper method for constructing a FieldError
+// for having more than one field set in a mutually exclusive field group.
+func ErrMultipleOneOf(fieldPaths ...string) *FieldError {
+	return &FieldError{
+		Message: "expected exactly one, got both",
+		Paths:   fieldPaths,
+	}
+}
+
+// ErrInvalidKeyName is a variadic helper method for constructing a FieldError
+// that specifies a key name that is invalid.
+func ErrInvalidKeyName(value, fieldPath string, details ...string) *FieldError {
+	return &FieldError{
+		Message: fmt.Sprintf("invalid key name %q", value),
+		Paths:   []string{fieldPath},
+		Details: strings.Join(details, ", "),
+	}
+}
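
A brief usage sketch (hypothetical Bar/BarSpec types, not part of this diff) of how these FieldError helpers compose field context during validation:

    package example

    import "github.com/knative/pkg/apis"

    type BarSpec struct {
        Name  string
        Items []string
    }

    // Validate flags a missing name and any empty collection entries.
    func (s *BarSpec) Validate() *apis.FieldError {
        var errs *apis.FieldError
        if s.Name == "" {
            errs = errs.Also(apis.ErrMissingField("name"))
        }
        for i, item := range s.Items {
            if item == "" {
                errs = errs.Also(apis.ErrInvalidValue(item, apis.CurrentField).
                    ViaIndex(i).ViaField("items"))
            }
        }
        return errs
    }

    type Bar struct {
        Spec BarSpec
    }

    // Validate prefixes nested errors with "spec", so a missing name surfaces
    // as "missing field(s): spec.name" and an empty first item as
    // `invalid value "": spec.items[0]`.
    func (b *Bar) Validate() *apis.FieldError {
        return b.Spec.Validate().ViaField("spec")
    }
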
diff --git a/vendor/github.com/knative/pkg/apis/interfaces.go b/vendor/github.com/knative/pkg/apis/interfaces.go
new file mode 100644
index 0000000..d21ab41
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/interfaces.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apis
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// Defaultable defines an interface for setting the defaults for the
+// uninitialized fields of this instance.
+type Defaultable interface {
+	SetDefaults()
+}
+
+// Validatable indicates that a particular type may have its fields validated.
+type Validatable interface {
+	// Validate checks the validity of this type's fields.
+	Validate() *FieldError
+}
+
+// Immutable indicates that a particular type has fields that should
+// not change after creation.
+type Immutable interface {
+	// CheckImmutableFields checks that the current instance's immutable
+	// fields haven't changed from the provided original.
+	CheckImmutableFields(original Immutable) *FieldError
+}
+
+// Listable indicates that a particular type can be returned via the returned
+// list type by the API server.
+type Listable interface {
+	runtime.Object
+
+	GetListType() runtime.Object
+}
diff --git a/vendor/github.com/knative/pkg/apis/kind2resource.go b/vendor/github.com/knative/pkg/apis/kind2resource.go
new file mode 100644
index 0000000..37ffe08
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/kind2resource.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apis
+
+import (
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// KindToResource converts a GroupVersionKind to a GroupVersionResource
+// through the world's simplest (worst) pluralizer.
+func KindToResource(gvk schema.GroupVersionKind) schema.GroupVersionResource {
+	return schema.GroupVersionResource{
+		Group:    gvk.Group,
+		Version:  gvk.Version,
+		Resource: pluralizeKind(gvk.Kind),
+	}
+}
+
+// pluralizeKind takes a kind and naively pluralizes it. This is admittedly
+// crude, but there is no generic way to do it correctly; see the related
+// discussion: https://github.com/kubernetes/kubernetes/issues/18622
+func pluralizeKind(kind string) string {
+	ret := strings.ToLower(kind)
+	if strings.HasSuffix(ret, "s") {
+		return fmt.Sprintf("%ses", ret)
+	}
+	return fmt.Sprintf("%ss", ret)
+}
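
For illustration only, a short sketch of the two pluralization branches above (hypothetical groups/kinds):

    package example

    import (
        "k8s.io/apimachinery/pkg/runtime/schema"

        "github.com/knative/pkg/apis"
    )

    // pluralExamples shows the naive pluralizer: "Service" -> "services",
    // while a Kind already ending in "s", e.g. "Status", -> "statuses".
    func pluralExamples() (string, string) {
        svc := apis.KindToResource(schema.GroupVersionKind{
            Group: "serving.knative.dev", Version: "v1alpha1", Kind: "Service",
        })
        status := apis.KindToResource(schema.GroupVersionKind{
            Group: "example.dev", Version: "v1", Kind: "Status",
        })
        return svc.Resource, status.Resource // "services", "statuses"
    }
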
diff --git a/vendor/github.com/knative/pkg/apis/volatile_time.go b/vendor/github.com/knative/pkg/apis/volatile_time.go
new file mode 100644
index 0000000..3d2daa2
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/volatile_time.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2018 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apis
+
+import (
+	"k8s.io/apimachinery/pkg/api/equality"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// VolatileTime wraps metav1.Time
+type VolatileTime struct {
+	Inner metav1.Time
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t VolatileTime) MarshalJSON() ([]byte, error) {
+	return t.Inner.MarshalJSON()
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (t *VolatileTime) UnmarshalJSON(b []byte) error {
+	return t.Inner.UnmarshalJSON(b)
+}
+
+func init() {
+	equality.Semantic.AddFunc(
+		// Always treat VolatileTime fields as equivalent.
+		func(a, b VolatileTime) bool {
+			return true
+		},
+	)
+}
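
A short sketch (hypothetical status struct, not part of this diff) of the effect of the equality.Semantic registration above: two values that differ only in a VolatileTime field compare as equal, so timestamp churn alone does not look like a semantic change:

    package example

    import (
        "k8s.io/apimachinery/pkg/api/equality"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        "github.com/knative/pkg/apis"
    )

    // status is a hypothetical type carrying a timestamp that should not
    // trigger reconciliation on its own.
    type status struct {
        Ready       bool
        LastChecked apis.VolatileTime
    }

    // onlyTimeDiffers returns true: Ready matches, and the registered
    // equality func treats all VolatileTime values as equivalent.
    func onlyTimeDiffers() bool {
        a := status{Ready: true, LastChecked: apis.VolatileTime{Inner: metav1.Now()}}
        b := status{Ready: true}
        return equality.Semantic.DeepEqual(a, b)
    }
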
diff --git a/vendor/github.com/knative/pkg/apis/zz_generated.deepcopy.go b/vendor/github.com/knative/pkg/apis/zz_generated.deepcopy.go
new file mode 100644
index 0000000..76db35c
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/zz_generated.deepcopy.go
@@ -0,0 +1,66 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package apis
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FieldError) DeepCopyInto(out *FieldError) {
+	*out = *in
+	if in.Paths != nil {
+		in, out := &in.Paths, &out.Paths
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.errors != nil {
+		in, out := &in.errors, &out.errors
+		*out = make([]FieldError, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldError.
+func (in *FieldError) DeepCopy() *FieldError {
+	if in == nil {
+		return nil
+	}
+	out := new(FieldError)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolatileTime) DeepCopyInto(out *VolatileTime) {
+	*out = *in
+	in.Inner.DeepCopyInto(&out.Inner)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolatileTime.
+func (in *VolatileTime) DeepCopy() *VolatileTime {
+	if in == nil {
+		return nil
+	}
+	out := new(VolatileTime)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/knative/pkg/kmeta/accessor.go b/vendor/github.com/knative/pkg/kmeta/accessor.go
new file mode 100644
index 0000000..07c69be
--- /dev/null
+++ b/vendor/github.com/knative/pkg/kmeta/accessor.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kmeta
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/tools/cache"
+)
+
+// Accessor is a collection of interfaces from metav1.TypeMeta,
+// runtime.Object and metav1.Object that Kubernetes API types
+// registered with runtime.Scheme must support.
+type Accessor interface {
+	// Interfaces for metav1.TypeMeta
+	GroupVersionKind() schema.GroupVersionKind
+	SetGroupVersionKind(gvk schema.GroupVersionKind)
+
+	// Interfaces for runtime.Object
+	GetObjectKind() schema.ObjectKind
+	DeepCopyObject() runtime.Object
+
+	// Interfaces for metav1.Object
+	GetNamespace() string
+	SetNamespace(namespace string)
+	GetName() string
+	SetName(name string)
+	GetGenerateName() string
+	SetGenerateName(name string)
+	GetUID() types.UID
+	SetUID(uid types.UID)
+	GetResourceVersion() string
+	SetResourceVersion(version string)
+	GetGeneration() int64
+	SetGeneration(generation int64)
+	GetSelfLink() string
+	SetSelfLink(selfLink string)
+	GetCreationTimestamp() metav1.Time
+	SetCreationTimestamp(timestamp metav1.Time)
+	GetDeletionTimestamp() *metav1.Time
+	SetDeletionTimestamp(timestamp *metav1.Time)
+	GetDeletionGracePeriodSeconds() *int64
+	SetDeletionGracePeriodSeconds(*int64)
+	GetLabels() map[string]string
+	SetLabels(labels map[string]string)
+	GetAnnotations() map[string]string
+	SetAnnotations(annotations map[string]string)
+	GetInitializers() *metav1.Initializers
+	SetInitializers(initializers *metav1.Initializers)
+	GetFinalizers() []string
+	SetFinalizers(finalizers []string)
+	GetOwnerReferences() []metav1.OwnerReference
+	SetOwnerReferences([]metav1.OwnerReference)
+	GetClusterName() string
+	SetClusterName(clusterName string)
+}
+
+// DeletionHandlingAccessor tries to convert the given interface into an Accessor
+// first; to handle deletion, it tries to fetch info from DeletedFinalStateUnknown on failure.
+// The name is a reference to cache.DeletionHandlingMetaNamespaceKeyFunc
+func DeletionHandlingAccessor(obj interface{}) (Accessor, error) {
+	accessor, ok := obj.(Accessor)
+	if !ok {
+		// To handle obj deletion, try to fetch info from DeletedFinalStateUnknown.
+		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
+		if !ok {
+			return nil, fmt.Errorf("Couldn't get Accessor from tombstone %#v", obj)
+		}
+		accessor, ok = tombstone.Obj.(Accessor)
+		if !ok {
+			return nil, fmt.Errorf("The object that Tombstone contained is not of kmeta.Accessor %#v", obj)
+		}
+	}
+
+	return accessor, nil
+}
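
A minimal sketch (assuming a cache.ResourceEventHandler-style delete callback, not part of this diff) of how DeletionHandlingAccessor is typically used to unwrap tombstones before reading object metadata:

    package example

    import (
        "log"

        "github.com/knative/pkg/kmeta"
    )

    // onDelete reads namespace/name from a deleted object, whether it arrives
    // directly or wrapped in a cache.DeletedFinalStateUnknown tombstone.
    func onDelete(obj interface{}) {
        accessor, err := kmeta.DeletionHandlingAccessor(obj)
        if err != nil {
            // Neither an Accessor nor a tombstone wrapping one.
            log.Printf("ignoring delete event: %v", err)
            return
        }
        log.Printf("deleted %s/%s", accessor.GetNamespace(), accessor.GetName())
    }
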
diff --git a/vendor/github.com/knative/pkg/kmeta/doc.go b/vendor/github.com/knative/pkg/kmeta/doc.go
new file mode 100644
index 0000000..53ff38d
--- /dev/null
+++ b/vendor/github.com/knative/pkg/kmeta/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package kmeta provides Knative utilities for operating on Kubernetes
+// resources' ObjectMeta.
+package kmeta
diff --git a/vendor/github.com/knative/pkg/kmeta/labels.go b/vendor/github.com/knative/pkg/kmeta/labels.go
new file mode 100644
index 0000000..f9a72d8
--- /dev/null
+++ b/vendor/github.com/knative/pkg/kmeta/labels.go
@@ -0,0 +1,114 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kmeta
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/selection"
+)
+
+// The methods in this file are used for managing subresources in cases where
+// a controller instantiates different resources for each version of itself.
+// There are two sets of methods available here:
+// * `*VersionLabel*`: these methods act on `metadata.resourceVersion` and
+//  create new labels for EVERY change to the resource (incl. `/status`).
+// * `*GenerationLabel*`: these methods act on `metadata.generation` and
+//  create new labels for changes to the resource's "spec" (typically, but
+//  some K8s resources change `metadata.generation` for annotations as well
+//  e.g. Deployment).
+//
+// For example, if a resource A instantiates N B's at version 1 and M B's at
+// version 2, then it can use MakeVersionLabels to decorate each subresource
+// with the appropriate labels for the version at which it was instantiated.
+//
+// During reconciliation, MakeVersionLabelSelector can be used with the
+// informer listers to access the appropriate subresources for the current
+// version of the parent resource.
+//
+// Likewise during reconciliation, MakeOldVersionLabelSelector can be used
+// with the API client's DeleteCollection method to clean up subresources
+// for older versions of the resource.
+
+// MakeVersionLabels constructs a set of labels to apply to subresources
+// instantiated at this version of the parent resource, so that we can
+// efficiently select them.
+func MakeVersionLabels(om metav1.ObjectMetaAccessor) labels.Set {
+	return map[string]string{
+		"controller": string(om.GetObjectMeta().GetUID()),
+		"version":    om.GetObjectMeta().GetResourceVersion(),
+	}
+}
+
+// MakeVersionLabelSelector constructs a selector for subresources
+// instantiated at this version of the parent resource.  This keys
+// off of the labels populated by MakeVersionLabels.
+func MakeVersionLabelSelector(om metav1.ObjectMetaAccessor) labels.Selector {
+	return labels.SelectorFromSet(MakeVersionLabels(om))
+}
+
+// MakeOldVersionLabelSelector constructs a selector for subresources
+// instantiated at an older version of the parent resource.  This keys
+// off of the labels populated by MakeVersionLabels.
+func MakeOldVersionLabelSelector(om metav1.ObjectMetaAccessor) labels.Selector {
+	return labels.NewSelector().Add(
+		mustNewRequirement("controller", selection.Equals, []string{string(om.GetObjectMeta().GetUID())}),
+		mustNewRequirement("version", selection.NotEquals, []string{om.GetObjectMeta().GetResourceVersion()}),
+	)
+}
+
+// MakeGenerationLabels constructs a set of labels to apply to subresources
+// instantiated at this version of the parent resource, so that we can
+// efficiently select them.
+func MakeGenerationLabels(om metav1.ObjectMetaAccessor) labels.Set {
+	return map[string]string{
+		"controller": string(om.GetObjectMeta().GetUID()),
+		"generation": genStr(om),
+	}
+}
+
+// MakeGenerationLabelSelector constructs a selector for subresources
+// instantiated at this version of the parent resource.  This keys
+// off of the labels populated by MakeGenerationLabels.
+func MakeGenerationLabelSelector(om metav1.ObjectMetaAccessor) labels.Selector {
+	return labels.SelectorFromSet(MakeGenerationLabels(om))
+}
+
+// MakeOldGenerationLabelSelector constructs a selector for subresources
+// instantiated at an older version of the parent resource.  This keys
+// off of the labels populated by MakeGenerationLabels.
+func MakeOldGenerationLabelSelector(om metav1.ObjectMetaAccessor) labels.Selector {
+	return labels.NewSelector().Add(
+		mustNewRequirement("controller", selection.Equals, []string{string(om.GetObjectMeta().GetUID())}),
+		mustNewRequirement("generation", selection.NotEquals, []string{genStr(om)}),
+	)
+}
+
+func genStr(om metav1.ObjectMetaAccessor) string {
+	return fmt.Sprintf("%05d", om.GetObjectMeta().GetGeneration())
+}
+
+// mustNewRequirement panics if there are any errors constructing our selectors.
+func mustNewRequirement(key string, op selection.Operator, vals []string) labels.Requirement {
+	r, err := labels.NewRequirement(key, op, vals)
+	if err != nil {
+		panic(fmt.Sprintf("mustNewRequirement(%v, %v, %v) = %v", key, op, vals, err))
+	}
+	return *r
+}
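
To make the intended pairing concrete, here is a small sketch (hypothetical helper, not part of this diff) of how the version-label helpers are meant to be combined: label new subresources, select the current ones, and garbage-collect the stale ones:

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/labels"

        "github.com/knative/pkg/kmeta"
    )

    // versionLabelTriple returns the labels to stamp on freshly created
    // subresources, a selector for subresources of the current version, and a
    // selector suitable for DeleteCollection of older versions.
    func versionLabelTriple(parent metav1.ObjectMetaAccessor) (labels.Set, labels.Selector, labels.Selector) {
        set := kmeta.MakeVersionLabels(parent)
        current := kmeta.MakeVersionLabelSelector(parent)
        stale := kmeta.MakeOldVersionLabelSelector(parent)
        return set, current, stale
    }
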
diff --git a/vendor/github.com/knative/pkg/kmeta/owner_references.go b/vendor/github.com/knative/pkg/kmeta/owner_references.go
new file mode 100644
index 0000000..2e9a128
--- /dev/null
+++ b/vendor/github.com/knative/pkg/kmeta/owner_references.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kmeta
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// OwnerRefable indicates that a particular type has sufficient
+// information to produce a metav1.OwnerReference to an object.
+type OwnerRefable interface {
+	metav1.ObjectMetaAccessor
+
+	// GetGroupVersionKind returns a GroupVersionKind. The name is chosen
+	// to avoid collision with TypeMeta's GroupVersionKind() method.
+	// See: https://issues.k8s.io/3030
+	GetGroupVersionKind() schema.GroupVersionKind
+}
+
+// NewControllerRef creates an OwnerReference pointing to the given controller.
+func NewControllerRef(obj OwnerRefable) *metav1.OwnerReference {
+	return metav1.NewControllerRef(obj.GetObjectMeta(), obj.GetGroupVersionKind())
+}
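
A minimal sketch (hypothetical Parent type, not part of this diff) of what satisfying OwnerRefable looks like in practice, so children can point back at their controller:

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime/schema"

        "github.com/knative/pkg/kmeta"
    )

    // Parent is a hypothetical CRD type; embedding ObjectMeta already provides
    // the metav1.ObjectMetaAccessor half of kmeta.OwnerRefable.
    type Parent struct {
        metav1.TypeMeta   `json:",inline"`
        metav1.ObjectMeta `json:"metadata,omitempty"`
    }

    // GetGroupVersionKind completes the OwnerRefable contract.
    func (p *Parent) GetGroupVersionKind() schema.GroupVersionKind {
        return schema.GroupVersionKind{Group: "example.dev", Version: "v1alpha1", Kind: "Parent"}
    }

    // ownChild stamps a controller OwnerReference for p onto a child's metadata.
    func ownChild(p *Parent, child *metav1.ObjectMeta) {
        child.OwnerReferences = append(child.OwnerReferences, *kmeta.NewControllerRef(p))
    }
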
diff --git a/vendor/github.com/knative/serving/AUTHORS b/vendor/github.com/knative/serving/AUTHORS
new file mode 100644
index 0000000..5ab9082
--- /dev/null
+++ b/vendor/github.com/knative/serving/AUTHORS
@@ -0,0 +1,10 @@
+# This is the list of Knative authors for copyright purposes.
+#
+# This does not necessarily list everyone who has contributed code, since in
+# some cases, their employer may be the copyright holder.  To see the full list
+# of contributors, see the revision history in source control.
+Google LLC
+Pivotal Software, Inc.
+IBM Corp
+Red Hat, Inc.
+Cisco Systems, Inc.
diff --git a/vendor/github.com/knative/serving/LICENSE b/vendor/github.com/knative/serving/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/knative/serving/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
... 5959 lines suppressed ...