Posted to commits@camel.apache.org by nf...@apache.org on 2019/05/21 08:33:40 UTC

[camel-k] branch master updated: Upgrade Operator SDK to version 0.8.0

This is an automated email from the ASF dual-hosted git repository.

nferraro pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel-k.git


The following commit(s) were added to refs/heads/master by this push:
     new c96aece  Upgrade Operator SDK to version 0.8.0
c96aece is described below

commit c96aece6eea27c42855bb6c39310675af98317a4
Author: Antonin Stefanutti <an...@stefanutti.fr>
AuthorDate: Mon May 20 14:20:15 2019 +0200

    Upgrade Operator SDK to version 0.8.0
---
 Gopkg.lock                                         |   6 +-
 Gopkg.toml                                         |   2 +-
 .../operator-sdk/cmd/operator-sdk/add/api.go       |   6 +-
 .../cmd/operator-sdk/add/controller.go             |   7 +-
 .../operator-sdk/cmd/operator-sdk/build/cmd.go     | 211 ++------
 .../operator-sdk/cmd/operator-sdk/generate/k8s.go  |   8 +-
 .../cmd/operator-sdk/generate/openapi.go           |  10 +-
 .../cmd/operator-sdk/internal/genutil/genutil.go   |  16 +-
 .../cmd/operator-sdk/internal/genutil/k8s.go       |  28 +-
 .../cmd/operator-sdk/internal/genutil/openapi.go   |   7 +-
 .../operator-sdk/cmd/operator-sdk/main.go          |  27 +-
 .../operator-sdk/cmd/operator-sdk/migrate/cmd.go   |  83 +++-
 .../operator-sdk/cmd/operator-sdk/new/cmd.go       | 104 ++--
 .../operator-sdk/cmd/operator-sdk/printdeps/cmd.go |  46 +-
 .../operator-sdk/cmd/operator-sdk/scorecard/cmd.go |  56 +--
 .../cmd/operator-sdk/scorecard/scorecard.go        | 344 -------------
 .../cmd/operator-sdk/scorecard/test_definitions.go | 324 ------------
 .../operator-sdk/cmd/operator-sdk/test/cluster.go  | 189 -------
 .../operator-sdk/cmd/operator-sdk/test/cmd.go      |   3 +-
 .../operator-sdk/cmd/operator-sdk/test/local.go    |  31 +-
 .../operator-sdk/cmd/operator-sdk/up/local.go      |  21 +-
 .../operator-sdk/cmd/operator-sdk/version/cmd.go   |   6 +-
 .../version.go => internal/pkg/flags/flags.go}     |   9 +-
 .../internal/pkg/scaffold/ansible/ao_logs.go       |   2 +-
 .../pkg/scaffold/ansible/build_dockerfile.go       |   9 +-
 .../build_test_framework_ansible_test_script.go    |   2 +-
 .../ansible/build_test_framework_dockerfile.go     |   7 +-
 .../internal/pkg/scaffold/ansible/constants.go     |   3 +
 .../pkg/scaffold/ansible/deploy_operator.go        |  24 +-
 .../pkg/scaffold/ansible/dockerfilehybrid.go       |  27 +-
 .../internal/pkg/scaffold/ansible/entrypoint.go    |   2 +-
 .../internal/pkg/scaffold/ansible/go_mod.go        | 126 +++++
 .../internal/pkg/scaffold/ansible/gopkgtoml.go     |  15 +-
 .../pkg/scaffold/{version.go => ansible/input.go}  |  30 +-
 .../internal/pkg/scaffold/ansible/k8s_status.go    |  11 +-
 .../internal/pkg/scaffold/ansible/main.go          |   2 +-
 .../scaffold/ansible/molecule_default_asserts.go   |   7 +-
 .../scaffold/ansible/molecule_default_molecule.go  |   2 +-
 .../scaffold/ansible/molecule_default_playbook.go  |  17 +-
 .../scaffold/ansible/molecule_default_prepare.go   |  11 +-
 .../ansible/molecule_test_cluster_molecule.go      |   2 +-
 .../ansible/molecule_test_cluster_playbook.go      |  27 +-
 .../ansible/molecule_test_local_molecule.go        |   2 +-
 .../ansible/molecule_test_local_playbook.go        |  63 +--
 .../ansible/molecule_test_local_prepare.go         |  13 +-
 .../internal/pkg/scaffold/ansible/playbook.go      |   3 +-
 .../pkg/scaffold/ansible/roles_defaults_main.go    |   3 +-
 .../internal/pkg/scaffold/ansible/roles_files.go   |   2 +-
 .../pkg/scaffold/ansible/roles_handlers_main.go    |   3 +-
 .../pkg/scaffold/ansible/roles_meta_main.go        |   2 +-
 .../internal/pkg/scaffold/ansible/roles_readme.go  |   2 +-
 .../pkg/scaffold/ansible/roles_tasks_main.go       |   3 +-
 .../pkg/scaffold/ansible/roles_templates.go        |   2 +-
 .../pkg/scaffold/ansible/roles_vars_main.go        |   3 +-
 .../internal/pkg/scaffold/ansible/travis.go        |   2 +-
 .../internal/pkg/scaffold/ansible/usersetup.go     |   2 +-
 .../internal/pkg/scaffold/ansible/watches.go       |  15 +-
 .../{go_test_script.go => boilerplate_go_txt.go}   |  31 +-
 .../internal/pkg/scaffold/build_dockerfile.go      |   2 +-
 .../operator-sdk/internal/pkg/scaffold/cmd.go      |   2 +
 .../internal/pkg/scaffold/constants.go             |   1 -
 .../internal/pkg/scaffold/controller_kind.go       | 125 ++++-
 .../operator-sdk/internal/pkg/scaffold/cr.go       |   2 +-
 .../operator-sdk/internal/pkg/scaffold/crd.go      | 150 +++---
 .../operator-sdk/internal/pkg/scaffold/go_mod.go   | 106 ++++
 .../internal/pkg/scaffold/gopkgtoml.go             |  90 +---
 .../internal/pkg/scaffold/helm/chart.go            |  58 ++-
 .../internal/pkg/scaffold/helm/dockerfilehybrid.go |   2 +-
 .../internal/pkg/scaffold/helm/go_mod.go           | 182 +++++++
 .../internal/pkg/scaffold/helm/gopkgtoml.go        |  13 +-
 .../internal/pkg/scaffold/helm/operator.go         |   6 -
 .../internal/pkg/scaffold/helm/role.go             | 229 +++++++++
 .../internal/pkg/scaffold/input/input.go           |   4 +
 .../pkg/scaffold/internal/deps/print_dep.go        |  99 ++++
 .../pkg/scaffold/internal/deps/print_go_mod.go     | 149 ++++++
 .../pkg/scaffold/internal/testutil/test_util.go    |  28 ++
 .../internal/pkg/scaffold/olm-catalog/config.go    |  11 +-
 .../internal/pkg/scaffold/olm-catalog/csv.go       |   7 +-
 .../operator-sdk/internal/pkg/scaffold/operator.go |   6 -
 .../operator-sdk/internal/pkg/scaffold/resource.go |   2 +-
 .../operator-sdk/internal/pkg/scaffold/role.go     |  45 +-
 .../operator-sdk/internal/pkg/scaffold/scaffold.go |  89 +++-
 .../operator-sdk/internal/pkg/scaffold/test_pod.go |  59 ---
 .../internal/pkg/scaffold/test_setup.go            |  18 +
 .../{test_framework_dockerfile.go => tools.go}     |  35 +-
 .../operator-sdk/internal/pkg/scaffold/types.go    |   1 +
 .../operator-sdk/internal/pkg/scaffold/version.go  |   5 +-
 .../pkg}/scorecard/basic_tests.go                  |  81 +++
 .../operator-sdk/internal/pkg/scorecard/helpers.go | 189 +++++++
 .../pkg}/scorecard/olm_tests.go                    | 179 ++++++-
 .../pkg}/scorecard/resource_handler.go             |   7 +-
 .../internal/pkg/scorecard/scorecard.go            | 542 +++++++++++++++++++++
 .../internal/pkg/scorecard/test_definitions.go     | 152 ++++++
 .../operator-sdk/internal/util/projutil/exec.go    | 161 ++++++
 .../internal/util/projutil/project_util.go         | 152 ++++--
 .../pkg/ansible/controller/reconcile.go            |  20 +-
 .../operator-sdk/pkg/ansible/metrics/metrics.go    |  83 ++++
 .../pkg/ansible/proxy/cache_response.go            | 234 +++++++++
 .../operator-sdk/pkg/ansible/proxy/inject_owner.go | 178 +++++++
 .../operator-sdk/pkg/ansible/proxy/kubectl.go      |   6 +-
 .../operator-sdk/pkg/ansible/proxy/proxy.go        | 350 +------------
 .../operator-sdk/pkg/ansible/run.go                |  12 +-
 .../operator-sdk/pkg/ansible/runner/runner.go      |   5 +
 .../apis/scorecard/v1alpha1/doc.go}                |   9 +-
 .../apis/scorecard/v1alpha1/register.go}           |  27 +-
 .../pkg/apis/scorecard/v1alpha1/types.go           | 108 ++++
 .../scorecard/v1alpha1/zz_generated.deepcopy.go    | 138 ++++++
 .../operator-sdk/pkg/helm/controller/reconcile.go  |  26 +-
 .../operator-sdk/pkg/helm/internal/types/types.go  |  10 +-
 .../operator-sdk/pkg/helm/release/manager.go       |  33 --
 .../operator-sdk/pkg/k8sutil/k8sutil.go            |  13 +-
 .../operator-sdk/pkg/log/zap/flags.go              |  12 +
 .../pkg/restmapper/dynamicrestmapper.go            | 124 +++++
 .../operator-sdk/pkg/test/framework.go             |  33 +-
 .../operator-sdk/pkg/test/main_entry.go            |  33 +-
 .../pkg/apis/cache/v1alpha1/memcached_types.go     |   1 +
 .../{memcached_types.go => memcachedrs_types.go}   |  35 +-
 .../apis/cache/v1alpha1/zz_generated.deepcopy.go   |  98 ++++
 .../pkg/controller/add_memcachedrs.go}             |  20 +-
 .../controller/memcached/memcached_controller.go   |   1 +
 .../memcachedrs_controller.go}                     | 138 +++---
 .../operator-sdk/version/version.go                |   4 +-
 122 files changed, 4491 insertions(+), 2270 deletions(-)

diff --git a/Gopkg.lock b/Gopkg.lock
index ce8f1f8..7a219c0 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -469,7 +469,7 @@
   version = "v3.9.0"
 
 [[projects]]
-  digest = "1:674610d54812d3c36ab7861fc826176bf581a9426cc09abec0107414c17f89cd"
+  digest = "1:ce4e43d2eef4a4c40c6e8439c724e34e6fd7e48959a340038e3cd02762cdb437"
   name = "github.com/operator-framework/operator-sdk"
   packages = [
     "pkg/k8sutil",
@@ -478,8 +478,8 @@
     "version",
   ]
   pruneopts = "NT"
-  revision = "7e07b20c9e22560fb22a3295b612eae7652b8b14"
-  version = "v0.7.0"
+  revision = "78c472461e75e6c64589cfadf577a2004b8a26b3"
+  version = "v0.8.0"
 
 [[projects]]
   digest = "1:93b1d84c5fa6d1ea52f4114c37714cddd84d5b78f151b62bb101128dd51399bf"
diff --git a/Gopkg.toml b/Gopkg.toml
index 723995b..6c7096a 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -41,7 +41,7 @@ required = [
   name = "github.com/operator-framework/operator-sdk"
   # The version rule is used for a specific release and the master branch for in between releases.
   # branch = "master" #osdk_branch_annotation
-  version = "=v0.7.0" #osdk_version_annotation
+  version = "=v0.8.0" #osdk_version_annotation
 
 [[constraint]]
   name = "github.com/coreos/prometheus-operator"
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/add/api.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/add/api.go
index d9eac70..2b11ba0 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/add/api.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/add/api.go
@@ -29,7 +29,6 @@ import (
 var (
 	apiVersion string
 	kind       string
-	headerFile string
 )
 
 func newAddApiCmd() *cobra.Command {
@@ -77,7 +76,6 @@ Example:
 	if err := apiCmd.MarkFlagRequired("kind"); err != nil {
 		log.Fatalf("Failed to mark `kind` flag for `add api` subcommand as required")
 	}
-	apiCmd.Flags().StringVar(&headerFile, "header-file", "", "Path to file containing headers for generated files.")
 
 	return apiCmd
 }
@@ -124,12 +122,12 @@ func apiRun(cmd *cobra.Command, args []string) error {
 	}
 
 	// Run k8s codegen for deepcopy
-	if err := genutil.K8sCodegen(headerFile); err != nil {
+	if err := genutil.K8sCodegen(); err != nil {
 		return err
 	}
 
 	// Generate a validation spec for the new CRD.
-	if err := genutil.OpenAPIGen(headerFile); err != nil {
+	if err := genutil.OpenAPIGen(); err != nil {
 		return err
 	}
 
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/add/controller.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/add/controller.go
index fde3646..ddb2dc2 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/add/controller.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/add/controller.go
@@ -25,6 +25,8 @@ import (
 	"github.com/spf13/cobra"
 )
 
+var customAPIImport string
+
 func newAddControllerCmd() *cobra.Command {
 	controllerCmd := &cobra.Command{
 		Use:   "controller",
@@ -57,6 +59,7 @@ Example:
 	if err := controllerCmd.MarkFlagRequired("kind"); err != nil {
 		log.Fatalf("Failed to mark `kind` flag for `add controller` subcommand as required")
 	}
+	controllerCmd.Flags().StringVar(&customAPIImport, "custom-api-import", "", `External Kubernetes resource import path of the form "host.com/repo/path[=import_identifier]". import_identifier is optional`)
 
 	return controllerCmd
 }
@@ -81,10 +84,10 @@ func controllerRun(cmd *cobra.Command, args []string) error {
 		Repo:           projutil.CheckAndGetProjectGoPkg(),
 		AbsProjectPath: projutil.MustGetwd(),
 	}
-
 	s := &scaffold.Scaffold{}
+
 	err = s.Execute(cfg,
-		&scaffold.ControllerKind{Resource: r},
+		&scaffold.ControllerKind{Resource: r, CustomImport: customAPIImport},
 		&scaffold.AddController{Resource: r},
 	)
 	if err != nil {
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/build/cmd.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/build/cmd.go
index 7e89618..d5b334a 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/build/cmd.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/build/cmd.go
@@ -15,30 +15,22 @@
 package build
 
 import (
-	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"strings"
 
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
-	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
 	"github.com/operator-framework/operator-sdk/internal/util/projutil"
-	"github.com/operator-framework/operator-sdk/internal/util/yamlutil"
-	"github.com/operator-framework/operator-sdk/pkg/test"
 
-	"github.com/ghodss/yaml"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )
 
 var (
-	namespacedManBuild string
-	testLocationBuild  string
-	enableTests        bool
-	dockerBuildArgs    string
+	imageBuildArgs string
+	imageBuilder   string
 )
 
 func NewCmd() *cobra.Command {
@@ -59,86 +51,32 @@ For example:
 `,
 		RunE: buildFunc,
 	}
-	buildCmd.Flags().BoolVar(&enableTests, "enable-tests", false, "Enable in-cluster testing by adding test binary to the image")
-	buildCmd.Flags().StringVar(&testLocationBuild, "test-location", "./test/e2e", "Location of tests")
-	buildCmd.Flags().StringVar(&namespacedManBuild, "namespaced-manifest", "deploy/operator.yaml", "Path of namespaced resources manifest for tests")
-	buildCmd.Flags().StringVar(&dockerBuildArgs, "docker-build-args", "", "Extra docker build arguments as one string such as \"--build-arg https_proxy=$https_proxy\"")
+	buildCmd.Flags().StringVar(&imageBuildArgs, "image-build-args", "", "Extra image build arguments as one string such as \"--build-arg https_proxy=$https_proxy\"")
+	buildCmd.Flags().StringVar(&imageBuilder, "image-builder", "docker", "Tool to build OCI images. One of: [docker, buildah]")
 	return buildCmd
 }
 
-/*
- * verifyDeploymentImages checks image names of pod 0 in deployments found in the provided yaml file.
- * This is done because e2e tests require a namespaced manifest file to configure a namespace with
- * required resources. This function is intended to identify if a user used a different image name
- * for their operator in the provided yaml, which would result in the testing of the wrong operator
- * image. As it is possible for a namespaced yaml to have multiple deployments (such as the vault
- * operator, which depends on the etcd-operator), this is just a warning, not a fatal error.
- */
-func verifyDeploymentImage(yamlFile []byte, imageName string) error {
-	warningMessages := ""
-	scanner := yamlutil.NewYAMLScanner(yamlFile)
-	for scanner.Scan() {
-		yamlSpec := scanner.Bytes()
+func createBuildCommand(imageBuilder, context, dockerFile, image string, imageBuildArgs ...string) (*exec.Cmd, error) {
+	var args []string
+	switch imageBuilder {
+	case "docker":
+		args = append(args, "build", "-f", dockerFile, "-t", image)
+	case "buildah":
+		args = append(args, "bud", "--format=docker", "-f", dockerFile, "-t", image)
+	default:
+		return nil, fmt.Errorf("%s is not supported image builder", imageBuilder)
+	}
 
-		yamlMap := make(map[string]interface{})
-		err := yaml.Unmarshal(yamlSpec, &yamlMap)
-		if err != nil {
-			return fmt.Errorf("could not unmarshal YAML namespaced spec: (%v)", err)
-		}
-		kind, ok := yamlMap["kind"].(string)
-		if !ok {
-			return fmt.Errorf("yaml manifest file contains a 'kind' field that is not a string")
-		}
-		if kind == "Deployment" {
-			// this is ugly and hacky; we should probably make this cleaner
-			nestedMap, ok := yamlMap["spec"].(map[string]interface{})
-			if !ok {
-				continue
-			}
-			nestedMap, ok = nestedMap["template"].(map[string]interface{})
-			if !ok {
-				continue
-			}
-			nestedMap, ok = nestedMap["spec"].(map[string]interface{})
-			if !ok {
-				continue
-			}
-			containersArray, ok := nestedMap["containers"].([]interface{})
-			if !ok {
-				continue
-			}
-			for _, item := range containersArray {
-				image, ok := item.(map[string]interface{})["image"].(string)
-				if !ok {
-					continue
-				}
-				if image != imageName {
-					warningMessages = fmt.Sprintf("%s\nWARNING: Namespace manifest contains a deployment with image %v, which does not match the name of the image being built: %v", warningMessages, image, imageName)
-				}
-			}
+	for _, bargs := range imageBuildArgs {
+		if bargs != "" {
+			splitArgs := strings.Fields(bargs)
+			args = append(args, splitArgs...)
 		}
 	}
-	if err := scanner.Err(); err != nil {
-		return fmt.Errorf("failed to verify deployment image: (%v)", err)
-	}
-	if warningMessages == "" {
-		return nil
-	}
-	return errors.New(warningMessages)
-}
 
-func verifyTestManifest(image string) error {
-	namespacedBytes, err := ioutil.ReadFile(namespacedManBuild)
-	if err != nil {
-		return fmt.Errorf("could not read namespaced manifest: (%v)", err)
-	}
+	args = append(args, context)
 
-	err = verifyDeploymentImage(namespacedBytes, image)
-	// the error from verifyDeploymentImage is just a warning, not fatal error
-	if err != nil {
-		log.Warn(err)
-	}
-	return nil
+	return exec.Command(imageBuilder, args...), nil
 }
 
 func buildFunc(cmd *cobra.Command, args []string) error {
@@ -147,111 +85,44 @@ func buildFunc(cmd *cobra.Command, args []string) error {
 	}
 
 	projutil.MustInProjectRoot()
-	goBuildEnv := append(os.Environ(), "GOOS=linux", "GOARCH=amd64", "CGO_ENABLED=0")
+	goBuildEnv := append(os.Environ(), "GOOS=linux", "GOARCH=amd64")
+
+	// If CGO_ENABLED is not set, set it to '0'.
+	if _, ok := os.LookupEnv("CGO_ENABLED"); !ok {
+		goBuildEnv = append(goBuildEnv, "CGO_ENABLED=0")
+	}
+
 	goTrimFlags := []string{"-gcflags", "all=-trimpath=${GOPATH}", "-asmflags", "all=-trimpath=${GOPATH}"}
 	absProjectPath := projutil.MustGetwd()
 	projectName := filepath.Base(absProjectPath)
 
 	// Don't need to build Go code if a non-Go Operator.
-	if projutil.GetOperatorType() == projutil.OperatorTypeGo {
-		managerDir := filepath.Join(projutil.CheckAndGetProjectGoPkg(), scaffold.ManagerDir)
-		outputBinName := filepath.Join(absProjectPath, scaffold.BuildBinDir, projectName)
-		goBuildArgs := append(append([]string{"build"}, goTrimFlags...), "-o", outputBinName, managerDir)
-		buildCmd := exec.Command("go", goBuildArgs...)
-		buildCmd.Env = goBuildEnv
-		if err := projutil.ExecCmd(buildCmd); err != nil {
+	if projutil.IsOperatorGo() {
+		opts := projutil.GoCmdOptions{
+			BinName:     filepath.Join(absProjectPath, scaffold.BuildBinDir, projectName),
+			PackagePath: filepath.Join(projutil.CheckAndGetProjectGoPkg(), scaffold.ManagerDir),
+			Args:        goTrimFlags,
+			Env:         goBuildEnv,
+			GoMod:       projutil.IsDepManagerGoMod(),
+		}
+		if err := projutil.GoBuild(opts); err != nil {
 			return fmt.Errorf("failed to build operator binary: (%v)", err)
 		}
 	}
 
 	image := args[0]
-	baseImageName := image
-	if enableTests {
-		baseImageName += "-intermediate"
-	}
 
-	log.Infof("Building Docker image %s", baseImageName)
+	log.Infof("Building OCI image %s", image)
 
-	dbArgs := []string{"build", ".", "-f", "build/Dockerfile", "-t", baseImageName}
-
-	if dockerBuildArgs != "" {
-		splitArgs := strings.Fields(dockerBuildArgs)
-		dbArgs = append(dbArgs, splitArgs...)
+	buildCmd, err := createBuildCommand(imageBuilder, ".", "build/Dockerfile", image, imageBuildArgs)
+	if err != nil {
+		return err
 	}
 
-	dbcmd := exec.Command("docker", dbArgs...)
-	if err := projutil.ExecCmd(dbcmd); err != nil {
-		if enableTests {
-			return fmt.Errorf("failed to output intermediate image %s: (%v)", image, err)
-		}
+	if err := projutil.ExecCmd(buildCmd); err != nil {
 		return fmt.Errorf("failed to output build image %s: (%v)", image, err)
 	}
 
-	if enableTests {
-		if projutil.GetOperatorType() == projutil.OperatorTypeGo {
-			testBinary := filepath.Join(absProjectPath, scaffold.BuildBinDir, projectName+"-test")
-			goTestBuildArgs := append(append([]string{"test"}, goTrimFlags...), "-c", "-o", testBinary, testLocationBuild+"/...")
-			buildTestCmd := exec.Command("go", goTestBuildArgs...)
-			buildTestCmd.Env = goBuildEnv
-			if err := projutil.ExecCmd(buildTestCmd); err != nil {
-				return fmt.Errorf("failed to build test binary: (%v)", err)
-			}
-		}
-
-		// if a user is using an older sdk repo as their library, make sure they have required build files
-		testDockerfile := filepath.Join(scaffold.BuildTestDir, scaffold.DockerfileFile)
-		_, err := os.Stat(testDockerfile)
-		if err != nil && os.IsNotExist(err) {
-
-			log.Info("Generating build manifests for test-framework.")
-
-			cfg := &input.Config{
-				Repo:           projutil.CheckAndGetProjectGoPkg(),
-				AbsProjectPath: absProjectPath,
-				ProjectName:    projectName,
-			}
-
-			s := &scaffold.Scaffold{}
-			t := projutil.GetOperatorType()
-			switch t {
-			case projutil.OperatorTypeGo:
-				err = s.Execute(cfg,
-					&scaffold.TestFrameworkDockerfile{},
-					&scaffold.GoTestScript{},
-					&scaffold.TestPod{Image: image, TestNamespaceEnv: test.TestNamespaceEnv},
-				)
-			case projutil.OperatorTypeAnsible:
-				return fmt.Errorf("test scaffolding for Ansible Operators is not implemented")
-			case projutil.OperatorTypeHelm:
-				return fmt.Errorf("test scaffolding for Helm Operators is not implemented")
-			default:
-				return fmt.Errorf("unknown operator type '%v'", t)
-			}
-
-			if err != nil {
-				return fmt.Errorf("test framework manifest scaffold failed: (%v)", err)
-			}
-		}
-
-		log.Infof("Building test Docker image %s", image)
-
-		testDbArgs := []string{"build", ".", "-f", testDockerfile, "-t", image, "--build-arg", "NAMESPACEDMAN=" + namespacedManBuild, "--build-arg", "BASEIMAGE=" + baseImageName}
-
-		if dockerBuildArgs != "" {
-			splitArgs := strings.Fields(dockerBuildArgs)
-			testDbArgs = append(testDbArgs, splitArgs...)
-		}
-
-		testDbcmd := exec.Command("docker", testDbArgs...)
-		if err := projutil.ExecCmd(testDbcmd); err != nil {
-			return fmt.Errorf("failed to output test image %s: (%v)", image, err)
-		}
-		// Check image name of deployments in namespaced manifest
-		if err := verifyTestManifest(image); err != nil {
-			return nil
-		}
-	}
-
 	log.Info("Operator build complete.")
 	return nil
 }
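
Note: the rewritten build command above now delegates image builds to either docker or buildah via createBuildCommand. A minimal standalone Go sketch (not part of the commit; the image name and build argument are illustrative) that mirrors that logic and prints the argument list each builder would receive:

package main

import (
	"fmt"
	"strings"
)

// buildArgs mirrors createBuildCommand above: it assembles the argument
// list for the selected image builder, appends any extra build args,
// and finishes with the build context.
func buildArgs(imageBuilder, context, dockerFile, image string, extra ...string) ([]string, error) {
	var args []string
	switch imageBuilder {
	case "docker":
		args = append(args, "build", "-f", dockerFile, "-t", image)
	case "buildah":
		args = append(args, "bud", "--format=docker", "-f", dockerFile, "-t", image)
	default:
		return nil, fmt.Errorf("%s is not a supported image builder", imageBuilder)
	}
	for _, e := range extra {
		if e != "" {
			args = append(args, strings.Fields(e)...)
		}
	}
	return append(args, context), nil
}

func main() {
	// Illustrative image name and proxy build argument.
	for _, builder := range []string{"docker", "buildah"} {
		args, err := buildArgs(builder, ".", "build/Dockerfile", "example/app-operator:v0.0.1", "--build-arg https_proxy=$https_proxy")
		if err != nil {
			panic(err)
		}
		fmt.Println(builder, strings.Join(args, " "))
	}
}

With these inputs it prints "docker build -f build/Dockerfile -t example/app-operator:v0.0.1 --build-arg https_proxy=$https_proxy ." and the corresponding "buildah bud --format=docker ..." line.
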
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/generate/k8s.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/generate/k8s.go
index 21621eb..ace62a3 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/generate/k8s.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/generate/k8s.go
@@ -24,7 +24,7 @@ import (
 )
 
 func newGenerateK8SCmd() *cobra.Command {
-	k8sCmd := &cobra.Command{
+	return &cobra.Command{
 		Use:   "k8s",
 		Short: "Generates Kubernetes code for custom resource",
 		Long: `k8s generator generates code for custom resources given the API
@@ -41,10 +41,6 @@ Example:
 `,
 		RunE: k8sFunc,
 	}
-
-	k8sCmd.Flags().StringVar(&headerFile, "header-file", "", "Path to file containing headers for generated files.")
-
-	return k8sCmd
 }
 
 func k8sFunc(cmd *cobra.Command, args []string) error {
@@ -57,5 +53,5 @@ func k8sFunc(cmd *cobra.Command, args []string) error {
 		return err
 	}
 
-	return genutil.K8sCodegen(headerFile)
+	return genutil.K8sCodegen()
 }
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/generate/openapi.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/generate/openapi.go
index a777a21..639d540 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/generate/openapi.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/generate/openapi.go
@@ -21,10 +21,8 @@ import (
 	"github.com/spf13/cobra"
 )
 
-var headerFile string
-
 func newGenerateOpenAPICmd() *cobra.Command {
-	openAPICmd := &cobra.Command{
+	return &cobra.Command{
 		Use:   "openapi",
 		Short: "Generates OpenAPI specs for API's",
 		Long: `generate openapi generates OpenAPI validation specs in Go from tagged types
@@ -47,10 +45,6 @@ Example:
 `,
 		RunE: openAPIFunc,
 	}
-
-	openAPICmd.Flags().StringVar(&headerFile, "header-file", "", "Path to file containing headers for generated files.")
-
-	return openAPICmd
 }
 
 func openAPIFunc(cmd *cobra.Command, args []string) error {
@@ -58,5 +52,5 @@ func openAPIFunc(cmd *cobra.Command, args []string) error {
 		return fmt.Errorf("command %s doesn't accept any arguments", cmd.CommandPath())
 	}
 
-	return genutil.OpenAPIGen(headerFile)
+	return genutil.OpenAPIGen()
 }
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/internal/genutil/genutil.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/internal/genutil/genutil.go
index e9c1c53..a30dedc 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/internal/genutil/genutil.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/internal/genutil/genutil.go
@@ -22,10 +22,12 @@ import (
 	"path/filepath"
 	"strings"
 
+	flags "github.com/operator-framework/operator-sdk/internal/pkg/flags"
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
 	"github.com/operator-framework/operator-sdk/internal/util/projutil"
 
 	log "github.com/sirupsen/logrus"
+	"github.com/spf13/viper"
 )
 
 func buildCodegenBinaries(genDirs []string, binDir, codegenSrcDir string) error {
@@ -47,7 +49,7 @@ func runGoBuildCodegen(binDir, repoDir, genDir string) error {
 	}
 
 	// Only print binary build info if verbosity is explicitly set.
-	if projutil.IsGoVerbose() {
+	if viper.GetBool(flags.VerboseOpt) {
 		return projutil.ExecCmd(cmd)
 	}
 	cmd.Stdout = ioutil.Discard
@@ -108,10 +110,14 @@ func createFQApis(pkg string, gvs map[string][]string) string {
 	return fqb.String()
 }
 
-func withHeaderFile(hf string, f func(string) error) (err error) {
-	if hf == "" {
-		hf, err = createEmptyTmpFile()
-		if err != nil {
+func withHeaderFile(f func(string) error) (err error) {
+	i, err := (&scaffold.Boilerplate{}).GetInput()
+	if err != nil {
+		return err
+	}
+	hf := i.Path
+	if _, err := os.Stat(hf); os.IsNotExist(err) {
+		if hf, err = createEmptyTmpFile(); err != nil {
 			return err
 		}
 		defer func() {
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/internal/genutil/k8s.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/internal/genutil/k8s.go
index 8fa216f..35fcf72 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/internal/genutil/k8s.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/internal/genutil/k8s.go
@@ -27,9 +27,8 @@ import (
 )
 
 // K8sCodegen performs deepcopy code-generation for all custom resources under
-// pkg/apis. hf is  a path to a header file containing text to add to generated
-// files.
-func K8sCodegen(hf string) error {
+// pkg/apis.
+func K8sCodegen() error {
 	projutil.MustInProjectRoot()
 
 	wd := projutil.MustGetwd()
@@ -38,7 +37,6 @@ func K8sCodegen(hf string) error {
 	binDir := filepath.Join(wd, scaffold.BuildBinDir)
 
 	genDirs := []string{
-		"./cmd/defaulter-gen",
 		"./cmd/client-gen",
 		"./cmd/lister-gen",
 		"./cmd/informer-gen",
@@ -60,11 +58,7 @@ func K8sCodegen(hf string) error {
 	log.Infof("Running deepcopy code-generation for Custom Resource group versions: [%v]\n", gvb.String())
 
 	fdc := func(a string) error { return deepcopyGen(binDir, repoPkg, a, gvMap) }
-	if err = withHeaderFile(hf, fdc); err != nil {
-		return err
-	}
-	fd := func(a string) error { return defaulterGen(binDir, repoPkg, a, gvMap) }
-	if err = withHeaderFile(hf, fd); err != nil {
+	if err = withHeaderFile(fdc); err != nil {
 		return err
 	}
 
@@ -88,19 +82,3 @@ func deepcopyGen(binDir, repoPkg, hf string, gvMap map[string][]string) (err err
 	}
 	return nil
 }
-
-func defaulterGen(binDir, repoPkg, hf string, gvMap map[string][]string) (err error) {
-	apisPkg := filepath.Join(repoPkg, scaffold.ApisDir)
-	args := []string{
-		"--input-dirs", createFQApis(apisPkg, gvMap),
-		"--output-file-base", "zz_generated.defaults",
-		// defaulter-gen requires a boilerplate file. Either use header or an
-		// empty file if header is empty.
-		"--go-header-file", hf,
-	}
-	cmd := exec.Command(filepath.Join(binDir, "defaulter-gen"), args...)
-	if err = projutil.ExecCmd(cmd); err != nil {
-		return fmt.Errorf("failed to perform defaulter code-generation: %v", err)
-	}
-	return nil
-}
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/internal/genutil/openapi.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/internal/genutil/openapi.go
index c75b830..13eabc1 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/internal/genutil/openapi.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/internal/genutil/openapi.go
@@ -28,9 +28,8 @@ import (
 	log "github.com/sirupsen/logrus"
 )
 
-// OpenAPIGen generates OpenAPI validation specs for all CRD's in dirs. hf is
-// a path to a header file containing text to add to generated files.
-func OpenAPIGen(hf string) error {
+// OpenAPIGen generates OpenAPI validation specs for all CRD's in dirs.
+func OpenAPIGen() error {
 	projutil.MustInProjectRoot()
 
 	absProjectPath := projutil.MustGetwd()
@@ -57,7 +56,7 @@ func OpenAPIGen(hf string) error {
 	fqApiStr := createFQApis(apisPkg, gvMap)
 	fqApis := strings.Split(fqApiStr, ",")
 	f := func(a string) error { return openAPIGen(binDir, a, fqApis) }
-	if err = withHeaderFile(hf, f); err != nil {
+	if err = withHeaderFile(f); err != nil {
 		return err
 	}
 
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/main.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/main.go
index 8826dca..c973742 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/main.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/main.go
@@ -15,6 +15,7 @@
 package main
 
 import (
+	"github.com/operator-framework/operator-sdk/internal/util/projutil"
 	"os"
 
 	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
@@ -32,16 +33,29 @@ import (
 	"github.com/operator-framework/operator-sdk/cmd/operator-sdk/test"
 	"github.com/operator-framework/operator-sdk/cmd/operator-sdk/up"
 	"github.com/operator-framework/operator-sdk/cmd/operator-sdk/version"
-	osdkversion "github.com/operator-framework/operator-sdk/version"
+	flags "github.com/operator-framework/operator-sdk/internal/pkg/flags"
+
+	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
 )
 
 func main() {
 	root := &cobra.Command{
-		Use:     "operator-sdk",
-		Short:   "An SDK for building operators with ease",
-		Version: osdkversion.Version,
+		Use:   "operator-sdk",
+		Short: "An SDK for building operators with ease",
+		PersistentPreRun: func(cmd *cobra.Command, args []string) {
+			if viper.GetBool(flags.VerboseOpt) {
+				err := projutil.SetGoVerbose()
+				if err != nil {
+					log.Errorf("Could not set GOFLAGS: (%v)", err)
+					return
+				}
+				log.SetLevel(log.DebugLevel)
+				log.Debug("Debug logging is set")
+			}
+		},
 	}
 
 	root.AddCommand(new.NewCmd())
@@ -58,6 +72,11 @@ func main() {
 	root.AddCommand(olmcatalog.NewCmd())
 	root.AddCommand(version.NewCmd())
 
+	root.PersistentFlags().Bool(flags.VerboseOpt, false, "Enable verbose logging")
+	if err := viper.BindPFlags(root.PersistentFlags()); err != nil {
+		log.Fatalf("Failed to bind root flags: %v", err)
+	}
+
 	if err := root.Execute(); err != nil {
 		os.Exit(1)
 	}
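
Note: main.go now registers a persistent --verbose flag, binds it through viper, and reads it back in PersistentPreRun to raise the log level. A minimal self-contained sketch of that cobra/viper pattern (command name and Run body are illustrative, not part of the commit):

package main

import (
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func main() {
	root := &cobra.Command{
		Use:   "example-cli", // illustrative name
		Short: "Demonstrates a viper-bound persistent --verbose flag",
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			// Same pattern as above: read the bound flag back through viper.
			if viper.GetBool("verbose") {
				log.SetLevel(log.DebugLevel)
				log.Debug("Debug logging is set")
			}
		},
		Run: func(cmd *cobra.Command, args []string) {
			log.Info("running")
		},
	}

	root.PersistentFlags().Bool("verbose", false, "Enable verbose logging")
	if err := viper.BindPFlags(root.PersistentFlags()); err != nil {
		log.Fatalf("Failed to bind root flags: %v", err)
	}

	if err := root.Execute(); err != nil {
		log.Fatal(err)
	}
}
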
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/migrate/cmd.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/migrate/cmd.go
index aa8ad5d..18efade 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/migrate/cmd.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/migrate/cmd.go
@@ -25,18 +25,29 @@ import (
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
 	"github.com/operator-framework/operator-sdk/internal/util/projutil"
 
+	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )
 
+var (
+	depManager string
+	headerFile string
+)
+
 // NewCmd returns a command that will add source code to an existing non-go operator
 func NewCmd() *cobra.Command {
-	return &cobra.Command{
+	newCmd := &cobra.Command{
 		Use:   "migrate",
 		Short: "Adds source code to an operator",
 		Long:  `operator-sdk migrate adds a main.go source file and any associated source files for an operator that is not of the "go" type.`,
 		RunE:  migrateRun,
 	}
+
+	newCmd.Flags().StringVar(&depManager, "dep-manager", "modules", `Dependency manager the new project will use (choices: "dep", "modules")`)
+	newCmd.Flags().StringVar(&headerFile, "header-file", "", "Path to file containing headers for generated Go files. Copied to hack/boilerplate.go.txt")
+
+	return newCmd
 }
 
 // migrateRun determines the current operator type and runs the corresponding
@@ -61,6 +72,7 @@ func migrateAnsible() error {
 	wd := projutil.MustGetwd()
 
 	cfg := &input.Config{
+		Repo:           projutil.CheckAndGetProjectGoPkg(),
 		AbsProjectPath: wd,
 		ProjectName:    filepath.Base(wd),
 	}
@@ -78,15 +90,25 @@ func migrateAnsible() error {
 	default:
 		return fmt.Errorf("error trying to stat %s: (%v)", ansible.PlaybookYamlFile, err)
 	}
-
 	if err := renameDockerfile(); err != nil {
 		return err
 	}
 
 	s := &scaffold.Scaffold{}
+	if headerFile != "" {
+		err = s.Execute(cfg, &scaffold.Boilerplate{BoilerplateSrcPath: headerFile})
+		if err != nil {
+			return fmt.Errorf("boilerplate scaffold failed: (%v)", err)
+		}
+		s.BoilerplatePath = headerFile
+	}
+
+	if err := scaffoldAnsibleDepManager(s, cfg); err != nil {
+		return errors.Wrap(err, "migrate Ansible dependency manager file scaffold failed")
+	}
+
 	err = s.Execute(cfg,
 		&ansible.Main{},
-		&ansible.GopkgToml{},
 		&dockerfile,
 		&ansible.Entrypoint{},
 		&ansible.UserSetup{},
@@ -104,6 +126,7 @@ func migrateHelm() error {
 	wd := projutil.MustGetwd()
 
 	cfg := &input.Config{
+		Repo:           projutil.CheckAndGetProjectGoPkg(),
 		AbsProjectPath: wd,
 		ProjectName:    filepath.Base(wd),
 	}
@@ -113,9 +136,20 @@ func migrateHelm() error {
 	}
 
 	s := &scaffold.Scaffold{}
+	if headerFile != "" {
+		err := s.Execute(cfg, &scaffold.Boilerplate{BoilerplateSrcPath: headerFile})
+		if err != nil {
+			return fmt.Errorf("boilerplate scaffold failed: (%v)", err)
+		}
+		s.BoilerplatePath = headerFile
+	}
+
+	if err := scaffoldHelmDepManager(s, cfg); err != nil {
+		return errors.Wrap(err, "migrate Helm dependency manager file scaffold failed")
+	}
+
 	err := s.Execute(cfg,
 		&helm.Main{},
-		&helm.GopkgToml{},
 		&helm.DockerfileHybrid{
 			Watches:    true,
 			HelmCharts: true,
@@ -139,3 +173,44 @@ func renameDockerfile() error {
 	log.Infof("Renamed Dockerfile to %s and replaced with newer version. Compare the new Dockerfile to your old one and manually migrate any customizations", newDockerfilePath)
 	return nil
 }
+
+func scaffoldHelmDepManager(s *scaffold.Scaffold, cfg *input.Config) error {
+	var files []input.File
+	switch m := projutil.DepManagerType(depManager); m {
+	case projutil.DepManagerDep:
+		files = append(files, &helm.GopkgToml{})
+	case projutil.DepManagerGoMod:
+		if err := goModCheck(); err != nil {
+			return err
+		}
+		files = append(files, &helm.GoMod{}, &scaffold.Tools{})
+	default:
+		return projutil.ErrInvalidDepManager(depManager)
+	}
+	return s.Execute(cfg, files...)
+}
+
+func scaffoldAnsibleDepManager(s *scaffold.Scaffold, cfg *input.Config) error {
+	var files []input.File
+	switch m := projutil.DepManagerType(depManager); m {
+	case projutil.DepManagerDep:
+		files = append(files, &ansible.GopkgToml{})
+	case projutil.DepManagerGoMod:
+		if err := goModCheck(); err != nil {
+			return err
+		}
+		files = append(files, &ansible.GoMod{}, &scaffold.Tools{})
+	default:
+		return projutil.ErrInvalidDepManager(depManager)
+	}
+	return s.Execute(cfg, files...)
+}
+
+func goModCheck() error {
+	goModOn, err := projutil.GoModOn()
+	if err == nil && !goModOn {
+		log.Fatal(`Dependency manager "modules" has been selected but go modules are not active. ` +
+			`Activate modules then run "operator-sdk migrate".`)
+	}
+	return err
+}
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/new/cmd.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/new/cmd.go
index e10d596..b75b579 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/new/cmd.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/new/cmd.go
@@ -27,8 +27,10 @@ import (
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
 	"github.com/operator-framework/operator-sdk/internal/util/projutil"
 
+	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
+	"sigs.k8s.io/controller-runtime/pkg/client/config"
 )
 
 func NewCmd() *cobra.Command {
@@ -52,9 +54,10 @@ generates a skeletal app-operator application in $GOPATH/src/github.com/example.
 	newCmd.Flags().StringVar(&apiVersion, "api-version", "", "Kubernetes apiVersion and has a format of $GROUP_NAME/$VERSION (e.g app.example.com/v1alpha1) - used with \"ansible\" or \"helm\" types")
 	newCmd.Flags().StringVar(&kind, "kind", "", "Kubernetes CustomResourceDefintion kind. (e.g AppService) - used with \"ansible\" or \"helm\" types")
 	newCmd.Flags().StringVar(&operatorType, "type", "go", "Type of operator to initialize (choices: \"go\", \"ansible\" or \"helm\")")
+	newCmd.Flags().StringVar(&depManager, "dep-manager", "modules", `Dependency manager the new project will use (choices: "dep", "modules")`)
 	newCmd.Flags().BoolVar(&skipGit, "skip-git-init", false, "Do not init the directory as a git repository")
+	newCmd.Flags().StringVar(&headerFile, "header-file", "", "Path to file containing headers for generated Go files. Copied to hack/boilerplate.go.txt")
 	newCmd.Flags().BoolVar(&generatePlaybook, "generate-playbook", false, "Generate a playbook skeleton. (Only used for --type ansible)")
-	newCmd.Flags().BoolVar(&isClusterScoped, "cluster-scoped", false, "Generate cluster-scoped resources instead of namespace-scoped")
 
 	newCmd.Flags().StringVar(&helmChartRef, "helm-chart", "", "Initialize helm operator with existing helm chart (<URL>, <repo>/<name>, or local path)")
 	newCmd.Flags().StringVar(&helmChartVersion, "helm-chart-version", "", "Specific version of the helm chart (default is latest version)")
@@ -68,20 +71,16 @@ var (
 	kind             string
 	operatorType     string
 	projectName      string
+	depManager       string
+	headerFile       string
 	skipGit          bool
 	generatePlaybook bool
-	isClusterScoped  bool
 
 	helmChartRef     string
 	helmChartVersion string
 	helmChartRepo    string
 )
 
-const (
-	dep       = "dep"
-	ensureCmd = "ensure"
-)
-
 func newFunc(cmd *cobra.Command, args []string) error {
 	if err := parse(cmd, args); err != nil {
 		return err
@@ -95,10 +94,10 @@ func newFunc(cmd *cobra.Command, args []string) error {
 
 	switch operatorType {
 	case projutil.OperatorTypeGo:
-		if err := doScaffold(); err != nil {
+		if err := doGoScaffold(); err != nil {
 			return err
 		}
-		if err := pullDep(); err != nil {
+		if err := getDeps(); err != nil {
 			return err
 		}
 	case projutil.OperatorTypeAnsible:
@@ -145,28 +144,54 @@ func mustBeNewProject() {
 	}
 }
 
-func doScaffold() error {
+func doGoScaffold() error {
 	cfg := &input.Config{
 		Repo:           filepath.Join(projutil.CheckAndGetProjectGoPkg(), projectName),
 		AbsProjectPath: filepath.Join(projutil.MustGetwd(), projectName),
 		ProjectName:    projectName,
 	}
-
 	s := &scaffold.Scaffold{}
-	err := s.Execute(cfg,
+
+	if headerFile != "" {
+		err := s.Execute(cfg, &scaffold.Boilerplate{BoilerplateSrcPath: headerFile})
+		if err != nil {
+			return fmt.Errorf("boilerplate scaffold failed: (%v)", err)
+		}
+		s.BoilerplatePath = headerFile
+	}
+
+	var err error
+	switch m := projutil.DepManagerType(depManager); m {
+	case projutil.DepManagerDep:
+		err = s.Execute(cfg, &scaffold.GopkgToml{})
+	case projutil.DepManagerGoMod:
+		if goModOn, merr := projutil.GoModOn(); merr != nil {
+			return merr
+		} else if !goModOn {
+			log.Fatalf(`Dependency manager "%s" has been selected but go modules are not active. `+
+				`Activate modules then run "operator-sdk new %s".`, m, projectName)
+		}
+		err = s.Execute(cfg, &scaffold.GoMod{}, &scaffold.Tools{})
+	default:
+		err = projutil.ErrNoDepManager
+	}
+	if err != nil {
+		return fmt.Errorf("dependency manager file scaffold failed: (%v)", err)
+	}
+
+	err = s.Execute(cfg,
 		&scaffold.Cmd{},
 		&scaffold.Dockerfile{},
 		&scaffold.Entrypoint{},
 		&scaffold.UserSetup{},
 		&scaffold.ServiceAccount{},
-		&scaffold.Role{IsClusterScoped: isClusterScoped},
-		&scaffold.RoleBinding{IsClusterScoped: isClusterScoped},
-		&scaffold.Operator{IsClusterScoped: isClusterScoped},
+		&scaffold.Role{},
+		&scaffold.RoleBinding{},
+		&scaffold.Operator{},
 		&scaffold.Apis{},
 		&scaffold.Controller{},
 		&scaffold.Version{},
 		&scaffold.Gitignore{},
-		&scaffold.GopkgToml{},
 	)
 	if err != nil {
 		return fmt.Errorf("new Go scaffold failed: (%v)", err)
@@ -191,8 +216,8 @@ func doAnsibleScaffold() error {
 	s := &scaffold.Scaffold{}
 	err = s.Execute(cfg,
 		&scaffold.ServiceAccount{},
-		&scaffold.Role{IsClusterScoped: isClusterScoped},
-		&scaffold.RoleBinding{IsClusterScoped: isClusterScoped},
+		&scaffold.Role{},
+		&scaffold.RoleBinding{},
 		&scaffold.CRD{Resource: resource},
 		&scaffold.CR{Resource: resource},
 		&ansible.BuildDockerfile{GeneratePlaybook: generatePlaybook},
@@ -220,7 +245,7 @@ func doAnsibleScaffold() error {
 			GeneratePlaybook: generatePlaybook,
 			Resource:         *resource,
 		},
-		&ansible.DeployOperator{IsClusterScoped: isClusterScoped},
+		&ansible.DeployOperator{},
 		&ansible.Travis{},
 		&ansible.MoleculeTestLocalMolecule{},
 		&ansible.MoleculeTestLocalPrepare{Resource: *resource},
@@ -280,6 +305,15 @@ func doHelmScaffold() error {
 	valuesPath := filepath.Join("<project_dir>", helm.HelmChartsDir, chart.GetMetadata().GetName(), "values.yaml")
 	crSpec := fmt.Sprintf("# Default values copied from %s\n\n%s", valuesPath, chart.GetValues().GetRaw())
 
+	k8sCfg, err := config.GetConfig()
+	if err != nil {
+		return fmt.Errorf("failed to get kubernetes config: %s", err)
+	}
+	roleScaffold, err := helm.CreateRoleScaffold(k8sCfg, chart)
+	if err != nil {
+		return fmt.Errorf("failed to generate role scaffold: %s", err)
+	}
+
 	s := &scaffold.Scaffold{}
 	err = s.Execute(cfg,
 		&helm.Dockerfile{},
@@ -288,9 +322,9 @@ func doHelmScaffold() error {
 			ChartName: chart.GetMetadata().GetName(),
 		},
 		&scaffold.ServiceAccount{},
-		&scaffold.Role{IsClusterScoped: isClusterScoped},
-		&scaffold.RoleBinding{IsClusterScoped: isClusterScoped},
-		&helm.Operator{IsClusterScoped: isClusterScoped},
+		roleScaffold,
+		&scaffold.RoleBinding{IsClusterScoped: roleScaffold.IsClusterScoped},
+		&helm.Operator{},
 		&scaffold.CRD{Resource: resource},
 		&scaffold.CR{
 			Resource: resource,
@@ -309,7 +343,7 @@ func doHelmScaffold() error {
 
 func verifyFlags() error {
 	if operatorType != projutil.OperatorTypeGo && operatorType != projutil.OperatorTypeAnsible && operatorType != projutil.OperatorTypeHelm {
-		return fmt.Errorf("value of --type can only be `go`, `ansible`, or `helm`")
+		return errors.Wrap(projutil.ErrUnknownOperatorType{Type: operatorType}, "value of --type can only be `go`, `ansible`, or `helm`")
 	}
 	if operatorType != projutil.OperatorTypeAnsible && generatePlaybook {
 		return fmt.Errorf("value of --generate-playbook can only be used with --type `ansible`")
@@ -357,16 +391,22 @@ func execProjCmd(cmd string, args ...string) error {
 	return projutil.ExecCmd(dc)
 }
 
-func pullDep() error {
-	_, err := exec.LookPath(dep)
-	if err != nil {
-		return fmt.Errorf("looking for dep in $PATH: (%v)", err)
-	}
-	log.Info("Run dep ensure ...")
-	if err := execProjCmd(dep, ensureCmd, "-v"); err != nil {
-		return err
+func getDeps() error {
+	switch m := projutil.DepManagerType(depManager); m {
+	case projutil.DepManagerDep:
+		log.Info("Running dep ensure ...")
+		if err := execProjCmd("dep", "ensure", "-v"); err != nil {
+			return err
+		}
+	case projutil.DepManagerGoMod:
+		log.Info("Running go mod ...")
+		if err := execProjCmd("go", "mod", "vendor", "-v"); err != nil {
+			return err
+		}
+	default:
+		return projutil.ErrInvalidDepManager(depManager)
 	}
-	log.Info("Run dep ensure done")
+	log.Info("Done getting dependencies")
 	return nil
 }
 
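
Note: getDeps above replaces pullDep and dispatches on the selected dependency manager. A standalone sketch of that dispatch between "dep ensure -v" and "go mod vendor -v" (the output wiring and the hard-coded "modules" choice are illustrative, not part of the commit):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// getDeps mirrors the dispatch above: "dep" runs dep ensure, "modules"
// runs go mod vendor, anything else is rejected.
func getDeps(depManager string) error {
	var cmd *exec.Cmd
	switch depManager {
	case "dep":
		cmd = exec.Command("dep", "ensure", "-v")
	case "modules":
		cmd = exec.Command("go", "mod", "vendor", "-v")
	default:
		return fmt.Errorf("invalid dependency manager %q", depManager)
	}
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	if err := getDeps("modules"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
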
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/printdeps/cmd.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/printdeps/cmd.go
index 5a3ea35..2d7e9ad 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/printdeps/cmd.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/printdeps/cmd.go
@@ -18,6 +18,9 @@ import (
 	"fmt"
 
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
+	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible"
+	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm"
+	"github.com/operator-framework/operator-sdk/internal/util/projutil"
 
 	"github.com/spf13/cobra"
 )
@@ -30,15 +33,16 @@ func NewCmd() *cobra.Command {
 		Short: "Print Golang packages and versions required to run the operator",
 		Long: `The operator-sdk print-deps command prints all Golang packages and versions expected
 by this version of the Operator SDK. Versions for these packages should match
-those in an operators' Gopkg.toml file.
+those in an operators' go.mod or Gopkg.toml file, depending on the dependency
+manager chosen when initializing or migrating a project.
 
 print-deps prints in columnar format by default. Use the --as-file flag to
-print in Gopkg.toml file format.
+print in go.mod or Gopkg.toml file format.
 `,
 		RunE: printDepsFunc,
 	}
 
-	printDepsCmd.Flags().BoolVar(&asFile, "as-file", false, "Print dependencies in Gopkg.toml file format.")
+	printDepsCmd.Flags().BoolVar(&asFile, "as-file", false, "Print dependencies in go.mod or Gopkg.toml file format, depending on the dependency manager chosen when initializing or migrating a project")
 
 	return printDepsCmd
 }
@@ -47,10 +51,40 @@ func printDepsFunc(cmd *cobra.Command, args []string) error {
 	if len(args) != 0 {
 		return fmt.Errorf("command %s doesn't accept any arguments", cmd.CommandPath())
 	}
-	if asFile {
-		scaffold.PrintDepsAsFile()
-	} else if err := scaffold.PrintDeps(); err != nil {
+
+	if err := printDeps(asFile); err != nil {
 		return fmt.Errorf("print deps failed: (%v)", err)
 	}
 	return nil
 }
+
+func printDeps(asFile bool) error {
+	// Make sure the project has a dep manager file.
+	mt, err := projutil.GetDepManagerType()
+	if err != nil {
+		return err
+	}
+	isDep := mt == projutil.DepManagerDep
+
+	// Migrated Ansible and Helm projects will be of type OperatorTypeGo but
+	// their deps files will differ from a vanilla Go project.
+	switch {
+	case projutil.IsOperatorAnsible():
+		if isDep {
+			return ansible.PrintDepGopkgTOML(asFile)
+		}
+		return ansible.PrintGoMod(asFile)
+	case projutil.IsOperatorHelm():
+		if isDep {
+			return helm.PrintDepGopkgTOML(asFile)
+		}
+		return helm.PrintGoMod(asFile)
+	case projutil.IsOperatorGo():
+		if isDep {
+			return scaffold.PrintDepGopkgTOML(asFile)
+		}
+		return scaffold.PrintGoMod(asFile)
+	}
+
+	return projutil.ErrUnknownOperatorType{}
+}
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/cmd.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/cmd.go
index 7acc21d..6bc217d 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/cmd.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/cmd.go
@@ -19,6 +19,7 @@ import (
 	"strings"
 
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
+	"github.com/operator-framework/operator-sdk/internal/pkg/scorecard"
 	"github.com/operator-framework/operator-sdk/version"
 
 	log "github.com/sirupsen/logrus"
@@ -26,52 +27,31 @@ import (
 	"github.com/spf13/viper"
 )
 
-// scorecardConfig stores all scorecard config passed as flags
-type scorecardConfig struct {
-	namespace          string
-	kubeconfigPath     string
-	initTimeout        int
-	olmDeployed        bool
-	csvPath            string
-	basicTests         bool
-	olmTests           bool
-	tenantTests        bool
-	namespacedManifest string
-	globalManifest     string
-	crManifest         string
-	proxyImage         string
-	proxyPullPolicy    string
-	crdsDir            string
-	verbose            bool
-}
-
-var scConf scorecardConfig
-
 func NewCmd() *cobra.Command {
 	scorecardCmd := &cobra.Command{
 		Use:   "scorecard",
 		Short: "Run scorecard tests",
 		Long: `Runs blackbox scorecard tests on an operator
 `,
-		RunE: ScorecardTests,
+		RunE: scorecard.ScorecardTests,
 	}
 
-	scorecardCmd.Flags().StringVar(&ScorecardConf, ConfigOpt, "", "config file (default is <project_dir>/.osdk-yaml)")
-	scorecardCmd.Flags().StringVar(&scConf.namespace, NamespaceOpt, "", "Namespace of custom resource created in cluster")
-	scorecardCmd.Flags().StringVar(&scConf.kubeconfigPath, KubeconfigOpt, "", "Path to kubeconfig of custom resource created in cluster")
-	scorecardCmd.Flags().IntVar(&scConf.initTimeout, InitTimeoutOpt, 10, "Timeout for status block on CR to be created in seconds")
-	scorecardCmd.Flags().BoolVar(&scConf.olmDeployed, OlmDeployedOpt, false, "The OLM has deployed the operator. Use only the CSV for test data")
-	scorecardCmd.Flags().StringVar(&scConf.csvPath, CSVPathOpt, "", "Path to CSV being tested")
-	scorecardCmd.Flags().BoolVar(&scConf.basicTests, BasicTestsOpt, true, "Enable basic operator checks")
-	scorecardCmd.Flags().BoolVar(&scConf.olmTests, OLMTestsOpt, true, "Enable OLM integration checks")
-	scorecardCmd.Flags().BoolVar(&scConf.tenantTests, TenantTestsOpt, false, "Enable good tenant checks")
-	scorecardCmd.Flags().StringVar(&scConf.namespacedManifest, NamespacedManifestOpt, "", "Path to manifest for namespaced resources (e.g. RBAC and Operator manifest)")
-	scorecardCmd.Flags().StringVar(&scConf.globalManifest, GlobalManifestOpt, "", "Path to manifest for Global resources (e.g. CRD manifests)")
-	scorecardCmd.Flags().StringVar(&scConf.crManifest, CRManifestOpt, "", "Path to manifest for Custom Resource (required)")
-	scorecardCmd.Flags().StringVar(&scConf.proxyImage, ProxyImageOpt, fmt.Sprintf("quay.io/operator-framework/scorecard-proxy:%s", strings.TrimSuffix(version.Version, "+git")), "Image name for scorecard proxy")
-	scorecardCmd.Flags().StringVar(&scConf.proxyPullPolicy, ProxyPullPolicyOpt, "Always", "Pull policy for scorecard proxy image")
-	scorecardCmd.Flags().StringVar(&scConf.crdsDir, "crds-dir", scaffold.CRDsDir, "Directory containing CRDs (all CRD manifest filenames must have the suffix 'crd.yaml')")
-	scorecardCmd.Flags().BoolVar(&scConf.verbose, VerboseOpt, false, "Enable verbose logging")
+	scorecardCmd.Flags().String(scorecard.ConfigOpt, "", "config file (default is <project_dir>/.osdk-yaml)")
+	scorecardCmd.Flags().String(scorecard.NamespaceOpt, "", "Namespace of custom resource created in cluster")
+	scorecardCmd.Flags().String(scorecard.KubeconfigOpt, "", "Path to kubeconfig of custom resource created in cluster")
+	scorecardCmd.Flags().Int(scorecard.InitTimeoutOpt, 60, "Timeout for status block on CR to be created in seconds")
+	scorecardCmd.Flags().Bool(scorecard.OlmDeployedOpt, false, "The OLM has deployed the operator. Use only the CSV for test data")
+	scorecardCmd.Flags().String(scorecard.CSVPathOpt, "", "Path to CSV being tested")
+	scorecardCmd.Flags().Bool(scorecard.BasicTestsOpt, true, "Enable basic operator checks")
+	scorecardCmd.Flags().Bool(scorecard.OLMTestsOpt, true, "Enable OLM integration checks")
+	scorecardCmd.Flags().String(scorecard.NamespacedManifestOpt, "", "Path to manifest for namespaced resources (e.g. RBAC and Operator manifest)")
+	scorecardCmd.Flags().String(scorecard.GlobalManifestOpt, "", "Path to manifest for Global resources (e.g. CRD manifests)")
+	scorecardCmd.Flags().StringSlice(scorecard.CRManifestOpt, nil, "Path to manifest for Custom Resource (required) (specify flag multiple times for multiple CRs)")
+	scorecardCmd.Flags().String(scorecard.ProxyImageOpt, fmt.Sprintf("quay.io/operator-framework/scorecard-proxy:%s", strings.TrimSuffix(version.Version, "+git")), "Image name for scorecard proxy")
+	scorecardCmd.Flags().String(scorecard.ProxyPullPolicyOpt, "Always", "Pull policy for scorecard proxy image")
+	scorecardCmd.Flags().String(scorecard.CRDsDirOpt, scaffold.CRDsDir, "Directory containing CRDs (all CRD manifest filenames must have the suffix 'crd.yaml')")
+	scorecardCmd.Flags().StringP(scorecard.OutputFormatOpt, "o", scorecard.HumanReadableOutputFormat, fmt.Sprintf("Output format for results. Valid values: %s, %s", scorecard.HumanReadableOutputFormat, scorecard.JSONOutputFormat))
+	scorecardCmd.Flags().String(scorecard.PluginDirOpt, "scorecard", "Scorecard plugin directory (plugin exectuables must be in a \"bin\" subdirectory")
 
 	if err := viper.BindPFlags(scorecardCmd.Flags()); err != nil {
 		log.Fatalf("Failed to bind scorecard flags to viper: %v", err)
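
Note: the scorecard flags are now read through viper instead of a local scorecardConfig struct, and --cr-manifest becomes repeatable. A minimal sketch of a viper-bound StringSlice flag (command name is illustrative, not part of the commit):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func main() {
	cmd := &cobra.Command{
		Use: "scorecard-example", // illustrative name
		RunE: func(cmd *cobra.Command, args []string) error {
			// Values accumulate when the flag is passed multiple times,
			// e.g. --cr-manifest a.yaml --cr-manifest b.yaml.
			for _, cr := range viper.GetStringSlice("cr-manifest") {
				fmt.Println("CR manifest:", cr)
			}
			return nil
		},
	}

	cmd.Flags().StringSlice("cr-manifest", nil, "Path to manifest for Custom Resource (repeatable)")
	if err := viper.BindPFlags(cmd.Flags()); err != nil {
		panic(err)
	}
	if err := cmd.Execute(); err != nil {
		panic(err)
	}
}
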
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/scorecard.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/scorecard.go
deleted file mode 100644
index fe15b4d..0000000
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/scorecard.go
+++ /dev/null
@@ -1,344 +0,0 @@
-// Copyright 2019 The Operator-SDK Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package scorecard
-
-import (
-	"context"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"os"
-
-	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
-	k8sInternal "github.com/operator-framework/operator-sdk/internal/util/k8sutil"
-	"github.com/operator-framework/operator-sdk/internal/util/projutil"
-	"github.com/operator-framework/operator-sdk/internal/util/yamlutil"
-
-	"github.com/ghodss/yaml"
-	olmapiv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1"
-	olminstall "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install"
-	log "github.com/sirupsen/logrus"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-	v1 "k8s.io/api/core/v1"
-	extscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/serializer"
-	"k8s.io/client-go/discovery/cached"
-	"k8s.io/client-go/kubernetes"
-	cgoscheme "k8s.io/client-go/kubernetes/scheme"
-	"k8s.io/client-go/rest"
-	"k8s.io/client-go/restmapper"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-const (
-	ConfigOpt             = "config"
-	NamespaceOpt          = "namespace"
-	KubeconfigOpt         = "kubeconfig"
-	InitTimeoutOpt        = "init-timeout"
-	OlmDeployedOpt        = "olm-deployed"
-	CSVPathOpt            = "csv-path"
-	BasicTestsOpt         = "basic-tests"
-	OLMTestsOpt           = "olm-tests"
-	TenantTestsOpt        = "good-tenant-tests"
-	NamespacedManifestOpt = "namespaced-manifest"
-	GlobalManifestOpt     = "global-manifest"
-	CRManifestOpt         = "cr-manifest"
-	ProxyImageOpt         = "proxy-image"
-	ProxyPullPolicyOpt    = "proxy-pull-policy"
-	CRDsDirOpt            = "crds-dir"
-	VerboseOpt            = "verbose"
-)
-
-const (
-	basicOperator  = "Basic Operator"
-	olmIntegration = "OLM Integration"
-	goodTenant     = "Good Tenant"
-)
-
-var (
-	kubeconfig     *rest.Config
-	dynamicDecoder runtime.Decoder
-	runtimeClient  client.Client
-	restMapper     *restmapper.DeferredDiscoveryRESTMapper
-	deploymentName string
-	proxyPodGlobal *v1.Pod
-	cleanupFns     []cleanupFn
-	ScorecardConf  string
-)
-
-const (
-	scorecardPodName       = "operator-scorecard-test"
-	scorecardContainerName = "scorecard-proxy"
-)
-
-func ScorecardTests(cmd *cobra.Command, args []string) error {
-	if err := initConfig(); err != nil {
-		return err
-	}
-	if err := validateScorecardFlags(); err != nil {
-		return err
-	}
-	cmd.SilenceUsage = true
-	if viper.GetBool(VerboseOpt) {
-		log.SetLevel(log.DebugLevel)
-	}
-	defer func() {
-		if err := cleanupScorecard(); err != nil {
-			log.Errorf("Failed to clenup resources: (%v)", err)
-		}
-	}()
-
-	var (
-		tmpNamespaceVar string
-		err             error
-	)
-	kubeconfig, tmpNamespaceVar, err = k8sInternal.GetKubeconfigAndNamespace(viper.GetString(KubeconfigOpt))
-	if err != nil {
-		return fmt.Errorf("failed to build the kubeconfig: %v", err)
-	}
-	if viper.GetString(NamespaceOpt) == "" {
-		viper.Set(NamespaceOpt, tmpNamespaceVar)
-	}
-	scheme := runtime.NewScheme()
-	// scheme for client go
-	if err := cgoscheme.AddToScheme(scheme); err != nil {
-		return fmt.Errorf("failed to add client-go scheme to client: (%v)", err)
-	}
-	// api extensions scheme (CRDs)
-	if err := extscheme.AddToScheme(scheme); err != nil {
-		return fmt.Errorf("failed to add failed to add extensions api scheme to client: (%v)", err)
-	}
-	// olm api (CS
-	if err := olmapiv1alpha1.AddToScheme(scheme); err != nil {
-		return fmt.Errorf("failed to add failed to add oml api scheme (CSVs) to client: (%v)", err)
-	}
-	dynamicDecoder = serializer.NewCodecFactory(scheme).UniversalDeserializer()
-	// if a user creates a new CRD, we need to be able to reset the rest mapper
-	// temporary kubeclient to get a cached discovery
-	kubeclient, err := kubernetes.NewForConfig(kubeconfig)
-	if err != nil {
-		return fmt.Errorf("failed to get a kubeclient: %v", err)
-	}
-	cachedDiscoveryClient := cached.NewMemCacheClient(kubeclient.Discovery())
-	restMapper = restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoveryClient)
-	restMapper.Reset()
-	runtimeClient, _ = client.New(kubeconfig, client.Options{Scheme: scheme, Mapper: restMapper})
-
-	csv := &olmapiv1alpha1.ClusterServiceVersion{}
-	if viper.GetBool(OLMTestsOpt) {
-		yamlSpec, err := ioutil.ReadFile(viper.GetString(CSVPathOpt))
-		if err != nil {
-			return fmt.Errorf("failed to read csv: %v", err)
-		}
-		if err = yaml.Unmarshal(yamlSpec, csv); err != nil {
-			return fmt.Errorf("error getting ClusterServiceVersion: %v", err)
-		}
-	}
-
-	// Extract operator manifests from the CSV if olm-deployed is set.
-	if viper.GetBool(OlmDeployedOpt) {
-		// Get deploymentName from the deployment manifest within the CSV.
-		strat, err := (&olminstall.StrategyResolver{}).UnmarshalStrategy(csv.Spec.InstallStrategy)
-		if err != nil {
-			return err
-		}
-		stratDep, ok := strat.(*olminstall.StrategyDetailsDeployment)
-		if !ok {
-			return fmt.Errorf("expected StrategyDetailsDeployment, got strategy of type %T", strat)
-		}
-		deploymentName = stratDep.DeploymentSpecs[0].Name
-		// Get the proxy pod, which should have been created with the CSV.
-		proxyPodGlobal, err = getPodFromDeployment(deploymentName, viper.GetString(NamespaceOpt))
-		if err != nil {
-			return err
-		}
-
-		// Create a temporary CR manifest from metadata if one is not provided.
-		crJSONStr, ok := csv.ObjectMeta.Annotations["alm-examples"]
-		if ok && viper.GetString(CRManifestOpt) == "" {
-			var crs []interface{}
-			if err = json.Unmarshal([]byte(crJSONStr), &crs); err != nil {
-				return err
-			}
-			// TODO: run scorecard against all CR's in CSV.
-			cr := crs[0]
-			crJSONBytes, err := json.Marshal(cr)
-			if err != nil {
-				return err
-			}
-			crYAMLBytes, err := yaml.JSONToYAML(crJSONBytes)
-			if err != nil {
-				return err
-			}
-			crFile, err := ioutil.TempFile("", "cr.yaml")
-			if err != nil {
-				return err
-			}
-			if _, err := crFile.Write(crYAMLBytes); err != nil {
-				return err
-			}
-			viper.Set(CRManifestOpt, crFile.Name())
-			defer func() {
-				err := os.Remove(viper.GetString(CRManifestOpt))
-				if err != nil {
-					log.Errorf("Could not delete temporary CR manifest file: (%v)", err)
-				}
-			}()
-		}
-
-	} else {
-		// If no namespaced manifest path is given, combine
-		// deploy/{service_account,role.yaml,role_binding,operator}.yaml.
-		if viper.GetString(NamespacedManifestOpt) == "" {
-			file, err := yamlutil.GenerateCombinedNamespacedManifest(scaffold.DeployDir)
-			if err != nil {
-				return err
-			}
-			viper.Set(NamespacedManifestOpt, file.Name())
-			defer func() {
-				err := os.Remove(viper.GetString(NamespacedManifestOpt))
-				if err != nil {
-					log.Errorf("Could not delete temporary namespace manifest file: (%v)", err)
-				}
-			}()
-		}
-		// If no global manifest is given, combine all CRD's in the given CRD's dir.
-		if viper.GetString(GlobalManifestOpt) == "" {
-			gMan, err := yamlutil.GenerateCombinedGlobalManifest(viper.GetString(CRDsDirOpt))
-			if err != nil {
-				return err
-			}
-			viper.Set(GlobalManifestOpt, gMan.Name())
-			defer func() {
-				err := os.Remove(viper.GetString(GlobalManifestOpt))
-				if err != nil {
-					log.Errorf("Could not delete global manifest file: (%v)", err)
-				}
-			}()
-		}
-		if err := createFromYAMLFile(viper.GetString(GlobalManifestOpt)); err != nil {
-			return fmt.Errorf("failed to create global resources: %v", err)
-		}
-		if err := createFromYAMLFile(viper.GetString(NamespacedManifestOpt)); err != nil {
-			return fmt.Errorf("failed to create namespaced resources: %v", err)
-		}
-	}
-
-	if err := createFromYAMLFile(viper.GetString(CRManifestOpt)); err != nil {
-		return fmt.Errorf("failed to create cr resource: %v", err)
-	}
-	obj, err := yamlToUnstructured(viper.GetString(CRManifestOpt))
-	if err != nil {
-		return fmt.Errorf("failed to decode custom resource manifest into object: %s", err)
-	}
-	if err := waitUntilCRStatusExists(obj); err != nil {
-		return fmt.Errorf("failed waiting to check if CR status exists: %v", err)
-	}
-	var suites []*TestSuite
-
-	// Run tests.
-	if viper.GetBool(BasicTestsOpt) {
-		conf := BasicTestConfig{
-			Client:   runtimeClient,
-			CR:       obj,
-			ProxyPod: proxyPodGlobal,
-		}
-		basicTests := NewBasicTestSuite(conf)
-		basicTests.Run(context.TODO())
-		suites = append(suites, basicTests)
-	}
-	if viper.GetBool(OLMTestsOpt) {
-		conf := OLMTestConfig{
-			Client:   runtimeClient,
-			CR:       obj,
-			CSV:      csv,
-			CRDsDir:  viper.GetString(CRDsDirOpt),
-			ProxyPod: proxyPodGlobal,
-		}
-		olmTests := NewOLMTestSuite(conf)
-		olmTests.Run(context.TODO())
-		suites = append(suites, olmTests)
-	}
-	totalScore := 0.0
-	for _, suite := range suites {
-		fmt.Printf("%s:\n", suite.GetName())
-		for _, result := range suite.TestResults {
-			fmt.Printf("\t%s: %d/%d\n", result.Test.GetName(), result.EarnedPoints, result.MaximumPoints)
-		}
-		totalScore += float64(suite.TotalScore())
-	}
-	totalScore = totalScore / float64(len(suites))
-	fmt.Printf("\nTotal Score: %.0f%%\n", totalScore)
-	// Print suggestions
-	for _, suite := range suites {
-		for _, result := range suite.TestResults {
-			for _, suggestion := range result.Suggestions {
-				// 33 is yellow (specifically, the same shade of yellow that logrus uses for warnings)
-				fmt.Printf("\x1b[%dmSUGGESTION:\x1b[0m %s\n", 33, suggestion)
-			}
-		}
-	}
-	// Print errors
-	for _, suite := range suites {
-		for _, result := range suite.TestResults {
-			for _, err := range result.Errors {
-				// 31 is red (specifically, the same shade of red that logrus uses for errors)
-				fmt.Printf("\x1b[%dmERROR:\x1b[0m %s\n", 31, err)
-			}
-		}
-	}
-	return nil
-}
-
-func initConfig() error {
-	if ScorecardConf != "" {
-		// Use config file from the flag.
-		viper.SetConfigFile(ScorecardConf)
-	} else {
-		viper.AddConfigPath(projutil.MustGetwd())
-		// using SetConfigName allows users to use a .yaml, .json, or .toml file
-		viper.SetConfigName(".osdk-scorecard")
-	}
-
-	if err := viper.ReadInConfig(); err == nil {
-		log.Info("Using config file: ", viper.ConfigFileUsed())
-	} else {
-		log.Warn("Could not load config file; using flags")
-	}
-	return nil
-}
-
-func validateScorecardFlags() error {
-	if !viper.GetBool(OlmDeployedOpt) && viper.GetString(CRManifestOpt) == "" {
-		return errors.New("cr-manifest config option must be set")
-	}
-	if !viper.GetBool(BasicTestsOpt) && !viper.GetBool(OLMTestsOpt) {
-		return errors.New("at least one test type must be set")
-	}
-	if viper.GetBool(OLMTestsOpt) && viper.GetString(CSVPathOpt) == "" {
-		return fmt.Errorf("csv-path must be set if olm-tests is enabled")
-	}
-	if viper.GetBool(OlmDeployedOpt) && viper.GetString(CSVPathOpt) == "" {
-		return fmt.Errorf("csv-path must be set if olm-deployed is enabled")
-	}
-	pullPolicy := viper.GetString(ProxyPullPolicyOpt)
-	if pullPolicy != "Always" && pullPolicy != "Never" && pullPolicy != "PullIfNotPresent" {
-		return fmt.Errorf("invalid proxy pull policy: (%s); valid values: Always, Never, PullIfNotPresent", pullPolicy)
-	}
-	return nil
-}
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/test_definitions.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/test_definitions.go
deleted file mode 100644
index 216e1a6..0000000
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/test_definitions.go
+++ /dev/null
@@ -1,324 +0,0 @@
-// Copyright 2019 The Operator-SDK Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package scorecard
-
-import (
-	"context"
-
-	olmapiv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1"
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-// Type Definitions
-
-// Test provides methods for running scorecard tests
-type Test interface {
-	GetName() string
-	GetDescription() string
-	IsCumulative() bool
-	Run(context.Context) *TestResult
-}
-
-// TestResult contains a test's points, suggestions, and errors
-type TestResult struct {
-	Test          Test
-	EarnedPoints  int
-	MaximumPoints int
-	Suggestions   []string
-	Errors        []error
-}
-
-// TestInfo contains information about the scorecard test
-type TestInfo struct {
-	Name        string
-	Description string
-	// If a test is set to cumulative, the scores of multiple runs of the same test on separate CRs are added together for the total score.
-	// If cumulative is false, if any test failed, the total score is 0/1. Otherwise 1/1.
-	Cumulative bool
-}
-
-// GetName return the test name
-func (i TestInfo) GetName() string { return i.Name }
-
-// GetDescription returns the test description
-func (i TestInfo) GetDescription() string { return i.Description }
-
-// IsCumulative returns true if the test's scores are intended to be cumulative
-func (i TestInfo) IsCumulative() bool { return i.Cumulative }
-
-// BasicTestConfig contains all variables required by the BasicTest TestSuite
-type BasicTestConfig struct {
-	Client   client.Client
-	CR       *unstructured.Unstructured
-	ProxyPod *v1.Pod
-}
-
-// OLMTestConfig contains all variables required by the OLMTest TestSuite
-type OLMTestConfig struct {
-	Client   client.Client
-	CR       *unstructured.Unstructured
-	CSV      *olmapiv1alpha1.ClusterServiceVersion
-	CRDsDir  string
-	ProxyPod *v1.Pod
-}
-
-// TestSuite contains a list of tests and results, along with the relative weights of each test
-type TestSuite struct {
-	TestInfo
-	Tests       []Test
-	TestResults []*TestResult
-	Weights     map[string]float64
-}
-
-// Test definitions
-
-// CheckSpecTest is a scorecard test that verifies that the CR has a spec block
-type CheckSpecTest struct {
-	TestInfo
-	BasicTestConfig
-}
-
-// NewCheckSpecTest returns a new CheckSpecTest object
-func NewCheckSpecTest(conf BasicTestConfig) *CheckSpecTest {
-	return &CheckSpecTest{
-		BasicTestConfig: conf,
-		TestInfo: TestInfo{
-			Name:        "Spec Block Exists",
-			Description: "Custom Resource has a Spec Block",
-			Cumulative:  false,
-		},
-	}
-}
-
-// CheckStatusTest is a scorecard test that verifies that the CR has a status block
-type CheckStatusTest struct {
-	TestInfo
-	BasicTestConfig
-}
-
-// NewCheckStatusTest returns a new CheckStatusTest object
-func NewCheckStatusTest(conf BasicTestConfig) *CheckStatusTest {
-	return &CheckStatusTest{
-		BasicTestConfig: conf,
-		TestInfo: TestInfo{
-			Name:        "Status Block Exists",
-			Description: "Custom Resource has a Status Block",
-			Cumulative:  false,
-		},
-	}
-}
-
-// WritingIntoCRsHasEffectTest is a scorecard test that verifies that the operator is making PUT and/or POST requests to the API server
-type WritingIntoCRsHasEffectTest struct {
-	TestInfo
-	BasicTestConfig
-}
-
-// NewWritingIntoCRsHasEffectTest returns a new WritingIntoCRsHasEffectTest object
-func NewWritingIntoCRsHasEffectTest(conf BasicTestConfig) *WritingIntoCRsHasEffectTest {
-	return &WritingIntoCRsHasEffectTest{
-		BasicTestConfig: conf,
-		TestInfo: TestInfo{
-			Name:        "Writing into CRs has an effect",
-			Description: "A CR sends PUT/POST requests to the API server to modify resources in response to spec block changes",
-			Cumulative:  false,
-		},
-	}
-}
-
-// CRDsHaveValidationTest is a scorecard test that verifies that all CRDs have a validation section
-type CRDsHaveValidationTest struct {
-	TestInfo
-	OLMTestConfig
-}
-
-// NewCRDsHaveValidationTest returns a new CRDsHaveValidationTest object
-func NewCRDsHaveValidationTest(conf OLMTestConfig) *CRDsHaveValidationTest {
-	return &CRDsHaveValidationTest{
-		OLMTestConfig: conf,
-		TestInfo: TestInfo{
-			Name:        "Provided APIs have validation",
-			Description: "All CRDs have an OpenAPI validation subsection",
-			Cumulative:  true,
-		},
-	}
-}
-
-// CRDsHaveResourcesTest is a scorecard test that verifies that the CSV lists used resources in its owned CRDs secyion
-type CRDsHaveResourcesTest struct {
-	TestInfo
-	OLMTestConfig
-}
-
-// NewCRDsHaveResourcesTest returns a new CRDsHaveResourcesTest object
-func NewCRDsHaveResourcesTest(conf OLMTestConfig) *CRDsHaveResourcesTest {
-	return &CRDsHaveResourcesTest{
-		OLMTestConfig: conf,
-		TestInfo: TestInfo{
-			Name:        "Owned CRDs have resources listed",
-			Description: "All Owned CRDs contain a resources subsection",
-			Cumulative:  true,
-		},
-	}
-}
-
-// AnnotationsContainExamplesTest is a scorecard test that verifies that the CSV contains examples via the alm-examples annotation
-type AnnotationsContainExamplesTest struct {
-	TestInfo
-	OLMTestConfig
-}
-
-// NewAnnotationsContainExamplesTest returns a new AnnotationsContainExamplesTest object
-func NewAnnotationsContainExamplesTest(conf OLMTestConfig) *AnnotationsContainExamplesTest {
-	return &AnnotationsContainExamplesTest{
-		OLMTestConfig: conf,
-		TestInfo: TestInfo{
-			Name:        "CRs have at least 1 example",
-			Description: "The CSV's metadata contains an alm-examples section",
-			Cumulative:  true,
-		},
-	}
-}
-
-// SpecDescriptorsTest is a scorecard test that verifies that all spec fields have descriptors
-type SpecDescriptorsTest struct {
-	TestInfo
-	OLMTestConfig
-}
-
-// NewSpecDescriptorsTest returns a new SpecDescriptorsTest object
-func NewSpecDescriptorsTest(conf OLMTestConfig) *SpecDescriptorsTest {
-	return &SpecDescriptorsTest{
-		OLMTestConfig: conf,
-		TestInfo: TestInfo{
-			Name:        "Spec fields with descriptors",
-			Description: "All spec fields have matching descriptors in the CSV",
-			Cumulative:  true,
-		},
-	}
-}
-
-// StatusDescriptorsTest is a scorecard test that verifies that all status fields have descriptors
-type StatusDescriptorsTest struct {
-	TestInfo
-	OLMTestConfig
-}
-
-// NewStatusDescriptorsTest returns a new StatusDescriptorsTest object
-func NewStatusDescriptorsTest(conf OLMTestConfig) *StatusDescriptorsTest {
-	return &StatusDescriptorsTest{
-		OLMTestConfig: conf,
-		TestInfo: TestInfo{
-			Name:        "Status fields with descriptors",
-			Description: "All status fields have matching descriptors in the CSV",
-			Cumulative:  true,
-		},
-	}
-}
-
-// Test Suite Declarations
-
-// NewBasicTestSuite returns a new TestSuite object containing basic, functional operator tests
-func NewBasicTestSuite(conf BasicTestConfig) *TestSuite {
-	ts := NewTestSuite(
-		"Basic Tests",
-		"Test suite that runs basic, functional operator tests",
-	)
-	ts.AddTest(NewCheckSpecTest(conf), 1.5)
-	ts.AddTest(NewCheckStatusTest(conf), 1)
-	ts.AddTest(NewWritingIntoCRsHasEffectTest(conf), 1)
-
-	return ts
-}
-
-// NewOLMTestSuite returns a new TestSuite object containing CSV best practice checks
-func NewOLMTestSuite(conf OLMTestConfig) *TestSuite {
-	ts := NewTestSuite(
-		"OLM Tests",
-		"Test suite checks if an operator's CSV follows best practices",
-	)
-
-	ts.AddTest(NewCRDsHaveValidationTest(conf), 1.25)
-	ts.AddTest(NewCRDsHaveResourcesTest(conf), 1)
-	ts.AddTest(NewAnnotationsContainExamplesTest(conf), 1)
-	ts.AddTest(NewSpecDescriptorsTest(conf), 1)
-	ts.AddTest(NewStatusDescriptorsTest(conf), 1)
-
-	return ts
-}
-
-// Helper functions
-
-// ResultsPassFail will be used when multiple CRs are supported
-func ResultsPassFail(results []TestResult) (earned, max int) {
-	for _, result := range results {
-		if result.EarnedPoints != result.MaximumPoints {
-			return 0, 1
-		}
-	}
-	return 1, 1
-}
-
-// ResultsCumulative will be used when multiple CRs are supported
-func ResultsCumulative(results []TestResult) (earned, max int) {
-	for _, result := range results {
-		earned += result.EarnedPoints
-		max += result.MaximumPoints
-	}
-	return earned, max
-}
-
-// AddTest adds a new Test to a TestSuite along with a relative weight for the new Test
-func (ts *TestSuite) AddTest(t Test, weight float64) {
-	ts.Tests = append(ts.Tests, t)
-	ts.Weights[t.GetName()] = weight
-}
-
-// TotalScore calculates and returns the total score of all run Tests in a TestSuite
-func (ts *TestSuite) TotalScore() (score int) {
-	floatScore := 0.0
-	for _, result := range ts.TestResults {
-		if result.MaximumPoints != 0 {
-			floatScore += (float64(result.EarnedPoints) / float64(result.MaximumPoints)) * ts.Weights[result.Test.GetName()]
-		}
-	}
-	// scale to a percentage
-	addedWeights := 0.0
-	for _, weight := range ts.Weights {
-		addedWeights += weight
-	}
-	floatScore = floatScore * (100 / addedWeights)
-	return int(floatScore)
-}
-
-// Run runs all Tests in a TestSuite
-func (ts *TestSuite) Run(ctx context.Context) {
-	for _, test := range ts.Tests {
-		ts.TestResults = append(ts.TestResults, test.Run(ctx))
-	}
-}
-
-// NewTestSuite returns a new TestSuite with a given name and description
-func NewTestSuite(name, description string) *TestSuite {
-	return &TestSuite{
-		TestInfo: TestInfo{
-			Name:        name,
-			Description: description,
-		},
-		Weights: make(map[string]float64),
-	}
-}
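For context on what the deleted TestSuite scoring did: each test contributes its earned/maximum ratio multiplied by its weight, and the weighted sum is rescaled by 100 divided by the sum of all weights. A minimal standalone sketch of that arithmetic (the names and values below are illustrative only, not SDK API):

    package main

    import "fmt"

    // result mirrors a single test's earned/maximum points.
    type result struct {
    	name    string
    	earned  int
    	maximum int
    }

    // totalScore reproduces the weighted-percentage calculation: each test adds
    // (earned/maximum)*weight, then the sum is scaled by 100/sum(weights) so a
    // perfect run scores 100.
    func totalScore(results []result, weights map[string]float64) int {
    	score, totalWeight := 0.0, 0.0
    	for _, r := range results {
    		if r.maximum != 0 {
    			score += float64(r.earned) / float64(r.maximum) * weights[r.name]
    		}
    	}
    	for _, w := range weights {
    		totalWeight += w
    	}
    	if totalWeight == 0 {
    		return 0
    	}
    	return int(score * (100 / totalWeight))
    }

    func main() {
    	weights := map[string]float64{"Spec Block Exists": 1.5, "Status Block Exists": 1}
    	results := []result{
    		{name: "Spec Block Exists", earned: 1, maximum: 1},
    		{name: "Status Block Exists", earned: 0, maximum: 1},
    	}
    	fmt.Printf("Total Score: %d%%\n", totalScore(results, weights)) // Total Score: 60%
    }
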
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/test/cluster.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/test/cluster.go
deleted file mode 100755
index 3077758..0000000
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/test/cluster.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2018 The Operator-SDK Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-import (
-	"bytes"
-	"fmt"
-	"strings"
-	"time"
-
-	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
-	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible"
-	"github.com/operator-framework/operator-sdk/internal/util/fileutil"
-	k8sInternal "github.com/operator-framework/operator-sdk/internal/util/k8sutil"
-	"github.com/operator-framework/operator-sdk/internal/util/projutil"
-	"github.com/operator-framework/operator-sdk/pkg/k8sutil"
-	"github.com/operator-framework/operator-sdk/pkg/test"
-
-	log "github.com/sirupsen/logrus"
-	"github.com/spf13/cobra"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/kubernetes"
-)
-
-type testClusterConfig struct {
-	namespace       string
-	kubeconfig      string
-	imagePullPolicy string
-	serviceAccount  string
-	pendingTimeout  int
-}
-
-var tcConfig testClusterConfig
-
-func newTestClusterCmd() *cobra.Command {
-	testCmd := &cobra.Command{
-		Use:   "cluster <image name> [flags]",
-		Short: "Run End-To-End tests using image with embedded test binary",
-		RunE:  testClusterFunc,
-	}
-	testCmd.Flags().StringVar(&tcConfig.namespace, "namespace", "", "Namespace to run tests in")
-	testCmd.Flags().StringVar(&tcConfig.kubeconfig, "kubeconfig", "", "Kubeconfig path")
-	testCmd.Flags().StringVar(&tcConfig.imagePullPolicy, "image-pull-policy", "Always", "Set test pod image pull policy. Allowed values: Always, Never")
-	testCmd.Flags().StringVar(&tcConfig.serviceAccount, "service-account", "default", "Service account to run tests on")
-	testCmd.Flags().IntVar(&tcConfig.pendingTimeout, "pending-timeout", 60, "Timeout in seconds for testing pod to stay in pending state (default 60s)")
-
-	return testCmd
-}
-
-func testClusterFunc(cmd *cobra.Command, args []string) error {
-	if len(args) != 1 {
-		return fmt.Errorf("command %s requires exactly one argument", cmd.CommandPath())
-	}
-
-	log.Info("Testing operator in cluster.")
-
-	var pullPolicy v1.PullPolicy
-	if strings.ToLower(tcConfig.imagePullPolicy) == "always" {
-		pullPolicy = v1.PullAlways
-	} else if strings.ToLower(tcConfig.imagePullPolicy) == "never" {
-		pullPolicy = v1.PullNever
-	} else {
-		return fmt.Errorf("invalid imagePullPolicy '%v'", tcConfig.imagePullPolicy)
-	}
-
-	var testCmd []string
-	switch projutil.GetOperatorType() {
-	case projutil.OperatorTypeGo:
-		testCmd = []string{"/" + scaffold.GoTestScriptFile}
-	case projutil.OperatorTypeAnsible:
-		testCmd = []string{"/" + ansible.BuildTestFrameworkAnsibleTestScriptFile}
-	case projutil.OperatorTypeHelm:
-		log.Fatal("`test cluster` for Helm operators is not implemented")
-	default:
-		log.Fatal("Failed to determine operator type")
-	}
-
-	// cobra prints its help message on error; we silence that here because any errors below
-	// are due to the test failing, not incorrect user input
-	cmd.SilenceUsage = true
-	testPod := &v1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "operator-test",
-		},
-		Spec: v1.PodSpec{
-			ServiceAccountName: tcConfig.serviceAccount,
-			RestartPolicy:      v1.RestartPolicyNever,
-			Containers: []v1.Container{{
-				Name:            "operator-test",
-				Image:           args[0],
-				ImagePullPolicy: pullPolicy,
-				Command:         testCmd,
-				Env: []v1.EnvVar{{
-					Name:      test.TestNamespaceEnv,
-					ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.namespace"}},
-				}, {
-					Name:  k8sutil.OperatorNameEnvVar,
-					Value: "test-operator",
-				}, {
-					Name:      k8sutil.PodNameEnvVar,
-					ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"}},
-				}},
-			}},
-		},
-	}
-	kubeconfig, defaultNamespace, err := k8sInternal.GetKubeconfigAndNamespace(tcConfig.kubeconfig)
-	if err != nil {
-		return fmt.Errorf("failed to get kubeconfig: %v", err)
-	}
-	if tcConfig.namespace == "" {
-		tcConfig.namespace = defaultNamespace
-	}
-	kubeclient, err := kubernetes.NewForConfig(kubeconfig)
-	if err != nil {
-		return fmt.Errorf("failed to create kubeclient: %v", err)
-	}
-	testPod, err = kubeclient.CoreV1().Pods(tcConfig.namespace).Create(testPod)
-	if err != nil {
-		return fmt.Errorf("failed to create test pod: %v", err)
-	}
-	defer func() {
-		rerr := kubeclient.CoreV1().Pods(tcConfig.namespace).Delete(testPod.Name, &metav1.DeleteOptions{})
-		if rerr != nil {
-			log.Warnf("Failed to delete test pod: %v", rerr)
-		}
-	}()
-	err = wait.Poll(time.Second*5, time.Second*time.Duration(tcConfig.pendingTimeout), func() (bool, error) {
-		testPod, err = kubeclient.CoreV1().Pods(tcConfig.namespace).Get(testPod.Name, metav1.GetOptions{})
-		if err != nil {
-			return false, fmt.Errorf("failed to get test pod: %v", err)
-		}
-		if testPod.Status.Phase == v1.PodPending {
-			return false, nil
-		}
-		return true, nil
-	})
-	if err != nil {
-		testPod, err = kubeclient.CoreV1().Pods(tcConfig.namespace).Get(testPod.Name, metav1.GetOptions{})
-		if err != nil {
-			return fmt.Errorf("failed to get test pod: %v", err)
-		}
-		waitingState := testPod.Status.ContainerStatuses[0].State.Waiting
-		return fmt.Errorf("test pod stuck in 'Pending' phase for longer than %d seconds.\nMessage: %s\nReason: %s", tcConfig.pendingTimeout, waitingState.Message, waitingState.Reason)
-	}
-	for {
-		testPod, err = kubeclient.CoreV1().Pods(tcConfig.namespace).Get(testPod.Name, metav1.GetOptions{})
-		if err != nil {
-			return fmt.Errorf("failed to get test pod: %v", err)
-		}
-		if testPod.Status.Phase != v1.PodSucceeded && testPod.Status.Phase != v1.PodFailed {
-			time.Sleep(time.Second * 5)
-			continue
-		} else if testPod.Status.Phase == v1.PodSucceeded {
-			log.Info("Cluster test successfully completed.")
-			return nil
-		} else if testPod.Status.Phase == v1.PodFailed {
-			req := kubeclient.CoreV1().Pods(tcConfig.namespace).GetLogs(testPod.Name, &v1.PodLogOptions{})
-			readCloser, err := req.Stream()
-			if err != nil {
-				return fmt.Errorf("test failed and failed to get error logs: %v", err)
-			}
-			defer func() {
-				if err := readCloser.Close(); err != nil && !fileutil.IsClosedError(err) {
-					log.Errorf("Failed to close pod log reader: (%v)", err)
-				}
-			}()
-			buf := new(bytes.Buffer)
-			_, err = buf.ReadFrom(readCloser)
-			if err != nil {
-				return fmt.Errorf("test failed and failed to read pod logs: %v", err)
-			}
-			return fmt.Errorf("test failed:\n%s", buf.String())
-		}
-	}
-}
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/test/cmd.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/test/cmd.go
index a955bd7..808c704 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/test/cmd.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/test/cmd.go
@@ -22,11 +22,10 @@ func NewCmd() *cobra.Command {
 	testCmd := &cobra.Command{
 		Use:   "test",
 		Short: "Tests the operator",
-		Long: `The test command has subcommands that can test the operator locally or from within a cluster.
+		Long: `The test command has subcommands that can test the operator.
 `,
 	}
 
 	testCmd.AddCommand(newTestLocalCmd())
-	testCmd.AddCommand(newTestClusterCmd())
 	return testCmd
 }
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/test/local.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/test/local.go
index b2894df..fd1bd48 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/test/local.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/test/local.go
@@ -75,8 +75,7 @@ func newTestLocalCmd() *cobra.Command {
 }
 
 func testLocalFunc(cmd *cobra.Command, args []string) error {
-	t := projutil.GetOperatorType()
-	switch t {
+	switch t := projutil.GetOperatorType(); t {
 	case projutil.OperatorTypeGo:
 		return testLocalGoFunc(cmd, args)
 	case projutil.OperatorTypeAnsible:
@@ -84,7 +83,7 @@ func testLocalFunc(cmd *cobra.Command, args []string) error {
 	case projutil.OperatorTypeHelm:
 		return fmt.Errorf("`test local` for Helm operators is not implemented")
 	}
-	return fmt.Errorf("unknown operator type '%v'", t)
+	return projutil.ErrUnknownOperatorType{}
 }
 
 func testLocalAnsibleFunc(cmd *cobra.Command, args []string) error {
@@ -190,13 +189,14 @@ func testLocalGoFunc(cmd *cobra.Command, args []string) error {
 			return fmt.Errorf("failed to overwrite operator image in the namespaced manifest: %v", err)
 		}
 	}
-	testArgs := []string{"test", args[0] + "/..."}
+	testArgs := []string{
+		"-" + test.NamespacedManPathFlag, tlConfig.namespacedManPath,
+		"-" + test.GlobalManPathFlag, tlConfig.globalManPath,
+		"-" + test.ProjRootFlag, projutil.MustGetwd(),
+	}
 	if tlConfig.kubeconfig != "" {
 		testArgs = append(testArgs, "-"+test.KubeConfigFlag, tlConfig.kubeconfig)
 	}
-	testArgs = append(testArgs, "-"+test.NamespacedManPathFlag, tlConfig.namespacedManPath)
-	testArgs = append(testArgs, "-"+test.GlobalManPathFlag, tlConfig.globalManPath)
-	testArgs = append(testArgs, "-"+test.ProjRootFlag, projutil.MustGetwd())
 	// if we do the append using an empty go flags, it inserts an empty arg, which causes
 	// any later flags to be ignored
 	if tlConfig.goTestFlags != "" {
@@ -208,13 +208,18 @@ func testLocalGoFunc(cmd *cobra.Command, args []string) error {
 	if tlConfig.upLocal {
 		testArgs = append(testArgs, "-"+test.LocalOperatorFlag)
 	}
-	dc := exec.Command("go", testArgs...)
-	dc.Env = append(os.Environ(), fmt.Sprintf("%v=%v", test.TestNamespaceEnv, tlConfig.namespace))
-	dc.Dir = projutil.MustGetwd()
-	if err := projutil.ExecCmd(dc); err != nil {
-		return err
+	opts := projutil.GoTestOptions{
+		GoCmdOptions: projutil.GoCmdOptions{
+			PackagePath: args[0] + "/...",
+			Env:         append(os.Environ(), fmt.Sprintf("%v=%v", test.TestNamespaceEnv, tlConfig.namespace)),
+			Dir:         projutil.MustGetwd(),
+			GoMod:       projutil.IsDepManagerGoMod(),
+		},
+		TestBinaryArgs: testArgs,
+	}
+	if err := projutil.GoTest(opts); err != nil {
+		return fmt.Errorf("failed to build test binary: (%v)", err)
 	}
-
 	log.Info("Local operator test successfully completed.")
 	return nil
 }
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/up/local.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/up/local.go
index e7ed894..a81b353 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/up/local.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/up/local.go
@@ -87,8 +87,7 @@ func upLocalFunc(cmd *cobra.Command, args []string) error {
 	}
 	log.Infof("Using namespace %s.", namespace)
 
-	t := projutil.GetOperatorType()
-	switch t {
+	switch t := projutil.GetOperatorType(); t {
 	case projutil.OperatorTypeGo:
 		return upLocal()
 	case projutil.OperatorTypeAnsible:
@@ -96,7 +95,7 @@ func upLocalFunc(cmd *cobra.Command, args []string) error {
 	case projutil.OperatorTypeHelm:
 		return upLocalHelm()
 	}
-	return fmt.Errorf("unknown operator type '%v'", t)
+	return projutil.ErrUnknownOperatorType{}
 }
 
 func upLocal() error {
@@ -178,17 +177,17 @@ func setupOperatorEnv() error {
 }
 
 func buildLocal(outputBinName string) error {
-	args := []string{"build", "-o", outputBinName}
+	var args []string
 	if ldFlags != "" {
-		args = append(args, "-ldflags", ldFlags)
+		args = []string{"-ldflags", ldFlags}
 	}
-	args = append(args, filepath.Join(scaffold.ManagerDir, scaffold.CmdFile))
-
-	bc := exec.Command("go", args...)
-	if err := projutil.ExecCmd(bc); err != nil {
-		return err
+	opts := projutil.GoCmdOptions{
+		BinName:     outputBinName,
+		PackagePath: filepath.Join(projutil.CheckAndGetProjectGoPkg(), scaffold.ManagerDir),
+		Args:        args,
+		GoMod:       projutil.IsDepManagerGoMod(),
 	}
-	return nil
+	return projutil.GoBuild(opts)
 }
 
 func printVersion() {
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/version/cmd.go b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/version/cmd.go
index dbc369f..d0f3d25 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/version/cmd.go
+++ b/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/version/cmd.go
@@ -27,7 +27,11 @@ func NewCmd() *cobra.Command {
 		Use:   "version",
 		Short: "Prints the version of operator-sdk",
 		Run: func(cmd *cobra.Command, args []string) {
-			fmt.Println("operator-sdk version:", ver.Version)
+			version := ver.GitVersion
+			if version == "unknown" {
+				version = ver.Version
+			}
+			fmt.Printf("operator-sdk version: %s, commit: %s\n", version, ver.GitCommit)
 		},
 	}
 	return versionCmd
diff --git a/vendor/github.com/operator-framework/operator-sdk/version/version.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/flags/flags.go
similarity index 82%
copy from vendor/github.com/operator-framework/operator-sdk/version/version.go
copy to vendor/github.com/operator-framework/operator-sdk/internal/pkg/flags/flags.go
index 51db095..cc374cf 100644
--- a/vendor/github.com/operator-framework/operator-sdk/version/version.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/flags/flags.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Operator-SDK Authors
+// Copyright 2019 The Operator-SDK Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,8 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package version
+package flags
 
-var (
-	Version = "v0.7.0"
+// global command-line flags
+const (
+	VerboseOpt = "verbose"
 )
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/ao_logs.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/ao_logs.go
index 2c116aa..2b00e86 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/ao_logs.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/ao_logs.go
@@ -22,7 +22,7 @@ import (
 
 //DockerfileHybrid - Dockerfile for a hybrid operator
 type AoLogs struct {
-	input.Input
+	StaticInput
 }
 
 // GetInput - gets the input
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/build_dockerfile.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/build_dockerfile.go
index 0b1a47c..7e74c54 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/build_dockerfile.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/build_dockerfile.go
@@ -38,17 +38,18 @@ func (b *BuildDockerfile) GetInput() (input.Input, error) {
 		b.Path = filepath.Join(scaffold.BuildDir, BuildDockerfileFile)
 	}
 	b.TemplateBody = buildDockerfileAnsibleTmpl
+	b.Delims = AnsibleDelims
 	b.RolesDir = RolesDir
 	b.ImageTag = strings.TrimSuffix(version.Version, "+git")
 	return b.Input, nil
 }
 
-const buildDockerfileAnsibleTmpl = `FROM quay.io/operator-framework/ansible-operator:{{.ImageTag}}
+const buildDockerfileAnsibleTmpl = `FROM quay.io/operator-framework/ansible-operator:[[.ImageTag]]
 
 COPY watches.yaml ${HOME}/watches.yaml
 
-COPY {{.RolesDir}}/ ${HOME}/{{.RolesDir}}/
-{{- if .GeneratePlaybook }}
+COPY [[.RolesDir]]/ ${HOME}/[[.RolesDir]]/
+[[- if .GeneratePlaybook ]]
 COPY playbook.yml ${HOME}/playbook.yml
-{{- end }}
+[[- end ]]
 `
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/build_test_framework_ansible_test_script.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/build_test_framework_ansible_test_script.go
index f390b9f..f225c74 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/build_test_framework_ansible_test_script.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/build_test_framework_ansible_test_script.go
@@ -24,7 +24,7 @@ import (
 const BuildTestFrameworkAnsibleTestScriptFile = "ansible-test.sh"
 
 type BuildTestFrameworkAnsibleTestScript struct {
-	input.Input
+	StaticInput
 }
 
 // GetInput - gets the input
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/build_test_framework_dockerfile.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/build_test_framework_dockerfile.go
index 173ab04..a153a82 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/build_test_framework_dockerfile.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/build_test_framework_dockerfile.go
@@ -24,7 +24,7 @@ import (
 const BuildTestFrameworkDockerfileFile = "Dockerfile"
 
 type BuildTestFrameworkDockerfile struct {
-	input.Input
+	StaticInput
 }
 
 // GetInput - gets the input
@@ -40,7 +40,10 @@ func (b *BuildTestFrameworkDockerfile) GetInput() (input.Input, error) {
 const buildTestFrameworkDockerfileAnsibleTmpl = `ARG BASEIMAGE
 FROM ${BASEIMAGE}
 USER 0
-RUN yum install -y python-devel gcc libffi-devel && pip install molecule
+
+RUN yum install -y python-devel gcc libffi-devel
+RUN pip install molecule==2.20.1
+
 ARG NAMESPACEDMAN
 ADD $NAMESPACEDMAN /namespaced.yaml
 ADD build/test-framework/ansible-test.sh /ansible-test.sh
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/constants.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/constants.go
index c68825f..d6c1c39 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/constants.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/constants.go
@@ -26,3 +26,6 @@ const (
 	MoleculeDefaultDir     = MoleculeDir + filePathSep + "default"
 	MoleculeTestLocalDir   = MoleculeDir + filePathSep + "test-local"
 )
+
+// Arrays can't be constants in Go, but treat this as one.
+var AnsibleDelims = [2]string{"[[", "]]"}
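The AnsibleDelims pair switches the Go template delimiters to [[ ]] in the Ansible scaffolds, so literal Jinja2 {{ ... }} expressions can pass through to the generated files unescaped (see the deploy_operator.go and molecule templates below). A minimal sketch of the mechanism with the standard text/template package, using an illustrative project name:

    package main

    import (
    	"os"
    	"text/template"
    )

    // {{ REPLACE_IMAGE }} stays literal for Ansible/Jinja2, while
    // [[.ProjectName]] is expanded by Go's template engine.
    const operatorTmpl = `name: [[.ProjectName]]
    image: "{{ REPLACE_IMAGE }}"
    `

    func main() {
    	t := template.Must(template.New("operator").Delims("[[", "]]").Parse(operatorTmpl))
    	// "example-operator" is an illustrative project name.
    	if err := t.Execute(os.Stdout, struct{ ProjectName string }{"example-operator"}); err != nil {
    		panic(err)
    	}
    }
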
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/deploy_operator.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/deploy_operator.go
index b620051..85cca48 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/deploy_operator.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/deploy_operator.go
@@ -25,7 +25,6 @@ const DeployOperatorFile = "operator.yaml"
 
 type DeployOperator struct {
 	input.Input
-	IsClusterScoped bool
 }
 
 // GetInput - gets the input
@@ -34,6 +33,7 @@ func (d *DeployOperator) GetInput() (input.Input, error) {
 		d.Path = filepath.Join(scaffold.DeployDir, DeployOperatorFile)
 	}
 	d.TemplateBody = deployOperatorAnsibleTmpl
+	d.Delims = AnsibleDelims
 
 	return d.Input, nil
 }
@@ -41,18 +41,18 @@ func (d *DeployOperator) GetInput() (input.Input, error) {
 const deployOperatorAnsibleTmpl = `apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: {{.ProjectName}}
+  name: [[.ProjectName]]
 spec:
   replicas: 1
   selector:
     matchLabels:
-      name: {{.ProjectName}}
+      name: [[.ProjectName]]
   template:
     metadata:
       labels:
-        name: {{.ProjectName}}
+        name: [[.ProjectName]]
     spec:
-      serviceAccountName: {{.ProjectName}}
+      serviceAccountName: [[.ProjectName]]
       containers:
         - name: ansible
           command:
@@ -60,34 +60,30 @@ spec:
           - /tmp/ansible-operator/runner
           - stdout
           # Replace this with the built image name
-          image: "{{ "{{ REPLACE_IMAGE }}" }}"
-          imagePullPolicy: "{{ "{{ pull_policy|default('Always') }}"}}"
+          image: "{{ REPLACE_IMAGE }}"
+          imagePullPolicy: "{{ pull_policy|default('Always') }}"
           volumeMounts:
           - mountPath: /tmp/ansible-operator/runner
             name: runner
             readOnly: true
         - name: operator
           # Replace this with the built image name
-          image: "{{ "{{ REPLACE_IMAGE }}" }}"
-          imagePullPolicy: "{{ "{{ pull_policy|default('Always') }}"}}"
+          image: "{{ REPLACE_IMAGE }}"
+          imagePullPolicy: "{{ pull_policy|default('Always') }}"
           volumeMounts:
           - mountPath: /tmp/ansible-operator/runner
             name: runner
           env:
             - name: WATCH_NAMESPACE
-              {{- if .IsClusterScoped }}
-              value: ""
-              {{- else }}
               valueFrom:
                 fieldRef:
                   fieldPath: metadata.namespace
-              {{- end}}
             - name: POD_NAME
               valueFrom:
                 fieldRef:
                   fieldPath: metadata.name
             - name: OPERATOR_NAME
-              value: "{{.ProjectName}}"
+              value: "[[.ProjectName]]"
       volumes:
         - name: runner
           emptyDir: {}
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/dockerfilehybrid.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/dockerfilehybrid.go
index eb7db27..51b4208 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/dockerfilehybrid.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/dockerfilehybrid.go
@@ -41,17 +41,24 @@ func (d *DockerfileHybrid) GetInput() (input.Input, error) {
 		d.Path = filepath.Join(scaffold.BuildDir, scaffold.DockerfileFile)
 	}
 	d.TemplateBody = dockerFileHybridAnsibleTmpl
+	d.Delims = AnsibleDelims
 	return d.Input, nil
 }
 
-const dockerFileHybridAnsibleTmpl = `FROM ansible/ansible-runner
+const dockerFileHybridAnsibleTmpl = `FROM ansible/ansible-runner:1.2
 
 RUN yum remove -y ansible python-idna
 RUN yum install -y inotify-tools && yum clean all
 RUN pip uninstall ansible-runner -y
 
-RUN pip install --upgrade setuptools
-RUN pip install ansible ansible-runner openshift kubernetes ansible-runner-http idna==2.7
+RUN pip install --upgrade setuptools==41.0.1
+RUN pip install "urllib3>=1.23,<1.25"
+RUN pip install ansible==2.7.10 \
+	ansible-runner==1.2 \
+	ansible-runner-http==1.0.0 \
+	idna==2.7 \
+	"kubernetes>=8.0.0,<9.0.0" \
+	openshift==0.8.8
 
 RUN mkdir -p /etc/ansible \
     && echo "localhost ansible_connection=local" > /etc/ansible/hosts \
@@ -64,21 +71,21 @@ ENV OPERATOR=/usr/local/bin/ansible-operator \
     USER_NAME=ansible-operator\
     HOME=/opt/ansible
 
-{{- if .Watches }}
-COPY watches.yaml ${HOME}/watches.yaml{{ end }}
+[[- if .Watches ]]
+COPY watches.yaml ${HOME}/watches.yaml[[ end ]]
 
 # install operator binary
-COPY build/_output/bin/{{.ProjectName}} ${OPERATOR}
+COPY build/_output/bin/[[.ProjectName]] ${OPERATOR}
 # install k8s_status Ansible Module
 COPY library/k8s_status.py /usr/share/ansible/openshift/
 
 COPY bin /usr/local/bin
 RUN  /usr/local/bin/user_setup
 
-{{- if .Roles }}
-COPY roles/ ${HOME}/roles/{{ end }}
-{{- if .Playbook }}
-COPY playbook.yml ${HOME}/playbook.yml{{ end }}
+[[- if .Roles ]]
+COPY roles/ ${HOME}/roles/[[ end ]]
+[[- if .Playbook ]]
+COPY playbook.yml ${HOME}/playbook.yml[[ end ]]
 
 ENTRYPOINT ["/usr/local/bin/entrypoint"]
 
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/entrypoint.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/entrypoint.go
index 0338cc5..5596f38 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/entrypoint.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/entrypoint.go
@@ -22,7 +22,7 @@ import (
 
 // Entrypoint - entrypoint script
 type Entrypoint struct {
-	input.Input
+	StaticInput
 }
 
 func (e *Entrypoint) GetInput() (input.Input, error) {
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/go_mod.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/go_mod.go
new file mode 100644
index 0000000..a283f42
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/go_mod.go
@@ -0,0 +1,126 @@
+// Copyright 2019 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ansible
+
+import (
+	"fmt"
+
+	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
+	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/internal/deps"
+)
+
+const GoModFile = "go.mod"
+
+// GoMod - the go.mod file for an Ansible hybrid operator.
+type GoMod struct {
+	input.Input
+}
+
+func (s *GoMod) GetInput() (input.Input, error) {
+	if s.Path == "" {
+		s.Path = GoModFile
+	}
+	s.TemplateBody = goModTmpl
+	return s.Input, nil
+}
+
+const goModTmpl = `module {{ .Repo }}
+
+require (
+	cloud.google.com/go v0.37.2 // indirect
+	contrib.go.opencensus.io/exporter/ocagent v0.4.9 // indirect
+	github.com/Azure/go-autorest v11.7.0+incompatible // indirect
+	github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 // indirect
+	github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
+	github.com/emicklei/go-restful v2.9.3+incompatible // indirect
+	github.com/go-logr/logr v0.1.0 // indirect
+	github.com/go-logr/zapr v0.1.1 // indirect
+	github.com/go-openapi/spec v0.19.0 // indirect
+	github.com/gogo/protobuf v1.2.1 // indirect
+	github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect
+	github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect
+	github.com/google/uuid v1.1.1 // indirect
+	github.com/googleapis/gnostic v0.2.0 // indirect
+	github.com/gophercloud/gophercloud v0.0.0-20190328150603-33e54f40ffcf // indirect
+	github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc // indirect
+	github.com/grpc-ecosystem/grpc-gateway v1.8.5 // indirect
+	github.com/hashicorp/golang-lru v0.5.1 // indirect
+	github.com/imdario/mergo v0.3.7 // indirect
+	github.com/json-iterator/go v1.1.6 // indirect
+	github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
+	github.com/markbates/inflect v1.0.4 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 // indirect
+	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
+	github.com/operator-framework/operator-sdk master
+	github.com/pborman/uuid v0.0.0-20180906182336-adf5a7427709 // indirect
+	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
+	github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect
+	github.com/prometheus/procfs v0.0.0-20190328153300-af7bedc223fb // indirect
+	github.com/sirupsen/logrus v1.4.0 // indirect
+	github.com/spf13/afero v1.2.2 // indirect
+	github.com/spf13/pflag v1.0.3
+	go.uber.org/atomic v1.3.2 // indirect
+	go.uber.org/multierr v1.1.0 // indirect
+	go.uber.org/zap v1.9.1 // indirect
+	golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c // indirect
+	golang.org/x/net v0.0.0-20190327214358-63eda1eb0650 // indirect
+	golang.org/x/oauth2 v0.0.0-20190319182350-c85d3e98c914 // indirect
+	golang.org/x/sys v0.0.0-20190322080309-f49334f85ddc // indirect
+	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
+	google.golang.org/appengine v1.5.0 // indirect
+	google.golang.org/genproto v0.0.0-20190327125643-d831d65fe17d // indirect
+	k8s.io/api v0.0.0-20181213150558-05914d821849 // indirect
+	k8s.io/apiextensions-apiserver v0.0.0-20190328030136-8ada4fd07db4
+	k8s.io/apimachinery v0.0.0-20181127025237-2b1284ed4c93 // indirect
+	k8s.io/client-go v0.0.0-20181213151034-8d9ed539ba31
+	k8s.io/code-generator v0.0.0-20190405172246-9a4d48088f6a
+	k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a
+	k8s.io/klog v0.2.0 // indirect
+	k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580
+	k8s.io/kubernetes v1.14.1 // indirect
+	sigs.k8s.io/controller-runtime v0.1.10
+	sigs.k8s.io/controller-tools v0.1.10
+	sigs.k8s.io/testing_frameworks v0.1.1 // indirect
+	sigs.k8s.io/yaml v1.1.0 // indirect
+)
+
+// Pinned to kubernetes-1.13.1
+replace (
+	k8s.io/api => k8s.io/api v0.0.0-20181213150558-05914d821849
+	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20181127025237-2b1284ed4c93
+	k8s.io/client-go => k8s.io/client-go v0.0.0-20181213151034-8d9ed539ba31
+	k8s.io/kubernetes => k8s.io/kubernetes v1.13.1
+)
+
+replace (
+	github.com/coreos/prometheus-operator => github.com/coreos/prometheus-operator v0.29.0
+	k8s.io/code-generator => k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b
+	k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20180711000925-0cf8f7e6ed1d
+	github.com/operator-framework/operator-sdk => github.com/operator-framework/operator-sdk v0.8.0
+)
+`
+
+func PrintGoMod(asFile bool) error {
+	b, err := deps.ExecGoModTmpl(goModTmpl)
+	if err != nil {
+		return err
+	}
+	if asFile {
+		fmt.Print(string(b))
+		return nil
+	}
+	return deps.PrintGoMod(b)
+}
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/gopkgtoml.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/gopkgtoml.go
index eb910d2..56e412c 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/gopkgtoml.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/gopkgtoml.go
@@ -15,13 +15,16 @@
 package ansible
 
 import (
+	"fmt"
+
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
+	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/internal/deps"
 )
 
 // GopkgToml - the Gopkg.toml file for a hybrid operator
 type GopkgToml struct {
-	input.Input
+	StaticInput
 }
 
 func (s *GopkgToml) GetInput() (input.Input, error) {
@@ -36,7 +39,7 @@ const gopkgTomlTmpl = `[[constraint]]
   name = "github.com/operator-framework/operator-sdk"
   # The version rule is used for a specific release and the master branch for in between releases.
   # branch = "master" #osdk_branch_annotation
-  version = "=v0.7.0" #osdk_version_annotation
+  version = "=v0.8.0" #osdk_version_annotation
 
 [[override]]
   name = "k8s.io/api"
@@ -54,3 +57,11 @@ const gopkgTomlTmpl = `[[constraint]]
   go-tests = true
   unused-packages = true
 `
+
+func PrintDepGopkgTOML(asFile bool) error {
+	if asFile {
+		_, err := fmt.Println(gopkgTomlTmpl)
+		return err
+	}
+	return deps.PrintDepGopkgTOML(gopkgTomlTmpl)
+}
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/version.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/input.go
similarity index 63%
copy from vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/version.go
copy to vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/input.go
index 8bd2ccf..ac3e899 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/version.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/input.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Operator-SDK Authors
+// Copyright 2019 The Operator-SDK Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -11,32 +11,22 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-
-package scaffold
+package ansible
 
 import (
-	"path/filepath"
-
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
+	"github.com/spf13/afero"
 )
 
-const VersionFile = "version.go"
-
-type Version struct {
+// StaticInput is the input for scaffolding a static file with
+// no parameters
+type StaticInput struct {
 	input.Input
 }
 
-func (s *Version) GetInput() (input.Input, error) {
-	if s.Path == "" {
-		s.Path = filepath.Join(VersionDir, VersionFile)
-	}
-	s.TemplateBody = versionTemplate
-	return s.Input, nil
+// CustomRender return the template body unmodified
+func (s *StaticInput) CustomRender() ([]byte, error) {
+	return []byte(s.TemplateBody), nil
 }
 
-const versionTemplate = `package version
-
-var (
-	Version = "0.0.1"
-)
-`
+func (s StaticInput) SetFS(_ afero.Fs) {}
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/k8s_status.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/k8s_status.go
index aa6d61d..2c9c5b0 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/k8s_status.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/k8s_status.go
@@ -16,15 +16,13 @@ package ansible
 
 import (
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
-
-	"github.com/spf13/afero"
 )
 
 const K8sStatusPythonFile = "library/k8s_status.py"
 
 // K8sStatus - the k8s status module tmpl wrapper
 type K8sStatus struct {
-	input.Input
+	StaticInput
 }
 
 // GetInput - gets the input
@@ -32,13 +30,10 @@ func (k *K8sStatus) GetInput() (input.Input, error) {
 	if k.Path == "" {
 		k.Path = K8sStatusPythonFile
 	}
-	return k.Input, nil
-}
 
-func (s K8sStatus) SetFS(_ afero.Fs) {}
+	k.TemplateBody = k8sStatusTmpl
 
-func (k K8sStatus) CustomRender() ([]byte, error) {
-	return []byte(k8sStatusTmpl), nil
+	return k.Input, nil
 }
 
 const k8sStatusTmpl = `#!/usr/bin/python
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/main.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/main.go
index b457d06..267b439 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/main.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/main.go
@@ -22,7 +22,7 @@ import (
 
 // Main - main source file for ansible operator
 type Main struct {
-	input.Input
+	StaticInput
 }
 
 func (m *Main) GetInput() (input.Input, error) {
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_default_asserts.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_default_asserts.go
index 6ec54b3..019f4bd 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_default_asserts.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_default_asserts.go
@@ -32,6 +32,7 @@ func (m *MoleculeDefaultAsserts) GetInput() (input.Input, error) {
 		m.Path = filepath.Join(MoleculeDefaultDir, MoleculeDefaultAssertsFile)
 	}
 	m.TemplateBody = moleculeDefaultAssertsAnsibleTmpl
+	m.Delims = AnsibleDelims
 
 	return m.Input, nil
 }
@@ -42,13 +43,13 @@ const moleculeDefaultAssertsAnsibleTmpl = `---
   hosts: localhost
   connection: local
   vars:
-    ansible_python_interpreter: '{{"{{ ansible_playbook_python }}"}}'
+    ansible_python_interpreter: '{{ ansible_playbook_python }}'
   tasks:
-    - name: Get all pods in {{"{{ namespace }}"}}
+    - name: Get all pods in {{ namespace }}
       k8s_facts:
         api_version: v1
         kind: Pod
-        namespace: '{{"{{ namespace }}"}}'
+        namespace: '{{ namespace }}'
       register: pods
 
     - name: Output pods
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_default_molecule.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_default_molecule.go
index 5416cd2..d0fe314 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_default_molecule.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_default_molecule.go
@@ -23,7 +23,7 @@ import (
 const MoleculeDefaultMoleculeFile = "molecule.yml"
 
 type MoleculeDefaultMolecule struct {
-	input.Input
+	StaticInput
 }
 
 // GetInput - gets the input
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_default_playbook.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_default_playbook.go
index 28157ab..6a039bb 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_default_playbook.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_default_playbook.go
@@ -35,24 +35,25 @@ func (m *MoleculeDefaultPlaybook) GetInput() (input.Input, error) {
 		m.Path = filepath.Join(MoleculeDefaultDir, MoleculeDefaultPlaybookFile)
 	}
 	m.TemplateBody = moleculeDefaultPlaybookAnsibleTmpl
+	m.Delims = AnsibleDelims
 
 	return m.Input, nil
 }
 
 const moleculeDefaultPlaybookAnsibleTmpl = `---
-{{- if .GeneratePlaybook }}
-- import_playbook: '{{"{{ playbook_dir }}/../../playbook.yml"}}'
-{{- end }}
+[[- if .GeneratePlaybook ]]
+- import_playbook: '{{ playbook_dir }}/../../playbook.yml'
+[[- end ]]
 
-  {{- if not .GeneratePlaybook }}
+  [[- if not .GeneratePlaybook ]]
 - name: Converge
   hosts: localhost
   connection: local
   vars:
-    ansible_python_interpreter: '{{ "{{ ansible_playbook_python }}" }}'
+    ansible_python_interpreter: '{{ ansible_playbook_python }}'
   roles:
-    - {{.Resource.LowerKind}}
-  {{- end }}
+    - [[.Resource.LowerKind]]
+  [[- end ]]
 
-- import_playbook: '{{"{{ playbook_dir }}/asserts.yml"}}'
+- import_playbook: '{{ playbook_dir }}/asserts.yml'
 `
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_default_prepare.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_default_prepare.go
index b84808d..3f7ecda 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_default_prepare.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_default_prepare.go
@@ -32,6 +32,7 @@ func (m *MoleculeDefaultPrepare) GetInput() (input.Input, error) {
 		m.Path = filepath.Join(MoleculeDefaultDir, MoleculeDefaultPrepareFile)
 	}
 	m.TemplateBody = moleculeDefaultPrepareAnsibleTmpl
+	m.Delims = AnsibleDelims
 
 	return m.Input, nil
 }
@@ -41,25 +42,25 @@ const moleculeDefaultPrepareAnsibleTmpl = `---
   hosts: k8s
   gather_facts: no
   vars:
-    kubeconfig: "{{"{{ lookup('env', 'KUBECONFIG') }}"}}"
+    kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}"
   tasks:
     - name: delete the kubeconfig if present
       file:
-        path: '{{"{{ kubeconfig }}"}}'
+        path: '{{ kubeconfig }}'
         state: absent
       delegate_to: localhost
 
     - name: Fetch the kubeconfig
       fetch:
-        dest: '{{ "{{ kubeconfig }}" }}'
+        dest: '{{ kubeconfig }}'
         flat: yes
         src: /root/.kube/config
 
     - name: Change the kubeconfig port to the proper value
       replace:
         regexp: 8443
-        replace: "{{"{{ lookup('env', 'KIND_PORT') }}"}}"
-        path: '{{ "{{ kubeconfig }}" }}'
+        replace: "{{ lookup('env', 'KIND_PORT') }}"
+        path: '{{ kubeconfig }}'
       delegate_to: localhost
 
     - name: Wait for the Kubernetes API to become available (this could take a minute)
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_cluster_molecule.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_cluster_molecule.go
index 930a427..356f4fe 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_cluster_molecule.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_cluster_molecule.go
@@ -23,7 +23,7 @@ import (
 const MoleculeTestClusterMoleculeFile = "molecule.yml"
 
 type MoleculeTestClusterMolecule struct {
-	input.Input
+	StaticInput
 }
 
 // GetInput - gets the input
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_cluster_playbook.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_cluster_playbook.go
index 255c908..fb57045 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_cluster_playbook.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_cluster_playbook.go
@@ -34,6 +34,7 @@ func (m *MoleculeTestClusterPlaybook) GetInput() (input.Input, error) {
 		m.Path = filepath.Join(MoleculeTestClusterDir, MoleculeTestClusterPlaybookFile)
 	}
 	m.TemplateBody = moleculeTestClusterPlaybookAnsibleTmpl
+	m.Delims = AnsibleDelims
 
 	return m.Input, nil
 }
@@ -44,31 +45,31 @@ const moleculeTestClusterPlaybookAnsibleTmpl = `---
   hosts: localhost
   connection: local
   vars:
-    ansible_python_interpreter: '{{ "{{ ansible_playbook_python }}" }}'
-    deploy_dir: "{{"{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') }}/deploy"}}"
-    image_name: {{.Resource.FullGroup}}/{{.ProjectName}}:testing
-    custom_resource: "{{"{{"}} lookup('file', '/'.join([deploy_dir, 'crds/{{.Resource.Group}}_{{.Resource.Version}}_{{.Resource.LowerKind}}_cr.yaml'])) | from_yaml {{"}}"}}"
+    ansible_python_interpreter: '{{ ansible_playbook_python }}'
+    deploy_dir: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') }}/deploy"
+    image_name: [[.Resource.FullGroup]]/[[.ProjectName]]:testing
+    custom_resource: "{{ lookup('file', '/'.join([deploy_dir, 'crds/[[.Resource.Group]]_[[.Resource.Version]]_[[.Resource.LowerKind]]_cr.yaml'])) | from_yaml }}"
   tasks:
-  - name: Create the {{.Resource.FullGroup}}/{{.Resource.Version}}.{{.Resource.Kind}}
+  - name: Create the [[.Resource.FullGroup]]/[[.Resource.Version]].[[.Resource.Kind]]
     k8s:
-      namespace: '{{ "{{ namespace }}" }}'
-      definition: "{{"{{"}} lookup('file', '/'.join([deploy_dir, 'crds/{{.Resource.Group}}_{{.Resource.Version}}_{{.Resource.LowerKind}}_cr.yaml'])) {{"}}"}}"
+      namespace: '{{ namespace }}'
+      definition: "{{ lookup('file', '/'.join([deploy_dir, 'crds/[[.Resource.Group]]_[[.Resource.Version]]_[[.Resource.LowerKind]]_cr.yaml'])) }}"
 
   - name: Get the newly created Custom Resource
     debug:
-      msg: "{{"{{"}} lookup('k8s', group='{{.Resource.FullGroup}}', api_version='{{.Resource.Version}}', kind='{{.Resource.Kind}}', namespace=namespace, resource_name=custom_resource.metadata.name) {{"}}"}}"
+      msg: "{{ lookup('k8s', group='[[.Resource.FullGroup]]', api_version='[[.Resource.Version]]', kind='[[.Resource.Kind]]', namespace=namespace, resource_name=custom_resource.metadata.name) }}"
 
   - name: Wait 40s for reconciliation to run
     k8s_facts:
-      api_version: '{{.Resource.Version}}'
-      kind: '{{.Resource.Kind }}'
-      namespace: '{{"{{"}} namespace {{"}}"}}'
-      name: '{{"{{"}} custom_resource.metadata.name {{"}}"}}'
+      api_version: '[[.Resource.Version]]'
+      kind: '[[.Resource.Kind]]'
+      namespace: '{{ namespace }}'
+      name: '{{ custom_resource.metadata.name }}'
     register: reconcile_cr
     until:
     - "'Successful' in (reconcile_cr | json_query('resources[].status.conditions[].reason'))"
     delay: 4
     retries: 10
 
-- import_playbook: "{{"{{ playbook_dir }}/../default/asserts.yml"}}"
+- import_playbook: '{{ playbook_dir }}/../default/asserts.yml'
 `
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_local_molecule.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_local_molecule.go
index 2b26e16..47453b6 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_local_molecule.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_local_molecule.go
@@ -23,7 +23,7 @@ import (
 const MoleculeTestLocalMoleculeFile = "molecule.yml"
 
 type MoleculeTestLocalMolecule struct {
-	input.Input
+	StaticInput
 }
 
 // GetInput - gets the input
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_local_playbook.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_local_playbook.go
index d31d0e6..ddb056f 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_local_playbook.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_local_playbook.go
@@ -34,6 +34,7 @@ func (m *MoleculeTestLocalPlaybook) GetInput() (input.Input, error) {
 		m.Path = filepath.Join(MoleculeTestLocalDir, MoleculeTestLocalPlaybookFile)
 	}
 	m.TemplateBody = moleculeTestLocalPlaybookAnsibleTmpl
+	m.Delims = AnsibleDelims
 
 	return m.Input, nil
 }
@@ -43,16 +44,16 @@ const moleculeTestLocalPlaybookAnsibleTmpl = `---
 - name: Build Operator in Kubernetes docker container
   hosts: k8s
   vars:
-    image_name: {{.Resource.FullGroup}}/{{.ProjectName}}:testing
+    image_name: [[.Resource.FullGroup]]/[[.ProjectName]]:testing
   tasks:
   # using command so we don't need to install any dependencies
   - name: Get existing image hash
-    command: docker images -q {{"{{image_name}}"}}
+    command: docker images -q {{ image_name }}
     register: prev_hash
     changed_when: false
 
   - name: Build Operator Image
-    command: docker build -f /build/build/Dockerfile -t {{"{{ image_name }}"}} /build
+    command: docker build -f /build/build/Dockerfile -t {{ image_name }} /build
     register: build_cmd
     changed_when: not prev_hash.stdout or (prev_hash.stdout and prev_hash.stdout not in ''.join(build_cmd.stdout_lines[-2:]))
 
@@ -60,29 +61,29 @@ const moleculeTestLocalPlaybookAnsibleTmpl = `---
   hosts: localhost
   connection: local
   vars:
-    ansible_python_interpreter: '{{ "{{ ansible_playbook_python }}" }}'
-    deploy_dir: "{{"{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') }}/deploy"}}"
+    ansible_python_interpreter: '{{ ansible_playbook_python }}'
+    deploy_dir: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') }}/deploy"
     pull_policy: Never
-    REPLACE_IMAGE: {{.Resource.FullGroup}}/{{.ProjectName}}:testing
-    custom_resource: "{{"{{"}} lookup('file', '/'.join([deploy_dir, 'crds/{{.Resource.Group}}_{{.Resource.Version}}_{{.Resource.LowerKind}}_cr.yaml'])) | from_yaml {{"}}"}}"
+    REPLACE_IMAGE: [[.Resource.FullGroup]]/[[.ProjectName]]:testing
+    custom_resource: "{{ lookup('file', '/'.join([deploy_dir, 'crds/[[.Resource.Group]]_[[.Resource.Version]]_[[.Resource.LowerKind]]_cr.yaml'])) | from_yaml }}"
   tasks:
   - block:
     - name: Delete the Operator Deployment
       k8s:
         state: absent
-        namespace: '{{ "{{ namespace }}" }}'
-        definition: "{{"{{"}} lookup('template', '/'.join([deploy_dir, 'operator.yaml'])) {{"}}"}}"
+        namespace: '{{ namespace }}'
+        definition: "{{ lookup('template', '/'.join([deploy_dir, 'operator.yaml'])) }}"
       register: delete_deployment
       when: hostvars[groups.k8s.0].build_cmd.changed
 
     - name: Wait 30s for Operator Deployment to terminate
       k8s_facts:
-        api_version: '{{"{{"}} definition.apiVersion {{"}}"}}'
-        kind: '{{"{{"}} definition.kind {{"}}"}}'
-        namespace: '{{"{{"}} namespace {{"}}"}}'
-        name: '{{"{{"}} definition.metadata.name {{"}}"}}'
+        api_version: '{{ definition.apiVersion }}'
+        kind: '{{ definition.kind }}'
+        namespace: '{{ namespace }}'
+        name: '{{ definition.metadata.name }}'
       vars:
-        definition: "{{"{{"}} lookup('template', '/'.join([deploy_dir, 'operator.yaml'])) | from_yaml {{"}}"}}"
+        definition: "{{ lookup('template', '/'.join([deploy_dir, 'operator.yaml'])) | from_yaml }}"
       register: deployment
       until: not deployment.resources
       delay: 3
@@ -91,21 +92,21 @@ const moleculeTestLocalPlaybookAnsibleTmpl = `---
 
     - name: Create the Operator Deployment
       k8s:
-        namespace: '{{ "{{ namespace }}" }}'
-        definition: "{{"{{"}} lookup('template', '/'.join([deploy_dir, 'operator.yaml'])) {{"}}"}}"
+        namespace: '{{ namespace }}'
+        definition: "{{ lookup('template', '/'.join([deploy_dir, 'operator.yaml'])) }}"
 
-    - name: Create the {{.Resource.FullGroup}}/{{.Resource.Version}}.{{.Resource.Kind}}
+    - name: Create the [[.Resource.FullGroup]]/[[.Resource.Version]].[[.Resource.Kind]]
       k8s:
         state: present
-        namespace: '{{ "{{ namespace }}" }}'
-        definition: "{{ "{{ custom_resource }}" }}"
+        namespace: '{{ namespace }}'
+        definition: '{{ custom_resource }}'
 
     - name: Wait 40s for reconciliation to run
       k8s_facts:
-        api_version: '{{"{{"}} custom_resource.apiVersion {{"}}"}}'
-        kind: '{{"{{"}} custom_resource.kind {{"}}"}}'
-        namespace: '{{"{{"}} namespace {{"}}"}}'
-        name: '{{"{{"}} custom_resource.metadata.name {{"}}"}}'
+        api_version: '{{ custom_resource.apiVersion }}'
+        kind: '{{ custom_resource.kind }}'
+        namespace: '{{ namespace }}'
+        name: '{{ custom_resource.metadata.name }}'
       register: cr
       until:
       - "'Successful' in (cr | json_query('resources[].status.conditions[].reason'))"
@@ -118,12 +119,12 @@ const moleculeTestLocalPlaybookAnsibleTmpl = `---
       debug:
         var: debug_cr
       vars:
-        debug_cr: '{{"{{"}} lookup("k8s",
+        debug_cr: '{{ lookup("k8s",
           kind=custom_resource.kind,
           api_version=custom_resource.apiVersion,
           namespace=namespace,
           resource_name=custom_resource.metadata.name
-        ){{"}}"}}'
+        )}}'
 
     - name: debug memcached lookup
       ignore_errors: yes
@@ -131,21 +132,21 @@ const moleculeTestLocalPlaybookAnsibleTmpl = `---
       debug:
         var: deploy
       vars:
-        deploy: '{{"{{"}} lookup("k8s",
+        deploy: '{{ lookup("k8s",
           kind="Deployment",
           api_version="apps/v1",
           namespace=namespace,
           label_selector="app=memcached"
-        ){{"}}"}}'
+        )}}'
 
     - name: get operator logs
       ignore_errors: yes
       failed_when: false
-      command: kubectl logs deployment/{{"{{"}} definition.metadata.name {{"}}"}} -n {{"{{"}} namespace {{"}}"}}
+      command: kubectl logs deployment/{{ definition.metadata.name }} -n {{ namespace }}
       environment:
-        KUBECONFIG: '{{"{{"}} lookup("env", "KUBECONFIG") {{"}}"}}'
+        KUBECONFIG: '{{ lookup("env", "KUBECONFIG") }}'
       vars:
-        definition: "{{"{{"}} lookup('template', '/'.join([deploy_dir, 'operator.yaml'])) | from_yaml {{"}}"}}"
+        definition: "{{ lookup('template', '/'.join([deploy_dir, 'operator.yaml'])) | from_yaml }}"
       register: log
 
     - debug: var=log.stdout_lines
@@ -153,5 +154,5 @@ const moleculeTestLocalPlaybookAnsibleTmpl = `---
     - fail:
         msg: "Failed on action: converge"
 
-- import_playbook: '{{"{{ playbook_dir }}/../default/asserts.yml"}}'
+- import_playbook: '{{ playbook_dir }}/../default/asserts.yml'
 `
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_local_prepare.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_local_prepare.go
index 3603967..3c0a8cc 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_local_prepare.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/molecule_test_local_prepare.go
@@ -34,6 +34,7 @@ func (m *MoleculeTestLocalPrepare) GetInput() (input.Input, error) {
 		m.Path = filepath.Join(MoleculeTestLocalDir, MoleculeTestLocalPrepareFile)
 	}
 	m.TemplateBody = moleculeTestLocalPrepareAnsibleTmpl
+	m.Delims = AnsibleDelims
 
 	return m.Input, nil
 }
@@ -45,23 +46,23 @@ const moleculeTestLocalPrepareAnsibleTmpl = `---
   hosts: localhost
   connection: local
   vars:
-    ansible_python_interpreter: '{{ "{{ ansible_playbook_python }}" }}'
-    deploy_dir: "{{"{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') }}/deploy"}}"
+    ansible_python_interpreter: '{{ ansible_playbook_python }}'
+    deploy_dir: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') }}/deploy"
   tasks:
   - name: Create Custom Resource Definition
     k8s:
-      definition: "{{"{{"}} lookup('file', '/'.join([deploy_dir, 'crds/{{.Resource.Group}}_{{.Resource.Version}}_{{.Resource.LowerKind}}_crd.yaml'])) {{"}}"}}"
+      definition: "{{ lookup('file', '/'.join([deploy_dir, 'crds/[[.Resource.Group]]_[[.Resource.Version]]_[[.Resource.LowerKind]]_crd.yaml'])) }}"
 
   - name: Ensure specified namespace is present
     k8s:
       api_version: v1
       kind: Namespace
-      name: '{{ "{{ namespace }}" }}'
+      name: '{{ namespace }}'
 
   - name: Create RBAC resources
     k8s:
-      definition: "{{"{{"}} lookup('template', '/'.join([deploy_dir, item])) {{"}}"}}"
-      namespace: '{{ "{{ namespace }}" }}'
+      definition: "{{ lookup('template', '/'.join([deploy_dir, item])) }}"
+      namespace: '{{ namespace }}'
     with_items:
       - role.yaml
       - role_binding.yaml
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/playbook.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/playbook.go
index 039172e..11dc031 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/playbook.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/playbook.go
@@ -33,6 +33,7 @@ func (p *Playbook) GetInput() (input.Input, error) {
 		p.Path = PlaybookYamlFile
 	}
 	p.TemplateBody = playbookTmpl
+	p.Delims = AnsibleDelims
 	return p.Input, nil
 }
 
@@ -40,5 +41,5 @@ const playbookTmpl = `- hosts: localhost
   gather_facts: no
   tasks:
   - import_role:
-      name: "{{.Resource.LowerKind}}"
+      name: "[[.Resource.LowerKind]]"
 `
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_defaults_main.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_defaults_main.go
index 0717ed8..5309b69 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_defaults_main.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_defaults_main.go
@@ -34,10 +34,11 @@ func (r *RolesDefaultsMain) GetInput() (input.Input, error) {
 		r.Path = filepath.Join(RolesDir, r.Resource.LowerKind, RolesDefaultsMainFile)
 	}
 	r.TemplateBody = rolesDefaultsMainAnsibleTmpl
+	r.Delims = AnsibleDelims
 
 	return r.Input, nil
 }
 
 const rolesDefaultsMainAnsibleTmpl = `---
-# defaults file for {{.Resource.LowerKind}}
+# defaults file for [[.Resource.LowerKind]]
 `
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_files.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_files.go
index d202052..aebc567 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_files.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_files.go
@@ -24,7 +24,7 @@ import (
 const RolesFilesDir = "files" + filePathSep + ".placeholder"
 
 type RolesFiles struct {
-	input.Input
+	StaticInput
 	Resource scaffold.Resource
 }
 
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_handlers_main.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_handlers_main.go
index 67a2a62..3ea9009 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_handlers_main.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_handlers_main.go
@@ -34,10 +34,11 @@ func (r *RolesHandlersMain) GetInput() (input.Input, error) {
 		r.Path = filepath.Join(RolesDir, r.Resource.LowerKind, RolesHandlersMainFile)
 	}
 	r.TemplateBody = rolesHandlersMainAnsibleTmpl
+	r.Delims = AnsibleDelims
 
 	return r.Input, nil
 }
 
 const rolesHandlersMainAnsibleTmpl = `---
-# handlers file for {{.Resource.LowerKind}}
+# handlers file for [[.Resource.LowerKind]]
 `
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_meta_main.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_meta_main.go
index 3be742c..db18dad 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_meta_main.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_meta_main.go
@@ -24,7 +24,7 @@ import (
 const RolesMetaMainFile = "meta" + filePathSep + "main.yml"
 
 type RolesMetaMain struct {
-	input.Input
+	StaticInput
 	Resource scaffold.Resource
 }
 
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_readme.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_readme.go
index fbb6f16..2a9a581 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_readme.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_readme.go
@@ -24,7 +24,7 @@ import (
 const RolesReadmeFile = "README.md"
 
 type RolesReadme struct {
-	input.Input
+	StaticInput
 	Resource scaffold.Resource
 }
 
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_tasks_main.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_tasks_main.go
index 2c25a67..08c9b00 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_tasks_main.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_tasks_main.go
@@ -34,10 +34,11 @@ func (r *RolesTasksMain) GetInput() (input.Input, error) {
 		r.Path = filepath.Join(RolesDir, r.Resource.LowerKind, RolesTasksMainFile)
 	}
 	r.TemplateBody = rolesTasksMainAnsibleTmpl
+	r.Delims = AnsibleDelims
 
 	return r.Input, nil
 }
 
 const rolesTasksMainAnsibleTmpl = `---
-# tasks file for {{.Resource.LowerKind}}
+# tasks file for [[.Resource.LowerKind]]
 `
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_templates.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_templates.go
index 1450be7..e81e297 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_templates.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_templates.go
@@ -24,7 +24,7 @@ import (
 const RolesTemplatesDir = "templates" + filePathSep + ".placeholder"
 
 type RolesTemplates struct {
-	input.Input
+	StaticInput
 	Resource scaffold.Resource
 }
 
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_vars_main.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_vars_main.go
index b7e53f6..eeff594 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_vars_main.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/roles_vars_main.go
@@ -34,10 +34,11 @@ func (r *RolesVarsMain) GetInput() (input.Input, error) {
 		r.Path = filepath.Join(RolesDir, r.Resource.LowerKind, RolesVarsMainFile)
 	}
 	r.TemplateBody = rolesVarsMainAnsibleTmpl
+	r.Delims = AnsibleDelims
 
 	return r.Input, nil
 }
 
 const rolesVarsMainAnsibleTmpl = `---
-# vars file for {{.Resource.LowerKind}}
+# vars file for [[.Resource.LowerKind]]
 `
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/travis.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/travis.go
index 979f7ca..43f5a14 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/travis.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/travis.go
@@ -19,7 +19,7 @@ import "github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
 const TravisFile = ".travis.yml"
 
 type Travis struct {
-	input.Input
+	StaticInput
 }
 
 // GetInput - gets the input
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/usersetup.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/usersetup.go
index c9da484..f43bdec 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/usersetup.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/usersetup.go
@@ -22,7 +22,7 @@ import (
 
 // UserSetup - userSetup script
 type UserSetup struct {
-	input.Input
+	StaticInput
 }
 
 func (u *UserSetup) GetInput() (input.Input, error) {
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/watches.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/watches.go
index 10ca417..b724658 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/watches.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible/watches.go
@@ -34,17 +34,18 @@ func (w *Watches) GetInput() (input.Input, error) {
 		w.Path = WatchesFile
 	}
 	w.TemplateBody = watchesAnsibleTmpl
+	w.Delims = AnsibleDelims
 	w.RolesDir = RolesDir
 	return w.Input, nil
 }
 
 const watchesAnsibleTmpl = `---
-- version: {{.Resource.Version}}
-  group: {{.Resource.FullGroup}}
-  kind: {{.Resource.Kind}}
-  {{- if .GeneratePlaybook }}
+- version: [[.Resource.Version]]
+  group: [[.Resource.FullGroup]]
+  kind: [[.Resource.Kind]]
+  [[- if .GeneratePlaybook ]]
   playbook: /opt/ansible/playbook.yml
-  {{- else }}
-  role: /opt/ansible/{{.RolesDir}}/{{.Resource.LowerKind}}
-  {{- end }}
+  [[- else ]]
+  role: /opt/ansible/[[.RolesDir]]/[[.Resource.LowerKind]]
+  [[- end ]]
 `
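
The Ansible templates above also stop escaping Jinja2 braces (the old `{{"{{ namespace }}"}}` dance) because each scaffold now sets Delims to AnsibleDelims; judging by the `[[ ]]` actions in the templates, that pair is `[[` and `]]`. A rough standard-library sketch of the idea, with illustrative names and content:

package main

import (
	"os"
	"text/template"
)

func main() {
	// With custom delimiters, only [[ ]] is interpreted by Go's text/template;
	// Jinja2's {{ }} passes through to the rendered Ansible file untouched.
	const playbook = `- import_role:
    name: "[[.LowerKind]]"
  vars:
    ansible_python_interpreter: '{{ ansible_playbook_python }}'
`
	t := template.Must(template.New("playbook").Delims("[[", "]]").Parse(playbook))
	// Renders the role import for a hypothetical "memcached" kind.
	if err := t.Execute(os.Stdout, struct{ LowerKind string }{"memcached"}); err != nil {
		panic(err)
	}
}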
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/go_test_script.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/boilerplate_go_txt.go
similarity index 55%
rename from vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/go_test_script.go
rename to vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/boilerplate_go_txt.go
index 8abb385..b4b6a78 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/go_test_script.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/boilerplate_go_txt.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Operator-SDK Authors
+// Copyright 2019 The Operator-SDK Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,27 +15,38 @@
 package scaffold
 
 import (
+	"io/ioutil"
 	"path/filepath"
 
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
+
+	"github.com/spf13/afero"
 )
 
-const GoTestScriptFile = "go-test.sh"
+const (
+	BoilerplateFile = "boilerplate.go.txt"
+	HackDir         = "hack"
+)
 
-type GoTestScript struct {
+type Boilerplate struct {
 	input.Input
+
+	// BoilerplateSrcPath is the path to a file containing boilerplate text for
+	// generated Go files.
+	BoilerplateSrcPath string
 }
 
-func (s *GoTestScript) GetInput() (input.Input, error) {
+func (s *Boilerplate) GetInput() (input.Input, error) {
 	if s.Path == "" {
-		s.Path = filepath.Join(BuildTestDir, GoTestScriptFile)
+		s.Path = filepath.Join(HackDir, BoilerplateFile)
 	}
-	s.IsExec = true
-	s.TemplateBody = goTestScriptTmpl
 	return s.Input, nil
 }
 
-const goTestScriptTmpl = `#!/bin/sh
+var _ CustomRenderer = &Boilerplate{}
 
-{{.ProjectName}}-test -test.parallel=1 -test.failfast -root=/ -kubeconfig=incluster -namespacedMan=namespaced.yaml -test.v
-`
+func (s *Boilerplate) SetFS(_ afero.Fs) {}
+
+func (s *Boilerplate) CustomRender() ([]byte, error) {
+	return ioutil.ReadFile(s.BoilerplateSrcPath)
+}
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/build_dockerfile.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/build_dockerfile.go
index b70dac7..df30a76 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/build_dockerfile.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/build_dockerfile.go
@@ -34,7 +34,7 @@ func (s *Dockerfile) GetInput() (input.Input, error) {
 	return s.Input, nil
 }
 
-const dockerfileTmpl = `FROM registry.access.redhat.com/ubi7-dev-preview/ubi-minimal:7.6
+const dockerfileTmpl = `FROM registry.access.redhat.com/ubi7/ubi-minimal:latest
 
 ENV OPERATOR=/usr/local/bin/{{.ProjectName}} \
     USER_UID=1001 \
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/cmd.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/cmd.go
index 23c72e5..f11df5f 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/cmd.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/cmd.go
@@ -53,6 +53,7 @@ import (
 	"github.com/operator-framework/operator-sdk/pkg/leader"
 	"github.com/operator-framework/operator-sdk/pkg/log/zap"
 	"github.com/operator-framework/operator-sdk/pkg/metrics"
+	"github.com/operator-framework/operator-sdk/pkg/restmapper"
 	sdkVersion "github.com/operator-framework/operator-sdk/version"
 	"github.com/spf13/pflag"
 	"sigs.k8s.io/controller-runtime/pkg/client/config"
@@ -122,6 +123,7 @@ func main() {
 	// Create a new Cmd to provide shared dependencies and start components
 	mgr, err := manager.New(cfg, manager.Options{
 		Namespace:          namespace,
+		MapperProvider:     restmapper.NewDynamicRESTMapper,
 		MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
 	})	
 	if err != nil {
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/constants.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/constants.go
index 77ea258..15ca75f 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/constants.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/constants.go
@@ -35,5 +35,4 @@ const (
 	DeployDir      = "deploy"
 	OLMCatalogDir  = DeployDir + filePathSep + "olm-catalog"
 	CRDsDir        = DeployDir + filePathSep + "crds"
-	VersionDir     = "version"
 )
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/controller_kind.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/controller_kind.go
index 38529fa..0f8f416 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/controller_kind.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/controller_kind.go
@@ -15,7 +15,11 @@
 package scaffold
 
 import (
+	"fmt"
+	"path"
 	"path/filepath"
+	"strings"
+	"unicode"
 
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
 )
@@ -26,6 +30,18 @@ type ControllerKind struct {
 
 	// Resource defines the inputs for the controller's primary resource
 	Resource *Resource
+	// CustomImport holds the import path for a built-in or custom Kubernetes
+	// API that this controller reconciles, if specified by the scaffold invoker.
+	CustomImport string
+
+	// The following fields will be overwritten by GetInput().
+	//
+	// ImportMap maps all imports destined for the scaffold to their import
+	// identifier, if any.
+	ImportMap map[string]string
+	// GoImportIdent is the import identifier for the API reconciled by this
+	// controller.
+	GoImportIdent string
 }
 
 func (s *ControllerKind) GetInput() (input.Input, error) {
@@ -36,29 +52,99 @@ func (s *ControllerKind) GetInput() (input.Input, error) {
 	// Error if this file exists.
 	s.IfExistsAction = input.Error
 	s.TemplateBody = controllerKindTemplate
+
+	// Set imports.
+	if err := s.setImports(); err != nil {
+		return input.Input{}, err
+	}
 	return s.Input, nil
 }
 
+func (s *ControllerKind) setImports() (err error) {
+	s.ImportMap = controllerKindImports
+	importPath := ""
+	if s.CustomImport != "" {
+		importPath, s.GoImportIdent, err = getCustomAPIImportPathAndIdent(s.CustomImport)
+		if err != nil {
+			return err
+		}
+	} else {
+		importPath = path.Join(s.Repo, "pkg", "apis", s.Resource.GoImportGroup, s.Resource.Version)
+		s.GoImportIdent = s.Resource.GoImportGroup + s.Resource.Version
+	}
+	// Import identifiers must be unique within a file.
+	for p, id := range s.ImportMap {
+		if s.GoImportIdent == id && importPath != p {
+			// Append "api" to the conflicting import identifier.
+			s.GoImportIdent = s.GoImportIdent + "api"
+			break
+		}
+	}
+	s.ImportMap[importPath] = s.GoImportIdent
+	return nil
+}
+
+func getCustomAPIImportPathAndIdent(m string) (p string, id string, err error) {
+	sm := strings.Split(m, "=")
+	for i, e := range sm {
+		if i == 0 {
+			p = strings.TrimSpace(e)
+		} else if i == 1 {
+			id = strings.TrimSpace(e)
+		}
+	}
+	if p == "" {
+		return "", "", fmt.Errorf(`custom import "%s" path is empty`, m)
+	}
+	if id == "" {
+		if len(sm) == 2 {
+			return "", "", fmt.Errorf(`custom import "%s" identifier is empty, remove "=" from passed string`, m)
+		}
+		sp := strings.Split(p, "/")
+		if len(sp) > 1 {
+			id = sp[len(sp)-2] + sp[len(sp)-1]
+		} else {
+			id = sp[0]
+		}
+		id = strings.ToLower(id)
+	}
+	idb := &strings.Builder{}
+	// By definition, all package identifiers must be comprised of "_", unicode
+	// digits, and/or letters.
+	for _, r := range id {
+		if unicode.IsDigit(r) || unicode.IsLetter(r) || r == '_' {
+			if _, err := idb.WriteRune(r); err != nil {
+				return "", "", err
+			}
+		}
+	}
+	return p, idb.String(), nil
+}
+
+var controllerKindImports = map[string]string{
+	"k8s.io/api/core/v1":                                           "corev1",
+	"k8s.io/apimachinery/pkg/api/errors":                           "",
+	"k8s.io/apimachinery/pkg/apis/meta/v1":                         "metav1",
+	"k8s.io/apimachinery/pkg/runtime":                              "",
+	"k8s.io/apimachinery/pkg/types":                                "",
+	"sigs.k8s.io/controller-runtime/pkg/client":                    "",
+	"sigs.k8s.io/controller-runtime/pkg/controller":                "",
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil": "",
+	"sigs.k8s.io/controller-runtime/pkg/handler":                   "",
+	"sigs.k8s.io/controller-runtime/pkg/manager":                   "",
+	"sigs.k8s.io/controller-runtime/pkg/reconcile":                 "",
+	"sigs.k8s.io/controller-runtime/pkg/runtime/log":               "logf",
+	"sigs.k8s.io/controller-runtime/pkg/source":                    "",
+}
+
 const controllerKindTemplate = `package {{ .Resource.LowerKind }}
 
 import (
 	"context"
 
-	{{ .Resource.GoImportGroup}}{{ .Resource.Version }} "{{ .Repo }}/pkg/apis/{{ .Resource.GoImportGroup}}/{{ .Resource.Version }}"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/types"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/controller"
-	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
-	"sigs.k8s.io/controller-runtime/pkg/handler"
-	"sigs.k8s.io/controller-runtime/pkg/manager"
-	"sigs.k8s.io/controller-runtime/pkg/reconcile"
-	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
-	"sigs.k8s.io/controller-runtime/pkg/source"
+	{{range $p, $i := .ImportMap -}}
+	{{$i}} "{{$p}}"
+	{{end}}
 )
 
 var log = logf.Log.WithName("controller_{{ .Resource.LowerKind }}")
@@ -88,7 +174,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
 	}
 
 	// Watch for changes to primary resource {{ .Resource.Kind }}
-	err = c.Watch(&source.Kind{Type: &{{ .Resource.GoImportGroup}}{{ .Resource.Version }}.{{ .Resource.Kind }}{}}, &handler.EnqueueRequestForObject{})
+	err = c.Watch(&source.Kind{Type: &{{ .GoImportIdent }}.{{ .Resource.Kind }}{}}, &handler.EnqueueRequestForObject{})
 	if err != nil {
 		return err
 	}
@@ -97,7 +183,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
 	// Watch for changes to secondary resource Pods and requeue the owner {{ .Resource.Kind }}
 	err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{
 		IsController: true,
-		OwnerType:    &{{ .Resource.GoImportGroup}}{{ .Resource.Version }}.{{ .Resource.Kind }}{},
+		OwnerType:    &{{ .GoImportIdent }}.{{ .Resource.Kind }}{},
 	})
 	if err != nil {
 		return err
@@ -106,6 +192,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
 	return nil
 }
 
+// blank assignment to verify that Reconcile{{ .Resource.Kind }} implements reconcile.Reconciler
 var _ reconcile.Reconciler = &Reconcile{{ .Resource.Kind }}{}
 
 // Reconcile{{ .Resource.Kind }} reconciles a {{ .Resource.Kind }} object
@@ -128,7 +215,7 @@ func (r *Reconcile{{ .Resource.Kind }}) Reconcile(request reconcile.Request) (re
 	reqLogger.Info("Reconciling {{ .Resource.Kind }}")
 
 	// Fetch the {{ .Resource.Kind }} instance
-	instance := &{{ .Resource.GoImportGroup}}{{ .Resource.Version }}.{{ .Resource.Kind }}{}
+	instance := &{{ .GoImportIdent }}.{{ .Resource.Kind }}{}
 	err := r.client.Get(context.TODO(), request.NamespacedName, instance)
 	if err != nil {
 		if errors.IsNotFound(err) {
@@ -171,7 +258,7 @@ func (r *Reconcile{{ .Resource.Kind }}) Reconcile(request reconcile.Request) (re
 }
 
 // newPodForCR returns a busybox pod with the same name/namespace as the cr
-func newPodForCR(cr *{{ .Resource.GoImportGroup}}{{ .Resource.Version }}.{{ .Resource.Kind }}) *corev1.Pod {
+func newPodForCR(cr *{{ .GoImportIdent }}.{{ .Resource.Kind }}) *corev1.Pod {
 	labels := map[string]string{
 		"app": cr.Name,
 	}
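
The custom-import support added to the controller scaffold above accepts either `path` or `path=identifier`; when no identifier is given, one is derived from the last two path segments, lower-cased and stripped of anything other than letters, digits, and underscores. A small standalone sketch of that derivation (it mirrors getCustomAPIImportPathAndIdent rather than calling it):

package main

import (
	"fmt"
	"strings"
	"unicode"
)

// deriveIdent reproduces the default-identifier logic shown above for a
// custom import path that carries no explicit "=identifier" suffix.
func deriveIdent(importPath string) string {
	sp := strings.Split(importPath, "/")
	id := sp[len(sp)-1]
	if len(sp) > 1 {
		id = sp[len(sp)-2] + sp[len(sp)-1]
	}
	id = strings.ToLower(id)
	b := &strings.Builder{}
	for _, r := range id {
		if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
			b.WriteRune(r)
		}
	}
	return b.String()
}

func main() {
	fmt.Println(deriveIdent("k8s.io/api/apps/v1"))                // appsv1
	fmt.Println(deriveIdent("github.com/example/foo-api/v1beta1")) // fooapiv1beta1
}

A path such as k8s.io/api/apps/v1 is therefore aliased as appsv1; if that identifier clashes with one already in the import map, the scaffold appends "api" to it, as the setImports loop above shows.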
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/cr.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/cr.go
index 34418a3..5711251 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/cr.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/cr.go
@@ -38,7 +38,7 @@ type CR struct {
 func (s *CR) GetInput() (input.Input, error) {
 	if s.Path == "" {
 		fileName := fmt.Sprintf("%s_%s_%s_cr.yaml",
-			strings.ToLower(s.Resource.Group),
+			s.Resource.GoImportGroup,
 			strings.ToLower(s.Resource.Version),
 			s.Resource.LowerKind)
 		s.Path = filepath.Join(CRDsDir, fileName)
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/crd.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/crd.go
index a0b0f25..b5da291 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/crd.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/crd.go
@@ -16,7 +16,6 @@ package scaffold
 
 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
@@ -41,12 +40,26 @@ type CRD struct {
 
 	// IsOperatorGo is true when the operator is written in Go.
 	IsOperatorGo bool
+
+	once sync.Once
+	fs   afero.Fs // For testing, ex. afero.NewMemMapFs()
+}
+
+func (s *CRD) initFS(fs afero.Fs) {
+	s.once.Do(func() {
+		s.fs = fs
+	})
+}
+
+func (s *CRD) getFS() afero.Fs {
+	s.initFS(afero.NewOsFs())
+	return s.fs
 }
 
 func (s *CRD) GetInput() (input.Input, error) {
 	if s.Path == "" {
 		fileName := fmt.Sprintf("%s_%s_%s_crd.yaml",
-			strings.ToLower(s.Resource.Group),
+			s.Resource.GoImportGroup,
 			strings.ToLower(s.Resource.Version),
 			s.Resource.LowerKind)
 		s.Path = filepath.Join(CRDsDir, fileName)
@@ -76,77 +89,82 @@ func initCache() {
 	})
 }
 
-func (s *CRD) SetFS(_ afero.Fs) {}
+var _ CustomRenderer = &CRD{}
+
+func (s *CRD) SetFS(fs afero.Fs) { s.initFS(fs) }
 
 func (s *CRD) CustomRender() ([]byte, error) {
-	i, _ := s.GetInput()
-	// controller-tools generates crd file names with no _crd.yaml suffix:
-	// <group>_<version>_<kind>.yaml.
-	path := strings.Replace(filepath.Base(i.Path), "_crd.yaml", ".yaml", 1)
-
-	// controller-tools' generators read and make crds for all apis in pkg/apis,
-	// so generate crds in a cached, in-memory fs to extract the data we need.
-	if s.IsOperatorGo && !cache.fileExists(path) {
-		g := &crdgenerator.Generator{
-			RootPath:          s.AbsProjectPath,
-			Domain:            strings.SplitN(s.Resource.FullGroup, ".", 2)[1],
-			OutputDir:         ".",
-			SkipMapValidation: false,
-			OutFs:             cache,
-		}
-		if err := g.ValidateAndInitFields(); err != nil {
-			return nil, err
-		}
-		if err := g.Do(); err != nil {
-			return nil, err
-		}
+	i, err := s.GetInput()
+	if err != nil {
+		return nil, err
 	}
 
-	dstCRD := newCRDForResource(s.Resource)
-	// Get our generated crd's from the in-memory fs. If it doesn't exist in the
-	// fs, the corresponding API does not exist yet, so scaffold a fresh crd
-	// without a validation spec.
-	// If the crd exists in the fs, and a local crd exists, append the validation
-	// spec. If a local crd does not exist, use the generated crd.
-	if _, err := cache.Stat(path); err != nil && !os.IsNotExist(err) {
-		return nil, err
-	} else if err == nil {
+	crd := &apiextv1beta1.CustomResourceDefinition{}
+	if s.IsOperatorGo {
+		// controller-tools generates crd file names with no _crd.yaml suffix:
+		// <group>_<version>_<kind>.yaml.
+		path := strings.Replace(filepath.Base(i.Path), "_crd.yaml", ".yaml", 1)
+
+		// controller-tools' generators read and make crds for all apis in pkg/apis,
+		// so generate crds in a cached, in-memory fs to extract the data we need.
+		if !cache.fileExists(path) {
+			g := &crdgenerator.Generator{
+				RootPath:          s.AbsProjectPath,
+				Domain:            strings.SplitN(s.Resource.FullGroup, ".", 2)[1],
+				Repo:              s.Repo,
+				OutputDir:         ".",
+				SkipMapValidation: false,
+				OutFs:             cache,
+			}
+			if err := g.ValidateAndInitFields(); err != nil {
+				return nil, err
+			}
+			if err := g.Do(); err != nil {
+				return nil, err
+			}
+		}
+
 		b, err := afero.ReadFile(cache, path)
 		if err != nil {
+			if os.IsNotExist(err) {
+				return nil, fmt.Errorf("no API exists for Group %s Version %s Kind %s",
+					s.Resource.GoImportGroup, s.Resource.Version, s.Resource.Kind)
+			}
 			return nil, err
 		}
-		dstCRD = &apiextv1beta1.CustomResourceDefinition{}
-		if err = yaml.Unmarshal(b, dstCRD); err != nil {
+		if err = yaml.Unmarshal(b, crd); err != nil {
 			return nil, err
 		}
-		val := dstCRD.Spec.Validation.DeepCopy()
-
-		// If the crd exists at i.Path, append the validation spec to its crd spec.
-		if _, err := os.Stat(i.Path); err == nil {
-			cb, err := ioutil.ReadFile(i.Path)
+		// controller-tools does not set ListKind or Singular names.
+		setCRDNamesForResource(crd, s.Resource)
+		// Remove controller-tools default label.
+		delete(crd.Labels, "controller-tools.k8s.io")
+	} else {
+		// There are currently no commands to update CRD manifests for non-Go
+		// operators, so if a CRD manifest already exists for this gvk, this
+		// scaffold is a no-op.
+		path := filepath.Join(s.AbsProjectPath, i.Path)
+		if _, err = s.getFS().Stat(path); err == nil {
+			b, err := afero.ReadFile(s.getFS(), path)
 			if err != nil {
 				return nil, err
 			}
-			if len(cb) > 0 {
-				dstCRD = &apiextv1beta1.CustomResourceDefinition{}
-				if err = yaml.Unmarshal(cb, dstCRD); err != nil {
+			if len(b) == 0 {
+				crd = newCRDForResource(s.Resource)
+			} else {
+				if err = yaml.Unmarshal(b, crd); err != nil {
 					return nil, err
 				}
-				dstCRD.Spec.Validation = val
 			}
 		}
-		// controller-tools does not set ListKind or Singular names.
-		dstCRD.Spec.Names = getCRDNamesForResource(s.Resource)
-		// Remove controller-tools default label.
-		delete(dstCRD.Labels, "controller-tools.k8s.io")
 	}
-	addCRDSubresource(dstCRD)
-	addCRDVersions(dstCRD)
-	return k8sutil.GetObjectBytes(dstCRD)
+
+	setCRDVersions(crd)
+	return k8sutil.GetObjectBytes(crd)
 }
 
 func newCRDForResource(r *Resource) *apiextv1beta1.CustomResourceDefinition {
-	return &apiextv1beta1.CustomResourceDefinition{
+	crd := &apiextv1beta1.CustomResourceDefinition{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: "apiextensions.k8s.io/v1beta1",
 			Kind:       "CustomResourceDefinition",
@@ -156,7 +174,6 @@ func newCRDForResource(r *Resource) *apiextv1beta1.CustomResourceDefinition {
 		},
 		Spec: apiextv1beta1.CustomResourceDefinitionSpec{
 			Group:   r.FullGroup,
-			Names:   getCRDNamesForResource(r),
 			Scope:   apiextv1beta1.NamespaceScoped,
 			Version: r.Version,
 			Subresources: &apiextv1beta1.CustomResourceSubresources{
@@ -164,27 +181,26 @@ func newCRDForResource(r *Resource) *apiextv1beta1.CustomResourceDefinition {
 			},
 		},
 	}
+	setCRDNamesForResource(crd, r)
+	return crd
 }
 
-func getCRDNamesForResource(r *Resource) apiextv1beta1.CustomResourceDefinitionNames {
-	return apiextv1beta1.CustomResourceDefinitionNames{
-		Kind:     r.Kind,
-		ListKind: r.Kind + "List",
-		Plural:   r.Resource,
-		Singular: r.LowerKind,
+func setCRDNamesForResource(crd *apiextv1beta1.CustomResourceDefinition, r *Resource) {
+	if crd.Spec.Names.Kind == "" {
+		crd.Spec.Names.Kind = r.Kind
 	}
-}
-
-func addCRDSubresource(crd *apiextv1beta1.CustomResourceDefinition) {
-	if crd.Spec.Subresources == nil {
-		crd.Spec.Subresources = &apiextv1beta1.CustomResourceSubresources{}
+	if crd.Spec.Names.ListKind == "" {
+		crd.Spec.Names.ListKind = r.Kind + "List"
+	}
+	if crd.Spec.Names.Plural == "" {
+		crd.Spec.Names.Plural = r.Resource
 	}
-	if crd.Spec.Subresources.Status == nil {
-		crd.Spec.Subresources.Status = &apiextv1beta1.CustomResourceSubresourceStatus{}
+	if crd.Spec.Names.Singular == "" {
+		crd.Spec.Names.Singular = r.LowerKind
 	}
 }
 
-func addCRDVersions(crd *apiextv1beta1.CustomResourceDefinition) {
+func setCRDVersions(crd *apiextv1beta1.CustomResourceDefinition) {
 	// crd.Version is deprecated, use crd.Versions instead.
 	var crdVersions []apiextv1beta1.CustomResourceDefinitionVersion
 	if crd.Spec.Version != "" {
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/go_mod.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/go_mod.go
new file mode 100644
index 0000000..3f4fb6e
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/go_mod.go
@@ -0,0 +1,106 @@
+// Copyright 2019 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scaffold
+
+import (
+	"fmt"
+
+	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
+	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/internal/deps"
+)
+
+const GoModFile = "go.mod"
+
+type GoMod struct {
+	input.Input
+}
+
+func (s *GoMod) GetInput() (input.Input, error) {
+	if s.Path == "" {
+		s.Path = GoModFile
+	}
+	s.TemplateBody = goModTmpl
+	return s.Input, nil
+}
+
+const goModTmpl = `module {{ .Repo }}
+
+require (
+	contrib.go.opencensus.io/exporter/ocagent v0.4.9 // indirect
+	github.com/Azure/go-autorest v11.5.2+incompatible // indirect
+	github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 // indirect
+	github.com/coreos/prometheus-operator v0.26.0 // indirect
+	github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
+	github.com/emicklei/go-restful v2.8.1+incompatible // indirect
+	github.com/go-logr/logr v0.1.0 // indirect
+	github.com/go-logr/zapr v0.1.0 // indirect
+	github.com/go-openapi/spec v0.18.0 // indirect
+	github.com/golang/groupcache v0.0.0-20180924190550-6f2cf27854a4 // indirect
+	github.com/golang/mock v1.2.0 // indirect
+	github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c // indirect
+	github.com/google/uuid v1.0.0 // indirect
+	github.com/googleapis/gnostic v0.2.0 // indirect
+	github.com/gophercloud/gophercloud v0.0.0-20190318015731-ff9851476e98 // indirect
+	github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
+	github.com/grpc-ecosystem/grpc-gateway v1.8.5 // indirect
+	github.com/imdario/mergo v0.3.6 // indirect
+	github.com/operator-framework/operator-sdk master
+	github.com/pborman/uuid v0.0.0-20180906182336-adf5a7427709 // indirect
+	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
+	github.com/spf13/pflag v1.0.3
+	go.opencensus.io v0.19.2 // indirect
+	go.uber.org/atomic v1.3.2 // indirect
+	go.uber.org/multierr v1.1.0 // indirect
+	go.uber.org/zap v1.9.1 // indirect
+	golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 // indirect
+	k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628
+	k8s.io/client-go v2.0.0-alpha.0.0.20181126152608-d082d5923d3c+incompatible
+	k8s.io/code-generator v0.0.0-20180823001027-3dcf91f64f63
+	k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6
+	k8s.io/kube-openapi v0.0.0-20180711000925-0cf8f7e6ed1d
+	sigs.k8s.io/controller-runtime v0.1.10
+	sigs.k8s.io/controller-tools v0.1.10
+	sigs.k8s.io/testing_frameworks v0.1.0 // indirect
+)
+
+// Pinned to kubernetes-1.13.1
+replace (
+	k8s.io/api => k8s.io/api v0.0.0-20181213150558-05914d821849
+	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20181213153335-0fe22c71c476
+	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20181127025237-2b1284ed4c93
+	k8s.io/client-go => k8s.io/client-go v0.0.0-20181213151034-8d9ed539ba31
+)
+
+replace (
+	github.com/coreos/prometheus-operator => github.com/coreos/prometheus-operator v0.29.0
+	k8s.io/code-generator => k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b
+	k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20180711000925-0cf8f7e6ed1d
+	sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.1.10
+	sigs.k8s.io/controller-tools => sigs.k8s.io/controller-tools v0.1.11-0.20190411181648-9d55346c2bde
+	github.com/operator-framework/operator-sdk => github.com/operator-framework/operator-sdk v0.8.0
+)
+`
+
+func PrintGoMod(asFile bool) error {
+	b, err := deps.ExecGoModTmpl(goModTmpl)
+	if err != nil {
+		return err
+	}
+	if asFile {
+		fmt.Print(string(b))
+		return nil
+	}
+	return deps.PrintGoMod(b)
+}
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/gopkgtoml.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/gopkgtoml.go
index 996c2a4..915e1ca 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/gopkgtoml.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/gopkgtoml.go
@@ -15,16 +15,10 @@
 package scaffold
 
 import (
-	"bytes"
-	"encoding/json"
-	"errors"
 	"fmt"
-	"strings"
-	"text/tabwriter"
 
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
-
-	"github.com/BurntSushi/toml"
+	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/internal/deps"
 )
 
 const GopkgTomlFile = "Gopkg.toml"
@@ -43,7 +37,6 @@ func (s *GopkgToml) GetInput() (input.Input, error) {
 
 const gopkgTomlTmpl = `# Force dep to vendor the code generators, which aren't imported just used at dev time.
 required = [
-  "k8s.io/code-generator/cmd/defaulter-gen",
   "k8s.io/code-generator/cmd/deepcopy-gen",
   "k8s.io/code-generator/cmd/conversion-gen",
   "k8s.io/code-generator/cmd/client-gen",
@@ -69,7 +62,7 @@ required = [
 
 [[override]]
   name = "sigs.k8s.io/controller-tools"
-  version = "=v0.1.8"
+  revision = "9d55346c2bde73fb3326ac22eac2e5210a730207"
 
 [[override]]
   name = "k8s.io/api"
@@ -103,7 +96,7 @@ required = [
   name = "github.com/operator-framework/operator-sdk"
   # The version rule is used for a specific release and the master branch for in between releases.
   # branch = "master" #osdk_branch_annotation
-  version = "=v0.7.0" #osdk_version_annotation
+  version = "=v0.8.0" #osdk_version_annotation
 
 [prune]
   go-tests = true
@@ -118,79 +111,10 @@ required = [
     non-go = false
 `
 
-func PrintDepsAsFile() {
-	fmt.Println(gopkgTomlTmpl)
-}
-
-func PrintDeps() error {
-	gopkgData := make(map[string]interface{})
-	_, err := toml.Decode(gopkgTomlTmpl, &gopkgData)
-	if err != nil {
-		return err
-	}
-
-	buf := &bytes.Buffer{}
-	w := tabwriter.NewWriter(buf, 16, 8, 0, '\t', 0)
-	_, err = w.Write([]byte("NAME\tVERSION\tBRANCH\tREVISION\t\n"))
-	if err != nil {
-		return err
-	}
-
-	constraintList, ok := gopkgData["constraint"]
-	if !ok {
-		return errors.New("constraints not found")
-	}
-	for _, dep := range constraintList.([]map[string]interface{}) {
-		err = writeDepRow(w, dep)
-		if err != nil {
-			return err
-		}
-	}
-	overrideList, ok := gopkgData["override"]
-	if !ok {
-		return errors.New("overrides not found")
-	}
-	for _, dep := range overrideList.([]map[string]interface{}) {
-		err = writeDepRow(w, dep)
-		if err != nil {
-			return err
-		}
-	}
-	if err := w.Flush(); err != nil {
+func PrintDepGopkgTOML(asFile bool) error {
+	if asFile {
+		_, err := fmt.Println(gopkgTomlTmpl)
 		return err
 	}
-
-	requiredList, ok := gopkgData["required"]
-	if !ok {
-		return errors.New("required list not found")
-	}
-	pl, err := json.MarshalIndent(requiredList, "", " ")
-	if err != nil {
-		return err
-	}
-	_, err = buf.Write([]byte(fmt.Sprintf("\nrequired = %v", string(pl))))
-	if err != nil {
-		return err
-	}
-
-	fmt.Println(buf.String())
-
-	return nil
-}
-
-func writeDepRow(w *tabwriter.Writer, dep map[string]interface{}) error {
-	name := dep["name"].(string)
-	ver, col := "", 0
-	if v, ok := dep["version"]; ok {
-		ver, col = v.(string), 1
-	} else if v, ok = dep["branch"]; ok {
-		ver, col = v.(string), 2
-	} else if v, ok = dep["revision"]; ok {
-		ver, col = v.(string), 3
-	} else {
-		return fmt.Errorf("no version, revision, or branch found for %s", name)
-	}
-
-	_, err := w.Write([]byte(name + strings.Repeat("\t", col) + ver + "\t\n"))
-	return err
+	return deps.PrintDepGopkgTOML(gopkgTomlTmpl)
 }
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/chart.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/chart.go
index 89fcc51..7bbb83a 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/chart.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/chart.go
@@ -15,6 +15,7 @@
 package helm
 
 import (
+	"bytes"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -116,7 +117,7 @@ func CreateChart(projectDir string, opts CreateChartOptions) (*scaffold.Resource
 	chartsDir := filepath.Join(projectDir, HelmChartsDir)
 	err := os.MkdirAll(chartsDir, 0755)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, fmt.Errorf("failed to create helm-charts directory: %s", err)
 	}
 
 	var (
@@ -128,13 +129,29 @@ func CreateChart(projectDir string, opts CreateChartOptions) (*scaffold.Resource
 	// from Helm's default template. Otherwise, fetch it.
 	if len(opts.Chart) == 0 {
 		r, c, err = scaffoldChart(chartsDir, opts.ResourceAPIVersion, opts.ResourceKind)
+		if err != nil {
+			return nil, nil, fmt.Errorf("failed to scaffold default chart: %s", err)
+		}
 	} else {
 		r, c, err = fetchChart(chartsDir, opts)
+		if err != nil {
+			return nil, nil, fmt.Errorf("failed to fetch chart: %s", err)
+		}
+	}
+
+	relChartPath := filepath.Join(HelmChartsDir, c.GetMetadata().GetName())
+	absChartPath := filepath.Join(projectDir, relChartPath)
+	if err := fetchChartDependencies(absChartPath); err != nil {
+		return nil, nil, fmt.Errorf("failed to fetch chart dependencies: %s", err)
 	}
+
+	// Reload chart in case dependencies changed
+	c, err = chartutil.Load(absChartPath)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, fmt.Errorf("failed to load chart: %s", err)
 	}
-	log.Infof("Created %s/%s/", HelmChartsDir, c.GetMetadata().GetName())
+
+	log.Infof("Created %s", relChartPath)
 	return r, c, nil
 }
 
@@ -159,7 +176,7 @@ func scaffoldChart(destDir, apiVersion, kind string) (*scaffold.Resource, *chart
 		return nil, nil, err
 	}
 
-	chart, err := chartutil.LoadDir(chartPath)
+	chart, err := chartutil.Load(chartPath)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -198,17 +215,7 @@ func fetchChart(destDir string, opts CreateChartOptions) (*scaffold.Resource, *c
 }
 
 func createChartFromDisk(destDir, source string, isDir bool) (*chart.Chart, error) {
-	var (
-		chart *chart.Chart
-		err   error
-	)
-
-	// If source is a file or directory, attempt to load it
-	if isDir {
-		chart, err = chartutil.LoadDir(source)
-	} else {
-		chart, err = chartutil.LoadFile(source)
-	}
+	chart, err := chartutil.Load(source)
 	if err != nil {
 		return nil, err
 	}
@@ -265,3 +272,24 @@ func createChartFromRemote(destDir string, opts CreateChartOptions) (*chart.Char
 
 	return createChartFromDisk(destDir, chartArchive, false)
 }
+
+func fetchChartDependencies(chartPath string) error {
+	helmHome, ok := os.LookupEnv(environment.HomeEnvVar)
+	if !ok {
+		helmHome = environment.DefaultHelmHome
+	}
+	getters := getter.All(environment.EnvSettings{})
+
+	out := &bytes.Buffer{}
+	man := &downloader.Manager{
+		Out:       out,
+		ChartPath: chartPath,
+		HelmHome:  helmpath.Home(helmHome),
+		Getters:   getters,
+	}
+	if err := man.Build(); err != nil {
+		fmt.Println(out.String())
+		return err
+	}
+	return nil
+}
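
For orientation, here is a hedged sketch of how the reworked CreateChart flow might be driven. The project path and GVK are made up, and the helm scaffold package is internal to the SDK, so this illustrates the call shape rather than an importable API:

package main

import (
	"log"

	helm "github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm"
)

func main() {
	opts := helm.CreateChartOptions{
		ResourceAPIVersion: "app.example.com/v1alpha1", // hypothetical group/version
		ResourceKind:       "App",
		// Chart: "stable/nginx-ingress", // set this to fetch an existing chart instead
	}
	// CreateChart now also fetches chart dependencies and reloads the chart before returning.
	_, c, err := helm.CreateChart("/path/to/app-operator", opts)
	if err != nil {
		log.Fatalf("create chart: %v", err)
	}
	log.Printf("chart %s scaffolded under helm-charts/", c.GetMetadata().GetName())
}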
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/dockerfilehybrid.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/dockerfilehybrid.go
index ee6c2b5..6331bc6 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/dockerfilehybrid.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/dockerfilehybrid.go
@@ -41,7 +41,7 @@ func (d *DockerfileHybrid) GetInput() (input.Input, error) {
 	return d.Input, nil
 }
 
-const dockerFileHybridHelmTmpl = `FROM registry.access.redhat.com/ubi7-dev-preview/ubi-minimal:7.6
+const dockerFileHybridHelmTmpl = `FROM registry.access.redhat.com/ubi7/ubi-minimal:latest
 
 ENV OPERATOR=/usr/local/bin/helm-operator \
     USER_UID=1001 \
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/go_mod.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/go_mod.go
new file mode 100644
index 0000000..38c3897
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/go_mod.go
@@ -0,0 +1,182 @@
+// Copyright 2019 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package helm
+
+import (
+	"fmt"
+
+	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
+	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/internal/deps"
+)
+
+const GoModFile = "go.mod"
+
+// GoMod - the go.mod file for a Helm hybrid operator.
+type GoMod struct {
+	input.Input
+}
+
+func (s *GoMod) GetInput() (input.Input, error) {
+	if s.Path == "" {
+		s.Path = GoModFile
+	}
+	s.TemplateBody = goModTmpl
+	return s.Input, nil
+}
+
+const goModTmpl = `module {{ .Repo }}
+
+require (
+	cloud.google.com/go v0.37.2 // indirect
+	contrib.go.opencensus.io/exporter/ocagent v0.4.9 // indirect
+	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
+	github.com/Azure/go-autorest v11.7.0+incompatible // indirect
+	github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e // indirect
+	github.com/Masterminds/goutils v1.1.0 // indirect
+	github.com/Masterminds/semver v1.4.2 // indirect
+	github.com/Masterminds/sprig v0.0.0-20190301161902-9f8fceff796f // indirect
+	github.com/PuerkitoBio/purell v1.1.1 // indirect
+	github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 // indirect
+	github.com/chai2010/gettext-go v0.0.0-20170215093142-bf70f2a70fb1 // indirect
+	github.com/coreos/bbolt v1.3.2 // indirect
+	github.com/coreos/etcd v3.3.12+incompatible // indirect
+	github.com/coreos/go-semver v0.2.0 // indirect
+	github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
+	github.com/cyphar/filepath-securejoin v0.2.2 // indirect
+	github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
+	github.com/docker/distribution v0.0.0-20170726174610-edc3ab29cdff // indirect
+	github.com/docker/docker v0.0.0-20180612054059-a9fbbdc8dd87 // indirect
+	github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect
+	github.com/elazarl/goproxy v0.0.0-20181111060418-2ce16c963a8a // indirect
+	github.com/emicklei/go-restful v2.9.3+incompatible // indirect
+	github.com/evanphx/json-patch v4.1.0+incompatible // indirect
+	github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
+	github.com/fatih/camelcase v1.0.0 // indirect
+	github.com/go-logr/logr v0.1.0 // indirect
+	github.com/go-logr/zapr v0.1.1 // indirect
+	github.com/go-openapi/jsonpointer v0.18.0 // indirect
+	github.com/go-openapi/jsonreference v0.18.0 // indirect
+	github.com/go-openapi/spec v0.19.0 // indirect
+	github.com/go-openapi/swag v0.19.0 // indirect
+	github.com/gobwas/glob v0.2.3 // indirect
+	github.com/gogo/protobuf v1.2.1 // indirect
+	github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect
+	github.com/google/btree v1.0.0 // indirect
+	github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect
+	github.com/google/uuid v1.1.1 // indirect
+	github.com/googleapis/gnostic v0.2.0 // indirect
+	github.com/gophercloud/gophercloud v0.0.0-20190328150603-33e54f40ffcf // indirect
+	github.com/gorilla/websocket v1.4.0 // indirect
+	github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
+	github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc // indirect
+	github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect
+	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway v1.8.5 // indirect
+	github.com/hashicorp/golang-lru v0.5.1 // indirect
+	github.com/huandu/xstrings v1.2.0 // indirect
+	github.com/imdario/mergo v0.3.7 // indirect
+	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/jonboulle/clockwork v0.1.0 // indirect
+	github.com/json-iterator/go v1.1.6 // indirect
+	github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
+	github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe // indirect
+	github.com/markbates/inflect v1.0.4 // indirect
+	github.com/martinlindhe/base36 v0.0.0-20180729042928-5cda0030da17 // indirect
+	github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a // indirect
+	github.com/mitchellh/go-wordwrap v1.0.0 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 // indirect
+	github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
+	github.com/operator-framework/operator-sdk master
+	github.com/pborman/uuid v0.0.0-20180906182336-adf5a7427709 // indirect
+	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
+	github.com/pkg/errors v0.8.1 // indirect
+	github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect
+	github.com/prometheus/procfs v0.0.0-20190328153300-af7bedc223fb // indirect
+	github.com/russross/blackfriday v0.0.0-20151117072312-300106c228d5 // indirect
+	github.com/sergi/go-diff v1.0.0 // indirect
+	github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
+	github.com/sirupsen/logrus v1.4.0 // indirect
+	github.com/soheilhy/cmux v0.1.4 // indirect
+	github.com/spf13/afero v1.2.2 // indirect
+	github.com/spf13/cobra v0.0.3 // indirect
+	github.com/spf13/pflag v1.0.3
+	github.com/technosophos/moniker v0.0.0-20180509230615-a5dbd03a2245 // indirect
+	github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect
+	github.com/ugorji/go/codec v0.0.0-20190320090025-2dc34c0b8780 // indirect
+	github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
+	github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1 // indirect
+	go.etcd.io/bbolt v1.3.2 // indirect
+	go.uber.org/atomic v1.3.2 // indirect
+	go.uber.org/multierr v1.1.0 // indirect
+	go.uber.org/zap v1.9.1 // indirect
+	golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c // indirect
+	golang.org/x/net v0.0.0-20190327214358-63eda1eb0650 // indirect
+	golang.org/x/oauth2 v0.0.0-20190319182350-c85d3e98c914 // indirect
+	golang.org/x/sys v0.0.0-20190322080309-f49334f85ddc // indirect
+	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
+	google.golang.org/appengine v1.5.0 // indirect
+	google.golang.org/genproto v0.0.0-20190327125643-d831d65fe17d // indirect
+	gopkg.in/square/go-jose.v2 v2.3.0 // indirect
+	gotest.tools v2.2.0+incompatible // indirect
+	k8s.io/api v0.0.0-20181213150558-05914d821849 // indirect
+	k8s.io/apiextensions-apiserver v0.0.0-20181213153335-0fe22c71c476
+	k8s.io/apimachinery v0.0.0-20181127025237-2b1284ed4c93 // indirect
+	k8s.io/apiserver v0.0.0-20181213151703-3ccfe8365421 // indirect
+	k8s.io/cli-runtime v0.0.0-20181213153952-835b10687cb6 // indirect
+	k8s.io/client-go v0.0.0-20181213151034-8d9ed539ba31
+	k8s.io/code-generator v0.0.0-20190405172246-9a4d48088f6a
+	k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a
+	k8s.io/helm v2.13.1+incompatible // indirect
+	k8s.io/klog v0.2.0 // indirect
+	k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580
+	k8s.io/kubernetes v0.0.0-20190201210629-c6d339953bd4 // indirect
+	k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7 // indirect
+	sigs.k8s.io/controller-runtime v0.1.10
+	sigs.k8s.io/controller-tools v0.1.10
+	sigs.k8s.io/testing_frameworks v0.1.1 // indirect
+	sigs.k8s.io/yaml v1.1.0 // indirect
+	vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787 // indirect
+)
+
+// Pinned to kubernetes-1.13.1
+replace (
+	k8s.io/api => k8s.io/api v0.0.0-20181213150558-05914d821849
+	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20181213153335-0fe22c71c476
+	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20181127025237-2b1284ed4c93
+	k8s.io/apiserver => k8s.io/apiserver v0.0.0-20181213151703-3ccfe8365421
+	k8s.io/cli-runtime => k8s.io/cli-runtime v0.0.0-20181213153952-835b10687cb6
+	k8s.io/client-go => k8s.io/client-go v0.0.0-20181213151034-8d9ed539ba31
+	k8s.io/kubernetes => k8s.io/kubernetes v1.13.1
+)
+
+replace (
+	k8s.io/code-generator => k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b
+	k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20180711000925-0cf8f7e6ed1d
+	github.com/operator-framework/operator-sdk => github.com/operator-framework/operator-sdk v0.8.0
+)
+`
+
+func PrintGoMod(asFile bool) error {
+	b, err := deps.ExecGoModTmpl(goModTmpl)
+	if err != nil {
+		return err
+	}
+	if asFile {
+		fmt.Print(string(b))
+		return nil
+	}
+	return deps.PrintGoMod(b)
+}
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/gopkgtoml.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/gopkgtoml.go
index 02876d3..1ecea16 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/gopkgtoml.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/gopkgtoml.go
@@ -15,8 +15,11 @@
 package helm
 
 import (
+	"fmt"
+
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
+	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/internal/deps"
 )
 
 // GopkgToml - the Gopkg.toml file for a hybrid operator
@@ -36,7 +39,7 @@ const gopkgTomlTmpl = `[[constraint]]
   name = "github.com/operator-framework/operator-sdk"
   # The version rule is used for a specific release and the master branch for in between releases.
   # branch = "master" #osdk_branch_annotation
-  version = "=v0.7.0" #osdk_version_annotation
+  version = "=v0.8.0" #osdk_version_annotation
 
 [[override]]
   name = "k8s.io/api"
@@ -85,3 +88,11 @@ revision = "a9fbbdc8dd8794b20af358382ab780559bca589d"
   go-tests = true
   unused-packages = true
 `
+
+func PrintDepGopkgTOML(asFile bool) error {
+	if asFile {
+		_, err := fmt.Println(gopkgTomlTmpl)
+		return err
+	}
+	return deps.PrintDepGopkgTOML(gopkgTomlTmpl)
+}
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/operator.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/operator.go
index d3c98fa..62b2dcf 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/operator.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/operator.go
@@ -24,8 +24,6 @@ import (
 // Operator specifies the Helm operator.yaml manifest scaffold
 type Operator struct {
 	input.Input
-
-	IsClusterScoped bool
 }
 
 // GetInput gets the scaffold execution input
@@ -59,13 +57,9 @@ spec:
           imagePullPolicy: Always
           env:
             - name: WATCH_NAMESPACE
-              {{- if .IsClusterScoped }}
-              value: ""
-              {{- else }}
               valueFrom:
                 fieldRef:
                   fieldPath: metadata.namespace
-              {{- end}}
             - name: POD_NAME
               valueFrom:
                 fieldRef:
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/role.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/role.go
new file mode 100644
index 0000000..810c304
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm/role.go
@@ -0,0 +1,229 @@
+// Copyright 2019 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package helm
+
+import (
+	"fmt"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
+
+	"github.com/ghodss/yaml"
+	log "github.com/sirupsen/logrus"
+	rbacv1 "k8s.io/api/rbac/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/version"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/rest"
+	"k8s.io/helm/pkg/chartutil"
+	"k8s.io/helm/pkg/manifest"
+	"k8s.io/helm/pkg/proto/hapi/chart"
+	"k8s.io/helm/pkg/renderutil"
+	"k8s.io/helm/pkg/tiller"
+)
+
+// CreateRoleScaffold generates a role scaffold from the provided Helm chart. It
+// renders a release manifest using the chart's default values and uses the Kubernetes
+// discovery API to look up each resource in the resulting manifest.
+// The role scaffold will have IsClusterScoped=true if the chart lists cluster-scoped resources.
+func CreateRoleScaffold(cfg *rest.Config, chart *chart.Chart) (*scaffold.Role, error) {
+	log.Info("Generating RBAC rules")
+
+	roleScaffold := &scaffold.Role{
+		IsClusterScoped:  false,
+		SkipDefaultRules: true,
+		// TODO: enable metrics in helm operator
+		SkipMetricsRules: true,
+		CustomRules: []rbacv1.PolicyRule{
+			// We need this rule so tiller can read namespaces to ensure they exist
+			{
+				APIGroups: []string{""},
+				Resources: []string{"namespaces"},
+				Verbs:     []string{"get"},
+			},
+
+			// We need this rule for leader election and release state storage to work
+			{
+				APIGroups: []string{""},
+				Resources: []string{"configmaps", "secrets"},
+				Verbs:     []string{rbacv1.VerbAll},
+			},
+		},
+	}
+
+	clusterResourceRules, namespacedResourceRules, err := generateRoleRules(cfg, chart)
+	if err != nil {
+		log.Warnf("Using default RBAC rules: failed to generate RBAC rules: %s", err)
+		roleScaffold.SkipDefaultRules = false
+	}
+
+	// Use a ClusterRole if cluster scoped resources are listed in the chart
+	if len(clusterResourceRules) > 0 {
+		log.Info("Scaffolding ClusterRole and ClusterRolebinding for cluster scoped resources in the helm chart")
+		roleScaffold.IsClusterScoped = true
+		roleScaffold.CustomRules = append(roleScaffold.CustomRules, append(clusterResourceRules, namespacedResourceRules...)...)
+	}
+
+	log.Warn("The RBAC rules generated in deploy/role.yaml are based on the chart's default manifest." +
+		" Some rules may be missing for resources that are only enabled with custom values, and" +
+		" some existing rules may be overly broad. Double check the rules generated in deploy/role.yaml" +
+		" to ensure they meet the operator's permission requirements.")
+
+	return roleScaffold, nil
+}
+
+func generateRoleRules(cfg *rest.Config, chart *chart.Chart) ([]rbacv1.PolicyRule, []rbacv1.PolicyRule, error) {
+	kubeVersion, serverResources, err := getServerVersionAndResources(cfg)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to get server info: %s", err)
+	}
+
+	manifests, err := getDefaultManifests(chart, kubeVersion)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to get default manifest: %s", err)
+	}
+
+	// Use maps of sets of resources, keyed by their group. This helps us
+	// de-duplicate resources within a group as we traverse the manifests.
+	clusterGroups := map[string]map[string]struct{}{}
+	namespacedGroups := map[string]map[string]struct{}{}
+
+	for _, m := range manifests {
+		name := m.Name
+		content := strings.TrimSpace(m.Content)
+
+		// Ignore NOTES.txt, helper manifests, and empty manifests.
+		b := filepath.Base(name)
+		if b == "NOTES.txt" {
+			continue
+		}
+		if strings.HasPrefix(b, "_") {
+			continue
+		}
+		if content == "" || content == "---" {
+			continue
+		}
+
+		// Extract the gvk from the template
+		resource := unstructured.Unstructured{}
+		err := yaml.Unmarshal([]byte(content), &resource)
+		if err != nil {
+			log.Warnf("Skipping rule generation for %s. Failed to parse manifest: %s", name, err)
+			continue
+		}
+		groupVersion := resource.GetAPIVersion()
+		group := resource.GroupVersionKind().Group
+		kind := resource.GroupVersionKind().Kind
+
+		// If we don't have the group or the kind, we won't be able to
+		// create a valid role rule, log a warning and continue.
+		if groupVersion == "" {
+			log.Warnf("Skipping rule generation for %s. Failed to determine resource apiVersion.", name)
+			continue
+		}
+		if kind == "" {
+			log.Warnf("Skipping rule generation for %s. Failed to determine resource kind.", name)
+			continue
+		}
+
+		if resourceName, namespaced, ok := getResource(serverResources, groupVersion, kind); ok {
+			if !namespaced {
+				if clusterGroups[group] == nil {
+					clusterGroups[group] = map[string]struct{}{}
+				}
+				clusterGroups[group][resourceName] = struct{}{}
+			} else {
+				if namespacedGroups[group] == nil {
+					namespacedGroups[group] = map[string]struct{}{}
+				}
+				namespacedGroups[group][resourceName] = struct{}{}
+			}
+		} else {
+			log.Warnf("Skipping rule generation for %s. Failed to determine resource scope for %s.", name, resource.GroupVersionKind())
+			continue
+		}
+	}
+
+	// convert map[string]map[string]struct{} to []rbacv1.PolicyRule
+	clusterRules := buildRulesFromGroups(clusterGroups)
+	namespacedRules := buildRulesFromGroups(namespacedGroups)
+
+	return clusterRules, namespacedRules, nil
+}
+
+func getServerVersionAndResources(cfg *rest.Config) (*version.Info, []*metav1.APIResourceList, error) {
+	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to create discovery client: %s", err)
+	}
+	kubeVersion, err := dc.ServerVersion()
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to get kubernetes server version: %s", err)
+	}
+	serverResources, err := dc.ServerResources()
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to get kubernetes server resources: %s", err)
+	}
+	return kubeVersion, serverResources, nil
+}
+
+func getDefaultManifests(c *chart.Chart, kubeVersion *version.Info) ([]tiller.Manifest, error) {
+	renderOpts := renderutil.Options{
+		ReleaseOptions: chartutil.ReleaseOptions{
+			IsInstall: true,
+			IsUpgrade: false,
+		},
+		KubeVersion: fmt.Sprintf("%s.%s", kubeVersion.Major, kubeVersion.Minor),
+	}
+
+	renderedTemplates, err := renderutil.Render(c, &chart.Config{}, renderOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to render chart templates: %s", err)
+	}
+	return tiller.SortByKind(manifest.SplitManifests(renderedTemplates)), nil
+}
+
+func getResource(namespacedResourceList []*metav1.APIResourceList, groupVersion, kind string) (string, bool, bool) {
+	for _, apiResourceList := range namespacedResourceList {
+		if apiResourceList.GroupVersion == groupVersion {
+			for _, apiResource := range apiResourceList.APIResources {
+				if apiResource.Kind == kind {
+					return apiResource.Name, apiResource.Namespaced, true
+				}
+			}
+		}
+	}
+	return "", false, false
+}
+
+func buildRulesFromGroups(groups map[string]map[string]struct{}) []rbacv1.PolicyRule {
+	rules := []rbacv1.PolicyRule{}
+	for group, resourceNames := range groups {
+		resources := []string{}
+		for resource := range resourceNames {
+			resources = append(resources, resource)
+		}
+		sort.Strings(resources)
+		rules = append(rules, rbacv1.PolicyRule{
+			APIGroups: []string{group},
+			Resources: resources,
+			Verbs:     []string{rbacv1.VerbAll},
+		})
+	}
+	return rules
+}
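
A hedged sketch of the call shape for the new role generator, assuming a kubeconfig-derived *rest.Config, an already-loaded *chart.Chart, and an *input.Config in scope (all from internal SDK packages):

	cfg, err := config.GetConfig() // sigs.k8s.io/controller-runtime/pkg/client/config
	if err != nil {
		log.Fatalf("get kubeconfig: %v", err)
	}
	roleScaffold, err := helm.CreateRoleScaffold(cfg, chart)
	if err != nil {
		log.Fatalf("generate role scaffold: %v", err)
	}
	// Rendering the returned *scaffold.Role produces deploy/role.yaml (or a ClusterRole
	// if the chart's default manifest contains cluster-scoped resources).
	if err := (&scaffold.Scaffold{}).Execute(inputCfg, roleScaffold); err != nil {
		log.Fatalf("execute scaffold: %v", err)
	}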
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input/input.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input/input.go
index d4da73d..6d543cd 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input/input.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input/input.go
@@ -60,6 +60,10 @@ type Input struct {
 
 	// ProjectName is the operator's name, ex. app-operator
 	ProjectName string
+
+	// Delims is a pair of strings holding the left and right template delimiters;
+	// it defaults to {{ and }}.
+	Delims [2]string
 }
 
 // Repo allows a repo to be set on an object
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/internal/deps/print_dep.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/internal/deps/print_dep.go
new file mode 100644
index 0000000..78dc851
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/internal/deps/print_dep.go
@@ -0,0 +1,99 @@
+// Copyright 2019 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package deps
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"text/tabwriter"
+
+	"github.com/BurntSushi/toml"
+)
+
+func PrintDepGopkgTOML(tmpl string) error {
+	gopkgData := make(map[string]interface{})
+	_, err := toml.Decode(tmpl, &gopkgData)
+	if err != nil {
+		return err
+	}
+
+	buf := &bytes.Buffer{}
+	w := tabwriter.NewWriter(buf, 16, 8, 0, '\t', 0)
+	_, err = w.Write([]byte("NAME\tVERSION\tBRANCH\tREVISION\t\n"))
+	if err != nil {
+		return err
+	}
+
+	constraintList, ok := gopkgData["constraint"]
+	if !ok {
+		return errors.New("constraints not found")
+	}
+	for _, dep := range constraintList.([]map[string]interface{}) {
+		err = writeDepRow(w, dep)
+		if err != nil {
+			return err
+		}
+	}
+	overrideList, ok := gopkgData["override"]
+	if !ok {
+		return errors.New("overrides not found")
+	}
+	for _, dep := range overrideList.([]map[string]interface{}) {
+		err = writeDepRow(w, dep)
+		if err != nil {
+			return err
+		}
+	}
+	if err := w.Flush(); err != nil {
+		return err
+	}
+
+	requiredList, ok := gopkgData["required"]
+	if !ok {
+		return errors.New("required list not found")
+	}
+	pl, err := json.MarshalIndent(requiredList, "", " ")
+	if err != nil {
+		return err
+	}
+	_, err = buf.Write([]byte(fmt.Sprintf("\nrequired = %v", string(pl))))
+	if err != nil {
+		return err
+	}
+
+	fmt.Println(buf.String())
+
+	return nil
+}
+
+func writeDepRow(w *tabwriter.Writer, dep map[string]interface{}) error {
+	name := dep["name"].(string)
+	ver, col := "", 0
+	if v, ok := dep["version"]; ok {
+		ver, col = v.(string), 1
+	} else if v, ok = dep["branch"]; ok {
+		ver, col = v.(string), 2
+	} else if v, ok = dep["revision"]; ok {
+		ver, col = v.(string), 3
+	} else {
+		return fmt.Errorf("no version, revision, or branch found for %s", name)
+	}
+
+	_, err := w.Write([]byte(name + strings.Repeat("\t", col) + ver + "\t\n"))
+	return err
+}
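
A hedged sketch of feeding a minimal Gopkg.toml template through the shared printer; the package is internal, and the constraint/override below are illustrative only (the template must contain required, constraint, and override sections or the printer returns an error):

	tmpl := `required = ["k8s.io/code-generator/cmd/client-gen"]

[[constraint]]
  name = "github.com/operator-framework/operator-sdk"
  version = "=v0.8.0"

[[override]]
  name = "k8s.io/api"
  version = "kubernetes-1.13.4"
`
	// Prints a NAME/VERSION/BRANCH/REVISION table followed by the required list.
	if err := deps.PrintDepGopkgTOML(tmpl); err != nil {
		log.Fatal(err)
	}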
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/internal/deps/print_go_mod.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/internal/deps/print_go_mod.go
new file mode 100644
index 0000000..be659f7
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/internal/deps/print_go_mod.go
@@ -0,0 +1,149 @@
+// Copyright 2019 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package deps
+
+import (
+	"bytes"
+	"fmt"
+	"html/template"
+	"io"
+	"text/tabwriter"
+
+	"github.com/operator-framework/operator-sdk/internal/util/projutil"
+
+	"github.com/rogpeppe/go-internal/modfile"
+)
+
+func ExecGoModTmpl(tmpl string) ([]byte, error) {
+	projutil.MustInProjectRoot()
+	repo := projutil.CheckAndGetProjectGoPkg()
+	t, err := template.New("").Parse(tmpl)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse go mod template: (%v)", err)
+	}
+	buf := &bytes.Buffer{}
+	if err := t.Execute(buf, struct{ Repo string }{Repo: repo}); err != nil {
+		return nil, fmt.Errorf("failed to execute go mod template: (%v)", err)
+	}
+	return buf.Bytes(), nil
+}
+
+func PrintGoMod(b []byte) error {
+	modFile, err := modfile.Parse("go.mod", b, nil)
+	if err != nil {
+		return err
+	}
+
+	mods := &GoModFile{modFile}
+	buf := &bytes.Buffer{}
+	w := tabwriter.NewWriter(buf, 16, 8, 0, '\t', 0)
+	if err = mods.writeRequireSection(w); err != nil {
+		return err
+	}
+	if len(mods.Replace) > 0 {
+		if err = mods.writeReplaceSection(w); err != nil {
+			return err
+		}
+	}
+	if len(mods.Exclude) > 0 {
+		if err = mods.writeExcludeSection(w); err != nil {
+			return err
+		}
+	}
+	if err := w.Flush(); err != nil {
+		return err
+	}
+
+	fmt.Print(buf.String())
+	return nil
+}
+
+type GoModFile struct {
+	*modfile.File
+}
+
+func (g *GoModFile) writeRequireSection(w io.Writer) error {
+	_, err := w.Write([]byte("REQUIRE\t\n"))
+	if err != nil {
+		return err
+	}
+	_, err = w.Write([]byte("Name\tVersion\tIndirect\t\n"))
+	if err != nil {
+		return err
+	}
+	for _, r := range g.Require {
+		if err = writeRowRequire(w, r); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (g *GoModFile) writeReplaceSection(w io.Writer) error {
+	_, err := w.Write([]byte("\nREPLACE\t\n"))
+	if err != nil {
+		return err
+	}
+	_, err = w.Write([]byte("Old Name\tOld Version\tNew Name\tNew Version\t\n"))
+	if err != nil {
+		return err
+	}
+	for _, r := range g.Replace {
+		if err = writeRowReplace(w, r); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (g *GoModFile) writeExcludeSection(w io.Writer) error {
+	_, err := w.Write([]byte("\nEXCLUDE\t\n"))
+	if err != nil {
+		return err
+	}
+	_, err = w.Write([]byte("Name\tVersion\t\n"))
+	if err != nil {
+		return err
+	}
+	for _, e := range g.Exclude {
+		if err = writeRowExclude(w, e); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeRowRequire(w io.Writer, r *modfile.Require) error {
+	row := fmt.Sprintf("%v\t%v\t", r.Mod.Path, r.Mod.Version)
+	if r.Indirect {
+		row += "true\t\n"
+	} else {
+		row += "\n"
+	}
+	_, err := w.Write([]byte(row))
+	return err
+}
+
+func writeRowReplace(w io.Writer, r *modfile.Replace) error {
+	row := fmt.Sprintf("%v\t%v\t%v\t%v\t\n", r.Old.Path, r.Old.Version, r.New.Path, r.New.Version)
+	_, err := w.Write([]byte(row))
+	return err
+}
+
+func writeRowExclude(w io.Writer, e *modfile.Exclude) error {
+	row := fmt.Sprintf("%v\t%v\t\n", e.Mod.Path, e.Mod.Version)
+	_, err := w.Write([]byte(row))
+	return err
+}
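
PrintGoMod builds on github.com/rogpeppe/go-internal/modfile; below is a small self-contained sketch of the same parsing step it relies on (module path and versions are made up):

package main

import (
	"fmt"
	"log"

	"github.com/rogpeppe/go-internal/modfile"
)

func main() {
	// A tiny illustrative go.mod; module path and versions are invented.
	src := []byte(`module example.com/app-operator

require (
	github.com/spf13/pflag v1.0.3
	sigs.k8s.io/controller-runtime v0.1.10 // indirect
)
`)
	f, err := modfile.Parse("go.mod", src, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range f.Require {
		fmt.Printf("%s %s indirect=%v\n", r.Mod.Path, r.Mod.Version, r.Indirect)
	}
}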
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/internal/testutil/test_util.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/internal/testutil/test_util.go
new file mode 100644
index 0000000..70c9672
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/internal/testutil/test_util.go
@@ -0,0 +1,28 @@
+package test
+
+import (
+	"os"
+
+	"github.com/operator-framework/operator-sdk/internal/util/fileutil"
+
+	"github.com/spf13/afero"
+)
+
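+// WriteOSPathToFS walks root in fromFS and writes every regular file it finds
+// to the same path in toFS, so tests can run against an in-memory copy.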
+func WriteOSPathToFS(fromFS, toFS afero.Fs, root string) error {
+	if _, err := fromFS.Stat(root); err != nil {
+		return err
+	}
+	return afero.Walk(fromFS, root, func(path string, info os.FileInfo, err error) error {
+		if err != nil || info == nil {
+			return err
+		}
+		if !info.IsDir() {
+			b, err := afero.ReadFile(fromFS, path)
+			if err != nil {
+				return err
+			}
+			return afero.WriteFile(toFS, path, b, fileutil.DefaultFileMode)
+		}
+		return nil
+	})
+}
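
A hedged usage sketch, assuming the testing and afero imports are in place and that "deploy" is a fixture directory relative to the test's working directory:

	func TestScaffoldAgainstFixtures(t *testing.T) {
		memFS := afero.NewMemMapFs()
		// Copy on-disk fixtures into the in-memory FS the scaffold under test will use.
		if err := test.WriteOSPathToFS(afero.NewOsFs(), memFS, "deploy"); err != nil {
			t.Fatal(err)
		}
		// ... run scaffolds against memFS and assert on the generated files ...
	}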
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/olm-catalog/config.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/olm-catalog/config.go
index 52e0381..ee2c607 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/olm-catalog/config.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/olm-catalog/config.go
@@ -23,6 +23,7 @@ import (
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
 
 	"github.com/ghodss/yaml"
+	log "github.com/sirupsen/logrus"
 )
 
 // CSVConfig is a configuration file for CSV composition. Its fields contain
@@ -78,10 +79,16 @@ func (c *CSVConfig) setFields() error {
 
 	if len(c.CRDCRPaths) == 0 {
 		paths, err := getManifestPathsFromDir(scaffold.CRDsDir)
-		if err != nil {
+		if err != nil && !os.IsNotExist(err) {
 			return err
 		}
-		c.CRDCRPaths = paths
+		if os.IsNotExist(err) {
+			log.Infof(`Default CRDs dir "%s" does not exist. Omitting field spec.customresourcedefinitions.owned from CSV.`, scaffold.CRDsDir)
+		} else if len(paths) == 0 {
+			log.Infof(`Default CRDs dir "%s" is empty. Omitting field spec.customresourcedefinitions.owned from CSV.`, scaffold.CRDsDir)
+		} else {
+			c.CRDCRPaths = paths
+		}
 	} else {
 		// Allow user to specify a list of dirs to search. Avoid duplicate files.
 		paths, seen := make([]string, 0), make(map[string]struct{})
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/olm-catalog/csv.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/olm-catalog/csv.go
index 5d70d3d..4a3e13e 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/olm-catalog/csv.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/olm-catalog/csv.go
@@ -18,6 +18,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"errors"
+	"fmt"
 	"os"
 	"path/filepath"
 	"strings"
@@ -166,7 +167,7 @@ func getCSVFromFSIfExists(fs afero.Fs, path string) (*olmapiv1alpha1.ClusterServ
 
 	csv := &olmapiv1alpha1.ClusterServiceVersion{}
 	if err := yaml.Unmarshal(csvBytes, csv); err != nil {
-		return nil, false, err
+		return nil, false, fmt.Errorf("%s: %v", path, err)
 	}
 
 	return csv, true, nil
@@ -369,11 +370,11 @@ func (s *CSV) updateCSVFromManifestFiles(cfg *CSVConfig, csv *olmapiv1alpha1.Clu
 			yamlSpec := scanner.Bytes()
 			kind, err := getKindfromYAML(yamlSpec)
 			if err != nil {
-				return err
+				return fmt.Errorf("%s: %v", f, err)
 			}
 			found, err := store.AddToUpdater(yamlSpec, kind)
 			if err != nil {
-				return err
+				return fmt.Errorf("%s: %v", f, err)
 			}
 			if !found {
 				if _, ok := otherSpecs[kind]; !ok {
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/operator.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/operator.go
index c2bc73f..60fef0f 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/operator.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/operator.go
@@ -24,8 +24,6 @@ const OperatorYamlFile = "operator.yaml"
 
 type Operator struct {
 	input.Input
-
-	IsClusterScoped bool
 }
 
 func (s *Operator) GetInput() (input.Input, error) {
@@ -60,13 +58,9 @@ spec:
           imagePullPolicy: Always
           env:
             - name: WATCH_NAMESPACE
-              {{- if .IsClusterScoped }}
-              value: ""
-              {{- else }}
               valueFrom:
                 fieldRef:
                   fieldPath: metadata.namespace
-              {{- end}}
             - name: POD_NAME
               valueFrom:
                 fieldRef:
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/resource.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/resource.go
index 6039b39..08ac9d1 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/resource.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/resource.go
@@ -124,7 +124,7 @@ func (r *Resource) checkAndSetGroups() error {
 		return errors.New("full group cannot be empty")
 	}
 	g := strings.Split(fg[0], ".")
-	if len(g) < 2 || len(g[0]) == 0 {
+	if len(g) == 0 || len(g[0]) == 0 {
 		return errors.New("group cannot be empty")
 	}
 	r.FullGroup = fg[0]
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/role.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/role.go
index da1cbe8..ce02851 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/role.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/role.go
@@ -35,7 +35,10 @@ const RoleYamlFile = "role.yaml"
 type Role struct {
 	input.Input
 
-	IsClusterScoped bool
+	IsClusterScoped  bool
+	SkipDefaultRules bool
+	SkipMetricsRules bool
+	CustomRules      []rbacv1.PolicyRule
 }
 
 func (s *Role) GetInput() (input.Input, error) {
@@ -157,6 +160,7 @@ apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: {{.ProjectName}}
 rules:
+{{- if not .SkipDefaultRules }}
 - apiGroups:
   - ""
   resources:
@@ -170,12 +174,6 @@ rules:
   verbs:
   - "*"
 - apiGroups:
-  - ""
-  resources:
-  - namespaces
-  verbs:
-  - get
-- apiGroups:
   - apps
   resources:
   - deployments
@@ -184,6 +182,38 @@ rules:
   - statefulsets
   verbs:
   - "*"
+{{- end }}
+{{- range .CustomRules }}
+- verbs:
+  {{- range .Verbs }}
+  - "{{ . }}"
+  {{- end }}
+  {{- with .APIGroups }}
+  apiGroups:
+  {{- range . }}
+  - "{{ . }}"
+  {{- end }}
+  {{- end }}
+  {{- with .Resources }}
+  resources:
+  {{- range . }}
+  - "{{ . }}"
+  {{- end }}
+  {{- end }}
+  {{- with .ResourceNames }}
+  resourceNames:
+  {{- range . }}
+  - "{{ . }}"
+  {{- end }}
+  {{- end }}
+  {{- with .NonResourceURLs }}
+  nonResourceURLs:
+  {{- range . }}
+  - "{{ . }}"
+  {{- end }}
+  {{- end }}
+{{- end }}
+{{- if not .SkipMetricsRules }}
 - apiGroups:
   - monitoring.coreos.com
   resources:
@@ -199,4 +229,5 @@ rules:
   - {{ .ProjectName }}
   verbs:
   - "update"
+{{- end }}
 `
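
A hedged sketch of how the new Role fields drive this template, mirroring what the Helm role generator builds:

	role := &scaffold.Role{
		SkipDefaultRules: true,
		SkipMetricsRules: true,
		CustomRules: []rbacv1.PolicyRule{
			{
				APIGroups: []string{""},
				Resources: []string{"configmaps", "secrets"},
				Verbs:     []string{rbacv1.VerbAll},
			},
		},
	}
	// Rendering this yields a role.yaml whose rules come solely from CustomRules;
	// the default rules and the monitoring.coreos.com block are skipped.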
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/scaffold.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/scaffold.go
index d84c08d..06ccafc 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/scaffold.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/scaffold.go
@@ -19,6 +19,8 @@ package scaffold
 import (
 	"bytes"
 	"fmt"
+	"go/parser"
+	"go/token"
 	"io"
 	"os"
 	"path/filepath"
@@ -28,6 +30,7 @@ import (
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
 	"github.com/operator-framework/operator-sdk/internal/util/fileutil"
 
+	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/afero"
 	"golang.org/x/tools/imports"
@@ -45,6 +48,11 @@ type Scaffold struct {
 	Fs afero.Fs
 	// GetWriter returns a writer for writing scaffold files.
 	GetWriter func(path string, mode os.FileMode) (io.Writer, error)
+	// BoilerplatePath is the path to a file containing Go boilerplate text.
+	BoilerplatePath string
+
+	// boilerplateBytes are bytes of Go boilerplate text.
+	boilerplateBytes []byte
 }
 
 func (s *Scaffold) setFieldsAndValidate(t input.File) error {
@@ -73,6 +81,67 @@ func (s *Scaffold) configure(cfg *input.Config) {
 	s.ProjectName = cfg.ProjectName
 }
 
+func validateBoilerplateBytes(b []byte) error {
+	// Append a 'package main' so we can parse the file.
+	fset := token.NewFileSet()
+	f, err := parser.ParseFile(fset, "", append([]byte("package main\n"), b...), parser.ParseComments)
+	if err != nil {
+		return fmt.Errorf("parse boilerplate comments: %v", err)
+	}
+	if len(f.Comments) == 0 {
+		return fmt.Errorf("boilerplate does not contain comments")
+	}
+	var cb []byte
+	for _, cg := range f.Comments {
+		for _, c := range cg.List {
+			cb = append(cb, []byte(strings.TrimSpace(c.Text)+"\n")...)
+		}
+	}
+	// Remove empty lines before comparison.
+	var tb []byte
+	for _, l := range bytes.Split(b, []byte("\n")) {
+		if len(l) > 0 {
+			tb = append(tb, append(bytes.TrimSpace(l), []byte("\n")...)...)
+		}
+	}
+	tb, cb = bytes.TrimSpace(tb), bytes.TrimSpace(cb)
+	if bytes.Compare(tb, cb) != 0 {
+		return fmt.Errorf(`boilerplate contains text other than comments:\n"%s"\n`, tb)
+	}
+	return nil
+}
+
+func wrapBoilerplateErr(err error, bp string) error {
+	return errors.Wrapf(err, `boilerplate file "%s"`, bp)
+}
+
+func (s *Scaffold) setBoilerplate() (err error) {
+	// If we've already set boilerplate bytes, don't overwrite them.
+	if len(s.boilerplateBytes) == 0 {
+		bp := s.BoilerplatePath
+		if bp == "" {
+			i, err := (&Boilerplate{}).GetInput()
+			if err != nil {
+				return wrapBoilerplateErr(err, i.Path)
+			}
+			if _, err := s.Fs.Stat(i.Path); err == nil {
+				bp = i.Path
+			}
+		}
+		if bp != "" {
+			b, err := afero.ReadFile(s.Fs, bp)
+			if err != nil {
+				return wrapBoilerplateErr(err, bp)
+			}
+			if err = validateBoilerplateBytes(b); err != nil {
+				return wrapBoilerplateErr(err, bp)
+			}
+			s.boilerplateBytes = append(bytes.TrimSpace(b), '\n', '\n')
+		}
+	}
+	return nil
+}
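
A hedged sketch of what validateBoilerplateBytes accepts: comment-only text, such as a license header.

	bp := []byte(`// Copyright 2019 Example Authors.
// Licensed under the Apache License, Version 2.0.
`)
	if err := validateBoilerplateBytes(bp); err != nil {
		log.Fatalf("invalid boilerplate: %v", err)
	}
	// Any non-comment Go token in the file causes validation to fail.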
+
 // Execute executes scaffolding the Files
 func (s *Scaffold) Execute(cfg *input.Config, files ...input.File) error {
 	if s.Fs == nil {
@@ -82,6 +151,11 @@ func (s *Scaffold) Execute(cfg *input.Config, files ...input.File) error {
 		s.GetWriter = fileutil.NewFileWriterFS(s.Fs).WriteCloser
 	}
 
+	// Generate boilerplate file first so new Go files get headers.
+	if err := s.setBoilerplate(); err != nil {
+		return err
+	}
+
 	// Configure s using common fields from cfg.
 	s.configure(cfg)
 
@@ -126,6 +200,10 @@ func (s *Scaffold) doFile(e input.File) error {
 
 const goFileExt = ".go"
 
+func isGoFile(p string) bool {
+	return filepath.Ext(p) == goFileExt
+}
+
 func (s *Scaffold) doRender(i input.Input, e input.File, absPath string) error {
 	var mode os.FileMode = fileutil.DefaultFileMode
 	if i.IsExec {
@@ -165,7 +243,7 @@ func (s *Scaffold) doRender(i input.Input, e input.File, absPath string) error {
 	}
 
 	// gofmt the imports
-	if filepath.Ext(absPath) == goFileExt {
+	if isGoFile(absPath) {
 		b, err = imports.Process(absPath, b, nil)
 		if err != nil {
 			return err
@@ -180,6 +258,12 @@ func (s *Scaffold) doRender(i input.Input, e input.File, absPath string) error {
 			}
 		}
 	}
+
+	if isGoFile(absPath) && len(s.boilerplateBytes) != 0 {
+		if _, err = f.Write(s.boilerplateBytes); err != nil {
+			return err
+		}
+	}
 	_, err = f.Write(b)
 	log.Infoln("Created", i.Path)
 	return err
@@ -195,5 +279,8 @@ func newTemplate(i input.Input) (*template.Template, error) {
 	if len(i.TemplateFuncs) > 0 {
 		t.Funcs(i.TemplateFuncs)
 	}
+	if i.Delims[0] != "" && i.Delims[1] != "" {
+		t.Delims(i.Delims[0], i.Delims[1])
+	}
 	return t.Parse(i.TemplateBody)
 }
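
The new Delims field matters for scaffold files whose bodies contain literal Go-template syntax (for example Helm or Ansible snippets). A hedged sketch with a hypothetical scaffold type:

	// Watches is a hypothetical scaffold file; only the input.Input fields are assumed.
	type Watches struct {
		input.Input
	}

	func (w *Watches) GetInput() (input.Input, error) {
		w.Path = "watches.yaml"
		// Switch to [[ ]] so the literal {{ meta.name }} below reaches the output untouched.
		w.Delims = [2]string{"[[", "]]"}
		w.TemplateBody = "project: [[.ProjectName]]\nname: '{{ meta.name }}'\n"
		return w.Input, nil
	}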
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/test_pod.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/test_pod.go
deleted file mode 100644
index 76bff7f..0000000
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/test_pod.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2018 The Operator-SDK Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package scaffold
-
-import (
-	"path/filepath"
-
-	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
-)
-
-const TestPodYamlFile = "test-pod.yaml"
-
-type TestPod struct {
-	input.Input
-
-	// Image is the image name used for testing, ex. quay.io/repo/operator-image
-	Image string
-
-	// TestNamespaceEnv is an env variable specifying the test namespace
-	TestNamespaceEnv string
-}
-
-func (s *TestPod) GetInput() (input.Input, error) {
-	if s.Path == "" {
-		s.Path = filepath.Join(DeployDir, TestPodYamlFile)
-	}
-	s.TemplateBody = testPodTmpl
-	return s.Input, nil
-}
-
-const testPodTmpl = `apiVersion: v1
-kind: Pod
-metadata:
-  name: {{.ProjectName}}-test
-spec:
-  restartPolicy: Never
-  containers:
-  - name: {{.ProjectName}}-test
-    image: {{.Image}}
-    imagePullPolicy: Always
-    command: ["/go-test.sh"]
-    env:
-      - name: {{.TestNamespaceEnv}}
-        valueFrom:
-          fieldRef:
-            fieldPath: metadata.namespace
-`
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/test_setup.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/test_setup.go
index d0e76db..5212a13 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/test_setup.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/test_setup.go
@@ -19,6 +19,7 @@ import (
 	"io"
 	"os"
 	"path/filepath"
+	"strings"
 
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
 
@@ -57,3 +58,20 @@ func setupScaffoldAndWriter() (*Scaffold, *bytes.Buffer) {
 		},
 	}, buf
 }
+
+func setupTestFrameworkConfig() (*input.Config, error) {
+	absPath, err := os.Getwd()
+	if err != nil {
+		return nil, err
+	}
+	absPath = absPath[:strings.Index(absPath, "internal/pkg")]
+	tfDir := filepath.Join(absPath, "test", "test-framework")
+
+	// Set the project and repo paths to {abs}/test/test-framework, which
+	// contains pkg/apis for the memcached-operator.
+	return &input.Config{
+		Repo:           tfDir[strings.Index(absPath, "github.com"):],
+		AbsProjectPath: tfDir,
+		ProjectName:    filepath.Base(tfDir),
+	}, nil
+}
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/test_framework_dockerfile.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/tools.go
similarity index 53%
rename from vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/test_framework_dockerfile.go
rename to vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/tools.go
index 9f3e3eb..d37d476 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/test_framework_dockerfile.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/tools.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Operator-SDK Authors
+// Copyright 2019 The Operator-SDK Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,27 +15,36 @@
 package scaffold
 
 import (
-	"path/filepath"
-
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
 )
 
-type TestFrameworkDockerfile struct {
+const ToolsFile = "tools.go"
+
+type Tools struct {
 	input.Input
 }
 
-func (s *TestFrameworkDockerfile) GetInput() (input.Input, error) {
+func (s *Tools) GetInput() (input.Input, error) {
 	if s.Path == "" {
-		s.Path = filepath.Join(BuildTestDir, DockerfileFile)
+		s.Path = ToolsFile
 	}
-	s.TemplateBody = testFrameworkDockerfileTmpl
+	s.TemplateBody = toolsTmpl
 	return s.Input, nil
 }
 
-const testFrameworkDockerfileTmpl = `ARG BASEIMAGE
-FROM ${BASEIMAGE}
-ADD build/_output/bin/{{.ProjectName}}-test /usr/local/bin/{{.ProjectName}}-test
-ARG NAMESPACEDMAN
-ADD $NAMESPACEDMAN /namespaced.yaml
-ADD build/test-framework/go-test.sh /go-test.sh
+const toolsTmpl = `// +build tools
+
+package tools
+
+import (
+	// Code generators built at runtime.
+	_ "k8s.io/code-generator/cmd/client-gen"
+	_ "k8s.io/code-generator/cmd/conversion-gen"
+	_ "k8s.io/code-generator/cmd/deepcopy-gen"
+	_ "k8s.io/code-generator/cmd/informer-gen"
+	_ "k8s.io/code-generator/cmd/lister-gen"
+	_ "k8s.io/gengo/args"
+	_ "k8s.io/kube-openapi/cmd/openapi-gen"
+	_ "sigs.k8s.io/controller-tools/pkg/crd/generator"
+)
 `
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/types.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/types.go
index afed2a1..72d7e7f 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/types.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/types.go
@@ -71,6 +71,7 @@ type {{.Resource.Kind}}Status struct {
 
 // {{.Resource.Kind}} is the Schema for the {{ .Resource.Resource }} API
 // +k8s:openapi-gen=true
+// +kubebuilder:subresource:status
 type {{.Resource.Kind}} struct {
 	metav1.TypeMeta   ` + "`" + `json:",inline"` + "`" + `
 	metav1.ObjectMeta ` + "`" + `json:"metadata,omitempty"` + "`" + `
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/version.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/version.go
index 8bd2ccf..22f7210 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/version.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scaffold/version.go
@@ -20,7 +20,10 @@ import (
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/input"
 )
 
-const VersionFile = "version.go"
+const (
+	VersionDir  = "version"
+	VersionFile = "version.go"
+)
 
 type Version struct {
 	input.Input
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/basic_tests.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/basic_tests.go
similarity index 53%
rename from vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/basic_tests.go
rename to vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/basic_tests.go
index 53339cc..6d4c7d9 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/basic_tests.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/basic_tests.go
@@ -20,9 +20,90 @@ import (
 	"fmt"
 	"strings"
 
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
+// BasicTestConfig contains all variables required by the BasicTest TestSuite
+type BasicTestConfig struct {
+	Client   client.Client
+	CR       *unstructured.Unstructured
+	ProxyPod *v1.Pod
+}
+
+// Test Definitions
+
+// CheckSpecTest is a scorecard test that verifies that the CR has a spec block
+type CheckSpecTest struct {
+	TestInfo
+	BasicTestConfig
+}
+
+// NewCheckSpecTest returns a new CheckSpecTest object
+func NewCheckSpecTest(conf BasicTestConfig) *CheckSpecTest {
+	return &CheckSpecTest{
+		BasicTestConfig: conf,
+		TestInfo: TestInfo{
+			Name:        "Spec Block Exists",
+			Description: "Custom Resource has a Spec Block",
+			Cumulative:  false,
+		},
+	}
+}
+
+// CheckStatusTest is a scorecard test that verifies that the CR has a status block
+type CheckStatusTest struct {
+	TestInfo
+	BasicTestConfig
+}
+
+// NewCheckStatusTest returns a new CheckStatusTest object
+func NewCheckStatusTest(conf BasicTestConfig) *CheckStatusTest {
+	return &CheckStatusTest{
+		BasicTestConfig: conf,
+		TestInfo: TestInfo{
+			Name:        "Status Block Exists",
+			Description: "Custom Resource has a Status Block",
+			Cumulative:  false,
+		},
+	}
+}
+
+// WritingIntoCRsHasEffectTest is a scorecard test that verifies that the operator is making PUT and/or POST requests to the API server
+type WritingIntoCRsHasEffectTest struct {
+	TestInfo
+	BasicTestConfig
+}
+
+// NewWritingIntoCRsHasEffectTest returns a new WritingIntoCRsHasEffectTest object
+func NewWritingIntoCRsHasEffectTest(conf BasicTestConfig) *WritingIntoCRsHasEffectTest {
+	return &WritingIntoCRsHasEffectTest{
+		BasicTestConfig: conf,
+		TestInfo: TestInfo{
+			Name:        "Writing into CRs has an effect",
+			Description: "A CR sends PUT/POST requests to the API server to modify resources in response to spec block changes",
+			Cumulative:  false,
+		},
+	}
+}
+
+// NewBasicTestSuite returns a new TestSuite object containing basic, functional operator tests
+func NewBasicTestSuite(conf BasicTestConfig) *TestSuite {
+	ts := NewTestSuite(
+		"Basic Tests",
+		"Test suite that runs basic, functional operator tests",
+	)
+	ts.AddTest(NewCheckSpecTest(conf), 1.5)
+	ts.AddTest(NewCheckStatusTest(conf), 1)
+	ts.AddTest(NewWritingIntoCRsHasEffectTest(conf), 1)
+
+	return ts
+}
+
+// Test Implementations
+
 // Run - implements Test interface
 func (t *CheckSpecTest) Run(ctx context.Context) *TestResult {
 	res := &TestResult{Test: t, MaximumPoints: 1}
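
A hedged sketch of assembling the relocated basic suite; the client, CR, and proxy pod values are placeholders obtained elsewhere by the scorecard runner:

	conf := scorecard.BasicTestConfig{
		Client:   runtimeClient, // controller-runtime client.Client
		CR:       cr,            // *unstructured.Unstructured under test
		ProxyPod: proxyPod,      // *v1.Pod running the scorecard proxy
	}
	suite := scorecard.NewBasicTestSuite(conf)
	// CheckSpec is weighted 1.5; CheckStatus and WritingIntoCRsHasEffect are weighted 1.0 each.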
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/helpers.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/helpers.go
new file mode 100644
index 0000000..e103e2a
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/helpers.go
@@ -0,0 +1,189 @@
+// Copyright 2019 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scorecard
+
+import (
+	"fmt"
+
+	scapiv1alpha1 "github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// These functions should be in the public test definitions file, but they are not complete/stable,
+// so we'll keep these here until they get fully implemented
+
+// ResultsPassFail combines multiple test results and returns a single test result
+// with 1 maximum point and either 0 or 1 earned points.
+func ResultsPassFail(results []TestResult) (TestResult, error) {
+	var name string
+	finalResult := TestResult{}
+	if len(results) > 0 {
+		name = results[0].Test.GetName()
+		// all results have the same test
+		finalResult.Test = results[0].Test
+		finalResult.MaximumPoints = 1
+		finalResult.EarnedPoints = 1
+	}
+	for _, result := range results {
+		if result.Test.IsCumulative() {
+			return finalResult, fmt.Errorf("cumulative test passed to ResultsPassFail: name (%s)", result.Test.GetName())
+		}
+		if result.Test.GetName() != name {
+			return finalResult, fmt.Errorf("test name mismatch in ResultsPassFail: %s != %s", result.Test.GetName(), name)
+		}
+		if result.EarnedPoints != result.MaximumPoints {
+			finalResult.EarnedPoints = 0
+		}
+		finalResult.Suggestions = append(finalResult.Suggestions, result.Suggestions...)
+		finalResult.Errors = append(finalResult.Errors, result.Errors...)
+	}
+	return finalResult, nil
+}
+
+// ResultsCumulative takes multiple TestResults and returns a single TestResult with MaximumPoints
+// equal to the sum of the MaximumPoints of the input and EarnedPoints as the sum of EarnedPoints
+// of the input
+func ResultsCumulative(results []TestResult) (TestResult, error) {
+	var name string
+	finalResult := TestResult{}
+	if len(results) > 0 {
+		name = results[0].Test.GetName()
+		// all results have the same test
+		finalResult.Test = results[0].Test
+	}
+	for _, result := range results {
+		if !result.Test.IsCumulative() {
+			return finalResult, fmt.Errorf("non-cumulative test passed to ResultsCumulative: name (%s)", result.Test.GetName())
+		}
+		if result.Test.GetName() != name {
+			return finalResult, fmt.Errorf("test name mismatch in ResultsCumulative: %s != %s", result.Test.GetName(), name)
+		}
+		finalResult.EarnedPoints += result.EarnedPoints
+		finalResult.MaximumPoints += result.MaximumPoints
+		finalResult.Suggestions = append(finalResult.Suggestions, result.Suggestions...)
+		finalResult.Errors = append(finalResult.Errors, result.Errors...)
+	}
+	return finalResult, nil
+}
+
+// CalculateResult returns a ScorecardSuiteResult with the state and Tests fields set based on a slice of ScorecardTestResults
+func CalculateResult(tests []scapiv1alpha1.ScorecardTestResult) scapiv1alpha1.ScorecardSuiteResult {
+	scorecardSuiteResult := scapiv1alpha1.ScorecardSuiteResult{}
+	scorecardSuiteResult.Tests = tests
+	scorecardSuiteResult = UpdateSuiteStates(scorecardSuiteResult)
+	return scorecardSuiteResult
+}
+
+// TestSuitesToScorecardOutput takes an array of test suites and generates a v1alpha1 ScorecardOutput object with the
+// provided log and the name, description, and results of each suite
+func TestSuitesToScorecardOutput(suites []TestSuite, log string) scapiv1alpha1.ScorecardOutput {
+	test := scapiv1alpha1.ScorecardOutput{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ScorecardOutput",
+			APIVersion: "osdk.openshift.io/v1alpha1",
+		},
+		Log: log,
+	}
+	scorecardSuiteResults := []scapiv1alpha1.ScorecardSuiteResult{}
+	for _, suite := range suites {
+		results := []scapiv1alpha1.ScorecardTestResult{}
+		for _, testResult := range suite.TestResults {
+			results = append(results, TestResultToScorecardTestResult(testResult))
+		}
+		scorecardSuiteResult := CalculateResult(results)
+		scorecardSuiteResult.TotalScore = suite.TotalScore()
+		scorecardSuiteResult.Name = suite.GetName()
+		scorecardSuiteResult.Description = suite.GetDescription()
+		scorecardSuiteResult.Log = suite.Log
+		scorecardSuiteResults = append(scorecardSuiteResults, scorecardSuiteResult)
+	}
+	test.Results = scorecardSuiteResults
+	return test
+}
+
+// TestResultToScorecardTestResult is a helper function for converting from the TestResult type to the ScorecardTestResult type
+func TestResultToScorecardTestResult(tr TestResult) scapiv1alpha1.ScorecardTestResult {
+	sctr := scapiv1alpha1.ScorecardTestResult{}
+	sctr.State = tr.State
+	sctr.Name = tr.Test.GetName()
+	sctr.Description = tr.Test.GetDescription()
+	sctr.EarnedPoints = tr.EarnedPoints
+	sctr.MaximumPoints = tr.MaximumPoints
+	sctr.Suggestions = tr.Suggestions
+	if sctr.Suggestions == nil {
+		sctr.Suggestions = []string{}
+	}
+	stringErrors := []string{}
+	for _, err := range tr.Errors {
+		stringErrors = append(stringErrors, err.Error())
+	}
+	sctr.Errors = stringErrors
+	return sctr
+}
+
+// UpdateState updates the state of a ScorecardTestResult based on its earned and maximum points.
+func UpdateState(res scapiv1alpha1.ScorecardTestResult) scapiv1alpha1.ScorecardTestResult {
+	if res.State == scapiv1alpha1.ErrorState {
+		return res
+	}
+	if res.EarnedPoints == 0 {
+		res.State = scapiv1alpha1.FailState
+	} else if res.EarnedPoints < res.MaximumPoints {
+		res.State = scapiv1alpha1.PartialPassState
+	} else if res.EarnedPoints == res.MaximumPoints {
+		res.State = scapiv1alpha1.PassState
+	}
+	return res
+	// TODO: decide what to do if a Test incorrectly sets points (Earned > Max)
+}
+
+// UpdateSuiteStates updates the state of each test in a suite and updates the suite's per-state counts to match
+func UpdateSuiteStates(suite scapiv1alpha1.ScorecardSuiteResult) scapiv1alpha1.ScorecardSuiteResult {
+	suite.TotalTests = len(suite.Tests)
+	// reset all state values
+	suite.Error = 0
+	suite.Fail = 0
+	suite.PartialPass = 0
+	suite.Pass = 0
+	for idx, test := range suite.Tests {
+		suite.Tests[idx] = UpdateState(test)
+		switch test.State {
+		case scapiv1alpha1.ErrorState:
+			suite.Error++
+		case scapiv1alpha1.PassState:
+			suite.Pass++
+		case scapiv1alpha1.PartialPassState:
+			suite.PartialPass++
+		case scapiv1alpha1.FailState:
+			suite.Fail++
+		}
+	}
+	return suite
+}
+
+func CombineScorecardOutput(outputs []scapiv1alpha1.ScorecardOutput, log string) scapiv1alpha1.ScorecardOutput {
+	output := scapiv1alpha1.ScorecardOutput{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ScorecardOutput",
+			APIVersion: "osdk.openshift.io/v1alpha1",
+		},
+		Log: log,
+	}
+	for _, item := range outputs {
+		output.Results = append(output.Results, item.Results...)
+	}
+	return output
+}
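
    For illustration only, a minimal sketch of how the two combinators above behave;
    the stubTest type is hypothetical and exists only to satisfy the Test interface
    defined in test_definitions.go further down in this patch:

    package scorecard // sketch: same package as the helpers above

    import "context"

    // stubTest is a hypothetical test used only for this sketch.
    type stubTest struct{ TestInfo }

    func (s stubTest) Run(ctx context.Context) *TestResult { return &TestResult{Test: s} }

    func exampleCombine() {
    	passFail := stubTest{TestInfo{Name: "example", Cumulative: false}}
    	combined, _ := ResultsPassFail([]TestResult{
    		{Test: passFail, EarnedPoints: 1, MaximumPoints: 1},
    		{Test: passFail, EarnedPoints: 0, MaximumPoints: 1},
    	})
    	// combined is 0/1: one run missed full points, so the pass/fail result fails.

    	cumulative := stubTest{TestInfo{Name: "example-cumulative", Cumulative: true}}
    	total, _ := ResultsCumulative([]TestResult{
    		{Test: cumulative, EarnedPoints: 2, MaximumPoints: 3},
    		{Test: cumulative, EarnedPoints: 1, MaximumPoints: 1},
    	})
    	// total is 3/4: earned and maximum points are summed across runs.
    	_, _ = combined, total
    }
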
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/olm_tests.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/olm_tests.go
similarity index 66%
rename from vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/olm_tests.go
rename to vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/olm_tests.go
index 252b265..8ec2345 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/olm_tests.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/olm_tests.go
@@ -21,15 +21,118 @@ import (
 	"strings"
 
 	"github.com/operator-framework/operator-sdk/internal/util/k8sutil"
+	scapiv1alpha1 "github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1"
 
 	olmapiv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1"
-	log "github.com/sirupsen/logrus"
 	v1 "k8s.io/api/core/v1"
 	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
+// OLMTestConfig contains all variables required by the OLMTest TestSuite
+type OLMTestConfig struct {
+	Client   client.Client
+	CR       *unstructured.Unstructured
+	CSV      *olmapiv1alpha1.ClusterServiceVersion
+	CRDsDir  string
+	ProxyPod *v1.Pod
+}
+
+// Test Definitions
+
+// CRDsHaveValidationTest is a scorecard test that verifies that all CRDs have a validation section
+type CRDsHaveValidationTest struct {
+	TestInfo
+	OLMTestConfig
+}
+
+// NewCRDsHaveValidationTest returns a new CRDsHaveValidationTest object
+func NewCRDsHaveValidationTest(conf OLMTestConfig) *CRDsHaveValidationTest {
+	return &CRDsHaveValidationTest{
+		OLMTestConfig: conf,
+		TestInfo: TestInfo{
+			Name:        "Provided APIs have validation",
+			Description: "All CRDs have an OpenAPI validation subsection",
+			Cumulative:  true,
+		},
+	}
+}
+
+// CRDsHaveResourcesTest is a scorecard test that verifies that the CSV lists used resources in its owned CRDs section
+type CRDsHaveResourcesTest struct {
+	TestInfo
+	OLMTestConfig
+}
+
+// NewCRDsHaveResourcesTest returns a new CRDsHaveResourcesTest object
+func NewCRDsHaveResourcesTest(conf OLMTestConfig) *CRDsHaveResourcesTest {
+	return &CRDsHaveResourcesTest{
+		OLMTestConfig: conf,
+		TestInfo: TestInfo{
+			Name:        "Owned CRDs have resources listed",
+			Description: "All Owned CRDs contain a resources subsection",
+			Cumulative:  true,
+		},
+	}
+}
+
+// AnnotationsContainExamplesTest is a scorecard test that verifies that the CSV contains examples via the alm-examples annotation
+type AnnotationsContainExamplesTest struct {
+	TestInfo
+	OLMTestConfig
+}
+
+// NewAnnotationsContainExamplesTest returns a new AnnotationsContainExamplesTest object
+func NewAnnotationsContainExamplesTest(conf OLMTestConfig) *AnnotationsContainExamplesTest {
+	return &AnnotationsContainExamplesTest{
+		OLMTestConfig: conf,
+		TestInfo: TestInfo{
+			Name:        "CRs have at least 1 example",
+			Description: "The CSV's metadata contains an alm-examples section",
+			Cumulative:  true,
+		},
+	}
+}
+
+// SpecDescriptorsTest is a scorecard test that verifies that all spec fields have descriptors
+type SpecDescriptorsTest struct {
+	TestInfo
+	OLMTestConfig
+}
+
+// NewSpecDescriptorsTest returns a new SpecDescriptorsTest object
+func NewSpecDescriptorsTest(conf OLMTestConfig) *SpecDescriptorsTest {
+	return &SpecDescriptorsTest{
+		OLMTestConfig: conf,
+		TestInfo: TestInfo{
+			Name:        "Spec fields with descriptors",
+			Description: "All spec fields have matching descriptors in the CSV",
+			Cumulative:  true,
+		},
+	}
+}
+
+// StatusDescriptorsTest is a scorecard test that verifies that all status fields have descriptors
+type StatusDescriptorsTest struct {
+	TestInfo
+	OLMTestConfig
+}
+
+// NewStatusDescriptorsTest returns a new StatusDescriptorsTest object
+func NewStatusDescriptorsTest(conf OLMTestConfig) *StatusDescriptorsTest {
+	return &StatusDescriptorsTest{
+		OLMTestConfig: conf,
+		TestInfo: TestInfo{
+			Name:        "Status fields with descriptors",
+			Description: "All status fields have matching descriptors in the CSV",
+			Cumulative:  true,
+		},
+	}
+}
+
 func matchKind(kind1, kind2 string) bool {
 	singularKind1, err := restMapper.ResourceSingularizer(kind1)
 	if err != nil {
@@ -44,6 +147,24 @@ func matchKind(kind1, kind2 string) bool {
 	return strings.EqualFold(singularKind1, singularKind2)
 }
 
+// NewOLMTestSuite returns a new TestSuite object containing CSV best practice checks
+func NewOLMTestSuite(conf OLMTestConfig) *TestSuite {
+	ts := NewTestSuite(
+		"OLM Tests",
+		"Test suite checks if an operator's CSV follows best practices",
+	)
+
+	ts.AddTest(NewCRDsHaveValidationTest(conf), 1.25)
+	ts.AddTest(NewCRDsHaveResourcesTest(conf), 1)
+	ts.AddTest(NewAnnotationsContainExamplesTest(conf), 1)
+	ts.AddTest(NewSpecDescriptorsTest(conf), 1)
+	ts.AddTest(NewStatusDescriptorsTest(conf), 1)
+
+	return ts
+}
+
+// Test Implementations
+
 // matchVersion checks if a CRD contains a specified version in a case insensitive manner
 func matchVersion(version string, crd *apiextv1beta1.CustomResourceDefinition) bool {
 	if strings.EqualFold(version, crd.Spec.Version) {
@@ -64,25 +185,25 @@ func (t *CRDsHaveValidationTest) Run(ctx context.Context) *TestResult {
 	crds, err := k8sutil.GetCRDs(t.CRDsDir)
 	if err != nil {
 		res.Errors = append(res.Errors, fmt.Errorf("failed to get CRDs in %s directory: %v", t.CRDsDir, err))
+		res.State = scapiv1alpha1.ErrorState
 		return res
 	}
 	err = t.Client.Get(ctx, types.NamespacedName{Namespace: t.CR.GetNamespace(), Name: t.CR.GetName()}, t.CR)
 	if err != nil {
 		res.Errors = append(res.Errors, err)
+		res.State = scapiv1alpha1.ErrorState
 		return res
 	}
-	// TODO: we need to make this handle multiple CRs better/correctly
 	for _, crd := range crds {
-		res.MaximumPoints++
-		if crd.Spec.Validation == nil {
-			res.Suggestions = append(res.Suggestions, fmt.Sprintf("Add CRD validation for %s/%s", crd.Spec.Names.Kind, crd.Spec.Version))
-			continue
-		}
 		// check if the CRD matches the testing CR
 		gvk := t.CR.GroupVersionKind()
 		// Only check the validation block if the CRD and CR have the same Kind and Version
 		if !(matchVersion(gvk.Version, crd) && matchKind(gvk.Kind, crd.Spec.Names.Kind)) {
-			res.EarnedPoints++
+			continue
+		}
+		res.MaximumPoints++
+		if crd.Spec.Validation == nil {
+			res.Suggestions = append(res.Suggestions, fmt.Sprintf("Add CRD validation for %s/%s", crd.Spec.Names.Kind, crd.Spec.Version))
 			continue
 		}
 		failed := false
@@ -114,37 +235,34 @@ func (t *CRDsHaveValidationTest) Run(ctx context.Context) *TestResult {
 // Run - implements Test interface
 func (t *CRDsHaveResourcesTest) Run(ctx context.Context) *TestResult {
 	res := &TestResult{Test: t}
+	var missingResources []string
 	for _, crd := range t.CSV.Spec.CustomResourceDefinitions.Owned {
-		res.MaximumPoints++
 		gvk := t.CR.GroupVersionKind()
 		if strings.EqualFold(crd.Version, gvk.Version) && matchKind(gvk.Kind, crd.Kind) {
+			res.MaximumPoints++
+			if len(crd.Resources) > 0 {
+				res.EarnedPoints++
+			}
 			resources, err := getUsedResources(t.ProxyPod)
 			if err != nil {
 				log.Warningf("getUsedResource failed: %v", err)
 			}
-			allResourcesListed := true
 			for _, resource := range resources {
 				foundResource := false
 				for _, listedResource := range crd.Resources {
 					if matchKind(resource.Kind, listedResource.Kind) && strings.EqualFold(resource.Version, listedResource.Version) {
 						foundResource = true
+						break
 					}
 				}
 				if foundResource == false {
-					allResourcesListed = false
+					missingResources = append(missingResources, fmt.Sprintf("%s/%s", resource.Kind, resource.Version))
 				}
 			}
-			if allResourcesListed {
-				res.EarnedPoints++
-			}
-		} else {
-			if len(crd.Resources) > 0 {
-				res.EarnedPoints++
-			}
 		}
 	}
-	if res.EarnedPoints < res.MaximumPoints {
-		res.Suggestions = append(res.Suggestions, "Add resources to owned CRDs")
+	if len(missingResources) > 0 {
+		res.Suggestions = append(res.Suggestions, fmt.Sprintf("If it would be helpful to an end-user to understand or troubleshoot your CR, consider adding resources %v to the resources section for owned CRD %s", missingResources, t.CR.GroupVersionKind().Kind))
 	}
 	return res
 }
@@ -166,14 +284,17 @@ func getUsedResources(proxyPod *v1.Pod) ([]schema.GroupVersionKind, error) {
 		/*
 			There are 6 formats a resource uri can have:
 			Cluster-Scoped:
-				Collection: /apis/GROUP/VERSION/KIND
-				Individual: /apis/GROUP/VERSION/KIND/NAME
-				Core:       /api/v1/KIND
+				Collection:      /apis/GROUP/VERSION/KIND
+				Individual:      /apis/GROUP/VERSION/KIND/NAME
+				Core:            /api/v1/KIND
+				Core Individual: /api/v1/KIND/NAME
+
 			Namespaces:
 				All Namespaces:          /apis/GROUP/VERSION/KIND (same as cluster collection)
 				Collection in Namespace: /apis/GROUP/VERSION/namespaces/NAMESPACE/KIND
 				Individual:              /apis/GROUP/VERSION/namespaces/NAMESPACE/KIND/NAME
 				Core:                    /api/v1/namespaces/NAMESPACE/KIND
+				Core Individual:         /api/v1/namespaces/NAMESPACE/KIND/NAME
 
 			These urls are also often appended with options, which are denoted by the '?' symbol
 		*/
@@ -205,7 +326,10 @@ func getUsedResources(proxyPod *v1.Pod) ([]schema.GroupVersionKind, error) {
 			}
 			log.Warnf("Invalid URI: \"%s\"", uri)
 		case 4:
-			if splitURI[0] == "apis" {
+			if splitURI[0] == "api" {
+				resources[schema.GroupVersionKind{Version: splitURI[1], Kind: splitURI[2]}] = true
+				break
+			} else if splitURI[0] == "apis" {
 				resources[schema.GroupVersionKind{Group: splitURI[1], Version: splitURI[2], Kind: splitURI[3]}] = true
 				break
 			}
@@ -220,7 +344,10 @@ func getUsedResources(proxyPod *v1.Pod) ([]schema.GroupVersionKind, error) {
 			}
 			log.Warnf("Invalid URI: \"%s\"", uri)
 		case 6, 7:
-			if splitURI[0] == "apis" {
+			if splitURI[0] == "api" {
+				resources[schema.GroupVersionKind{Version: splitURI[1], Kind: splitURI[4]}] = true
+				break
+			} else if splitURI[0] == "apis" {
 				resources[schema.GroupVersionKind{Group: splitURI[1], Version: splitURI[2], Kind: splitURI[5]}] = true
 				break
 			}
@@ -252,6 +379,7 @@ func (t *StatusDescriptorsTest) Run(ctx context.Context) *TestResult {
 	err := t.Client.Get(ctx, types.NamespacedName{Namespace: t.CR.GetNamespace(), Name: t.CR.GetName()}, t.CR)
 	if err != nil {
 		res.Errors = append(res.Errors, err)
+		res.State = scapiv1alpha1.ErrorState
 		return res
 	}
 	if t.CR.Object["status"] == nil {
@@ -290,6 +418,7 @@ func (t *SpecDescriptorsTest) Run(ctx context.Context) *TestResult {
 	err := t.Client.Get(ctx, types.NamespacedName{Namespace: t.CR.GetNamespace(), Name: t.CR.GetName()}, t.CR)
 	if err != nil {
 		res.Errors = append(res.Errors, err)
+		res.State = scapiv1alpha1.ErrorState
 		return res
 	}
 	if t.CR.Object["spec"] == nil {
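
    As a usage sketch, mirroring how scorecard.go later in this patch wires the OLM
    suite together; the CRDs directory here is a placeholder, and the sketch assumes
    it lives in the scorecard package where runTests() has already initialized
    runtimeClient and proxyPodGlobal:

    // obj and csv are assumed to come from the CR manifest and --csv-path respectively.
    func runOLMSuiteExample(obj *unstructured.Unstructured, csv *olmapiv1alpha1.ClusterServiceVersion) int {
    	conf := OLMTestConfig{
    		Client:   runtimeClient,
    		CR:       obj,
    		CSV:      csv,
    		CRDsDir:  "deploy/crds", // placeholder
    		ProxyPod: proxyPodGlobal,
    	}
    	suite := NewOLMTestSuite(conf)
    	suite.Run(context.TODO())
    	return suite.TotalScore() // percentage across the five weighted tests
    }
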
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/resource_handler.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/resource_handler.go
similarity index 99%
rename from vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/resource_handler.go
rename to vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/resource_handler.go
index 2262dcf..dbbd1a8 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/scorecard/resource_handler.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/resource_handler.go
@@ -26,11 +26,9 @@ import (
 	"github.com/operator-framework/operator-sdk/internal/util/yamlutil"
 	proxyConf "github.com/operator-framework/operator-sdk/pkg/ansible/proxy/kubeconfig"
 	"github.com/operator-framework/operator-sdk/pkg/k8sutil"
-	"github.com/spf13/viper"
-	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	"github.com/ghodss/yaml"
-	log "github.com/sirupsen/logrus"
+	"github.com/spf13/viper"
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -41,6 +39,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 type cleanupFn func() error
@@ -345,7 +344,7 @@ func addResourceCleanup(obj runtime.Object, key types.NamespacedName) {
 		// make a copy of the object because the client changes it
 		objCopy := obj.DeepCopyObject()
 		err := runtimeClient.Delete(context.TODO(), obj)
-		if err != nil {
+		if err != nil && !apierrors.IsNotFound(err) {
 			return err
 		}
 		err = wait.PollImmediate(time.Second*1, time.Second*10, func() (bool, error) {
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/scorecard.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/scorecard.go
new file mode 100644
index 0000000..2062408
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/scorecard.go
@@ -0,0 +1,542 @@
+// Copyright 2019 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scorecard
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+
+	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
+	k8sInternal "github.com/operator-framework/operator-sdk/internal/util/k8sutil"
+	"github.com/operator-framework/operator-sdk/internal/util/projutil"
+	"github.com/operator-framework/operator-sdk/internal/util/yamlutil"
+	scapiv1alpha1 "github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1"
+
+	"github.com/ghodss/yaml"
+	olmapiv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1"
+	olminstall "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install"
+	"github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+	v1 "k8s.io/api/core/v1"
+	extscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/discovery/cached"
+	"k8s.io/client-go/kubernetes"
+	cgoscheme "k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/restmapper"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+const (
+	ConfigOpt                 = "config"
+	NamespaceOpt              = "namespace"
+	KubeconfigOpt             = "kubeconfig"
+	InitTimeoutOpt            = "init-timeout"
+	OlmDeployedOpt            = "olm-deployed"
+	CSVPathOpt                = "csv-path"
+	BasicTestsOpt             = "basic-tests"
+	OLMTestsOpt               = "olm-tests"
+	NamespacedManifestOpt     = "namespaced-manifest"
+	GlobalManifestOpt         = "global-manifest"
+	CRManifestOpt             = "cr-manifest"
+	ProxyImageOpt             = "proxy-image"
+	ProxyPullPolicyOpt        = "proxy-pull-policy"
+	CRDsDirOpt                = "crds-dir"
+	OutputFormatOpt           = "output"
+	PluginDirOpt              = "plugin-dir"
+	JSONOutputFormat          = "json"
+	HumanReadableOutputFormat = "human-readable"
+)
+
+const (
+	basicOperator  = "Basic Operator"
+	olmIntegration = "OLM Integration"
+)
+
+var (
+	kubeconfig     *rest.Config
+	dynamicDecoder runtime.Decoder
+	runtimeClient  client.Client
+	restMapper     *restmapper.DeferredDiscoveryRESTMapper
+	deploymentName string
+	proxyPodGlobal *v1.Pod
+	cleanupFns     []cleanupFn
+)
+
+const (
+	scorecardPodName       = "operator-scorecard-test"
+	scorecardContainerName = "scorecard-proxy"
+)
+
+// make a global logger for scorecard
+var (
+	logReadWriter io.ReadWriter
+	log           = logrus.New()
+)
+
+func runTests() ([]scapiv1alpha1.ScorecardOutput, error) {
+	defer func() {
+		if err := cleanupScorecard(); err != nil {
+			log.Errorf("Failed to cleanup resources: (%v)", err)
+		}
+	}()
+
+	var (
+		tmpNamespaceVar string
+		err             error
+	)
+	kubeconfig, tmpNamespaceVar, err = k8sInternal.GetKubeconfigAndNamespace(viper.GetString(KubeconfigOpt))
+	if err != nil {
+		return nil, fmt.Errorf("failed to build the kubeconfig: %v", err)
+	}
+	if viper.GetString(NamespaceOpt) == "" {
+		viper.Set(NamespaceOpt, tmpNamespaceVar)
+	}
+	scheme := runtime.NewScheme()
+	// scheme for client go
+	if err := cgoscheme.AddToScheme(scheme); err != nil {
+		return nil, fmt.Errorf("failed to add client-go scheme to client: (%v)", err)
+	}
+	// api extensions scheme (CRDs)
+	if err := extscheme.AddToScheme(scheme); err != nil {
+		return nil, fmt.Errorf("failed to add extensions api scheme to client: (%v)", err)
+	}
+	// olm api (CSVs)
+	if err := olmapiv1alpha1.AddToScheme(scheme); err != nil {
+		return nil, fmt.Errorf("failed to add olm api scheme (CSVs) to client: (%v)", err)
+	}
+	dynamicDecoder = serializer.NewCodecFactory(scheme).UniversalDeserializer()
+	// if a user creates a new CRD, we need to be able to reset the rest mapper
+	// temporary kubeclient to get a cached discovery
+	kubeclient, err := kubernetes.NewForConfig(kubeconfig)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get a kubeclient: %v", err)
+	}
+	cachedDiscoveryClient := cached.NewMemCacheClient(kubeclient.Discovery())
+	restMapper = restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoveryClient)
+	restMapper.Reset()
+	runtimeClient, _ = client.New(kubeconfig, client.Options{Scheme: scheme, Mapper: restMapper})
+
+	csv := &olmapiv1alpha1.ClusterServiceVersion{}
+	if viper.GetBool(OLMTestsOpt) {
+		yamlSpec, err := ioutil.ReadFile(viper.GetString(CSVPathOpt))
+		if err != nil {
+			return nil, fmt.Errorf("failed to read csv: %v", err)
+		}
+		if err = yaml.Unmarshal(yamlSpec, csv); err != nil {
+			return nil, fmt.Errorf("error getting ClusterServiceVersion: %v", err)
+		}
+	}
+
+	// Extract operator manifests from the CSV if olm-deployed is set.
+	if viper.GetBool(OlmDeployedOpt) {
+		// Get deploymentName from the deployment manifest within the CSV.
+		strat, err := (&olminstall.StrategyResolver{}).UnmarshalStrategy(csv.Spec.InstallStrategy)
+		if err != nil {
+			return nil, err
+		}
+		stratDep, ok := strat.(*olminstall.StrategyDetailsDeployment)
+		if !ok {
+			return nil, fmt.Errorf("expected StrategyDetailsDeployment, got strategy of type %T", strat)
+		}
+		deploymentName = stratDep.DeploymentSpecs[0].Name
+		// Get the proxy pod, which should have been created with the CSV.
+		proxyPodGlobal, err = getPodFromDeployment(deploymentName, viper.GetString(NamespaceOpt))
+		if err != nil {
+			return nil, err
+		}
+
+		// Create a temporary CR manifest from metadata if one is not provided.
+		crJSONStr, ok := csv.ObjectMeta.Annotations["alm-examples"]
+		if ok && viper.GetString(CRManifestOpt) == "" {
+			var crs []interface{}
+			if err = json.Unmarshal([]byte(crJSONStr), &crs); err != nil {
+				return nil, err
+			}
+			// TODO: run scorecard against all CR's in CSV.
+			cr := crs[0]
+			crJSONBytes, err := json.Marshal(cr)
+			if err != nil {
+				return nil, err
+			}
+			crYAMLBytes, err := yaml.JSONToYAML(crJSONBytes)
+			if err != nil {
+				return nil, err
+			}
+			crFile, err := ioutil.TempFile("", "cr.yaml")
+			if err != nil {
+				return nil, err
+			}
+			if _, err := crFile.Write(crYAMLBytes); err != nil {
+				return nil, err
+			}
+			viper.Set(CRManifestOpt, crFile.Name())
+			defer func() {
+				err := os.Remove(viper.GetString(CRManifestOpt))
+				if err != nil {
+					log.Errorf("Could not delete temporary CR manifest file: (%v)", err)
+				}
+			}()
+		}
+
+	} else {
+		// If no namespaced manifest path is given, combine
+		// deploy/{service_account,role,role_binding,operator}.yaml.
+		if viper.GetString(NamespacedManifestOpt) == "" {
+			file, err := yamlutil.GenerateCombinedNamespacedManifest(scaffold.DeployDir)
+			if err != nil {
+				return nil, err
+			}
+			viper.Set(NamespacedManifestOpt, file.Name())
+			defer func() {
+				err := os.Remove(viper.GetString(NamespacedManifestOpt))
+				if err != nil {
+					log.Errorf("Could not delete temporary namespace manifest file: (%v)", err)
+				}
+			}()
+		}
+		// If no global manifest is given, combine all CRD's in the given CRD's dir.
+		if viper.GetString(GlobalManifestOpt) == "" {
+			gMan, err := yamlutil.GenerateCombinedGlobalManifest(viper.GetString(CRDsDirOpt))
+			if err != nil {
+				return nil, err
+			}
+			viper.Set(GlobalManifestOpt, gMan.Name())
+			defer func() {
+				err := os.Remove(viper.GetString(GlobalManifestOpt))
+				if err != nil {
+					log.Errorf("Could not delete global manifest file: (%v)", err)
+				}
+			}()
+		}
+	}
+
+	crs := viper.GetStringSlice(CRManifestOpt)
+	// check if there are duplicate CRs
+	gvks := []schema.GroupVersionKind{}
+	for _, cr := range crs {
+		file, err := ioutil.ReadFile(cr)
+		if err != nil {
+			return nil, fmt.Errorf("failed to read file: %s", cr)
+		}
+		newGVKs, err := getGVKs(file)
+		if err != nil {
+			return nil, fmt.Errorf("could not get GVKs for resource(s) in file: %s, due to error: (%v)", cr, err)
+		}
+		gvks = append(gvks, newGVKs...)
+	}
+	dupMap := make(map[schema.GroupVersionKind]bool)
+	for _, gvk := range gvks {
+		if _, ok := dupMap[gvk]; ok {
+			log.Warnf("Duplicate gvks in CR list detected (%s); results may be inaccurate", gvk)
+		}
+		dupMap[gvk] = true
+	}
+
+	var pluginResults []scapiv1alpha1.ScorecardOutput
+	var suites []TestSuite
+	for _, cr := range crs {
+		// TODO: Change built-in tests into plugins
+		// Run built-in tests.
+		fmt.Printf("Running for cr: %s\n", cr)
+		if !viper.GetBool(OlmDeployedOpt) {
+			if err := createFromYAMLFile(viper.GetString(GlobalManifestOpt)); err != nil {
+				return nil, fmt.Errorf("failed to create global resources: %v", err)
+			}
+			if err := createFromYAMLFile(viper.GetString(NamespacedManifestOpt)); err != nil {
+				return nil, fmt.Errorf("failed to create namespaced resources: %v", err)
+			}
+		}
+		if err := createFromYAMLFile(cr); err != nil {
+			return nil, fmt.Errorf("failed to create cr resource: %v", err)
+		}
+		obj, err := yamlToUnstructured(cr)
+		if err != nil {
+			return nil, fmt.Errorf("failed to decode custom resource manifest into object: %s", err)
+		}
+		if err := waitUntilCRStatusExists(obj); err != nil {
+			return nil, fmt.Errorf("failed waiting to check if CR status exists: %v", err)
+		}
+		if viper.GetBool(BasicTestsOpt) {
+			conf := BasicTestConfig{
+				Client:   runtimeClient,
+				CR:       obj,
+				ProxyPod: proxyPodGlobal,
+			}
+			basicTests := NewBasicTestSuite(conf)
+			basicTests.Run(context.TODO())
+			suites = append(suites, *basicTests)
+		}
+		if viper.GetBool(OLMTestsOpt) {
+			conf := OLMTestConfig{
+				Client:   runtimeClient,
+				CR:       obj,
+				CSV:      csv,
+				CRDsDir:  viper.GetString(CRDsDirOpt),
+				ProxyPod: proxyPodGlobal,
+			}
+			olmTests := NewOLMTestSuite(conf)
+			olmTests.Run(context.TODO())
+			suites = append(suites, *olmTests)
+		}
+		// set up clean environment for every CR
+		if err := cleanupScorecard(); err != nil {
+			log.Errorf("Failed to cleanup resources: (%v)", err)
+		}
+		// reset cleanup functions
+		cleanupFns = []cleanupFn{}
+		// clear name of operator deployment
+		deploymentName = ""
+	}
+	suites, err = MergeSuites(suites)
+	if err != nil {
+		return nil, fmt.Errorf("failed to merge test suite results: %v", err)
+	}
+	for _, suite := range suites {
+		// convert to ScorecardOutput format
+		// will add log when basic and olm tests are separated into plugins
+		pluginResults = append(pluginResults, TestSuitesToScorecardOutput([]TestSuite{suite}, ""))
+	}
+	// Run plugins
+	pluginDir := viper.GetString(PluginDirOpt)
+	if dir, err := os.Stat(pluginDir); err != nil || !dir.IsDir() {
+		log.Warnf("Plugin directory not found; skipping plugin tests: %v", err)
+		return pluginResults, nil
+	}
+	if err := os.Chdir(pluginDir); err != nil {
+		return nil, fmt.Errorf("failed to chdir into scorecard plugin directory: %v", err)
+	}
+	// executable files must be in "bin" subdirectory
+	files, err := ioutil.ReadDir("bin")
+	if err != nil {
+		return nil, fmt.Errorf("failed to list files in %s/bin: %v", pluginDir, err)
+	}
+	for _, file := range files {
+		cmd := exec.Command("./bin/" + file.Name())
+		stdout := &bytes.Buffer{}
+		cmd.Stdout = stdout
+		stderr := &bytes.Buffer{}
+		cmd.Stderr = stderr
+		err := cmd.Run()
+		if err != nil {
+			name := fmt.Sprintf("Failed Plugin: %s", file.Name())
+			description := fmt.Sprintf("Plugin with file name `%s` failed", file.Name())
+			logs := fmt.Sprintf("%s:\nStdout: %s\nStderr: %s", err, string(stdout.Bytes()), string(stderr.Bytes()))
+			pluginResults = append(pluginResults, failedPlugin(name, description, logs))
+			// output error to main logger as well for human-readable output
+			log.Errorf("Plugin `%s` failed with error (%v)", file.Name(), err)
+			continue
+		}
+		// parse output and add to suites
+		result := scapiv1alpha1.ScorecardOutput{}
+		err = json.Unmarshal(stdout.Bytes(), &result)
+		if err != nil {
+			name := fmt.Sprintf("Plugin output invalid: %s", file.Name())
+			description := fmt.Sprintf("Plugin with file name %s did not produce valid ScorecardOutput JSON", file.Name())
+			logs := fmt.Sprintf("Stdout: %s\nStderr: %s", string(stdout.Bytes()), string(stderr.Bytes()))
+			pluginResults = append(pluginResults, failedPlugin(name, description, logs))
+			log.Errorf("Output from plugin `%s` failed to unmarshal with error (%v)", file.Name(), err)
+			continue
+		}
+		stderrString := string(stderr.Bytes())
+		if len(stderrString) != 0 {
+			log.Warn(stderrString)
+		}
+		pluginResults = append(pluginResults, result)
+	}
+	return pluginResults, nil
+}
+
+func ScorecardTests(cmd *cobra.Command, args []string) error {
+	if err := initConfig(); err != nil {
+		return err
+	}
+	if err := validateScorecardFlags(); err != nil {
+		return err
+	}
+	cmd.SilenceUsage = true
+	pluginOutputs, err := runTests()
+	if err != nil {
+		return err
+	}
+	totalScore := 0.0
+	// Update the state for the tests
+	for _, suite := range pluginOutputs {
+		for idx, res := range suite.Results {
+			suite.Results[idx] = UpdateSuiteStates(res)
+		}
+	}
+	if viper.GetString(OutputFormatOpt) == HumanReadableOutputFormat {
+		numSuites := 0
+		for _, plugin := range pluginOutputs {
+			for _, suite := range plugin.Results {
+				fmt.Printf("%s:\n", suite.Name)
+				for _, result := range suite.Tests {
+					fmt.Printf("\t%s: %d/%d\n", result.Name, result.EarnedPoints, result.MaximumPoints)
+				}
+				totalScore += float64(suite.TotalScore)
+				numSuites++
+			}
+		}
+		totalScore = totalScore / float64(numSuites)
+		fmt.Printf("\nTotal Score: %.0f%%\n", totalScore)
+		// TODO: We can probably use some helper functions to clean up these quadruple nested loops
+		// Print suggestions
+		for _, plugin := range pluginOutputs {
+			for _, suite := range plugin.Results {
+				for _, result := range suite.Tests {
+					for _, suggestion := range result.Suggestions {
+						// 33 is yellow (specifically, the same shade of yellow that logrus uses for warnings)
+						fmt.Printf("\x1b[%dmSUGGESTION:\x1b[0m %s\n", 33, suggestion)
+					}
+				}
+			}
+		}
+		// Print errors
+		for _, plugin := range pluginOutputs {
+			for _, suite := range plugin.Results {
+				for _, result := range suite.Tests {
+					for _, err := range result.Errors {
+						// 31 is red (specifically, the same shade of red that logrus uses for errors)
+						fmt.Printf("\x1b[%dmERROR:\x1b[0m %s\n", 31, err)
+					}
+				}
+			}
+		}
+	}
+	if viper.GetString(OutputFormatOpt) == JSONOutputFormat {
+		log, err := ioutil.ReadAll(logReadWriter)
+		if err != nil {
+			return fmt.Errorf("failed to read log buffer: %v", err)
+		}
+		scTest := CombineScorecardOutput(pluginOutputs, string(log))
+		// Pretty print so users can also read the json output
+		bytes, err := json.MarshalIndent(scTest, "", "  ")
+		if err != nil {
+			return err
+		}
+		fmt.Printf("%s\n", string(bytes))
+	}
+	return nil
+}
+
+func initConfig() error {
+	// viper/cobra already has flags parsed at this point; we can check if a config file flag is set
+	if viper.GetString(ConfigOpt) != "" {
+		// Use config file from the flag.
+		viper.SetConfigFile(viper.GetString(ConfigOpt))
+	} else {
+		viper.AddConfigPath(projutil.MustGetwd())
+		// using SetConfigName allows users to use a .yaml, .json, or .toml file
+		viper.SetConfigName(".osdk-scorecard")
+	}
+
+	if err := viper.ReadInConfig(); err == nil {
+		// configure logger output before logging anything
+		err := configureLogger()
+		if err != nil {
+			return err
+		}
+		log.Info("Using config file: ", viper.ConfigFileUsed())
+	} else {
+		err := configureLogger()
+		if err != nil {
+			return err
+		}
+		log.Warn("Could not load config file; using flags")
+	}
+	return nil
+}
+
+func configureLogger() error {
+	if viper.GetString(OutputFormatOpt) == HumanReadableOutputFormat {
+		logReadWriter = os.Stdout
+	} else if viper.GetString(OutputFormatOpt) == JSONOutputFormat {
+		logReadWriter = &bytes.Buffer{}
+	} else {
+		return fmt.Errorf("invalid output format: %s", viper.GetString(OutputFormatOpt))
+	}
+	log.SetOutput(logReadWriter)
+	return nil
+}
+
+func validateScorecardFlags() error {
+	if !viper.GetBool(OlmDeployedOpt) && viper.GetStringSlice(CRManifestOpt) == nil {
+		return errors.New("cr-manifest config option must be set")
+	}
+	if !viper.GetBool(BasicTestsOpt) && !viper.GetBool(OLMTestsOpt) {
+		return errors.New("at least one test type must be set")
+	}
+	if viper.GetBool(OLMTestsOpt) && viper.GetString(CSVPathOpt) == "" {
+		return fmt.Errorf("csv-path must be set if olm-tests is enabled")
+	}
+	if viper.GetBool(OlmDeployedOpt) && viper.GetString(CSVPathOpt) == "" {
+		return fmt.Errorf("csv-path must be set if olm-deployed is enabled")
+	}
+	pullPolicy := viper.GetString(ProxyPullPolicyOpt)
+	if pullPolicy != "Always" && pullPolicy != "Never" && pullPolicy != "PullIfNotPresent" {
+		return fmt.Errorf("invalid proxy pull policy: (%s); valid values: Always, Never, PullIfNotPresent", pullPolicy)
+	}
+	// this is already being checked in configure logger; may be unnecessary
+	outputFormat := viper.GetString(OutputFormatOpt)
+	if outputFormat != HumanReadableOutputFormat && outputFormat != JSONOutputFormat {
+		return fmt.Errorf("invalid output format (%s); valid values: %s, %s", outputFormat, HumanReadableOutputFormat, JSONOutputFormat)
+	}
+	return nil
+}
+
+func getGVKs(yamlFile []byte) ([]schema.GroupVersionKind, error) {
+	var gvks []schema.GroupVersionKind
+
+	scanner := yamlutil.NewYAMLScanner(yamlFile)
+	for scanner.Scan() {
+		yamlSpec := scanner.Bytes()
+
+		obj := &unstructured.Unstructured{}
+		jsonSpec, err := yaml.YAMLToJSON(yamlSpec)
+		if err != nil {
+			return nil, fmt.Errorf("could not convert yaml file to json: %v", err)
+		}
+		if err := obj.UnmarshalJSON(jsonSpec); err != nil {
+			return nil, fmt.Errorf("failed to unmarshal object spec: (%v)", err)
+		}
+		gvks = append(gvks, obj.GroupVersionKind())
+	}
+	return gvks, nil
+}
+
+func failedPlugin(name, desc, log string) scapiv1alpha1.ScorecardOutput {
+	return scapiv1alpha1.ScorecardOutput{
+		Results: []scapiv1alpha1.ScorecardSuiteResult{{
+			Name:        name,
+			Description: desc,
+			Error:       1,
+			Log:         log,
+		},
+		},
+	}
+}
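
    The plugin loop above executes every file in <plugin-dir>/bin and expects
    ScorecardOutput JSON on stdout. A minimal sketch of such an external plugin;
    the suite name, test name, and scores are made up:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	scapiv1alpha1 "github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	out := scapiv1alpha1.ScorecardOutput{
    		TypeMeta: metav1.TypeMeta{Kind: "ScorecardOutput", APIVersion: "osdk.openshift.io/v1alpha1"},
    		Results: []scapiv1alpha1.ScorecardSuiteResult{{
    			Name:        "Custom Suite",
    			Description: "Example checks implemented outside the SDK",
    			TotalScore:  100,
    			Tests: []scapiv1alpha1.ScorecardTestResult{{
    				Name:          "example check",
    				EarnedPoints:  1,
    				MaximumPoints: 1,
    			}},
    		}},
    	}
    	b, err := json.Marshal(out)
    	if err != nil {
    		panic(err)
    	}
    	// ScorecardTests unmarshals this from stdout and merges it into the report.
    	fmt.Println(string(b))
    }
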
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/test_definitions.go b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/test_definitions.go
new file mode 100644
index 0000000..327823f
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/pkg/scorecard/test_definitions.go
@@ -0,0 +1,152 @@
+// Copyright 2019 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scorecard
+
+import (
+	"context"
+	"fmt"
+
+	scapiv1alpha1 "github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1"
+)
+
+// Type Definitions
+
+// Test provides methods for running scorecard tests
+type Test interface {
+	GetName() string
+	GetDescription() string
+	IsCumulative() bool
+	Run(context.Context) *TestResult
+}
+
+// TestResult contains a test's points, suggestions, and errors
+type TestResult struct {
+	State         scapiv1alpha1.State
+	Test          Test
+	EarnedPoints  int
+	MaximumPoints int
+	Suggestions   []string
+	Errors        []error
+}
+
+// TestInfo contains information about the scorecard test
+type TestInfo struct {
+	Name        string
+	Description string
+	// If a test is set to cumulative, the scores of multiple runs of the same test on separate CRs are added together for the total score.
+	// If cumulative is false, the total score is 0/1 if any run failed and 1/1 otherwise.
+	Cumulative bool
+}
+
+// GetName returns the test name
+func (i TestInfo) GetName() string { return i.Name }
+
+// GetDescription returns the test description
+func (i TestInfo) GetDescription() string { return i.Description }
+
+// IsCumulative returns true if the test's scores are intended to be cumulative
+func (i TestInfo) IsCumulative() bool { return i.Cumulative }
+
+// TestSuite contains a list of tests and results, along with the relative weights of each test. It can also optionally contain a log
+type TestSuite struct {
+	TestInfo
+	Tests       []Test
+	TestResults []TestResult
+	Weights     map[string]float64
+	Log         string
+}
+
+// Helper functions
+
+// AddTest adds a new Test to a TestSuite along with a relative weight for the new Test
+func (ts *TestSuite) AddTest(t Test, weight float64) {
+	ts.Tests = append(ts.Tests, t)
+	ts.Weights[t.GetName()] = weight
+}
+
+// TotalScore calculates and returns the total score of all run Tests in a TestSuite
+func (ts *TestSuite) TotalScore() (score int) {
+	floatScore := 0.0
+	for _, result := range ts.TestResults {
+		if result.MaximumPoints != 0 {
+			floatScore += (float64(result.EarnedPoints) / float64(result.MaximumPoints)) * ts.Weights[result.Test.GetName()]
+		}
+	}
+	// scale to a percentage
+	addedWeights := 0.0
+	for _, weight := range ts.Weights {
+		addedWeights += weight
+	}
+	// protect against divide by zero for failed plugins
+	if addedWeights == 0 {
+		return 0
+	}
+	return int(floatScore * (100 / addedWeights))
+}
+
+// Run runs all Tests in a TestSuite
+func (ts *TestSuite) Run(ctx context.Context) {
+	for _, test := range ts.Tests {
+		ts.TestResults = append(ts.TestResults, *test.Run(ctx))
+	}
+}
+
+// NewTestSuite returns a new TestSuite with a given name and description
+func NewTestSuite(name, description string) *TestSuite {
+	return &TestSuite{
+		TestInfo: TestInfo{
+			Name:        name,
+			Description: description,
+		},
+		Weights: make(map[string]float64),
+	}
+}
+
+// MergeSuites takes an array of TestSuites and combines all suites with the same name
+func MergeSuites(suites []TestSuite) ([]TestSuite, error) {
+	suiteMap := make(map[string][]TestSuite)
+	for _, suite := range suites {
+		suiteMap[suite.GetName()] = append(suiteMap[suite.GetName()], suite)
+	}
+	mergedSuites := []TestSuite{}
+	for _, suiteSlice := range suiteMap {
+		testMap := make(map[string][]TestResult)
+		for _, suite := range suiteSlice {
+			for _, result := range suite.TestResults {
+				testMap[result.Test.GetName()] = append(testMap[result.Test.GetName()], result)
+			}
+		}
+		mergedTestResults := []TestResult{}
+		for _, testSlice := range testMap {
+			if testSlice[0].Test.IsCumulative() {
+				newResult, err := ResultsCumulative(testSlice)
+				if err != nil {
+					return nil, fmt.Errorf("failed to combine test results: %s", err)
+				}
+				mergedTestResults = append(mergedTestResults, newResult)
+			} else {
+				newResult, err := ResultsPassFail(testSlice)
+				if err != nil {
+					return nil, fmt.Errorf("failed to combine test results: %s", err)
+				}
+				mergedTestResults = append(mergedTestResults, newResult)
+			}
+		}
+		newSuite := suiteSlice[0]
+		newSuite.TestResults = mergedTestResults
+		mergedSuites = append(mergedSuites, newSuite)
+	}
+	return mergedSuites, nil
+}
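
    A short worked example of the weighting in TotalScore, reusing the hypothetical
    stubTest from the sketch after helpers.go; all names and numbers are illustrative:

    func exampleTotalScore() int {
    	suite := NewTestSuite("example", "illustrates weighted scoring")
    	validation := stubTest{TestInfo{Name: "validation", Cumulative: true}}
    	resources := stubTest{TestInfo{Name: "resources", Cumulative: true}}
    	suite.AddTest(validation, 1.25)
    	suite.AddTest(resources, 1)
    	suite.TestResults = []TestResult{
    		{Test: validation, EarnedPoints: 1, MaximumPoints: 1},
    		{Test: resources, EarnedPoints: 2, MaximumPoints: 4},
    	}
    	// raw score: (1/1)*1.25 + (2/4)*1.0 = 1.75
    	// scaled:    int(1.75 * 100/2.25)  = 77
    	return suite.TotalScore()
    }
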
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/util/projutil/exec.go b/vendor/github.com/operator-framework/operator-sdk/internal/util/projutil/exec.go
new file mode 100644
index 0000000..1fde21f
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/util/projutil/exec.go
@@ -0,0 +1,161 @@
+// Copyright 2019 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package projutil
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	log "github.com/sirupsen/logrus"
+)
+
+func ExecCmd(cmd *exec.Cmd) error {
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	log.Debugf("Running %#v", cmd.Args)
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("failed to exec %#v: %v", cmd.Args, err)
+	}
+	return nil
+}
+
+// GoCmdOptions is the base option set for "go" subcommands.
+type GoCmdOptions struct {
+	// BinName is the name of the compiled binary, passed to -o.
+	BinName string
+	// Args are args passed to "go {cmd}", aside from "-o {bin_name}" and
+	// test binary args.
+	// These apply to build, clean, get, install, list, run, and test.
+	Args []string
+	// PackagePath is the path to the main (go build) or test (go test) packages.
+	PackagePath string
+	// Env is a list of environment variables to pass to the cmd;
+	// exec.Command.Env is set to this value.
+	Env []string
+	// Dir is the dir to run "go {cmd}" in; exec.Command.Dir is set to this value.
+	Dir string
+	// GoMod determines whether to set the "-mod=vendor" flag.
+	// If true, "-mod=vendor" is appended when Go modules are active (see GoModOn).
+	// If false, no module flag is set; this is the default.
+	// This applies to build, clean, get, install, list, run, and test.
+	GoMod bool
+}
+
+// GoTestOptions is the set of options for "go test".
+type GoTestOptions struct {
+	GoCmdOptions
+	// TestBinaryArgs are args passed to the binary compiled by "go test".
+	TestBinaryArgs []string
+}
+
+const (
+	goBuildCmd = "build"
+	goTestCmd  = "test"
+)
+
+// GoBuild runs "go build" configured with opts.
+func GoBuild(opts GoCmdOptions) error {
+	return goCmd(goBuildCmd, opts)
+}
+
+// GoTest runs "go test" configured with opts.
+func GoTest(opts GoTestOptions) error {
+	bargs, err := getGeneralArgs("test", opts.GoCmdOptions)
+	if err != nil {
+		return err
+	}
+	bargs = append(bargs, opts.TestBinaryArgs...)
+	c := exec.Command("go", bargs...)
+	setCommandFields(c, opts.GoCmdOptions)
+	return ExecCmd(c)
+}
+
+// goCmd runs "go {cmd}" configured with opts.
+func goCmd(cmd string, opts GoCmdOptions) error {
+	bargs, err := getGeneralArgs(cmd, opts)
+	if err != nil {
+		return err
+	}
+	c := exec.Command("go", bargs...)
+	setCommandFields(c, opts)
+	return ExecCmd(c)
+}
+
+func getGeneralArgs(cmd string, opts GoCmdOptions) ([]string, error) {
+	bargs := []string{cmd}
+	if opts.BinName != "" {
+		bargs = append(bargs, "-o", opts.BinName)
+	}
+	bargs = append(bargs, opts.Args...)
+	if opts.GoMod {
+		if goModOn, err := GoModOn(); err != nil {
+			return nil, err
+		} else if goModOn {
+			bargs = append(bargs, "-mod=vendor")
+		}
+	}
+	return append(bargs, opts.PackagePath), nil
+}
+
+func setCommandFields(c *exec.Cmd, opts GoCmdOptions) {
+	if len(opts.Env) != 0 {
+		c.Env = append(os.Environ(), opts.Env...)
+	}
+	if opts.Dir != "" {
+		c.Dir = opts.Dir
+	}
+}
+
+// From https://github.com/golang/go/wiki/Modules:
+//	You can activate module support in one of two ways:
+//	- Invoke the go command in a directory outside of the $GOPATH/src tree,
+//		with a valid go.mod file in the current directory or any parent of it and
+//		the environment variable GO111MODULE unset (or explicitly set to auto).
+//	- Invoke the go command with GO111MODULE=on environment variable set.
+//
+// GoModOn returns true if go modules are on in one of the above two ways.
+func GoModOn() (bool, error) {
+	v, ok := os.LookupEnv(GoModEnv)
+	if v == "off" {
+		return false, nil
+	}
+	if v == "on" {
+		return true, nil
+	}
+	inSrc, err := wdInGoPathSrc()
+	if err != nil {
+		return false, err
+	}
+	return !inSrc && (!ok || v == "" || v == "auto"), nil
+}
+
+func wdInGoPathSrc() (bool, error) {
+	wd, err := os.Getwd()
+	if err != nil {
+		return false, err
+	}
+	goPath, ok := os.LookupEnv(GoPathEnv)
+	if !ok || goPath == "" {
+		hd, err := getHomeDir()
+		if err != nil {
+			return false, err
+		}
+		goPath = filepath.Join(hd, "go")
+	}
+	return strings.HasPrefix(wd, filepath.Join(goPath, "src")), nil
+}
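
    A fragment showing how a caller inside the SDK (for example the build command)
    might use these helpers; the output path, package path, and environment values
    are placeholders, and log is assumed to be logrus:

    opts := projutil.GoCmdOptions{
    	BinName:     "build/_output/bin/app-operator",
    	PackagePath: "github.com/example-inc/app-operator/cmd/manager",
    	Env:         []string{"GOOS=linux", "GOARCH=amd64", "CGO_ENABLED=0"},
    	// "-mod=vendor" is only appended when GoModOn() reports modules are active.
    	GoMod: true,
    }
    if err := projutil.GoBuild(opts); err != nil {
    	log.Fatalf("failed to build operator binary: %v", err)
    }
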
diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/util/projutil/project_util.go b/vendor/github.com/operator-framework/operator-sdk/internal/util/projutil/project_util.go
index be56975..7ba7044 100644
--- a/vendor/github.com/operator-framework/operator-sdk/internal/util/projutil/project_util.go
+++ b/vendor/github.com/operator-framework/operator-sdk/internal/util/projutil/project_util.go
@@ -17,26 +17,29 @@ package projutil
 import (
 	"fmt"
 	"os"
-	"os/exec"
 	"path/filepath"
 	"regexp"
 	"strings"
 
-	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
-	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/ansible"
-	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold/helm"
-
+	homedir "github.com/mitchellh/go-homedir"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )
 
 const (
-	GopathEnv  = "GOPATH"
+	GoPathEnv  = "GOPATH"
 	GoFlagsEnv = "GOFLAGS"
+	GoModEnv   = "GO111MODULE"
 	SrcDir     = "src"
-)
 
-var mainFile = filepath.Join(scaffold.ManagerDir, scaffold.CmdFile)
+	fsep            = string(filepath.Separator)
+	mainFile        = "cmd" + fsep + "manager" + fsep + "main.go"
+	buildDockerfile = "build" + fsep + "Dockerfile"
+	rolesDir        = "roles"
+	helmChartsDir   = "helm-charts"
+	goModFile       = "go.mod"
+	gopkgTOMLFile   = "Gopkg.toml"
+)
 
 // OperatorType - the type of operator
 type OperatorType = string
@@ -52,28 +55,69 @@ const (
 	OperatorTypeUnknown OperatorType = "unknown"
 )
 
-// MustInProjectRoot checks if the current dir is the project root and returns the current repo's import path
-// e.g github.com/example-inc/app-operator
+type ErrUnknownOperatorType struct {
+	Type string
+}
+
+func (e ErrUnknownOperatorType) Error() string {
+	if e.Type == "" {
+		return "unknown operator type"
+	}
+	return fmt.Sprintf(`unknown operator type "%v"`, e.Type)
+}
+
+type DepManagerType string
+
+const (
+	DepManagerGoMod DepManagerType = "modules"
+	DepManagerDep   DepManagerType = "dep"
+)
+
+type ErrInvalidDepManager string
+
+func (e ErrInvalidDepManager) Error() string {
+	return fmt.Sprintf(`"%s" is not a valid dep manager; dep manager must be one of ["%v", "%v"]`, e, DepManagerDep, DepManagerGoMod)
+}
+
+var ErrNoDepManager = fmt.Errorf(`no valid dependency manager file found; dep manager must be one of ["%v", "%v"]`, DepManagerDep, DepManagerGoMod)
+
+func GetDepManagerType() (DepManagerType, error) {
+	if IsDepManagerDep() {
+		return DepManagerDep, nil
+	} else if IsDepManagerGoMod() {
+		return DepManagerGoMod, nil
+	}
+	return "", ErrNoDepManager
+}
+
+func IsDepManagerDep() bool {
+	_, err := os.Stat(gopkgTOMLFile)
+	return err == nil || os.IsExist(err)
+}
+
+func IsDepManagerGoMod() bool {
+	_, err := os.Stat(goModFile)
+	return err == nil || os.IsExist(err)
+}
+
+// MustInProjectRoot checks that the current dir is the project root and exits
+// fatally if it is not.
 func MustInProjectRoot() {
-	// if the current directory has the "./build/dockerfile" file, then it is safe to say
+	// If the current directory has a "build/Dockerfile", then it is safe to say
 	// we are at the project root.
-	_, err := os.Stat(filepath.Join(scaffold.BuildDir, scaffold.DockerfileFile))
-	if err != nil {
+	if _, err := os.Stat(buildDockerfile); err != nil {
 		if os.IsNotExist(err) {
-			log.Fatal("Must run command in project root dir: project structure requires ./build/Dockerfile")
+			log.Fatalf("Must run command in project root dir: project structure requires %s", buildDockerfile)
 		}
 		log.Fatalf("Error while checking if current directory is the project root: (%v)", err)
 	}
 }
 
 func CheckGoProjectCmd(cmd *cobra.Command) error {
-	t := GetOperatorType()
-	switch t {
-	case OperatorTypeGo:
-	default:
-		return fmt.Errorf("'%s' can only be run for Go operators; %s does not exist.", cmd.CommandPath(), mainFile)
+	if IsOperatorGo() {
+		return nil
 	}
-	return nil
+	return fmt.Errorf("'%s' can only be run for Go operators; %s does not exist.", cmd.CommandPath(), mainFile)
 }
 
 func MustGetwd() string {
@@ -84,42 +128,58 @@ func MustGetwd() string {
 	return wd
 }
 
+func getHomeDir() (string, error) {
+	hd, err := homedir.Dir()
+	if err != nil {
+		return "", err
+	}
+	return homedir.Expand(hd)
+}
+
 // CheckAndGetProjectGoPkg checks if this project's repository path is rooted under $GOPATH and returns the current directory's import path
 // e.g: "github.com/example-inc/app-operator"
 func CheckAndGetProjectGoPkg() string {
 	gopath := MustSetGopath(MustGetGopath())
 	goSrc := filepath.Join(gopath, SrcDir)
 	wd := MustGetwd()
-	currPkg := strings.Replace(wd, goSrc+string(filepath.Separator), "", 1)
+	currPkg := strings.Replace(wd, goSrc, "", 1)
 	// strip any "/" prefix from the repo path.
-	return strings.TrimPrefix(currPkg, string(filepath.Separator))
+	return strings.TrimPrefix(currPkg, fsep)
 }
 
-// GetOperatorType returns type of operator is in cwd
-// This function should be called after verifying the user is in project root
-// e.g: "go", "ansible"
+// GetOperatorType returns the type of operator in the cwd.
+// This function should be called after verifying the user is in project root.
 func GetOperatorType() OperatorType {
-	// Assuming that if main.go exists then this is a Go operator
-	if _, err := os.Stat(mainFile); err == nil {
+	switch {
+	case IsOperatorGo():
 		return OperatorTypeGo
-	}
-	if stat, err := os.Stat(ansible.RolesDir); err == nil && stat.IsDir() {
+	case IsOperatorAnsible():
 		return OperatorTypeAnsible
-	}
-	if stat, err := os.Stat(helm.HelmChartsDir); err == nil && stat.IsDir() {
+	case IsOperatorHelm():
 		return OperatorTypeHelm
 	}
 	return OperatorTypeUnknown
 }
 
 func IsOperatorGo() bool {
-	return GetOperatorType() == OperatorTypeGo
+	_, err := os.Stat(mainFile)
+	return err == nil
+}
+
+func IsOperatorAnsible() bool {
+	stat, err := os.Stat(rolesDir)
+	return err == nil && stat.IsDir()
+}
+
+func IsOperatorHelm() bool {
+	stat, err := os.Stat(helmChartsDir)
+	return err == nil && stat.IsDir()
 }
 
 // MustGetGopath gets GOPATH and ensures it is set and non-empty. If GOPATH
 // is not set or empty, MustGetGopath exits.
 func MustGetGopath() string {
-	gopath, ok := os.LookupEnv(GopathEnv)
+	gopath, ok := os.LookupEnv(GoPathEnv)
 	if !ok || len(gopath) == 0 {
 		log.Fatal("GOPATH env not set")
 	}
@@ -144,27 +204,23 @@ func MustSetGopath(currentGopath string) string {
 	if !cwdInGopath {
 		log.Fatalf("Project not in $GOPATH")
 	}
-	if err := os.Setenv(GopathEnv, newGopath); err != nil {
+	if err := os.Setenv(GoPathEnv, newGopath); err != nil {
 		log.Fatal(err)
 	}
 	return newGopath
 }
 
-func ExecCmd(cmd *exec.Cmd) error {
-	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stderr
-	err := cmd.Run()
-	if err != nil {
-		return fmt.Errorf("failed to exec %#v: %v", cmd.Args, err)
-	}
-	return nil
-}
-
 var flagRe = regexp.MustCompile("(.* )?-v(.* )?")
 
-// IsGoVerbose returns true if GOFLAGS contains "-v". This function is useful
-// when deciding whether to make "go" command output verbose.
-func IsGoVerbose() bool {
+// SetGoVerbose sets GOFLAGS="${GOFLAGS} -v" if GOFLAGS does not
+// already contain "-v" to make "go" command output verbose.
+func SetGoVerbose() error {
 	gf, ok := os.LookupEnv(GoFlagsEnv)
-	return ok && len(gf) != 0 && flagRe.MatchString(gf)
+	if !ok || len(gf) == 0 {
+		return os.Setenv(GoFlagsEnv, "-v")
+	}
+	if !flagRe.MatchString(gf) {
+		return os.Setenv(GoFlagsEnv, gf+" -v")
+	}
+	return nil
 }
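
    A small sketch of how the new dependency-manager detection might be consumed by
    a caller; the printed messages are illustrative only:

    switch depType, err := projutil.GetDepManagerType(); {
    case err != nil:
    	// ErrNoDepManager: neither Gopkg.toml nor go.mod exists in the project root.
    	log.Fatal(err)
    case depType == projutil.DepManagerDep:
    	fmt.Println("project uses dep (Gopkg.toml)")
    case depType == projutil.DepManagerGoMod:
    	fmt.Println("project uses Go modules (go.mod)")
    }
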
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/controller/reconcile.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/controller/reconcile.go
index cd43b27..1a26424 100644
--- a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/controller/reconcile.go
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/controller/reconcile.go
@@ -27,6 +27,7 @@ import (
 
 	ansiblestatus "github.com/operator-framework/operator-sdk/pkg/ansible/controller/status"
 	"github.com/operator-framework/operator-sdk/pkg/ansible/events"
+	"github.com/operator-framework/operator-sdk/pkg/ansible/metrics"
 	"github.com/operator-framework/operator-sdk/pkg/ansible/proxy/kubeconfig"
 	"github.com/operator-framework/operator-sdk/pkg/ansible/runner"
 	"github.com/operator-framework/operator-sdk/pkg/ansible/runner/eventapi"
@@ -186,7 +187,21 @@ func (r *AnsibleOperatorReconciler) Reconcile(request reconcile.Request) (reconc
 		return reconcileResult, eventErr
 	}
 
-	// We only want to update the CustomResource once, so we'll track changes and do it at the end
+	// Need to get the unstructured object after ansible
+	// this needs to hit the API
+	err = r.Client.Get(context.TODO(), request.NamespacedName, u)
+	if apierrors.IsNotFound(err) {
+		return reconcile.Result{}, nil
+	}
+	if err != nil {
+		return reconcile.Result{}, err
+	}
+
+	// try to get the updated finalizers
+	pendingFinalizers = u.GetFinalizers()
+
+	// We only want to update the CustomResource once, so we'll track changes
+	// and do it at the end
 	runSuccessful := len(failureMessages) == 0
 	// The finalizer has run successfully, time to remove it
 	if deleted && finalizerExists && runSuccessful {
@@ -252,6 +267,7 @@ func (r *AnsibleOperatorReconciler) markRunning(u *unstructured.Unstructured, na
 // i.e Annotations that could be incorrect
 func (r *AnsibleOperatorReconciler) markError(u *unstructured.Unstructured, namespacedName types.NamespacedName, failureMessage string) error {
 	logger := logf.Log.WithName("markError")
+	metrics.ReconcileFailed(r.GVK.String())
 	// Get the latest resource to prevent updating a stale status
 	err := r.Client.Get(context.TODO(), namespacedName, u)
 	if apierrors.IsNotFound(err) {
@@ -308,6 +324,7 @@ func (r *AnsibleOperatorReconciler) markDone(u *unstructured.Unstructured, names
 	ansibleStatus := ansiblestatus.NewAnsibleResultFromStatusJobEvent(statusEvent)
 
 	if !runSuccessful {
+		metrics.ReconcileFailed(r.GVK.String())
 		sc := ansiblestatus.GetCondition(crStatus, ansiblestatus.RunningConditionType)
 		sc.Status = v1.ConditionFalse
 		ansiblestatus.SetCondition(&crStatus, *sc)
@@ -320,6 +337,7 @@ func (r *AnsibleOperatorReconciler) markDone(u *unstructured.Unstructured, names
 		)
 		ansiblestatus.SetCondition(&crStatus, *c)
 	} else {
+		metrics.ReconcileSucceeded(r.GVK.String())
 		c := ansiblestatus.NewCondition(
 			ansiblestatus.RunningConditionType,
 			v1.ConditionTrue,
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/metrics/metrics.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/metrics/metrics.go
new file mode 100644
index 0000000..cd53127
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/metrics/metrics.go
@@ -0,0 +1,83 @@
+// Copyright 2018 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+	"fmt"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"sigs.k8s.io/controller-runtime/pkg/metrics"
+	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+)
+
+const (
+	subsystem = "ansible_operator"
+)
+
+var (
+	reconcileResults = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Subsystem: subsystem,
+			Name:      "reconcile_result",
+			Help:      "Gauge of reconciles and their results.",
+		},
+		[]string{
+			"GVK",
+			"result",
+		})
+
+	reconciles = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Subsystem: subsystem,
+			Name:      "reconciles",
+			Help:      "How long in seconds a reconcile takes.",
+		},
+		[]string{
+			"GVK",
+		})
+)
+
+func init() {
+	metrics.Registry.MustRegister(reconcileResults)
+	metrics.Registry.MustRegister(reconciles)
+}
+
+// We will never want to panic our app because of metric saving.
+// Therefore, we will recover our panics here and error log them
+// for later diagnosis but will never fail the app.
+func recoverMetricPanic() {
+	if r := recover(); r != nil {
+		logf.Log.WithName("metrics").Error(fmt.Errorf("%v", r),
+			"Recovering from metric function")
+	}
+}
+
+func ReconcileSucceeded(gvk string) {
+	defer recoverMetricPanic()
+	reconcileResults.WithLabelValues(gvk, "succeeded").Inc()
+}
+
+func ReconcileFailed(gvk string) {
+	// TODO: consider taking in a failure reason
+	defer recoverMetricPanic()
+	reconcileResults.WithLabelValues(gvk, "failed").Inc()
+}
+
+func ReconcileTimer(gvk string) *prometheus.Timer {
+	defer recoverMetricPanic()
+	return prometheus.NewTimer(prometheus.ObserverFunc(func(duration float64) {
+		reconciles.WithLabelValues(gvk).Observe(duration)
+	}))
+}
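
A minimal sketch of how these helpers are intended to be used from a reconcile path, mirroring the calls added to reconcile.go and runner.go in this patch; the reconcileCR function and the GVK string are placeholders:

package main

import (
	"errors"
	"fmt"

	"github.com/operator-framework/operator-sdk/pkg/ansible/metrics"
)

// reconcileCR is a placeholder for the real reconcile logic.
func reconcileCR() error { return errors.New("boom") }

func main() {
	gvk := "apps.example.com/v1alpha1, Kind=App" // placeholder GVK string

	// Time the whole reconcile, as runner.Run does.
	timer := metrics.ReconcileTimer(gvk)
	defer timer.ObserveDuration()

	// Record the result, as markDone/markError do.
	if err := reconcileCR(); err != nil {
		metrics.ReconcileFailed(gvk)
		fmt.Println("reconcile failed:", err)
		return
	}
	metrics.ReconcileSucceeded(gvk)
}
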
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/cache_response.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/cache_response.go
new file mode 100644
index 0000000..d60874b
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/cache_response.go
@@ -0,0 +1,234 @@
+// Copyright 2019 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proxy
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/operator-framework/operator-sdk/pkg/ansible/proxy/controllermap"
+	"github.com/operator-framework/operator-sdk/pkg/ansible/proxy/requestfactory"
+	k8sRequest "github.com/operator-framework/operator-sdk/pkg/ansible/proxy/requestfactory"
+	osdkHandler "github.com/operator-framework/operator-sdk/pkg/handler"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"sigs.k8s.io/controller-runtime/pkg/cache"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type marshaler interface {
+	MarshalJSON() ([]byte, error)
+}
+
+type cacheResponseHandler struct {
+	next              http.Handler
+	informerCache     cache.Cache
+	restMapper        meta.RESTMapper
+	watchedNamespaces map[string]interface{}
+	cMap              *controllermap.ControllerMap
+	injectOwnerRef    bool
+}
+
+func (c *cacheResponseHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	switch req.Method {
+	case http.MethodGet:
+		// GET request means we need to check the cache
+		rf := k8sRequest.RequestInfoFactory{APIPrefixes: sets.NewString("api", "apis"), GrouplessAPIPrefixes: sets.NewString("api")}
+		r, err := rf.NewRequestInfo(req)
+		if err != nil {
+			log.Error(err, "Failed to convert request")
+			break
+		}
+
+		if c.skipCacheLookup(r) {
+			break
+		}
+
+		gvr := schema.GroupVersionResource{
+			Group:    r.APIGroup,
+			Version:  r.APIVersion,
+			Resource: r.Resource,
+		}
+		if c.restMapper == nil {
+			c.restMapper = meta.NewDefaultRESTMapper([]schema.GroupVersion{schema.GroupVersion{
+				Group:   r.APIGroup,
+				Version: r.APIVersion,
+			}})
+		}
+
+		k, err := c.restMapper.KindFor(gvr)
+		if err != nil {
+			// break here in case resource doesn't exist in cache
+			log.Info("Cache miss, can not find in rest mapper", "GVR", gvr)
+			break
+		}
+
+		var m marshaler
+
+		log.V(2).Info("Get resource in our cache", "r", r)
+		if r.Verb == "list" {
+			m, err = c.getListFromCache(r, req, k)
+			if err != nil {
+				break
+			}
+		} else {
+			m, err = c.getObjectFromCache(r, req, k)
+			if err != nil {
+				break
+			}
+		}
+
+		i := bytes.Buffer{}
+		resp, err := m.MarshalJSON()
+		if err != nil {
+			// return will give a 500
+			log.Error(err, "Failed to marshal data")
+			http.Error(w, "", http.StatusInternalServerError)
+			return
+		}
+
+		// Set Content-Type header
+		w.Header().Set("Content-Type", "application/json")
+		// Set X-Cache header to signal that response is served from Cache
+		w.Header().Set("X-Cache", "HIT")
+		if err := json.Indent(&i, resp, "", "  "); err != nil {
+			log.Error(err, "Failed to indent json")
+		}
+		_, err = w.Write(i.Bytes())
+		if err != nil {
+			log.Error(err, "Failed to write response")
+			http.Error(w, "", http.StatusInternalServerError)
+			return
+		}
+
+		// Return so that request isn't passed along to APIserver
+		return
+	}
+	c.next.ServeHTTP(w, req)
+}
+
+// skipCacheLookup - determine if we should skip the cache lookup
+func (c *cacheResponseHandler) skipCacheLookup(r *requestfactory.RequestInfo) bool {
+	// check if resource is present on request
+	if !r.IsResourceRequest {
+		return true
+	}
+
+	// check if resource doesn't exist in watched namespaces
+	// if watchedNamespaces[""] exists then we are watching all namespaces
+	// and want to continue
+	_, allNsPresent := c.watchedNamespaces[metav1.NamespaceAll]
+	_, reqNsPresent := c.watchedNamespaces[r.Namespace]
+	if !allNsPresent && !reqNsPresent {
+		return true
+	}
+
+	if strings.HasPrefix(r.Path, "/version") {
+		// Temporarily pass along to API server
+		// Ideally we cache this response as well
+		return true
+	}
+
+	return false
+}
+
+func (c *cacheResponseHandler) recoverDependentWatches(req *http.Request, un *unstructured.Unstructured) {
+	ownerRef, err := getRequestOwnerRef(req)
+	if err != nil {
+		log.Error(err, "Could not get ownerRef from proxy")
+		return
+	}
+
+	for _, oRef := range un.GetOwnerReferences() {
+		if oRef.APIVersion == ownerRef.APIVersion && oRef.Kind == ownerRef.Kind {
+			err := addWatchToController(ownerRef, c.cMap, un, c.restMapper, true)
+			if err != nil {
+				log.Error(err, "Could not recover dependent resource watch", "owner", ownerRef)
+				return
+			}
+		}
+	}
+	if typeString, ok := un.GetAnnotations()[osdkHandler.TypeAnnotation]; ok {
+		ownerGV, err := schema.ParseGroupVersion(ownerRef.APIVersion)
+		if err != nil {
+			log.Error(err, "Could not get ownerRef from proxy")
+			return
+		}
+		if typeString == fmt.Sprintf("%v.%v", ownerRef.Kind, ownerGV.Group) {
+			err := addWatchToController(ownerRef, c.cMap, un, c.restMapper, false)
+			if err != nil {
+				log.Error(err, "Could not recover dependent resource watch", "owner", ownerRef)
+				return
+			}
+		}
+	}
+}
+
+func (c *cacheResponseHandler) getListFromCache(r *requestfactory.RequestInfo, req *http.Request, k schema.GroupVersionKind) (marshaler, error) {
+	listOptions := &metav1.ListOptions{}
+	if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, listOptions); err != nil {
+		log.Error(err, "Unable to decode list options from request")
+		return nil, err
+	}
+	lo := client.InNamespace(r.Namespace)
+	if err := lo.SetLabelSelector(listOptions.LabelSelector); err != nil {
+		log.Error(err, "Unable to set label selectors for the client")
+		return nil, err
+	}
+	if listOptions.FieldSelector != "" {
+		if err := lo.SetFieldSelector(listOptions.FieldSelector); err != nil {
+			log.Error(err, "Unable to set field selectors for the client")
+			return nil, err
+		}
+	}
+	k.Kind = k.Kind + "List"
+	un := unstructured.UnstructuredList{}
+	un.SetGroupVersionKind(k)
+	err := c.informerCache.List(context.Background(), lo, &un)
+	if err != nil {
+		// break here in case resource doesn't exist in cache but exists on APIserver
+		// This is very unlikely but provides user with expected 404
+		log.Info(fmt.Sprintf("cache miss: %v err-%v", k, err))
+		return nil, err
+	}
+	return &un, nil
+}
+
+func (c *cacheResponseHandler) getObjectFromCache(r *requestfactory.RequestInfo, req *http.Request, k schema.GroupVersionKind) (marshaler, error) {
+	un := &unstructured.Unstructured{}
+	un.SetGroupVersionKind(k)
+	obj := client.ObjectKey{Namespace: r.Namespace, Name: r.Name}
+	err := c.informerCache.Get(context.Background(), obj, un)
+	if err != nil {
+		// break here in case resource doesn't exist in cache but exists on APIserver
+		// This is very unlikely but provides user with expected 404
+		log.Info(fmt.Sprintf("Cache miss: %v, %v", k, obj))
+		return nil, err
+	}
+	// Once we get the resource, we attempt to recover the dependent watches here.
+	// This happens in the background and logs errors.
+	if c.injectOwnerRef {
+		go c.recoverDependentWatches(req, un)
+	}
+	return un, nil
+}
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/inject_owner.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/inject_owner.go
new file mode 100644
index 0000000..9aee490
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/inject_owner.go
@@ -0,0 +1,178 @@
+// Copyright 2019 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proxy
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/http/httputil"
+	"strings"
+
+	"github.com/operator-framework/operator-sdk/pkg/ansible/proxy/controllermap"
+	"github.com/operator-framework/operator-sdk/pkg/ansible/proxy/kubeconfig"
+	k8sRequest "github.com/operator-framework/operator-sdk/pkg/ansible/proxy/requestfactory"
+	osdkHandler "github.com/operator-framework/operator-sdk/pkg/handler"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// injectOwnerReferenceHandler will handle proxied requests and inject the
+// owner reference found in the authorization header. The Authorization is
+// then deleted so that the proxy can re-set with the correct authorization.
+type injectOwnerReferenceHandler struct {
+	next              http.Handler
+	cMap              *controllermap.ControllerMap
+	restMapper        meta.RESTMapper
+	watchedNamespaces map[string]interface{}
+}
+
+func (i *injectOwnerReferenceHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	switch req.Method {
+	case http.MethodPost:
+		dump, _ := httputil.DumpRequest(req, false)
+		log.V(2).Info("Dumping request", "RequestDump", string(dump))
+		rf := k8sRequest.RequestInfoFactory{APIPrefixes: sets.NewString("api", "apis"), GrouplessAPIPrefixes: sets.NewString("api")}
+		r, err := rf.NewRequestInfo(req)
+		if err != nil {
+			m := "Could not convert request"
+			log.Error(err, m)
+			http.Error(w, m, http.StatusBadRequest)
+			return
+		}
+		if r.Subresource != "" {
+			// Don't inject owner ref if we are POSTing to a subresource
+			break
+		}
+		log.Info("Injecting owner reference")
+		owner, err := getRequestOwnerRef(req)
+		if err != nil {
+			m := "Could not get owner reference"
+			log.Error(err, m)
+			http.Error(w, m, http.StatusInternalServerError)
+			return
+		}
+
+		body, err := ioutil.ReadAll(req.Body)
+		if err != nil {
+			m := "Could not read request body"
+			log.Error(err, m)
+			http.Error(w, m, http.StatusInternalServerError)
+			return
+		}
+		data := &unstructured.Unstructured{}
+		err = json.Unmarshal(body, data)
+		if err != nil {
+			m := "Could not deserialize request body"
+			log.Error(err, m)
+			http.Error(w, m, http.StatusBadRequest)
+			return
+		}
+
+		addOwnerRef, err := shouldAddOwnerRef(data, owner, i.restMapper)
+		if err != nil {
+			m := "Could not determine if we should add owner ref"
+			log.Error(err, m)
+			http.Error(w, m, http.StatusBadRequest)
+			return
+		}
+		if addOwnerRef {
+			data.SetOwnerReferences(append(data.GetOwnerReferences(), owner.OwnerReference))
+		} else {
+			ownerGV, err := schema.ParseGroupVersion(owner.APIVersion)
+			if err != nil {
+				m := fmt.Sprintf("could not get group version for: %v", owner)
+				log.Error(err, m)
+				http.Error(w, m, http.StatusBadRequest)
+				return
+			}
+			a := data.GetAnnotations()
+			if a == nil {
+				a = map[string]string{}
+			}
+			a[osdkHandler.NamespacedNameAnnotation] = strings.Join([]string{owner.Namespace, owner.Name}, "/")
+			a[osdkHandler.TypeAnnotation] = fmt.Sprintf("%v.%v", owner.Kind, ownerGV.Group)
+
+			data.SetAnnotations(a)
+		}
+		newBody, err := json.Marshal(data.Object)
+		if err != nil {
+			m := "Could not serialize body"
+			log.Error(err, m)
+			http.Error(w, m, http.StatusInternalServerError)
+			return
+		}
+		log.V(2).Info("Serialized body", "Body", string(newBody))
+		req.Body = ioutil.NopCloser(bytes.NewBuffer(newBody))
+		req.ContentLength = int64(len(newBody))
+
+		// add watch for resource
+		// check if resource doesn't exist in watched namespaces
+		// if watchedNamespaces[""] exists then we are watching all namespaces
+		// and want to continue
+		// This is making sure we are not attempting to watch a resource outside of the
+		// namespaces that the cache can watch.
+		_, allNsPresent := i.watchedNamespaces[metav1.NamespaceAll]
+		_, reqNsPresent := i.watchedNamespaces[r.Namespace]
+		if allNsPresent || reqNsPresent {
+			err = addWatchToController(owner, i.cMap, data, i.restMapper, addOwnerRef)
+			if err != nil {
+				m := "could not add watch to controller"
+				log.Error(err, m)
+				http.Error(w, m, http.StatusInternalServerError)
+				return
+			}
+		}
+	}
+	i.next.ServeHTTP(w, req)
+}
+
+func shouldAddOwnerRef(data *unstructured.Unstructured, owner kubeconfig.NamespacedOwnerReference, restMapper meta.RESTMapper) (bool, error) {
+	dataMapping, err := restMapper.RESTMapping(data.GroupVersionKind().GroupKind(), data.GroupVersionKind().Version)
+	if err != nil {
+		m := fmt.Sprintf("Could not get rest mapping for: %v", data.GroupVersionKind())
+		log.Error(err, m)
+		return false, err
+
+	}
+	// We need to determine whether or not the owner is a cluster-scoped
+	// resource because enqueue based on an owner reference does not work if
+	// a namespaced resource owns a cluster-scoped resource
+	ownerGV, err := schema.ParseGroupVersion(owner.APIVersion)
+	if err != nil {
+		m := fmt.Sprintf("could not get group version for: %v", owner)
+		log.Error(err, m)
+		return false, err
+	}
+	ownerMapping, err := restMapper.RESTMapping(schema.GroupKind{Kind: owner.Kind, Group: ownerGV.Group}, ownerGV.Version)
+	if err != nil {
+		m := fmt.Sprintf("could not get rest mapping for: %v", owner)
+		log.Error(err, m)
+		return false, err
+	}
+
+	dataNamespaceScoped := dataMapping.Scope.Name() != meta.RESTScopeNameRoot
+	ownerNamespaceScoped := ownerMapping.Scope.Name() != meta.RESTScopeNameRoot
+
+	if dataNamespaceScoped && ownerNamespaceScoped && data.GetNamespace() == owner.Namespace {
+		return true, nil
+	}
+	return false, nil
+}
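
A small sketch of the annotation fallback used above when an owner reference cannot be set (for example, a cluster-scoped or cross-namespace owner): the dependent object is tagged with the osdkHandler annotations in the same "namespace/name" and "Kind.group" formats the proxy writes. The owner identity below is made up for illustration.

package main

import (
	"fmt"
	"strings"

	osdkHandler "github.com/operator-framework/operator-sdk/pkg/handler"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// Hypothetical owner CR identity.
	ownerNamespace, ownerName := "default", "example-app"
	ownerKind, ownerGroup := "App", "apps.example.com"

	dep := &unstructured.Unstructured{Object: map[string]interface{}{}}
	a := dep.GetAnnotations()
	if a == nil {
		a = map[string]string{}
	}
	// Same formats used in the handler above.
	a[osdkHandler.NamespacedNameAnnotation] = strings.Join([]string{ownerNamespace, ownerName}, "/")
	a[osdkHandler.TypeAnnotation] = fmt.Sprintf("%v.%v", ownerKind, ownerGroup)
	dep.SetAnnotations(a)

	fmt.Println(dep.GetAnnotations())
}
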
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/kubectl.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/kubectl.go
index 7e42e2b..562c49d 100644
--- a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/kubectl.go
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/kubectl.go
@@ -28,13 +28,13 @@ import (
 	"os"
 	"regexp"
 	"strings"
-	"syscall"
 	"time"
 
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 	k8sproxy "k8s.io/apimachinery/pkg/util/proxy"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/transport"
+	"k8s.io/kubernetes/pkg/kubectl/util"
 	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
 )
 
@@ -240,9 +240,9 @@ func (s *server) ListenUnix(path string) (net.Listener, error) {
 		}
 	}
 	// Default to only user accessible socket, caller can open up later if desired
-	oldmask := syscall.Umask(0077)
+	oldmask, _ := util.Umask(0077)
 	l, err := net.Listen("unix", path)
-	syscall.Umask(oldmask)
+	util.Umask(oldmask)
 	return l, err
 }
 
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/proxy.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/proxy.go
index 97c1a9b..b8675aa 100644
--- a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/proxy.go
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/proxy.go
@@ -19,349 +19,25 @@ package proxy
 
 import (
 	"bytes"
-	"context"
 	"encoding/base64"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"io/ioutil"
 	"net/http"
-	"net/http/httputil"
-	"strings"
 
 	"github.com/operator-framework/operator-sdk/pkg/ansible/proxy/controllermap"
 	"github.com/operator-framework/operator-sdk/pkg/ansible/proxy/kubeconfig"
-	k8sRequest "github.com/operator-framework/operator-sdk/pkg/ansible/proxy/requestfactory"
 	osdkHandler "github.com/operator-framework/operator-sdk/pkg/handler"
 	"k8s.io/apimachinery/pkg/api/meta"
-	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/rest"
 	"sigs.k8s.io/controller-runtime/pkg/cache"
-	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 )
 
-type marshaler interface {
-	MarshalJSON() ([]byte, error)
-}
-
-// CacheResponseHandler will handle proxied requests and check if the requested
-// resource exists in our cache. If it does then there is no need to bombard
-// the APIserver with our request and we should write the response from the
-// proxy.
-func CacheResponseHandler(h http.Handler, informerCache cache.Cache, restMapper meta.RESTMapper, watchedNamespaces map[string]interface{}, cMap *controllermap.ControllerMap, injectOwnerRef bool) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-		switch req.Method {
-		case http.MethodGet:
-			// GET request means we need to check the cache
-			rf := k8sRequest.RequestInfoFactory{APIPrefixes: sets.NewString("api", "apis"), GrouplessAPIPrefixes: sets.NewString("api")}
-			r, err := rf.NewRequestInfo(req)
-			if err != nil {
-				log.Error(err, "Failed to convert request")
-				break
-			}
-
-			// check if resource is present on request
-			if !r.IsResourceRequest {
-				break
-			}
-
-			// check if resource doesn't exist in watched namespaces
-			// if watchedNamespaces[""] exists then we are watching all namespaces
-			// and want to continue
-			_, allNsPresent := watchedNamespaces[metav1.NamespaceAll]
-			_, reqNsPresent := watchedNamespaces[r.Namespace]
-			if !allNsPresent && !reqNsPresent {
-				break
-			}
-
-			if strings.HasPrefix(r.Path, "/version") {
-				// Temporarily pass along to API server
-				// Ideally we cache this response as well
-				break
-			}
-
-			gvr := schema.GroupVersionResource{
-				Group:    r.APIGroup,
-				Version:  r.APIVersion,
-				Resource: r.Resource,
-			}
-			if restMapper == nil {
-				restMapper = meta.NewDefaultRESTMapper([]schema.GroupVersion{schema.GroupVersion{
-					Group:   r.APIGroup,
-					Version: r.APIVersion,
-				}})
-			}
-
-			k, err := restMapper.KindFor(gvr)
-			if err != nil {
-				// break here in case resource doesn't exist in cache
-				log.Info("Cache miss, can not find in rest mapper", "GVR", gvr)
-				break
-			}
-
-			var m marshaler
-
-			log.V(2).Info("Get resource in our cache", "r", r)
-			if r.Verb == "list" {
-				listOptions := &metav1.ListOptions{}
-				if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, listOptions); err != nil {
-					log.Error(err, "Unable to decode list options from request")
-					break
-				}
-				lo := client.InNamespace(r.Namespace)
-				if err := lo.SetLabelSelector(listOptions.LabelSelector); err != nil {
-					log.Error(err, "Unable to set label selectors for the client")
-					break
-				}
-				if listOptions.FieldSelector != "" {
-					if err := lo.SetFieldSelector(listOptions.FieldSelector); err != nil {
-						log.Error(err, "Unable to set field selectors for the client")
-						break
-					}
-				}
-				k.Kind = k.Kind + "List"
-				un := unstructured.UnstructuredList{}
-				un.SetGroupVersionKind(k)
-				err = informerCache.List(context.Background(), lo, &un)
-				if err != nil {
-					// break here in case resource doesn't exist in cache but exists on APIserver
-					// This is very unlikely but provides user with expected 404
-					log.Info(fmt.Sprintf("cache miss: %v err-%v", k, err))
-					break
-				}
-				m = &un
-			} else {
-				un := &unstructured.Unstructured{}
-				un.SetGroupVersionKind(k)
-				obj := client.ObjectKey{Namespace: r.Namespace, Name: r.Name}
-				err = informerCache.Get(context.Background(), obj, un)
-				if err != nil {
-					// break here in case resource doesn't exist in cache but exists on APIserver
-					// This is very unlikely but provides user with expected 404
-					log.Info(fmt.Sprintf("Cache miss: %v, %v", k, obj))
-					break
-				}
-				m = un
-				// Once we get the resource, we are going to attempt to recover the dependent watches here,
-				// This will happen in the background, and log errors.
-				if injectOwnerRef {
-					go recoverDependentWatches(req, un, cMap, restMapper)
-				}
-			}
-
-			i := bytes.Buffer{}
-			resp, err := m.MarshalJSON()
-			if err != nil {
-				// return will give a 500
-				log.Error(err, "Failed to marshal data")
-				http.Error(w, "", http.StatusInternalServerError)
-				return
-			}
-
-			// Set Content-Type header
-			w.Header().Set("Content-Type", "application/json")
-			// Set X-Cache header to signal that response is served from Cache
-			w.Header().Set("X-Cache", "HIT")
-			if err := json.Indent(&i, resp, "", "  "); err != nil {
-				log.Error(err, "Failed to indent json")
-			}
-			_, err = w.Write(i.Bytes())
-			if err != nil {
-				log.Error(err, "Failed to write response")
-				http.Error(w, "", http.StatusInternalServerError)
-				return
-			}
-
-			// Return so that request isn't passed along to APIserver
-			return
-		}
-		h.ServeHTTP(w, req)
-	})
-}
-
-func recoverDependentWatches(req *http.Request, un *unstructured.Unstructured, cMap *controllermap.ControllerMap, restMapper meta.RESTMapper) {
-	ownerRef, err := getRequestOwnerRef(req)
-	if err != nil {
-		log.Error(err, "Could not get ownerRef from proxy")
-		return
-	}
-
-	for _, oRef := range un.GetOwnerReferences() {
-		if oRef.APIVersion == ownerRef.APIVersion && oRef.Kind == ownerRef.Kind {
-			err := addWatchToController(ownerRef, cMap, un, restMapper, true)
-			if err != nil {
-				log.Error(err, "Could not recover dependent resource watch", "owner", ownerRef)
-				return
-			}
-		}
-	}
-	if typeString, ok := un.GetAnnotations()[osdkHandler.TypeAnnotation]; ok {
-		ownerGV, err := schema.ParseGroupVersion(ownerRef.APIVersion)
-		if err != nil {
-			log.Error(err, "Could not get ownerRef from proxy")
-			return
-		}
-		if typeString == fmt.Sprintf("%v.%v", ownerRef.Kind, ownerGV.Group) {
-			err := addWatchToController(ownerRef, cMap, un, restMapper, false)
-			if err != nil {
-				log.Error(err, "Could not recover dependent resource watch", "owner", ownerRef)
-				return
-			}
-		}
-	}
-}
-
-// InjectOwnerReferenceHandler will handle proxied requests and inject the
-// owner reference found in the authorization header. The Authorization is
-// then deleted so that the proxy can re-set with the correct authorization.
-func InjectOwnerReferenceHandler(h http.Handler, cMap *controllermap.ControllerMap, restMapper meta.RESTMapper, watchedNamespaces map[string]interface{}) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-		switch req.Method {
-		case http.MethodPost:
-			dump, _ := httputil.DumpRequest(req, false)
-			log.V(1).Info("Dumping request", "RequestDump", string(dump))
-			rf := k8sRequest.RequestInfoFactory{APIPrefixes: sets.NewString("api", "apis"), GrouplessAPIPrefixes: sets.NewString("api")}
-			r, err := rf.NewRequestInfo(req)
-			if err != nil {
-				m := "Could not convert request"
-				log.Error(err, m)
-				http.Error(w, m, http.StatusBadRequest)
-				return
-			}
-			if r.Subresource != "" {
-				// Don't inject owner ref if we are POSTing to a subresource
-				break
-			}
-			log.Info("Injecting owner reference")
-			owner, err := getRequestOwnerRef(req)
-			if err != nil {
-				m := "Could not get owner reference"
-				log.Error(err, m)
-				http.Error(w, m, http.StatusInternalServerError)
-				return
-			}
-
-			body, err := ioutil.ReadAll(req.Body)
-			if err != nil {
-				m := "Could not read request body"
-				log.Error(err, m)
-				http.Error(w, m, http.StatusInternalServerError)
-				return
-			}
-			data := &unstructured.Unstructured{}
-			err = json.Unmarshal(body, data)
-			if err != nil {
-				m := "Could not deserialize request body"
-				log.Error(err, m)
-				http.Error(w, m, http.StatusBadRequest)
-				return
-			}
-
-			addOwnerRef, err := shouldAddOwnerRef(data, owner, restMapper)
-			if err != nil {
-				m := "Could not determine if we should add owner ref"
-				log.Error(err, m)
-				http.Error(w, m, http.StatusBadRequest)
-				return
-			}
-			if addOwnerRef {
-				data.SetOwnerReferences(append(data.GetOwnerReferences(), owner.OwnerReference))
-			} else {
-				ownerGV, err := schema.ParseGroupVersion(owner.APIVersion)
-				if err != nil {
-					m := fmt.Sprintf("could not get broup version for: %v", owner)
-					log.Error(err, m)
-					http.Error(w, m, http.StatusBadRequest)
-					return
-				}
-				a := data.GetAnnotations()
-				if a == nil {
-					a = map[string]string{}
-				}
-				a[osdkHandler.NamespacedNameAnnotation] = strings.Join([]string{owner.Namespace, owner.Name}, "/")
-				a[osdkHandler.TypeAnnotation] = fmt.Sprintf("%v.%v", owner.Kind, ownerGV.Group)
-
-				data.SetAnnotations(a)
-			}
-			newBody, err := json.Marshal(data.Object)
-			if err != nil {
-				m := "Could not serialize body"
-				log.Error(err, m)
-				http.Error(w, m, http.StatusInternalServerError)
-				return
-			}
-			log.V(1).Info("Serialized body", "Body", string(newBody))
-			req.Body = ioutil.NopCloser(bytes.NewBuffer(newBody))
-			req.ContentLength = int64(len(newBody))
-
-			// add watch for resource
-			// check if resource doesn't exist in watched namespaces
-			// if watchedNamespaces[""] exists then we are watching all namespaces
-			// and want to continue
-			// This is making sure we are not attempting to watch a resource outside of the
-			// namespaces that the cache can watch.
-			_, allNsPresent := watchedNamespaces[metav1.NamespaceAll]
-			_, reqNsPresent := watchedNamespaces[r.Namespace]
-			if allNsPresent || reqNsPresent {
-				err = addWatchToController(owner, cMap, data, restMapper, addOwnerRef)
-				if err != nil {
-					m := "could not add watch to controller"
-					log.Error(err, m)
-					http.Error(w, m, http.StatusInternalServerError)
-					return
-				}
-			}
-		}
-		h.ServeHTTP(w, req)
-	})
-}
-
-func removeAuthorizationHeader(h http.Handler) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-		req.Header.Del("Authorization")
-		h.ServeHTTP(w, req)
-	})
-}
-
-func shouldAddOwnerRef(data *unstructured.Unstructured, owner kubeconfig.NamespacedOwnerReference, restMapper meta.RESTMapper) (bool, error) {
-	dataMapping, err := restMapper.RESTMapping(data.GroupVersionKind().GroupKind(), data.GroupVersionKind().Version)
-	if err != nil {
-		m := fmt.Sprintf("Could not get rest mapping for: %v", data.GroupVersionKind())
-		log.Error(err, m)
-		return false, err
-
-	}
-	// We need to determine whether or not the owner is a cluster-scoped
-	// resource because enqueue based on an owner reference does not work if
-	// a namespaced resource owns a cluster-scoped resource
-	ownerGV, err := schema.ParseGroupVersion(owner.APIVersion)
-	if err != nil {
-		m := fmt.Sprintf("could not get group version for: %v", owner)
-		log.Error(err, m)
-		return false, err
-	}
-	ownerMapping, err := restMapper.RESTMapping(schema.GroupKind{Kind: owner.Kind, Group: ownerGV.Group}, ownerGV.Version)
-	if err != nil {
-		m := fmt.Sprintf("could not get rest mapping for: %v", owner)
-		log.Error(err, m)
-		return false, err
-	}
-
-	dataNamespaceScoped := dataMapping.Scope.Name() != meta.RESTScopeNameRoot
-	ownerNamespaceScoped := ownerMapping.Scope.Name() != meta.RESTScopeNameRoot
-
-	if dataNamespaceScoped && ownerNamespaceScoped && data.GetNamespace() == owner.Namespace {
-		return true, nil
-	}
-	return false, nil
-}
-
 // RequestLogHandler - log the requests that come through the proxy.
 func RequestLogHandler(h http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
@@ -447,10 +123,16 @@ func Run(done chan error, o Options) error {
 		o.Cache = informerCache
 	}
 
+	// Remove the authorization header so the proxy can correctly inject the header.
 	server.Handler = removeAuthorizationHeader(server.Handler)
 
 	if o.OwnerInjection {
-		server.Handler = InjectOwnerReferenceHandler(server.Handler, o.ControllerMap, o.RESTMapper, watchedNamespaceMap)
+		server.Handler = &injectOwnerReferenceHandler{
+			next:              server.Handler,
+			cMap:              o.ControllerMap,
+			restMapper:        o.RESTMapper,
+			watchedNamespaces: watchedNamespaceMap,
+		}
 	} else {
 		log.Info("Warning: injection of owner references and dependent watches is turned off")
 	}
@@ -458,7 +140,14 @@ func Run(done chan error, o Options) error {
 		server.Handler = RequestLogHandler(server.Handler)
 	}
 	if !o.DisableCache {
-		server.Handler = CacheResponseHandler(server.Handler, o.Cache, o.RESTMapper, watchedNamespaceMap, o.ControllerMap, o.OwnerInjection)
+		server.Handler = &cacheResponseHandler{
+			next:              server.Handler,
+			informerCache:     o.Cache,
+			restMapper:        o.RESTMapper,
+			watchedNamespaces: watchedNamespaceMap,
+			cMap:              o.ControllerMap,
+			injectOwnerRef:    o.OwnerInjection,
+		}
 	}
 
 	l, err := server.Listen(o.Address, o.Port)
@@ -472,6 +161,7 @@ func Run(done chan error, o Options) error {
 	return nil
 }
 
+// Helper function used by cache response and owner injection
 func addWatchToController(owner kubeconfig.NamespacedOwnerReference, cMap *controllermap.ControllerMap, resource *unstructured.Unstructured, restMapper meta.RESTMapper, useOwnerRef bool) error {
 	dataMapping, err := restMapper.RESTMapping(resource.GroupVersionKind().GroupKind(), resource.GroupVersionKind().Version)
 	if err != nil {
@@ -539,6 +229,14 @@ func addWatchToController(owner kubeconfig.NamespacedOwnerReference, cMap *contr
 	return nil
 }
 
+func removeAuthorizationHeader(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		req.Header.Del("Authorization")
+		h.ServeHTTP(w, req)
+	})
+}
+
+// Helper function used when recovering dependent watches and injecting owner references.
 func getRequestOwnerRef(req *http.Request) (kubeconfig.NamespacedOwnerReference, error) {
 	owner := kubeconfig.NamespacedOwnerReference{}
 	user, _, ok := req.BasicAuth()
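
For context, a minimal sketch of the middleware-chaining pattern Run now uses, where each handler stores the next one and decides whether to short-circuit. The handlers here are trivial placeholders, not the SDK's actual caching or logging logic.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// loggingHandler mimics RequestLogHandler: observe the request, then pass it along.
type loggingHandler struct{ next http.Handler }

func (l *loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	fmt.Println("proxying", req.Method, req.URL.Path)
	l.next.ServeHTTP(w, req)
}

// cachingHandler mimics cacheResponseHandler: answer GETs itself, defer everything else.
type cachingHandler struct{ next http.Handler }

func (c *cachingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	if req.Method == http.MethodGet {
		w.Header().Set("X-Cache", "HIT")
		fmt.Fprintln(w, `{"cached": true}`)
		return
	}
	c.next.ServeHTTP(w, req)
}

func main() {
	var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintln(w, "reached the backend")
	})
	// Wrap innermost to outermost, mirroring how Run layers its handlers.
	handler = &cachingHandler{next: handler}
	handler = &loggingHandler{next: handler}

	srv := httptest.NewServer(handler)
	defer srv.Close()

	resp, err := http.Get(srv.URL + "/api/v1/pods")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("X-Cache:", resp.Header.Get("X-Cache"))
}
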
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/run.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/run.go
index c07fe92..c60b076 100644
--- a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/run.go
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/run.go
@@ -26,6 +26,7 @@ import (
 	"github.com/operator-framework/operator-sdk/pkg/ansible/proxy/controllermap"
 	"github.com/operator-framework/operator-sdk/pkg/k8sutil"
 	"github.com/operator-framework/operator-sdk/pkg/leader"
+	"github.com/operator-framework/operator-sdk/pkg/metrics"
 	sdkVersion "github.com/operator-framework/operator-sdk/version"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
@@ -62,8 +63,10 @@ func Run(flags *aoflags.AnsibleOperatorFlags) error {
 		log.Error(err, "Failed to get config.")
 		return err
 	}
+	// TODO: probably should expose the host & port as environment variables
 	mgr, err := manager.New(cfg, manager.Options{
-		Namespace: namespace,
+		Namespace:          namespace,
+		MetricsBindAddress: "0.0.0.0:8383",
 	})
 	if err != nil {
 		log.Error(err, "Failed to create a new manager.")
@@ -82,6 +85,13 @@ func Run(flags *aoflags.AnsibleOperatorFlags) error {
 		return err
 	}
 
+	// TODO: probably should expose the port as an environment variable
+	_, err = metrics.ExposeMetricsPort(context.TODO(), 8383)
+	if err != nil {
+		log.Error(err, "Exposing metrics port failed.")
+		return err
+	}
+
 	done := make(chan error)
 	cMap := controllermap.NewControllerMap()
 
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/runner/runner.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/runner/runner.go
index 5c37866..a6613e6 100644
--- a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/runner/runner.go
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/runner/runner.go
@@ -26,6 +26,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/operator-framework/operator-sdk/pkg/ansible/metrics"
 	"github.com/operator-framework/operator-sdk/pkg/ansible/paramconv"
 	"github.com/operator-framework/operator-sdk/pkg/ansible/runner/eventapi"
 	"github.com/operator-framework/operator-sdk/pkg/ansible/runner/internal/inputdir"
@@ -222,6 +223,10 @@ type runner struct {
 }
 
 func (r *runner) Run(ident string, u *unstructured.Unstructured, kubeconfig string) (RunResult, error) {
+
+	timer := metrics.ReconcileTimer(r.GVK.String())
+	defer timer.ObserveDuration()
+
 	if u.GetDeletionTimestamp() != nil && !r.isFinalizerRun(u) {
 		return nil, errors.New("resource has been deleted, but no finalizer was matched, skipping reconciliation")
 	}
diff --git a/vendor/github.com/operator-framework/operator-sdk/version/version.go b/vendor/github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1/doc.go
similarity index 81%
copy from vendor/github.com/operator-framework/operator-sdk/version/version.go
copy to vendor/github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1/doc.go
index 51db095..d650844 100644
--- a/vendor/github.com/operator-framework/operator-sdk/version/version.go
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1/doc.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Operator-SDK Authors
+// Copyright 2019 The Operator-SDK Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,8 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package version
+// +k8s:deepcopy-gen=package,register
+// +groupName=osdk.openshift.io
 
-var (
-	Version = "v0.7.0"
-)
+package v1alpha1
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/version/cmd.go b/vendor/github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1/register.go
similarity index 51%
copy from vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/version/cmd.go
copy to vendor/github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1/register.go
index dbc369f..7dd316b 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/version/cmd.go
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1/register.go
@@ -12,23 +12,20 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package version
+// Package v1alpha1 contains API Schema definitions for the scorecard v1alpha1 API group
+// +k8s:deepcopy-gen=package,register
+// +groupName=osdk.openshift.io
+package v1alpha1
 
 import (
-	"fmt"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/runtime/scheme"
+)
 
-	ver "github.com/operator-framework/operator-sdk/version"
+var (
+	// SchemeGroupVersion is group version used to register these objects
+	SchemeGroupVersion = schema.GroupVersion{Group: "osdk.openshift.io", Version: "v1alpha1"}
 
-	"github.com/spf13/cobra"
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
 )
-
-func NewCmd() *cobra.Command {
-	versionCmd := &cobra.Command{
-		Use:   "version",
-		Short: "Prints the version of operator-sdk",
-		Run: func(cmd *cobra.Command, args []string) {
-			fmt.Println("operator-sdk version:", ver.Version)
-		},
-	}
-	return versionCmd
-}
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1/types.go b/vendor/github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1/types.go
new file mode 100644
index 0000000..704727c
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1/types.go
@@ -0,0 +1,108 @@
+// Copyright 2019 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// State is a type used to indicate the result state of a Test.
+type State string
+
+const (
+	// UnsetState is the default state for a TestResult. It must be updated by UpdateState or by the Test.
+	UnsetState State = "unset"
+	// PassState occurs when a Test's EarnedPoints == MaximumPoints.
+	PassState State = "pass"
+	// PartialPassState occurs when a Test's EarnedPoints < MaximumPoints and EarnedPoints > 0.
+	PartialPassState State = "partial_pass"
+	// FailState occurs when a Test's EarnedPoints == 0.
+	FailState State = "fail"
+	// ErrorState occurs when a Test encounters a fatal error and the reported points should not be considered.
+	ErrorState State = "error"
+)
+
+// ScorecardSuiteResult contains the combined results of a suite of tests.
+// +k8s:openapi-gen=true
+type ScorecardSuiteResult struct {
+	// Name is the name of the test suite
+	Name string `json:"name"`
+	// Description is a description of the test suite
+	Description string `json:"description"`
+	// Error is the number of tests that ended in the Error state
+	Error int `json:"error"`
+	// Pass is the number of tests that ended in the Pass state
+	Pass int `json:"pass"`
+	// PartialPass is the number of tests that ended in the PartialPass state
+	PartialPass int `json:"partialPass"`
+	// Fail is the number of tests that ended in the Fail state
+	Fail int `json:"fail"`
+	// TotalTests is the total number of tests run in this suite
+	TotalTests int `json:"totalTests"`
+	// TotalScore is the total score of this suite as a percentage
+	TotalScore int `json:"totalScorePercent"`
+	// Tests is an array containing a json-ified version of the TestResults for the suite
+	Tests []ScorecardTestResult `json:"tests"`
+	// Log is extra logging information from the scorecard suite/plugin.
+	// +optional
+	Log string `json:"log"`
+}
+
+// ScorecardTestResult contains the results of an individual scorecard test.
+// +k8s:openapi-gen=true
+type ScorecardTestResult struct {
+	// State is the final state of the test
+	State State `json:"state"`
+	// Name is the name of the test
+	Name string `json:"name"`
+	// Description describes what the test does
+	Description string `json:"description"`
+	// EarnedPoints is how many points the test received after running
+	EarnedPoints int `json:"earnedPoints"`
+	// MaximumPoints is the maximum number of points possible for the test
+	MaximumPoints int `json:"maximumPoints"`
+	// Suggestions is a list of suggestions for the user to improve their score (if applicable)
+	Suggestions []string `json:"suggestions"`
+	// Errors is a list of the errors that occurred during the test (this can include both fatal and non-fatal errors)
+	Errors []string `json:"errors"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ScorecardOutput is the schema for the scorecard API
+// +k8s:openapi-gen=true
+type ScorecardOutput struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Log contains the scorecard's log.
+	Log string `json:"log"`
+	// Results is an array of ScorecardSuiteResult for each suite of the current scorecard run.
+	Results []ScorecardSuiteResult `json:"results"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ScorecardOutputList contains a list of ScorecardTest
+type ScorecardOutputList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []ScorecardOutput `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&ScorecardOutput{}, &ScorecardOutputList{})
+}
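
A brief sketch of constructing and serializing a ScorecardOutput from these types; all names and scores below are made up for illustration.

package main

import (
	"encoding/json"
	"fmt"

	scapiv1alpha1 "github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1"
)

func main() {
	test := scapiv1alpha1.ScorecardTestResult{
		State:         scapiv1alpha1.PassState,
		Name:          "Spec Block Exists",
		Description:   "Custom Resource has a Spec block",
		EarnedPoints:  1,
		MaximumPoints: 1,
	}
	suite := scapiv1alpha1.ScorecardSuiteResult{
		Name:        "Basic Tests",
		Description: "Basic checks against the operator",
		Pass:        1,
		TotalTests:  1,
		TotalScore:  100, // percentage
		Tests:       []scapiv1alpha1.ScorecardTestResult{test},
	}
	out := scapiv1alpha1.ScorecardOutput{
		Results: []scapiv1alpha1.ScorecardSuiteResult{suite},
	}
	b, _ := json.MarshalIndent(out, "", "  ")
	fmt.Println(string(b))
}
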
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..ea3ad64
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,138 @@
+// +build !ignore_autogenerated
+
+// Copyright 2019 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScorecardOutput) DeepCopyInto(out *ScorecardOutput) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Results != nil {
+		in, out := &in.Results, &out.Results
+		*out = make([]ScorecardSuiteResult, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScorecardOutput.
+func (in *ScorecardOutput) DeepCopy() *ScorecardOutput {
+	if in == nil {
+		return nil
+	}
+	out := new(ScorecardOutput)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ScorecardOutput) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScorecardOutputList) DeepCopyInto(out *ScorecardOutputList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ScorecardOutput, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScorecardOutputList.
+func (in *ScorecardOutputList) DeepCopy() *ScorecardOutputList {
+	if in == nil {
+		return nil
+	}
+	out := new(ScorecardOutputList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ScorecardOutputList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScorecardSuiteResult) DeepCopyInto(out *ScorecardSuiteResult) {
+	*out = *in
+	if in.Tests != nil {
+		in, out := &in.Tests, &out.Tests
+		*out = make([]ScorecardTestResult, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScorecardSuiteResult.
+func (in *ScorecardSuiteResult) DeepCopy() *ScorecardSuiteResult {
+	if in == nil {
+		return nil
+	}
+	out := new(ScorecardSuiteResult)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScorecardTestResult) DeepCopyInto(out *ScorecardTestResult) {
+	*out = *in
+	if in.Suggestions != nil {
+		in, out := &in.Suggestions, &out.Suggestions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Errors != nil {
+		in, out := &in.Errors, &out.Errors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScorecardTestResult.
+func (in *ScorecardTestResult) DeepCopy() *ScorecardTestResult {
+	if in == nil {
+		return nil
+	}
+	out := new(ScorecardTestResult)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/helm/controller/reconcile.go b/vendor/github.com/operator-framework/operator-sdk/pkg/helm/controller/reconcile.go
index a135928..517adb9 100644
--- a/vendor/github.com/operator-framework/operator-sdk/pkg/helm/controller/reconcile.go
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/helm/controller/reconcile.go
@@ -31,6 +31,7 @@ import (
 	"github.com/operator-framework/operator-sdk/pkg/helm/release"
 )
 
+// blank assignment to verify that HelmOperatorReconciler implements reconcile.Reconciler
 var _ reconcile.Reconciler = &HelmOperatorReconciler{}
 
 // ReleaseHookFunc defines a function signature for release hooks.
@@ -146,6 +147,7 @@ func (r HelmOperatorReconciler) Reconcile(request reconcile.Request) (reconcile.
 				Status: types.StatusFalse,
 				Reason: types.ReasonUninstallSuccessful,
 			})
+			status.DeployedRelease = nil
 		}
 		if err := r.updateResourceStatus(o, status); err != nil {
 			return reconcile.Result{}, err
@@ -173,7 +175,6 @@ func (r HelmOperatorReconciler) Reconcile(request reconcile.Request) (reconcile.
 				Status:  types.StatusTrue,
 				Reason:  types.ReasonInstallError,
 				Message: err.Error(),
-				Release: installedRelease,
 			})
 			_ = r.updateResourceStatus(o, status)
 			return reconcile.Result{}, err
@@ -197,8 +198,11 @@ func (r HelmOperatorReconciler) Reconcile(request reconcile.Request) (reconcile.
 			Status:  types.StatusTrue,
 			Reason:  types.ReasonInstallSuccessful,
 			Message: installedRelease.GetInfo().GetStatus().GetNotes(),
-			Release: installedRelease,
 		})
+		status.DeployedRelease = &types.HelmAppRelease{
+			Name:     installedRelease.Name,
+			Manifest: installedRelease.Manifest,
+		}
 		err = r.updateResourceStatus(o, status)
 		return reconcile.Result{RequeueAfter: r.ReconcilePeriod}, err
 	}
@@ -212,7 +216,6 @@ func (r HelmOperatorReconciler) Reconcile(request reconcile.Request) (reconcile.
 				Status:  types.StatusTrue,
 				Reason:  types.ReasonUpdateError,
 				Message: err.Error(),
-				Release: updatedRelease,
 			})
 			_ = r.updateResourceStatus(o, status)
 			return reconcile.Result{}, err
@@ -236,12 +239,23 @@ func (r HelmOperatorReconciler) Reconcile(request reconcile.Request) (reconcile.
 			Status:  types.StatusTrue,
 			Reason:  types.ReasonUpdateSuccessful,
 			Message: updatedRelease.GetInfo().GetStatus().GetNotes(),
-			Release: updatedRelease,
 		})
+		status.DeployedRelease = &types.HelmAppRelease{
+			Name:     updatedRelease.Name,
+			Manifest: updatedRelease.Manifest,
+		}
 		err = r.updateResourceStatus(o, status)
 		return reconcile.Result{RequeueAfter: r.ReconcilePeriod}, err
 	}
 
+	// If a change is made to the CR spec that causes a release failure, a
+	// ConditionReleaseFailed is added to the status conditions. If that change
+	// is then reverted to its previous state, the operator will stop
+	// attempting the release and will resume reconciling. In this case, we
+	// need to remove the ConditionReleaseFailed because the failing release is
+	// no longer being attempted.
+	status.RemoveCondition(types.ConditionReleaseFailed)
+
 	expectedRelease, err := manager.ReconcileRelease(context.TODO())
 	if err != nil {
 		log.Error(err, "Failed to reconcile release")
@@ -264,6 +278,10 @@ func (r HelmOperatorReconciler) Reconcile(request reconcile.Request) (reconcile.
 	}
 
 	log.Info("Reconciled release")
+	status.DeployedRelease = &types.HelmAppRelease{
+		Name:     expectedRelease.Name,
+		Manifest: expectedRelease.Manifest,
+	}
 	err = r.updateResourceStatus(o, status)
 	return reconcile.Result{RequeueAfter: r.ReconcilePeriod}, err
 }
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/helm/internal/types/types.go b/vendor/github.com/operator-framework/operator-sdk/pkg/helm/internal/types/types.go
index 0b52e21..cb94bc4 100644
--- a/vendor/github.com/operator-framework/operator-sdk/pkg/helm/internal/types/types.go
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/helm/internal/types/types.go
@@ -20,7 +20,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/helm/pkg/proto/hapi/release"
 )
 
 type HelmAppList struct {
@@ -47,11 +46,15 @@ type HelmAppCondition struct {
 	Status  ConditionStatus        `json:"status"`
 	Reason  HelmAppConditionReason `json:"reason,omitempty"`
 	Message string                 `json:"message,omitempty"`
-	Release *release.Release       `json:"release,omitempty"`
 
 	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
 }
 
+type HelmAppRelease struct {
+	Name     string `json:"name,omitempty"`
+	Manifest string `json:"manifest,omitempty"`
+}
+
 const (
 	ConditionInitialized    HelmAppConditionType = "Initialized"
 	ConditionDeployed       HelmAppConditionType = "Deployed"
@@ -72,7 +75,8 @@ const (
 )
 
 type HelmAppStatus struct {
-	Conditions []HelmAppCondition `json:"conditions"`
+	Conditions      []HelmAppCondition `json:"conditions"`
+	DeployedRelease *HelmAppRelease    `json:"deployedRelease,omitempty"`
 }
 
 func (s *HelmAppStatus) ToMap() (map[string]interface{}, error) {
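
A short sketch of the resulting status shape after this change: the full Helm release proto is no longer embedded in a condition, and only the deployed release's name and manifest are kept under status.deployedRelease. Because pkg/helm/internal/types is an internal package, the sketch mirrors the relevant structs locally; all values are hypothetical.

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the internal Helm status types above (illustration only;
// pkg/helm/internal/types is not importable from outside the SDK).
type helmAppRelease struct {
	Name     string `json:"name,omitempty"`
	Manifest string `json:"manifest,omitempty"`
}

type helmAppCondition struct {
	Type    string `json:"type"`
	Status  string `json:"status"`
	Reason  string `json:"reason,omitempty"`
	Message string `json:"message,omitempty"`
}

type helmAppStatus struct {
	Conditions      []helmAppCondition `json:"conditions"`
	DeployedRelease *helmAppRelease    `json:"deployedRelease,omitempty"`
}

func main() {
	// Hypothetical status after a successful install.
	status := helmAppStatus{
		Conditions: []helmAppCondition{
			{Type: "Deployed", Status: "True", Reason: "InstallSuccessful"},
		},
		DeployedRelease: &helmAppRelease{
			Name:     "example-release",
			Manifest: "apiVersion: v1\nkind: ConfigMap\n# ...truncated",
		},
	}
	b, _ := json.MarshalIndent(status, "", "  ")
	fmt.Println(string(b))
}
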
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/helm/release/manager.go b/vendor/github.com/operator-framework/operator-sdk/pkg/helm/release/manager.go
index bf383dd..acaa7af 100644
--- a/vendor/github.com/operator-framework/operator-sdk/pkg/helm/release/manager.go
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/helm/release/manager.go
@@ -94,14 +94,6 @@ func (m manager) IsUpdateRequired() bool {
 // Sync ensures the Helm storage backend is in sync with the status of the
 // custom resource.
 func (m *manager) Sync(ctx context.Context) error {
-	// TODO: We're now persisting releases as secrets. To support seamless upgrades, we
-	// need to sync the release status from the CR to the persistent storage backend.
-	// Once we release the storage backend migration, this function (and comment)
-	// can be removed.
-	if err := m.syncReleaseStatus(*m.status); err != nil {
-		return fmt.Errorf("failed to sync release status to storage backend: %s", err)
-	}
-
 	// Get release history for this release name
 	releases, err := m.storageBackend.History(m.releaseName)
 	if err != nil && !notFoundErr(err) {
@@ -151,31 +143,6 @@ func (m *manager) Sync(ctx context.Context) error {
 	return nil
 }
 
-func (m manager) syncReleaseStatus(status types.HelmAppStatus) error {
-	var release *rpb.Release
-	for _, condition := range status.Conditions {
-		if condition.Type == types.ConditionDeployed && condition.Status == types.StatusTrue {
-			release = condition.Release
-			break
-		}
-	}
-	if release == nil {
-		return nil
-	}
-
-	name := release.GetName()
-	version := release.GetVersion()
-	_, err := m.storageBackend.Get(name, version)
-	if err == nil {
-		return nil
-	}
-
-	if !notFoundErr(err) {
-		return err
-	}
-	return m.storageBackend.Create(release)
-}
-
 func notFoundErr(err error) bool {
 	return strings.Contains(err.Error(), "not found")
 }
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/k8sutil/k8sutil.go b/vendor/github.com/operator-framework/operator-sdk/pkg/k8sutil/k8sutil.go
index 2272b42..c163580 100644
--- a/vendor/github.com/operator-framework/operator-sdk/pkg/k8sutil/k8sutil.go
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/k8sutil/k8sutil.go
@@ -22,7 +22,6 @@ import (
 	"strings"
 
 	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	discovery "k8s.io/client-go/discovery"
 	crclient "sigs.k8s.io/controller-runtime/pkg/client"
 	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
@@ -99,12 +98,7 @@ func GetPod(ctx context.Context, client crclient.Client, ns string) (*corev1.Pod
 
 	log.V(1).Info("Found podname", "Pod.Name", podName)
 
-	pod := &corev1.Pod{
-		TypeMeta: metav1.TypeMeta{
-			APIVersion: "v1",
-			Kind:       "Pod",
-		},
-	}
+	pod := &corev1.Pod{}
 	key := crclient.ObjectKey{Namespace: ns, Name: podName}
 	err := client.Get(ctx, key, pod)
 	if err != nil {
@@ -112,6 +106,11 @@ func GetPod(ctx context.Context, client crclient.Client, ns string) (*corev1.Pod
 		return nil, err
 	}
 
+	// .Get() clears the APIVersion and Kind,
+	// so we need to set them before returning the object.
+	pod.TypeMeta.APIVersion = "v1"
+	pod.TypeMeta.Kind = "Pod"
+
 	log.V(1).Info("Found Pod", "Pod.Namespace", ns, "Pod.Name", pod.Name)
 
 	return pod, nil
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/log/zap/flags.go b/vendor/github.com/operator-framework/operator-sdk/pkg/log/zap/flags.go
index cba8183..d94911d 100644
--- a/vendor/github.com/operator-framework/operator-sdk/pkg/log/zap/flags.go
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/log/zap/flags.go
@@ -15,6 +15,7 @@
 package zap
 
 import (
+	"flag"
 	"fmt"
 	"strconv"
 	"strings"
@@ -22,6 +23,7 @@ import (
 	"github.com/spf13/pflag"
 	"go.uber.org/zap"
 	"go.uber.org/zap/zapcore"
+	"k8s.io/klog"
 )
 
 var (
@@ -41,6 +43,7 @@ func init() {
 	zapFlagSet.Var(&sampleVal, "zap-sample", "Enable zap log sampling. Sampling will be disabled for integer log levels > 1")
 }
 
+// FlagSet - The zap logging flagset.
 func FlagSet() *pflag.FlagSet {
 	return zapFlagSet
 }
@@ -112,6 +115,15 @@ func (v *levelValue) Set(l string) error {
 		}
 	}
 	v.level = zapcore.Level(int8(lvl))
+	// If log level is greater than debug, set glog/klog level to that level.
+	if lvl < -3 {
+		fs := flag.NewFlagSet("", flag.ContinueOnError)
+		klog.InitFlags(fs)
+		err := fs.Set("v", fmt.Sprintf("%v", -1*lvl))
+		if err != nil {
+			return err
+		}
+	}
 	return nil
 }
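
Note: the new block above forwards zap debug levels to klog by registering klog's flags on a throwaway FlagSet and setting -v programmatically. A standalone sketch of that mechanism (the verbosity value chosen here is illustrative):

    package main

    import (
        "flag"
        "fmt"

        "k8s.io/klog"
    )

    // setKlogVerbosity registers klog's flags on a private FlagSet and raises
    // the -v level, the same technique used in the hunk above.
    func setKlogVerbosity(v int) error {
        fs := flag.NewFlagSet("", flag.ContinueOnError)
        klog.InitFlags(fs)
        return fs.Set("v", fmt.Sprintf("%d", v))
    }

    func main() {
        if err := setKlogVerbosity(4); err != nil {
            fmt.Println("failed to set klog verbosity:", err)
            return
        }
        klog.V(4).Info("debug-level klog output is now enabled")
        klog.Flush()
    }
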
 
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/restmapper/dynamicrestmapper.go b/vendor/github.com/operator-framework/operator-sdk/pkg/restmapper/dynamicrestmapper.go
new file mode 100644
index 0000000..59562c8
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/restmapper/dynamicrestmapper.go
@@ -0,0 +1,124 @@
+// Copyright 2019 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package restmapper
+
+import (
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/restmapper"
+)
+
+type DynamicRESTMapper struct {
+	client   discovery.DiscoveryInterface
+	delegate meta.RESTMapper
+}
+
+// NewDynamicRESTMapper returns a RESTMapper that dynamically discovers resource
+// types at runtime. This is in contrast to controller-manager's default RESTMapper, which
+// only checks resource types at startup, and so can't handle the case of first creating a
+// CRD and then creating an instance of that CRD.
+func NewDynamicRESTMapper(cfg *rest.Config) (meta.RESTMapper, error) {
+	client, err := discovery.NewDiscoveryClientForConfig(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	drm := &DynamicRESTMapper{client: client}
+	if err := drm.reload(); err != nil {
+		return nil, err
+	}
+	return drm, nil
+}
+
+func (drm *DynamicRESTMapper) reload() error {
+	gr, err := restmapper.GetAPIGroupResources(drm.client)
+	if err != nil {
+		return err
+	}
+	drm.delegate = restmapper.NewDiscoveryRESTMapper(gr)
+	return nil
+}
+
+// reloadOnError checks if an error indicates that the delegated RESTMapper needs to be
+// reloaded, and if so, reloads it and returns true.
+func (drm *DynamicRESTMapper) reloadOnError(err error) bool {
+	if _, matches := err.(*meta.NoKindMatchError); !matches {
+		return false
+	}
+	err = drm.reload()
+	if err != nil {
+		utilruntime.HandleError(err)
+	}
+	return err == nil
+}
+
+func (drm *DynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	gvk, err := drm.delegate.KindFor(resource)
+	if drm.reloadOnError(err) {
+		gvk, err = drm.delegate.KindFor(resource)
+	}
+	return gvk, err
+}
+
+func (drm *DynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
+	gvks, err := drm.delegate.KindsFor(resource)
+	if drm.reloadOnError(err) {
+		gvks, err = drm.delegate.KindsFor(resource)
+	}
+	return gvks, err
+}
+
+func (drm *DynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+	gvr, err := drm.delegate.ResourceFor(input)
+	if drm.reloadOnError(err) {
+		gvr, err = drm.delegate.ResourceFor(input)
+	}
+	return gvr, err
+}
+
+func (drm *DynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+	gvrs, err := drm.delegate.ResourcesFor(input)
+	if drm.reloadOnError(err) {
+		gvrs, err = drm.delegate.ResourcesFor(input)
+	}
+	return gvrs, err
+}
+
+func (drm *DynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
+	m, err := drm.delegate.RESTMapping(gk, versions...)
+	if drm.reloadOnError(err) {
+		m, err = drm.delegate.RESTMapping(gk, versions...)
+	}
+	return m, err
+}
+
+func (drm *DynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) {
+	ms, err := drm.delegate.RESTMappings(gk, versions...)
+	if drm.reloadOnError(err) {
+		ms, err = drm.delegate.RESTMappings(gk, versions...)
+	}
+	return ms, err
+}
+
+func (drm *DynamicRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
+	s, err := drm.delegate.ResourceSingularizer(resource)
+	if drm.reloadOnError(err) {
+		s, err = drm.delegate.ResourceSingularizer(resource)
+	}
+	return s, err
+}
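
Note: the new DynamicRESTMapper is meant to be plugged into controller-runtime so CRDs created after the operator starts can still be resolved. A minimal sketch of how it might be wired in; using manager.Options.MapperProvider matches the scaffolded main.go of this SDK version but should be read as an assumption here:

    package main

    import (
        "log"

        "github.com/operator-framework/operator-sdk/pkg/restmapper"
        "sigs.k8s.io/controller-runtime/pkg/client/config"
        "sigs.k8s.io/controller-runtime/pkg/manager"
    )

    func main() {
        cfg, err := config.GetConfig()
        if err != nil {
            log.Fatal(err)
        }
        // Resolve resource mappings dynamically so CRDs installed at runtime
        // do not require an operator restart.
        mgr, err := manager.New(cfg, manager.Options{
            MapperProvider: restmapper.NewDynamicRESTMapper,
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Println("manager configured with dynamic REST mapper:", mgr != nil)
    }
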
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/test/framework.go b/vendor/github.com/operator-framework/operator-sdk/pkg/test/framework.go
index fb5c979..ba72fae 100755
--- a/vendor/github.com/operator-framework/operator-sdk/pkg/test/framework.go
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/test/framework.go
@@ -17,7 +17,6 @@ package test
 import (
 	goctx "context"
 	"fmt"
-	"net"
 	"os"
 	"sync"
 	"time"
@@ -69,34 +68,10 @@ func setup(kubeconfigPath, namespacedManPath *string, localOperator bool) error
 	}
 	var err error
 	var kubeconfig *rest.Config
-	if *kubeconfigPath == "incluster" {
-		// Work around https://github.com/kubernetes/kubernetes/issues/40973
-		if len(os.Getenv("KUBERNETES_SERVICE_HOST")) == 0 {
-			addrs, err := net.LookupHost("kubernetes.default.svc")
-			if err != nil {
-				return fmt.Errorf("failed to get service host: %v", err)
-			}
-			if err := os.Setenv("KUBERNETES_SERVICE_HOST", addrs[0]); err != nil {
-				return fmt.Errorf("failed to set kubernetes host env var: (%v)", err)
-			}
-		}
-		if len(os.Getenv("KUBERNETES_SERVICE_PORT")) == 0 {
-			if err := os.Setenv("KUBERNETES_SERVICE_PORT", "443"); err != nil {
-				return fmt.Errorf("failed to set kubernetes port env var: (%v)", err)
-			}
-		}
-		kubeconfig, err = rest.InClusterConfig()
-		*singleNamespace = true
-		namespace = os.Getenv(TestNamespaceEnv)
-		if len(namespace) == 0 {
-			return fmt.Errorf("test namespace env not set")
-		}
-	} else {
-		var kcNamespace string
-		kubeconfig, kcNamespace, err = k8sInternal.GetKubeconfigAndNamespace(*kubeconfigPath)
-		if *singleNamespace && namespace == "" {
-			namespace = kcNamespace
-		}
+	var kcNamespace string
+	kubeconfig, kcNamespace, err = k8sInternal.GetKubeconfigAndNamespace(*kubeconfigPath)
+	if *singleNamespace && namespace == "" {
+		namespace = kcNamespace
 	}
 	if err != nil {
 		return fmt.Errorf("failed to build the kubeconfig: %v", err)
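
Note: with the "incluster" branch removed, the test framework always resolves the kubeconfig and its namespace from the local environment. The internal GetKubeconfigAndNamespace helper is not part of this patch; the sketch below is a hypothetical stand-in using client-go's standard loading rules:

    package main

    import (
        "fmt"
        "log"

        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Load kubeconfig from the default locations (KUBECONFIG, ~/.kube/config).
        rules := clientcmd.NewDefaultClientConfigLoadingRules()
        cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &clientcmd.ConfigOverrides{})

        cfg, err := cc.ClientConfig()
        if err != nil {
            log.Fatalf("failed to build the kubeconfig: %v", err)
        }
        ns, _, err := cc.Namespace()
        if err != nil {
            log.Fatalf("failed to determine the current namespace: %v", err)
        }
        fmt.Println("API server:", cfg.Host, "namespace:", ns)
    }
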
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/test/main_entry.go b/vendor/github.com/operator-framework/operator-sdk/pkg/test/main_entry.go
index 940f45f..dfe49a4 100644
--- a/vendor/github.com/operator-framework/operator-sdk/pkg/test/main_entry.go
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/test/main_entry.go
@@ -26,12 +26,12 @@ import (
 	"syscall"
 	"testing"
 
-	"k8s.io/client-go/tools/clientcmd"
-
 	"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
 	"github.com/operator-framework/operator-sdk/internal/util/projutil"
 	"github.com/operator-framework/operator-sdk/pkg/k8sutil"
+
 	log "github.com/sirupsen/logrus"
+	"k8s.io/client-go/tools/clientcmd"
 )
 
 const (
@@ -64,13 +64,14 @@ func MainEntry(m *testing.M) {
 	var localCmd *exec.Cmd
 	var localCmdOutBuf, localCmdErrBuf bytes.Buffer
 	if *localOperator {
-		absProjectPath := projutil.MustGetwd()
-		projectName := filepath.Base(absProjectPath)
+		projectName := filepath.Base(projutil.MustGetwd())
 		outputBinName := filepath.Join(scaffold.BuildBinDir, projectName+"-local")
-		args := []string{"build", "-o", outputBinName}
-		args = append(args, filepath.Join(scaffold.ManagerDir, scaffold.CmdFile))
-		bc := exec.Command("go", args...)
-		if err := projutil.ExecCmd(bc); err != nil {
+		opts := projutil.GoCmdOptions{
+			BinName:     outputBinName,
+			PackagePath: filepath.Join(scaffold.ManagerDir, scaffold.CmdFile),
+			GoMod:       projutil.IsDepManagerGoMod(),
+		}
+		if err := projutil.GoBuild(opts); err != nil {
 			log.Fatalf("Failed to build local operator binary: %s", err)
 		}
 		localCmd = exec.Command(outputBinName)
@@ -121,14 +122,12 @@ func MainEntry(m *testing.M) {
 		os.Exit(exitCode)
 	}()
 	// create crd
-	if *kubeconfigPath != "incluster" {
-		globalYAML, err := ioutil.ReadFile(*globalManPath)
-		if err != nil {
-			log.Fatalf("Failed to read global resource manifest: %v", err)
-		}
-		err = ctx.createFromYAML(globalYAML, true, &CleanupOptions{TestContext: ctx})
-		if err != nil {
-			log.Fatalf("Failed to create resource(s) in global resource manifest: %v", err)
-		}
+	globalYAML, err := ioutil.ReadFile(*globalManPath)
+	if err != nil {
+		log.Fatalf("Failed to read global resource manifest: %v", err)
+	}
+	err = ctx.createFromYAML(globalYAML, true, &CleanupOptions{TestContext: ctx})
+	if err != nil {
+		log.Fatalf("Failed to create resource(s) in global resource manifest: %v", err)
 	}
 }
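
Note: the rewritten block above replaces a hand-rolled `go build` invocation with projutil.GoBuild and its GoCmdOptions, which also selects between dep and Go modules. The sketch below shows roughly what those options expand to when run manually; the directory layout (build/_output/bin, cmd/manager/main.go) matches the SDK scaffold but is stated here as an assumption:

    package main

    import (
        "log"
        "os"
        "os/exec"
        "path/filepath"
    )

    func main() {
        wd, err := os.Getwd()
        if err != nil {
            log.Fatal(err)
        }
        // Roughly: go build -o build/_output/bin/<project>-local cmd/manager/main.go
        outputBinName := filepath.Join("build", "_output", "bin", filepath.Base(wd)+"-local")
        cmd := exec.Command("go", "build", "-o", outputBinName, filepath.Join("cmd", "manager", "main.go"))
        cmd.Stdout = os.Stdout
        cmd.Stderr = os.Stderr
        if err := cmd.Run(); err != nil {
            log.Fatalf("Failed to build local operator binary: %s", err)
        }
    }
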
diff --git a/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/apis/cache/v1alpha1/memcached_types.go b/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/apis/cache/v1alpha1/memcached_types.go
index 231543b..989a168 100644
--- a/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/apis/cache/v1alpha1/memcached_types.go
+++ b/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/apis/cache/v1alpha1/memcached_types.go
@@ -34,6 +34,7 @@ type MemcachedStatus struct {
 
 // Memcached is the Schema for the memcacheds API
 // +k8s:openapi-gen=true
+// +kubebuilder:subresource:status
 type Memcached struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
diff --git a/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/apis/cache/v1alpha1/memcached_types.go b/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/apis/cache/v1alpha1/memcachedrs_types.go
similarity index 59%
copy from vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/apis/cache/v1alpha1/memcached_types.go
copy to vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/apis/cache/v1alpha1/memcachedrs_types.go
index 231543b..ac06b6d 100644
--- a/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/apis/cache/v1alpha1/memcached_types.go
+++ b/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/apis/cache/v1alpha1/memcachedrs_types.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Operator-SDK Authors
+// Copyright 2019 The Operator-SDK Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -21,36 +21,41 @@ import (
 // EDIT THIS FILE!  THIS IS SCAFFOLDING FOR YOU TO OWN!
 // NOTE: json tags are required.  Any new fields you add must have json tags for the fields to be serialized.
 
-type MemcachedSpec struct {
-	// Size is the size of the memcached deployment
-	Size int32 `json:"size"`
+// MemcachedRSSpec defines the desired state of MemcachedRS
+// +k8s:openapi-gen=true
+type MemcachedRSSpec struct {
+	NumNodes int32 `json:"numNodes"`
 }
-type MemcachedStatus struct {
-	// Nodes are the names of the memcached pods
-	Nodes []string `json:"nodes"`
+
+// MemcachedRSStatus defines the observed state of MemcachedRS
+// +k8s:openapi-gen=true
+type MemcachedRSStatus struct {
+	NodeList []string `json:"nodeList"`
+	Test     bool     `json:"test"`
 }
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
-// Memcached is the Schema for the memcacheds API
+// MemcachedRS is the Schema for the memcachedrs API
 // +k8s:openapi-gen=true
-type Memcached struct {
+// +kubebuilder:subresource:status
+type MemcachedRS struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
 
-	Spec   MemcachedSpec   `json:"spec,omitempty"`
-	Status MemcachedStatus `json:"status,omitempty"`
+	Spec   MemcachedRSSpec   `json:"spec,omitempty"`
+	Status MemcachedRSStatus `json:"status,omitempty"`
 }
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
-// MemcachedList contains a list of Memcached
-type MemcachedList struct {
+// MemcachedRSList contains a list of MemcachedRS
+type MemcachedRSList struct {
 	metav1.TypeMeta `json:",inline"`
 	metav1.ListMeta `json:"metadata,omitempty"`
-	Items           []Memcached `json:"items"`
+	Items           []MemcachedRS `json:"items"`
 }
 
 func init() {
-	SchemeBuilder.Register(&Memcached{}, &MemcachedList{})
+	SchemeBuilder.Register(&MemcachedRS{}, &MemcachedRSList{})
 }
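
Note: the new MemcachedRS type only exposes the fields shown in the hunk above (NumNodes in the spec, NodeList and Test in the status). A minimal example of constructing an instance in Go; the import path mirrors the file location in this patch:

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        cachev1alpha1 "github.com/operator-framework/operator-sdk/test/test-framework/pkg/apis/cache/v1alpha1"
    )

    func main() {
        // Build a MemcachedRS custom resource requesting three nodes.
        cr := &cachev1alpha1.MemcachedRS{
            ObjectMeta: metav1.ObjectMeta{Name: "example-memcachedrs", Namespace: "default"},
            Spec:       cachev1alpha1.MemcachedRSSpec{NumNodes: 3},
        }
        fmt.Printf("%s requests %d memcached nodes\n", cr.Name, cr.Spec.NumNodes)
    }
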
diff --git a/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/apis/cache/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/apis/cache/v1alpha1/zz_generated.deepcopy.go
index 24be975..2fb8572 100644
--- a/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/apis/cache/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/apis/cache/v1alpha1/zz_generated.deepcopy.go
@@ -70,6 +70,104 @@ func (in *MemcachedList) DeepCopyObject() runtime.Object {
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MemcachedRS) DeepCopyInto(out *MemcachedRS) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Spec = in.Spec
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedRS.
+func (in *MemcachedRS) DeepCopy() *MemcachedRS {
+	if in == nil {
+		return nil
+	}
+	out := new(MemcachedRS)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MemcachedRS) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MemcachedRSList) DeepCopyInto(out *MemcachedRSList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]MemcachedRS, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedRSList.
+func (in *MemcachedRSList) DeepCopy() *MemcachedRSList {
+	if in == nil {
+		return nil
+	}
+	out := new(MemcachedRSList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MemcachedRSList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MemcachedRSSpec) DeepCopyInto(out *MemcachedRSSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedRSSpec.
+func (in *MemcachedRSSpec) DeepCopy() *MemcachedRSSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MemcachedRSSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MemcachedRSStatus) DeepCopyInto(out *MemcachedRSStatus) {
+	*out = *in
+	if in.NodeList != nil {
+		in, out := &in.NodeList, &out.NodeList
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemcachedRSStatus.
+func (in *MemcachedRSStatus) DeepCopy() *MemcachedRSStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MemcachedRSStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *MemcachedSpec) DeepCopyInto(out *MemcachedSpec) {
 	*out = *in
 	return
diff --git a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/version/cmd.go b/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/controller/add_memcachedrs.go
similarity index 62%
copy from vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/version/cmd.go
copy to vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/controller/add_memcachedrs.go
index dbc369f..bb83fb9 100644
--- a/vendor/github.com/operator-framework/operator-sdk/cmd/operator-sdk/version/cmd.go
+++ b/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/controller/add_memcachedrs.go
@@ -12,23 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package version
+package controller
 
 import (
-	"fmt"
-
-	ver "github.com/operator-framework/operator-sdk/version"
-
-	"github.com/spf13/cobra"
+	"github.com/operator-framework/operator-sdk/test/test-framework/pkg/controller/memcachedrs"
 )
 
-func NewCmd() *cobra.Command {
-	versionCmd := &cobra.Command{
-		Use:   "version",
-		Short: "Prints the version of operator-sdk",
-		Run: func(cmd *cobra.Command, args []string) {
-			fmt.Println("operator-sdk version:", ver.Version)
-		},
-	}
-	return versionCmd
+func init() {
+	// AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
+	AddToManagerFuncs = append(AddToManagerFuncs, memcachedrs.Add)
 }
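
Note: the new add_memcachedrs.go registers the controller through the scaffold's AddToManagerFuncs list, and the controller package's AddToManager then iterates that list. The sketch below illustrates that registration pattern; controller.go itself is not part of this hunk, so treat this as a reconstruction of the usual scaffold rather than the exact vendored file:

    package controller

    import (
        "sigs.k8s.io/controller-runtime/pkg/manager"
    )

    // AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
    var AddToManagerFuncs []func(manager.Manager) error

    // AddToManager invokes every registered add function against the manager.
    func AddToManager(m manager.Manager) error {
        for _, f := range AddToManagerFuncs {
            if err := f(m); err != nil {
                return err
            }
        }
        return nil
    }
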
diff --git a/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/controller/memcached/memcached_controller.go b/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/controller/memcached/memcached_controller.go
index daa6b54..3c0ba2e 100644
--- a/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/controller/memcached/memcached_controller.go
+++ b/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/controller/memcached/memcached_controller.go
@@ -77,6 +77,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
 	return nil
 }
 
+// blank assignment to verify that ReconcileMemcached implements reconcile.Reconciler
 var _ reconcile.Reconciler = &ReconcileMemcached{}
 
 // ReconcileMemcached reconciles a Memcached object
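
Note: the comment added above documents a common Go idiom: assigning a value to the blank identifier forces a compile-time check that a type satisfies an interface. A tiny standalone example of the idiom (the types here are illustrative only):

    package main

    import "fmt"

    type Greeter interface {
        Greet() string
    }

    type EnglishGreeter struct{}

    func (EnglishGreeter) Greet() string { return "hello" }

    // Compile-time proof that EnglishGreeter implements Greeter; if the method
    // set ever stops matching, this line fails to build.
    var _ Greeter = EnglishGreeter{}

    func main() {
        fmt.Println(EnglishGreeter{}.Greet())
    }
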
diff --git a/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/controller/memcached/memcached_controller.go b/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/controller/memcachedrs/memcachedrs_controller.go
similarity index 59%
copy from vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/controller/memcached/memcached_controller.go
copy to vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/controller/memcachedrs/memcachedrs_controller.go
index daa6b54..e9bdb8a 100644
--- a/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/controller/memcached/memcached_controller.go
+++ b/vendor/github.com/operator-framework/operator-sdk/test/test-framework/pkg/controller/memcachedrs/memcachedrs_controller.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Operator-SDK Authors
+// Copyright 2019 The Operator-SDK Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package memcached
+package memcachedrs
 
 import (
 	"context"
@@ -37,9 +37,14 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 )
 
-var log = logf.Log.WithName("controller_memcached")
+var log = logf.Log.WithName("controller_memcachedrs")
 
-// Add creates a new Memcached Controller and adds it to the Manager. The Manager will set fields on the Controller
+/**
+* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
+* business logic.  Delete these comments after modifying this file.*
+ */
+
+// Add creates a new MemcachedRS Controller and adds it to the Manager. The Manager will set fields on the Controller
 // and Start it when the Manager is Started.
 func Add(mgr manager.Manager) error {
 	return add(mgr, newReconciler(mgr))
@@ -47,28 +52,28 @@ func Add(mgr manager.Manager) error {
 
 // newReconciler returns a new reconcile.Reconciler
 func newReconciler(mgr manager.Manager) reconcile.Reconciler {
-	return &ReconcileMemcached{client: mgr.GetClient(), scheme: mgr.GetScheme()}
+	return &ReconcileMemcachedRS{client: mgr.GetClient(), scheme: mgr.GetScheme()}
 }
 
 // add adds a new Controller to mgr with r as the reconcile.Reconciler
 func add(mgr manager.Manager, r reconcile.Reconciler) error {
 	// Create a new controller
-	c, err := controller.New("memcached-controller", mgr, controller.Options{Reconciler: r})
+	c, err := controller.New("memcachedrs-controller", mgr, controller.Options{Reconciler: r})
 	if err != nil {
 		return err
 	}
 
-	// Watch for changes to primary resource Memcached
-	err = c.Watch(&source.Kind{Type: &cachev1alpha1.Memcached{}}, &handler.EnqueueRequestForObject{})
+	// Watch for changes to primary resource MemcachedRS
+	err = c.Watch(&source.Kind{Type: &cachev1alpha1.MemcachedRS{}}, &handler.EnqueueRequestForObject{})
 	if err != nil {
 		return err
 	}
 
 	// TODO(user): Modify this to be the types you create that are owned by the primary resource
-	// Watch for changes to secondary resource Pods and requeue the owner Memcached
-	err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{
+	// Watch for changes to secondary resource Pods and requeue the owner MemcachedRS
+	err = c.Watch(&source.Kind{Type: &appsv1.ReplicaSet{}}, &handler.EnqueueRequestForOwner{
 		IsController: true,
-		OwnerType:    &cachev1alpha1.Memcached{},
+		OwnerType:    &cachev1alpha1.MemcachedRS{},
 	})
 	if err != nil {
 		return err
@@ -77,70 +82,68 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
 	return nil
 }
 
-var _ reconcile.Reconciler = &ReconcileMemcached{}
+// blank assignment to verify that ReconcileMemcachedRS implements reconcile.Reconciler
+var _ reconcile.Reconciler = &ReconcileMemcachedRS{}
 
-// ReconcileMemcached reconciles a Memcached object
-type ReconcileMemcached struct {
-	// TODO: Clarify the split client
+// ReconcileMemcachedRS reconciles a MemcachedRS object
+type ReconcileMemcachedRS struct {
 	// This client, initialized using mgr.Client() above, is a split client
 	// that reads objects from the cache and writes to the apiserver
 	client client.Client
 	scheme *runtime.Scheme
 }
 
-// Reconcile reads that state of the cluster for a Memcached object and makes changes based on the state read
-// and what is in the Memcached.Spec
+// Reconcile reads that state of the cluster for a MemcachedRS object and makes changes based on the state read
+// and what is in the MemcachedRS.Spec
 // TODO(user): Modify this Reconcile function to implement your Controller logic.  This example creates
-// a Memcached Deployment for each Memcached CR
+// a Pod as an example
 // Note:
 // The Controller will requeue the Request to be processed again if the returned error is non-nil or
 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
-func (r *ReconcileMemcached) Reconcile(request reconcile.Request) (reconcile.Result, error) {
+func (r *ReconcileMemcachedRS) Reconcile(request reconcile.Request) (reconcile.Result, error) {
 	reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
-	reqLogger.Info("Reconciling Memcached.")
+	reqLogger.Info("Reconciling MemcachedRS")
 
-	// Fetch the Memcached instance
-	memcached := &cachev1alpha1.Memcached{}
-	err := r.client.Get(context.TODO(), request.NamespacedName, memcached)
+	// Fetch the MemcachedRS instance
+	memcachedrs := &cachev1alpha1.MemcachedRS{}
+	err := r.client.Get(context.TODO(), request.NamespacedName, memcachedrs)
 	if err != nil {
 		if errors.IsNotFound(err) {
 			// Request object not found, could have been deleted after reconcile request.
 			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
 			// Return and don't requeue
-			reqLogger.Info("Memcached resource not found. Ignoring since object must be deleted.")
 			return reconcile.Result{}, nil
 		}
 		// Error reading the object - requeue the request.
-		reqLogger.Error(err, "Failed to get Memcached.")
 		return reconcile.Result{}, err
 	}
 
-	// Check if the deployment already exists, if not create a new one
-	found := &appsv1.Deployment{}
-	err = r.client.Get(context.TODO(), types.NamespacedName{Name: memcached.Name, Namespace: memcached.Namespace}, found)
+	// Check if the replicaSet already exists, if not create a new one
+	found := &appsv1.ReplicaSet{}
+	err = r.client.Get(context.TODO(), types.NamespacedName{Name: memcachedrs.Name, Namespace: memcachedrs.Namespace}, found)
 	if err != nil && errors.IsNotFound(err) {
-		// Define a new deployment
-		dep := r.deploymentForMemcached(memcached)
-		reqLogger.Info("Creating a new Deployment.", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name)
+		// Define a new replicaSet
+		dep := r.replicaSetForMemcached(memcachedrs)
+		reqLogger.Info("Creating a new ReplicaSet", "ReplicaSet.Namespace", dep.Namespace, "ReplicaSet.Name", dep.Name)
 		err = r.client.Create(context.TODO(), dep)
 		if err != nil {
-			reqLogger.Error(err, "Failed to create new Deployment.", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name)
+			reqLogger.Error(err, "Failed to create new ReplicaSet", "ReplicaSet.Namespace", dep.Namespace, "ReplicaSet.Name", dep.Name)
 			return reconcile.Result{}, err
 		}
-		// Deployment created successfully - return and requeue
+		// ReplicaSet created successfully - return and requeue
 		return reconcile.Result{Requeue: true}, nil
 	} else if err != nil {
-		reqLogger.Error(err, "Failed to get Deployment.")
+		reqLogger.Error(err, "Failed to get ReplicaSet")
 		return reconcile.Result{}, err
 	}
 
-	// Ensure the deployment size is the same as the spec
-	size := memcached.Spec.Size
+	// Ensure the replicaSet size is the same as the spec
+	size := memcachedrs.Spec.NumNodes
 	if *found.Spec.Replicas != size {
 		found.Spec.Replicas = &size
 		err = r.client.Update(context.TODO(), found)
 		if err != nil {
-			reqLogger.Error(err, "Failed to update Deployment.", "Deployment.Namespace", found.Namespace, "Deployment.Name", found.Name)
+			reqLogger.Error(err, "Failed to update ReplicaSet", "ReplicaSet.Namespace", found.Namespace, "ReplicaSet.Name", found.Name)
 			return reconcile.Result{}, err
 		}
 		// Spec updated - return and requeue
@@ -148,58 +151,57 @@ func (r *ReconcileMemcached) Reconcile(request reconcile.Request) (reconcile.Res
 	}
 
 	// Update the Memcached status with the pod names
-	// List the pods for this memcached's deployment
+	// List the pods for this memcached's replicaSet
 	podList := &corev1.PodList{}
-	labelSelector := labels.SelectorFromSet(labelsForMemcached(memcached.Name))
-	listOps := &client.ListOptions{
-		Namespace:     memcached.Namespace,
-		LabelSelector: labelSelector,
-		// HACK: due to a fake client bug, ListOptions.Raw.TypeMeta must be
-		// explicitly populated for testing.
-		//
-		// See https://github.com/kubernetes-sigs/controller-runtime/issues/168
-		Raw: &metav1.ListOptions{
-			TypeMeta: metav1.TypeMeta{
-				Kind:       "Memcached",
-				APIVersion: cachev1alpha1.SchemeGroupVersion.Version,
-			},
-		},
-	}
+	labelSelector := labels.SelectorFromSet(labelsForMemcached(memcachedrs.Name))
+	listOps := &client.ListOptions{Namespace: memcachedrs.Namespace, LabelSelector: labelSelector}
 	err = r.client.List(context.TODO(), listOps, podList)
 	if err != nil {
-		reqLogger.Error(err, "Failed to list pods.", "Memcached.Namespace", memcached.Namespace, "Memcached.Name", memcached.Name)
+		reqLogger.Error(err, "Failed to list pods", "Memcached.Namespace", memcachedrs.Namespace, "Memcached.Name", memcachedrs.Name)
 		return reconcile.Result{}, err
 	}
 	podNames := getPodNames(podList.Items)
 
 	// Update status.Nodes if needed
-	if !reflect.DeepEqual(podNames, memcached.Status.Nodes) {
-		memcached.Status.Nodes = podNames
-		err := r.client.Status().Update(context.TODO(), memcached)
+	if !reflect.DeepEqual(podNames, memcachedrs.Status.NodeList) {
+		memcachedrs.Status.NodeList = podNames
+		err := r.client.Status().Update(context.TODO(), memcachedrs)
 		if err != nil {
-			reqLogger.Error(err, "Failed to update Memcached status.")
+			reqLogger.Error(err, "Failed to update Memcached status")
 			return reconcile.Result{}, err
 		}
 	}
 
+	// Switch testing bool
+	if memcachedrs.Status.Test {
+		memcachedrs.Status.Test = false
+	} else {
+		memcachedrs.Status.Test = true
+	}
+	err = r.client.Status().Update(context.TODO(), memcachedrs)
+	if err != nil {
+		reqLogger.Error(err, "Failed to update Memcached status")
+		return reconcile.Result{}, err
+	}
+
 	return reconcile.Result{}, nil
 }
 
-// deploymentForMemcached returns a memcached Deployment object
-func (r *ReconcileMemcached) deploymentForMemcached(m *cachev1alpha1.Memcached) *appsv1.Deployment {
+// rsForMemcached returns a memcached ReplicaSet object
+func (r *ReconcileMemcachedRS) replicaSetForMemcached(m *cachev1alpha1.MemcachedRS) *appsv1.ReplicaSet {
 	ls := labelsForMemcached(m.Name)
-	replicas := m.Spec.Size
+	replicas := m.Spec.NumNodes
 
-	dep := &appsv1.Deployment{
+	replicaSet := &appsv1.ReplicaSet{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: "apps/v1",
-			Kind:       "Deployment",
+			Kind:       "ReplicaSet",
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      m.Name,
 			Namespace: m.Namespace,
 		},
-		Spec: appsv1.DeploymentSpec{
+		Spec: appsv1.ReplicaSetSpec{
 			Replicas: &replicas,
 			Selector: &metav1.LabelSelector{
 				MatchLabels: ls,
@@ -223,16 +225,14 @@ func (r *ReconcileMemcached) deploymentForMemcached(m *cachev1alpha1.Memcached)
 		},
 	}
 	// Set Memcached instance as the owner and controller
-	if err := controllerutil.SetControllerReference(m, dep, r.scheme); err != nil {
-		log.Error(err, "Failed to set controller reference for memcached deployment")
-	}
-	return dep
+	controllerutil.SetControllerReference(m, replicaSet, r.scheme)
+	return replicaSet
 }
 
 // labelsForMemcached returns the labels for selecting the resources
 // belonging to the given memcached CR name.
 func labelsForMemcached(name string) map[string]string {
-	return map[string]string{"app": "memcached", "memcached_cr": name}
+	return map[string]string{"app": "memcached-rs", "memcached_cr": name}
 }
 
 // getPodNames returns the pod names of the array of pods passed in
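
Note: getPodNames itself is unchanged by this patch and its body is not shown. The sketch below is a plausible implementation consistent with how the Reconcile loop above uses it (extracting .Name from each listed Pod), stated as an assumption rather than the SDK's exact code:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // getPodNames returns the names of the pods passed in, matching how the
    // Reconcile loop feeds podList.Items into the CR status.
    func getPodNames(pods []corev1.Pod) []string {
        var podNames []string
        for _, pod := range pods {
            podNames = append(podNames, pod.Name)
        }
        return podNames
    }

    func main() {
        pods := []corev1.Pod{
            {ObjectMeta: metav1.ObjectMeta{Name: "memcached-rs-abc12"}},
            {ObjectMeta: metav1.ObjectMeta{Name: "memcached-rs-def34"}},
        }
        fmt.Println(getPodNames(pods))
    }
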
diff --git a/vendor/github.com/operator-framework/operator-sdk/version/version.go b/vendor/github.com/operator-framework/operator-sdk/version/version.go
index 51db095..fa8eabf 100644
--- a/vendor/github.com/operator-framework/operator-sdk/version/version.go
+++ b/vendor/github.com/operator-framework/operator-sdk/version/version.go
@@ -15,5 +15,7 @@
 package version
 
 var (
-	Version = "v0.7.0"
+	Version    = "v0.8.0"
+	GitVersion = "unknown"
+	GitCommit  = "unknown"
 )
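
Note: GitVersion and GitCommit default to "unknown" above; exported package variables like these are typically overridden at build time with -X linker flags. A hedged example of consuming them from Go (the ldflags command in the comment is illustrative, not taken from the SDK's build scripts):

    package main

    import (
        "fmt"

        ver "github.com/operator-framework/operator-sdk/version"
    )

    // These values are normally stamped at build time, for example:
    //   go build -ldflags "-X github.com/operator-framework/operator-sdk/version.GitCommit=$(git rev-parse HEAD)"
    func main() {
        fmt.Println("operator-sdk version:", ver.Version)
        fmt.Println("git version:", ver.GitVersion)
        fmt.Println("git commit:", ver.GitCommit)
    }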