You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@camel.apache.org by lb...@apache.org on 2018/10/26 22:04:51 UTC

[camel-k] branch master updated (043836a -> 980baca)

This is an automated email from the ASF dual-hosted git repository.

lburgazzoli pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/camel-k.git.


    from 043836a  chore(build): remove deprecated methods
     new 5618b0c  upgrade to operator-sdk 0.0.7
     new 980baca  use a shorter refresh period in the CLI

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 Gopkg.lock                                         |  147 +-
 Gopkg.toml                                         |   27 +-
 pkg/apis/camel/v1alpha1/types_support.go           |   14 +
 pkg/client/cmd/root.go                             |    8 +-
 pkg/install/operator.go                            |   20 +
 vendor/github.com/go-openapi/swag/convert.go       |   26 +-
 .../go-openapi/{spec/license.go => swag/doc.go}    |   26 +-
 vendor/github.com/go-openapi/swag/json.go          |   15 +-
 vendor/github.com/go-openapi/swag/util.go          |   79 +-
 .../google/btree}/LICENSE                          |    0
 vendor/github.com/google/btree/btree.go            |  890 +++
 vendor/github.com/google/btree/btree_mem.go        |   76 +
 vendor/github.com/gregjones/httpcache/LICENSE.txt  |    7 +
 .../gregjones/httpcache/diskcache/diskcache.go     |   61 +
 vendor/github.com/gregjones/httpcache/httpcache.go |  551 ++
 vendor/github.com/howeyc/gopass/LICENSE.txt        |   15 -
 vendor/github.com/howeyc/gopass/pass.go            |  110 -
 vendor/github.com/howeyc/gopass/terminal.go        |   25 -
 .../github.com/howeyc/gopass/terminal_solaris.go   |   69 -
 .../{license => LICENSE}                           |    0
 .../operator-sdk/pkg/k8sclient/client.go           |   59 +-
 .../operator-sdk/version/version.go                |    2 +-
 vendor/github.com/petar/GoLLRB/AUTHORS             |    4 +
 vendor/github.com/petar/GoLLRB/LICENSE             |   27 +
 vendor/github.com/petar/GoLLRB/llrb/avgvar.go      |   39 +
 vendor/github.com/petar/GoLLRB/llrb/iterator.go    |   93 +
 vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go  |   46 +
 vendor/github.com/petar/GoLLRB/llrb/llrb.go        |  456 ++
 vendor/github.com/petar/GoLLRB/llrb/util.go        |   17 +
 .../logrus => peterbourgon/diskv}/LICENSE          |    4 +-
 .../github.com/peterbourgon/diskv/compression.go   |   64 +
 vendor/github.com/peterbourgon/diskv/diskv.go      |  624 ++
 vendor/github.com/peterbourgon/diskv/index.go      |  115 +
 .../github.com/prometheus/client_golang/AUTHORS.md |   18 -
 .../client_golang/prometheus/collector.go          |   73 +-
 .../prometheus/client_golang/prometheus/counter.go |  191 +-
 .../prometheus/client_golang/prometheus/desc.go    |   47 +-
 .../prometheus/client_golang/prometheus/doc.go     |   94 +-
 .../prometheus/client_golang/prometheus/fnv.go     |   13 +
 .../prometheus/client_golang/prometheus/gauge.go   |  204 +-
 .../client_golang/prometheus/go_collector.go       |   74 +-
 .../client_golang/prometheus/histogram.go          |  302 +-
 .../prometheus/client_golang/prometheus/http.go    |  151 +-
 .../client_golang/prometheus/internal/metric.go    |   85 +
 .../prometheus/client_golang/prometheus/labels.go  |   70 +
 .../prometheus/client_golang/prometheus/metric.go  |   90 +-
 .../client_golang/prometheus/observer.go           |   52 +
 .../client_golang/prometheus/process_collector.go  |  220 +-
 .../client_golang/prometheus/promhttp/delegator.go |  199 +
 .../prometheus/promhttp/delegator_1_8.go           |  181 +
 .../prometheus/promhttp/delegator_pre_1_8.go       |   44 +
 .../client_golang/prometheus/promhttp/http.go      |  162 +-
 .../prometheus/promhttp/instrument_client.go       |   97 +
 .../prometheus/promhttp/instrument_client_1_8.go   |  144 +
 .../prometheus/promhttp/instrument_server.go       |  447 ++
 .../client_golang/prometheus/registry.go           |  671 +-
 .../prometheus/client_golang/prometheus/summary.go |  192 +-
 .../prometheus/client_golang/prometheus/timer.go   |   51 +
 .../prometheus/client_golang/prometheus/untyped.go |  102 +-
 .../prometheus/client_golang/prometheus/value.go   |   94 +-
 .../prometheus/client_golang/prometheus/vec.go     |  494 +-
 .../prometheus/client_golang/prometheus/wrap.go    |  179 +
 .../prometheus/common/expfmt/text_create.go        |  357 +-
 vendor/github.com/prometheus/common/model/time.go  |    2 +-
 .../prometheus/procfs/internal/util/parse.go       |   15 +-
 .../procfs/internal/util/sysreadfile_linux.go      |   45 +
 vendor/github.com/sirupsen/logrus/entry.go         |   16 +-
 vendor/github.com/sirupsen/logrus/formatter.go     |   15 +-
 .../github.com/sirupsen/logrus/json_formatter.go   |   10 +-
 vendor/github.com/sirupsen/logrus/terminal_bsd.go  |   17 -
 .../github.com/sirupsen/logrus/terminal_linux.go   |   21 -
 .../sirupsen/logrus/terminal_notwindows.go         |    8 +
 .../github.com/sirupsen/logrus/text_formatter.go   |    7 +-
 vendor/golang.org/x/oauth2/internal/token.go       |    1 +
 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s       |   17 +
 vendor/golang.org/x/sys/unix/openbsd_pledge.go     |    6 +-
 vendor/golang.org/x/sys/unix/openbsd_unveil.go     |   44 +
 vendor/golang.org/x/sys/unix/sockcmsg_unix.go      |    2 +-
 vendor/golang.org/x/sys/unix/syscall_aix.go        |   31 +-
 vendor/golang.org/x/sys/unix/syscall_freebsd.go    |  310 +-
 vendor/golang.org/x/sys/unix/syscall_linux.go      |   32 +-
 .../golang.org/x/sys/unix/syscall_linux_amd64.go   |   13 +
 .../golang.org/x/sys/unix/syscall_linux_arm64.go   |    9 +-
 .../golang.org/x/sys/unix/syscall_linux_ppc64x.go  |   13 +
 .../golang.org/x/sys/unix/syscall_linux_riscv64.go |    9 +-
 .../golang.org/x/sys/unix/syscall_linux_s390x.go   |   13 +
 vendor/golang.org/x/sys/unix/syscall_openbsd.go    |   11 +-
 .../golang.org/x/sys/unix/syscall_openbsd_386.go   |    4 +
 .../golang.org/x/sys/unix/syscall_openbsd_arm.go   |    4 +
 vendor/golang.org/x/sys/unix/syscall_unix.go       |    8 +-
 vendor/golang.org/x/sys/unix/types_aix.go          |   12 +-
 vendor/golang.org/x/sys/unix/types_darwin.go       |   12 +-
 vendor/golang.org/x/sys/unix/types_dragonfly.go    |   12 +-
 vendor/golang.org/x/sys/unix/types_freebsd.go      |   77 +-
 vendor/golang.org/x/sys/unix/types_netbsd.go       |   12 +-
 vendor/golang.org/x/sys/unix/types_openbsd.go      |   16 +-
 vendor/golang.org/x/sys/unix/types_solaris.go      |   12 +-
 vendor/golang.org/x/sys/unix/zerrors_linux_386.go  |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_amd64.go   |   35 +-
 vendor/golang.org/x/sys/unix/zerrors_linux_arm.go  |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_arm64.go   |   35 +-
 vendor/golang.org/x/sys/unix/zerrors_linux_mips.go |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_mips64.go  |   35 +-
 .../x/sys/unix/zerrors_linux_mips64le.go           |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_mipsle.go  |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_ppc64.go   |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_ppc64le.go |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_riscv64.go |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_s390x.go   |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_sparc64.go |  350 +-
 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go   |   97 +-
 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go | 1073 ++-
 .../golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go | 1162 +++
 .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go         | 1042 +++
 .../golang.org/x/sys/unix/zsyscall_freebsd_386.go  |  102 +-
 .../x/sys/unix/zsyscall_freebsd_amd64.go           |  102 +-
 .../golang.org/x/sys/unix/zsyscall_freebsd_arm.go  |  102 +-
 vendor/golang.org/x/sys/unix/zsyscall_linux_386.go |   51 +
 .../golang.org/x/sys/unix/zsyscall_linux_amd64.go  |   66 +
 vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go |   51 +
 .../golang.org/x/sys/unix/zsyscall_linux_arm64.go  |   51 +
 .../golang.org/x/sys/unix/zsyscall_linux_mips.go   |   51 +
 .../golang.org/x/sys/unix/zsyscall_linux_mips64.go |   51 +
 .../x/sys/unix/zsyscall_linux_mips64le.go          |   51 +
 .../golang.org/x/sys/unix/zsyscall_linux_mipsle.go |   51 +
 .../golang.org/x/sys/unix/zsyscall_linux_ppc64.go  |   66 +
 .../x/sys/unix/zsyscall_linux_ppc64le.go           |   66 +
 .../x/sys/unix/zsyscall_linux_riscv64.go           |   51 +
 .../golang.org/x/sys/unix/zsyscall_linux_s390x.go  |   66 +
 .../golang.org/x/sys/unix/zsyscall_openbsd_386.go  |   27 +
 .../x/sys/unix/zsyscall_openbsd_amd64.go           |   27 +
 .../golang.org/x/sys/unix/zsyscall_openbsd_arm.go  |   27 +
 .../x/sys/unix/zsyscall_solaris_amd64.go           |  256 +
 vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go  |    1 +
 .../golang.org/x/sys/unix/zsysnum_linux_arm64.go   |    1 +
 .../golang.org/x/sys/unix/zsysnum_linux_riscv64.go |    1 +
 .../golang.org/x/sys/unix/zsysnum_openbsd_386.go   |   25 +-
 .../golang.org/x/sys/unix/zsysnum_openbsd_arm.go   |   13 +-
 vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go     |   10 +-
 vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go   |   10 +-
 vendor/golang.org/x/sys/unix/ztypes_darwin_386.go  |   10 +-
 .../golang.org/x/sys/unix/ztypes_darwin_amd64.go   |   10 +-
 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go  |   10 +-
 .../golang.org/x/sys/unix/ztypes_darwin_arm64.go   |   10 +-
 .../x/sys/unix/ztypes_dragonfly_amd64.go           |   10 +-
 vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go |  265 +-
 .../golang.org/x/sys/unix/ztypes_freebsd_amd64.go  |  283 +-
 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go |  287 +-
 vendor/golang.org/x/sys/unix/ztypes_linux_386.go   |   33 +-
 vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go |   33 +-
 vendor/golang.org/x/sys/unix/ztypes_linux_arm.go   |   33 +-
 vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go |   33 +-
 vendor/golang.org/x/sys/unix/ztypes_linux_mips.go  |   33 +-
 .../golang.org/x/sys/unix/ztypes_linux_mips64.go   |   33 +-
 .../golang.org/x/sys/unix/ztypes_linux_mips64le.go |   33 +-
 .../golang.org/x/sys/unix/ztypes_linux_mipsle.go   |   33 +-
 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go |   33 +-
 .../golang.org/x/sys/unix/ztypes_linux_ppc64le.go  |   33 +-
 .../golang.org/x/sys/unix/ztypes_linux_riscv64.go  |   33 +-
 vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go |   33 +-
 .../golang.org/x/sys/unix/ztypes_linux_sparc64.go  |   10 +-
 vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go  |   10 +-
 .../golang.org/x/sys/unix/ztypes_netbsd_amd64.go   |   10 +-
 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go  |   10 +-
 vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go |   12 +-
 .../golang.org/x/sys/unix/ztypes_openbsd_amd64.go  |   12 +-
 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go |   12 +-
 .../golang.org/x/sys/unix/ztypes_solaris_amd64.go  |   10 +-
 vendor/golang.org/x/tools/imports/fix.go           |  239 +-
 .../x/tools/internal/fastwalk/fastwalk.go          |    5 +
 .../fastwalk/fastwalk_dirent_namlen_bsd.go}        |   10 +-
 .../fastwalk/fastwalk_dirent_namlen_linux.go       |   24 +
 .../x/tools/internal/fastwalk/fastwalk_portable.go |    8 +
 .../x/tools/internal/fastwalk/fastwalk_unix.go     |   14 +-
 .../golang.org/x/tools/internal/gopathwalk/walk.go |  246 +
 .../admissionregistration/v1alpha1/generated.pb.go |    2 +-
 .../v1alpha1/types_swagger_doc_generated.go        |    4 +-
 .../v1alpha1/zz_generated.deepcopy.go              |    2 +-
 .../admissionregistration/v1beta1/generated.pb.go  |    2 +-
 .../api/admissionregistration/v1beta1/types.go     |    4 +-
 .../v1beta1/types_swagger_doc_generated.go         |    6 +-
 .../v1beta1/zz_generated.deepcopy.go               |    2 +-
 vendor/k8s.io/api/apps/v1/generated.pb.go          |    2 +-
 vendor/k8s.io/api/apps/v1/types.go                 |   16 +-
 .../api/apps/v1/types_swagger_doc_generated.go     |    8 +-
 vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go |    2 +-
 vendor/k8s.io/api/apps/v1beta1/generated.pb.go     |    2 +-
 vendor/k8s.io/api/apps/v1beta1/types.go            |   16 +-
 .../apps/v1beta1/types_swagger_doc_generated.go    |    8 +-
 .../api/apps/v1beta1/zz_generated.deepcopy.go      |    2 +-
 vendor/k8s.io/api/apps/v1beta2/generated.pb.go     |    2 +-
 vendor/k8s.io/api/apps/v1beta2/types.go            |   16 +-
 .../apps/v1beta2/types_swagger_doc_generated.go    |    8 +-
 .../api/apps/v1beta2/zz_generated.deepcopy.go      |    2 +-
 .../k8s.io/api/authentication/v1/generated.pb.go   |    2 +-
 .../v1/types_swagger_doc_generated.go              |    4 +-
 .../api/authentication/v1/zz_generated.deepcopy.go |    2 +-
 .../api/authentication/v1beta1/generated.pb.go     |    2 +-
 .../v1beta1/types_swagger_doc_generated.go         |    4 +-
 .../v1beta1/zz_generated.deepcopy.go               |    2 +-
 vendor/k8s.io/api/authorization/v1/generated.pb.go |    2 +-
 .../v1/types_swagger_doc_generated.go              |    4 +-
 .../api/authorization/v1/zz_generated.deepcopy.go  |    2 +-
 .../api/authorization/v1beta1/generated.pb.go      |    2 +-
 .../v1beta1/types_swagger_doc_generated.go         |    4 +-
 .../authorization/v1beta1/zz_generated.deepcopy.go |    2 +-
 vendor/k8s.io/api/autoscaling/v1/generated.pb.go   |    2 +-
 .../autoscaling/v1/types_swagger_doc_generated.go  |    4 +-
 .../api/autoscaling/v1/zz_generated.deepcopy.go    |    2 +-
 .../k8s.io/api/autoscaling/v2beta1/generated.pb.go |    2 +-
 .../v2beta1/types_swagger_doc_generated.go         |    4 +-
 .../autoscaling/v2beta1/zz_generated.deepcopy.go   |    2 +-
 vendor/k8s.io/api/batch/v1/generated.pb.go         |    2 +-
 .../api/batch/v1/types_swagger_doc_generated.go    |    4 +-
 .../k8s.io/api/batch/v1/zz_generated.deepcopy.go   |    2 +-
 vendor/k8s.io/api/batch/v1beta1/generated.pb.go    |    2 +-
 .../batch/v1beta1/types_swagger_doc_generated.go   |    4 +-
 .../api/batch/v1beta1/zz_generated.deepcopy.go     |    2 +-
 vendor/k8s.io/api/batch/v2alpha1/generated.pb.go   |    2 +-
 .../batch/v2alpha1/types_swagger_doc_generated.go  |    4 +-
 .../api/batch/v2alpha1/zz_generated.deepcopy.go    |    2 +-
 .../api/certificates/v1beta1/generated.pb.go       |    2 +-
 .../v1beta1/types_swagger_doc_generated.go         |    4 +-
 .../certificates/v1beta1/zz_generated.deepcopy.go  |    2 +-
 .../k8s.io/api/core/v1/annotation_key_constants.go |   21 +-
 vendor/k8s.io/api/core/v1/generated.pb.go          | 7398 +++++++++++---------
 vendor/k8s.io/api/core/v1/meta.go                  |  108 -
 vendor/k8s.io/api/core/v1/register.go              |    1 -
 vendor/k8s.io/api/core/v1/resource.go              |    7 -
 vendor/k8s.io/api/core/v1/types.go                 |  618 +-
 .../api/core/v1/types_swagger_doc_generated.go     |  212 +-
 vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go |  447 +-
 vendor/k8s.io/api/events/v1beta1/generated.pb.go   |    2 +-
 .../events/v1beta1/types_swagger_doc_generated.go  |    4 +-
 .../api/events/v1beta1/zz_generated.deepcopy.go    |    2 +-
 .../k8s.io/api/extensions/v1beta1/generated.pb.go  |  592 +-
 vendor/k8s.io/api/extensions/v1beta1/types.go      |  170 +-
 .../v1beta1/types_swagger_doc_generated.go         |   81 +-
 .../extensions/v1beta1/zz_generated.deepcopy.go    |   12 +-
 vendor/k8s.io/api/networking/v1/generated.pb.go    |    2 +-
 vendor/k8s.io/api/networking/v1/types.go           |   25 +-
 .../networking/v1/types_swagger_doc_generated.go   |   12 +-
 .../api/networking/v1/zz_generated.deepcopy.go     |    2 +-
 vendor/k8s.io/api/policy/v1beta1/generated.pb.go   |  346 +-
 vendor/k8s.io/api/policy/v1beta1/types.go          |  121 +-
 .../policy/v1beta1/types_swagger_doc_generated.go  |   69 +-
 .../api/policy/v1beta1/zz_generated.deepcopy.go    |   12 +-
 vendor/k8s.io/api/rbac/v1/generated.pb.go          |    2 +-
 vendor/k8s.io/api/rbac/v1/types.go                 |    6 +-
 .../api/rbac/v1/types_swagger_doc_generated.go     |    4 +-
 vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go |    2 +-
 vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go    |    2 +-
 vendor/k8s.io/api/rbac/v1alpha1/types.go           |    6 +-
 .../rbac/v1alpha1/types_swagger_doc_generated.go   |    4 +-
 .../api/rbac/v1alpha1/zz_generated.deepcopy.go     |    2 +-
 vendor/k8s.io/api/rbac/v1beta1/generated.pb.go     |    2 +-
 vendor/k8s.io/api/rbac/v1beta1/types.go            |    6 +-
 .../rbac/v1beta1/types_swagger_doc_generated.go    |    4 +-
 .../api/rbac/v1beta1/zz_generated.deepcopy.go      |    2 +-
 .../k8s.io/api/scheduling/v1alpha1/generated.pb.go |    2 +-
 .../v1alpha1/types_swagger_doc_generated.go        |    4 +-
 .../scheduling/v1alpha1/zz_generated.deepcopy.go   |    2 +-
 .../api/scheduling/{v1alpha1 => v1beta1}/doc.go    |    4 +-
 .../{v1alpha1 => v1beta1}/generated.pb.go          |   76 +-
 .../scheduling/{v1alpha1 => v1beta1}/register.go   |    6 +-
 .../api/scheduling/{v1alpha1 => v1beta1}/types.go  |    4 +-
 .../types_swagger_doc_generated.go                 |    6 +-
 .../{v1alpha1 => v1beta1}/zz_generated.deepcopy.go |    4 +-
 .../k8s.io/api/settings/v1alpha1/generated.pb.go   |    2 +-
 .../v1alpha1/types_swagger_doc_generated.go        |    4 +-
 .../api/settings/v1alpha1/zz_generated.deepcopy.go |    2 +-
 vendor/k8s.io/api/storage/v1/generated.pb.go       |  136 +-
 vendor/k8s.io/api/storage/v1/types.go              |    8 +
 .../api/storage/v1/types_swagger_doc_generated.go  |    5 +-
 .../k8s.io/api/storage/v1/zz_generated.deepcopy.go |    9 +-
 vendor/k8s.io/api/storage/v1alpha1/generated.pb.go |    2 +-
 .../v1alpha1/types_swagger_doc_generated.go        |    4 +-
 .../api/storage/v1alpha1/zz_generated.deepcopy.go  |    2 +-
 vendor/k8s.io/api/storage/v1beta1/generated.pb.go  |  180 +-
 vendor/k8s.io/api/storage/v1beta1/types.go         |    8 +
 .../storage/v1beta1/types_swagger_doc_generated.go |    5 +-
 .../api/storage/v1beta1/zz_generated.deepcopy.go   |    9 +-
 .../k8s.io/apimachinery/pkg/api/meta/interfaces.go |   21 +-
 vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go    |   25 +-
 vendor/k8s.io/apimachinery/pkg/api/meta/meta.go    |    3 -
 .../k8s.io/apimachinery/pkg/api/meta/priority.go   |   30 +-
 .../k8s.io/apimachinery/pkg/api/meta/restmapper.go |   38 +-
 .../apimachinery/pkg/api/meta/unstructured.go      |   47 -
 .../apimachinery/pkg/api/resource/generated.pb.go  |    2 +-
 .../apimachinery/pkg/api/resource/quantity.go      |   42 -
 .../pkg/api/resource/zz_generated.deepcopy.go      |    2 +-
 .../internalversion/zz_generated.conversion.go     |    2 +-
 .../meta/internalversion/zz_generated.deepcopy.go  |    2 +-
 .../apimachinery/pkg/apis/meta/v1/conversion.go    |   12 +
 .../apimachinery/pkg/apis/meta/v1/duration.go      |    5 +-
 .../apimachinery/pkg/apis/meta/v1/generated.pb.go  |    2 +-
 .../apimachinery/pkg/apis/meta/v1/micro_time.go    |    5 +-
 .../k8s.io/apimachinery/pkg/apis/meta/v1/time.go   |    5 +-
 .../k8s.io/apimachinery/pkg/apis/meta/v1/types.go  |    3 +-
 .../apis/meta/v1/types_swagger_doc_generated.go    |    4 +-
 .../pkg/apis/meta/v1/unstructured/helpers.go       |   66 +-
 .../pkg/apis/meta/v1/unstructured/unstructured.go  |   24 +-
 .../apis/meta/v1/unstructured/unstructured_list.go |   17 +-
 .../meta/v1/unstructured/zz_generated.deepcopy.go  |    2 +-
 .../pkg/apis/meta/v1/zz_generated.deepcopy.go      |    2 +-
 .../pkg/apis/meta/v1/zz_generated.defaults.go      |    2 +-
 .../apimachinery/pkg/apis/meta/v1beta1/deepcopy.go |   23 +-
 .../pkg/apis/meta/v1beta1/generated.pb.go          |    2 +-
 .../apimachinery/pkg/apis/meta/v1beta1/types.go    |    4 +-
 .../meta/v1beta1/types_swagger_doc_generated.go    |    6 +-
 .../pkg/apis/meta/v1beta1/zz_generated.deepcopy.go |    2 +-
 .../pkg/apis/meta/v1beta1/zz_generated.defaults.go |    2 +-
 .../pkg/conversion/queryparams/convert.go          |    3 +
 vendor/k8s.io/apimachinery/pkg/fields/selector.go  |   21 +
 .../pkg/labels/zz_generated.deepcopy.go            |    2 +-
 .../k8s.io/apimachinery/pkg/runtime/converter.go   |   26 +-
 vendor/k8s.io/apimachinery/pkg/runtime/error.go    |    8 +
 .../apimachinery/pkg/runtime/generated.pb.go       |    2 +-
 .../k8s.io/apimachinery/pkg/runtime/interfaces.go  |   17 +-
 .../pkg/runtime/schema/generated.pb.go             |    2 +-
 vendor/k8s.io/apimachinery/pkg/runtime/scheme.go   |  146 +-
 .../pkg/runtime/serializer/json/json.go            |   27 +-
 .../runtime/serializer/versioning/versioning.go    |   23 +-
 .../pkg/runtime/zz_generated.deepcopy.go           |    2 +-
 .../apimachinery/pkg/types/namespacedname.go       |   17 -
 vendor/k8s.io/apimachinery/pkg/util/clock/clock.go |   63 +-
 vendor/k8s.io/apimachinery/pkg/util/diff/diff.go   |   44 +-
 .../apimachinery/pkg/util/intstr/generated.pb.go   |    2 +-
 vendor/k8s.io/apimachinery/pkg/util/net/http.go    |   12 +-
 .../k8s.io/apimachinery/pkg/util/net/port_range.go |   66 +-
 .../apimachinery/pkg/util/runtime/runtime.go       |   10 +-
 vendor/k8s.io/apimachinery/pkg/util/sets/byte.go   |    6 +-
 vendor/k8s.io/apimachinery/pkg/util/sets/doc.go    |    4 +-
 vendor/k8s.io/apimachinery/pkg/util/sets/empty.go  |    4 +-
 vendor/k8s.io/apimachinery/pkg/util/sets/int.go    |    6 +-
 vendor/k8s.io/apimachinery/pkg/util/sets/int64.go  |    6 +-
 vendor/k8s.io/apimachinery/pkg/util/sets/string.go |    6 +-
 vendor/k8s.io/apimachinery/pkg/util/wait/wait.go   |   22 +-
 vendor/k8s.io/apimachinery/pkg/version/helpers.go  |   88 +
 vendor/k8s.io/apimachinery/pkg/watch/filter.go     |    6 +-
 vendor/k8s.io/apimachinery/pkg/watch/mux.go        |    6 +-
 .../pkg/watch/zz_generated.deepcopy.go             |    2 +-
 .../forked/golang/reflect/deep_equal.go            |    2 +-
 .../k8s.io/client-go/discovery/cached/memcache.go  |   10 +-
 .../k8s.io/client-go/discovery/cached_discovery.go |  282 +
 .../k8s.io/client-go/discovery/discovery_client.go |  122 +-
 vendor/k8s.io/client-go/discovery/round_tripper.go |   51 +
 vendor/k8s.io/client-go/discovery/unstructured.go  |   24 +-
 vendor/k8s.io/client-go/dynamic/client.go          |  379 -
 vendor/k8s.io/client-go/dynamic/client_pool.go     |  122 -
 vendor/k8s.io/client-go/dynamic/dynamic_util.go    |   96 -
 vendor/k8s.io/client-go/dynamic/interface.go       |   59 +
 .../k8s.io/client-go/dynamic}/scheme.go            |   35 +-
 vendor/k8s.io/client-go/dynamic/simple.go          |  287 +
 vendor/k8s.io/client-go/kubernetes/clientset.go    |   24 +-
 vendor/k8s.io/client-go/kubernetes/doc.go          |    2 +-
 vendor/k8s.io/client-go/kubernetes/scheme/doc.go   |    2 +-
 .../k8s.io/client-go/kubernetes/scheme/register.go |    4 +-
 .../v1alpha1/admissionregistration_client.go       |    2 +-
 .../typed/admissionregistration/v1alpha1/doc.go    |    2 +-
 .../v1alpha1/generated_expansion.go                |    2 +-
 .../v1alpha1/initializerconfiguration.go           |    2 +-
 .../v1beta1/admissionregistration_client.go        |    2 +-
 .../typed/admissionregistration/v1beta1/doc.go     |    2 +-
 .../v1beta1/generated_expansion.go                 |    2 +-
 .../v1beta1/mutatingwebhookconfiguration.go        |    2 +-
 .../v1beta1/validatingwebhookconfiguration.go      |    2 +-
 .../kubernetes/typed/apps/v1/apps_client.go        |    2 +-
 .../kubernetes/typed/apps/v1/controllerrevision.go |    2 +-
 .../kubernetes/typed/apps/v1/daemonset.go          |    2 +-
 .../kubernetes/typed/apps/v1/deployment.go         |    2 +-
 .../client-go/kubernetes/typed/apps/v1/doc.go      |    2 +-
 .../typed/apps/v1/generated_expansion.go           |    2 +-
 .../kubernetes/typed/apps/v1/replicaset.go         |    2 +-
 .../kubernetes/typed/apps/v1/statefulset.go        |    2 +-
 .../kubernetes/typed/apps/v1beta1/apps_client.go   |    2 +-
 .../typed/apps/v1beta1/controllerrevision.go       |    2 +-
 .../kubernetes/typed/apps/v1beta1/deployment.go    |    2 +-
 .../client-go/kubernetes/typed/apps/v1beta1/doc.go |    2 +-
 .../typed/apps/v1beta1/generated_expansion.go      |    2 +-
 .../kubernetes/typed/apps/v1beta1/scale.go         |    2 +-
 .../kubernetes/typed/apps/v1beta1/statefulset.go   |    2 +-
 .../kubernetes/typed/apps/v1beta2/apps_client.go   |    2 +-
 .../typed/apps/v1beta2/controllerrevision.go       |    2 +-
 .../kubernetes/typed/apps/v1beta2/daemonset.go     |    2 +-
 .../kubernetes/typed/apps/v1beta2/deployment.go    |    2 +-
 .../client-go/kubernetes/typed/apps/v1beta2/doc.go |    2 +-
 .../typed/apps/v1beta2/generated_expansion.go      |    2 +-
 .../kubernetes/typed/apps/v1beta2/replicaset.go    |    2 +-
 .../kubernetes/typed/apps/v1beta2/scale.go         |    2 +-
 .../kubernetes/typed/apps/v1beta2/statefulset.go   |    2 +-
 .../authentication/v1/authentication_client.go     |    2 +-
 .../kubernetes/typed/authentication/v1/doc.go      |    2 +-
 .../typed/authentication/v1/generated_expansion.go |    2 +-
 .../typed/authentication/v1/tokenreview.go         |    2 +-
 .../v1beta1/authentication_client.go               |    2 +-
 .../kubernetes/typed/authentication/v1beta1/doc.go |    2 +-
 .../authentication/v1beta1/generated_expansion.go  |    2 +-
 .../typed/authentication/v1beta1/tokenreview.go    |    2 +-
 .../typed/authorization/v1/authorization_client.go |    2 +-
 .../kubernetes/typed/authorization/v1/doc.go       |    2 +-
 .../typed/authorization/v1/generated_expansion.go  |    2 +-
 .../authorization/v1/localsubjectaccessreview.go   |    2 +-
 .../authorization/v1/selfsubjectaccessreview.go    |    2 +-
 .../authorization/v1/selfsubjectrulesreview.go     |    2 +-
 .../typed/authorization/v1/subjectaccessreview.go  |    2 +-
 .../authorization/v1beta1/authorization_client.go  |    2 +-
 .../kubernetes/typed/authorization/v1beta1/doc.go  |    2 +-
 .../authorization/v1beta1/generated_expansion.go   |    2 +-
 .../v1beta1/localsubjectaccessreview.go            |    2 +-
 .../v1beta1/selfsubjectaccessreview.go             |    2 +-
 .../v1beta1/selfsubjectrulesreview.go              |    2 +-
 .../authorization/v1beta1/subjectaccessreview.go   |    2 +-
 .../typed/autoscaling/v1/autoscaling_client.go     |    2 +-
 .../kubernetes/typed/autoscaling/v1/doc.go         |    2 +-
 .../typed/autoscaling/v1/generated_expansion.go    |    2 +-
 .../autoscaling/v1/horizontalpodautoscaler.go      |    2 +-
 .../autoscaling/v2beta1/autoscaling_client.go      |    2 +-
 .../kubernetes/typed/autoscaling/v2beta1/doc.go    |    2 +-
 .../autoscaling/v2beta1/generated_expansion.go     |    2 +-
 .../autoscaling/v2beta1/horizontalpodautoscaler.go |    2 +-
 .../kubernetes/typed/batch/v1/batch_client.go      |    2 +-
 .../client-go/kubernetes/typed/batch/v1/doc.go     |    2 +-
 .../typed/batch/v1/generated_expansion.go          |    2 +-
 .../client-go/kubernetes/typed/batch/v1/job.go     |    2 +-
 .../kubernetes/typed/batch/v1beta1/batch_client.go |    2 +-
 .../kubernetes/typed/batch/v1beta1/cronjob.go      |    2 +-
 .../kubernetes/typed/batch/v1beta1/doc.go          |    2 +-
 .../typed/batch/v1beta1/generated_expansion.go     |    2 +-
 .../typed/batch/v2alpha1/batch_client.go           |    2 +-
 .../kubernetes/typed/batch/v2alpha1/cronjob.go     |    2 +-
 .../kubernetes/typed/batch/v2alpha1/doc.go         |    2 +-
 .../typed/batch/v2alpha1/generated_expansion.go    |    2 +-
 .../certificates/v1beta1/certificates_client.go    |    2 +-
 .../v1beta1/certificatesigningrequest.go           |    2 +-
 .../kubernetes/typed/certificates/v1beta1/doc.go   |    2 +-
 .../certificates/v1beta1/generated_expansion.go    |    2 +-
 .../kubernetes/typed/core/v1/componentstatus.go    |    2 +-
 .../kubernetes/typed/core/v1/configmap.go          |    2 +-
 .../kubernetes/typed/core/v1/core_client.go        |    2 +-
 .../client-go/kubernetes/typed/core/v1/doc.go      |    2 +-
 .../kubernetes/typed/core/v1/endpoints.go          |    2 +-
 .../client-go/kubernetes/typed/core/v1/event.go    |    2 +-
 .../typed/core/v1/generated_expansion.go           |    2 +-
 .../kubernetes/typed/core/v1/limitrange.go         |    2 +-
 .../kubernetes/typed/core/v1/namespace.go          |   13 +-
 .../client-go/kubernetes/typed/core/v1/node.go     |    2 +-
 .../kubernetes/typed/core/v1/persistentvolume.go   |    2 +-
 .../typed/core/v1/persistentvolumeclaim.go         |    2 +-
 .../client-go/kubernetes/typed/core/v1/pod.go      |    2 +-
 .../kubernetes/typed/core/v1/podtemplate.go        |    2 +-
 .../typed/core/v1/replicationcontroller.go         |    2 +-
 .../kubernetes/typed/core/v1/resourcequota.go      |    2 +-
 .../client-go/kubernetes/typed/core/v1/secret.go   |    2 +-
 .../client-go/kubernetes/typed/core/v1/service.go  |   14 +-
 .../kubernetes/typed/core/v1/serviceaccount.go     |    2 +-
 .../kubernetes/typed/events/v1beta1/doc.go         |    2 +-
 .../kubernetes/typed/events/v1beta1/event.go       |    2 +-
 .../typed/events/v1beta1/events_client.go          |    2 +-
 .../typed/events/v1beta1/generated_expansion.go    |    2 +-
 .../typed/extensions/v1beta1/daemonset.go          |    2 +-
 .../typed/extensions/v1beta1/deployment.go         |    2 +-
 .../kubernetes/typed/extensions/v1beta1/doc.go     |    2 +-
 .../typed/extensions/v1beta1/extensions_client.go  |    2 +-
 .../extensions/v1beta1/generated_expansion.go      |    2 +-
 .../kubernetes/typed/extensions/v1beta1/ingress.go |    2 +-
 .../typed/extensions/v1beta1/podsecuritypolicy.go  |    2 +-
 .../typed/extensions/v1beta1/replicaset.go         |    2 +-
 .../kubernetes/typed/extensions/v1beta1/scale.go   |    2 +-
 .../kubernetes/typed/networking/v1/doc.go          |    2 +-
 .../typed/networking/v1/generated_expansion.go     |    2 +-
 .../typed/networking/v1/networking_client.go       |    2 +-
 .../typed/networking/v1/networkpolicy.go           |    2 +-
 .../kubernetes/typed/policy/v1beta1/doc.go         |    2 +-
 .../kubernetes/typed/policy/v1beta1/eviction.go    |    2 +-
 .../typed/policy/v1beta1/generated_expansion.go    |    2 +-
 .../typed/policy/v1beta1/poddisruptionbudget.go    |    2 +-
 .../typed/policy/v1beta1/podsecuritypolicy.go      |    2 +-
 .../typed/policy/v1beta1/policy_client.go          |    2 +-
 .../kubernetes/typed/rbac/v1/clusterrole.go        |    2 +-
 .../kubernetes/typed/rbac/v1/clusterrolebinding.go |    2 +-
 .../client-go/kubernetes/typed/rbac/v1/doc.go      |    2 +-
 .../typed/rbac/v1/generated_expansion.go           |    2 +-
 .../kubernetes/typed/rbac/v1/rbac_client.go        |    2 +-
 .../client-go/kubernetes/typed/rbac/v1/role.go     |    2 +-
 .../kubernetes/typed/rbac/v1/rolebinding.go        |    2 +-
 .../kubernetes/typed/rbac/v1alpha1/clusterrole.go  |    2 +-
 .../typed/rbac/v1alpha1/clusterrolebinding.go      |    2 +-
 .../kubernetes/typed/rbac/v1alpha1/doc.go          |    2 +-
 .../typed/rbac/v1alpha1/generated_expansion.go     |    2 +-
 .../kubernetes/typed/rbac/v1alpha1/rbac_client.go  |    2 +-
 .../kubernetes/typed/rbac/v1alpha1/role.go         |    2 +-
 .../kubernetes/typed/rbac/v1alpha1/rolebinding.go  |    2 +-
 .../kubernetes/typed/rbac/v1beta1/clusterrole.go   |    2 +-
 .../typed/rbac/v1beta1/clusterrolebinding.go       |    2 +-
 .../client-go/kubernetes/typed/rbac/v1beta1/doc.go |    2 +-
 .../typed/rbac/v1beta1/generated_expansion.go      |    2 +-
 .../kubernetes/typed/rbac/v1beta1/rbac_client.go   |    2 +-
 .../kubernetes/typed/rbac/v1beta1/role.go          |    2 +-
 .../kubernetes/typed/rbac/v1beta1/rolebinding.go   |    2 +-
 .../kubernetes/typed/scheduling/v1alpha1/doc.go    |    2 +-
 .../scheduling/v1alpha1/generated_expansion.go     |    2 +-
 .../typed/scheduling/v1alpha1/priorityclass.go     |    2 +-
 .../typed/scheduling/v1alpha1/scheduling_client.go |    2 +-
 .../typed/{apps => scheduling}/v1beta1/doc.go      |    2 +-
 .../{v1alpha1 => v1beta1}/generated_expansion.go   |    4 +-
 .../{v1alpha1 => v1beta1}/priorityclass.go         |   38 +-
 .../v1beta1/scheduling_client.go}                  |   34 +-
 .../kubernetes/typed/settings/v1alpha1/doc.go      |    2 +-
 .../typed/settings/v1alpha1/generated_expansion.go |    2 +-
 .../typed/settings/v1alpha1/podpreset.go           |    2 +-
 .../typed/settings/v1alpha1/settings_client.go     |    2 +-
 .../client-go/kubernetes/typed/storage/v1/doc.go   |    2 +-
 .../typed/storage/v1/generated_expansion.go        |    2 +-
 .../kubernetes/typed/storage/v1/storage_client.go  |    2 +-
 .../kubernetes/typed/storage/v1/storageclass.go    |    2 +-
 .../kubernetes/typed/storage/v1alpha1/doc.go       |    2 +-
 .../typed/storage/v1alpha1/generated_expansion.go  |    2 +-
 .../typed/storage/v1alpha1/storage_client.go       |    2 +-
 .../typed/storage/v1alpha1/volumeattachment.go     |    2 +-
 .../kubernetes/typed/storage/v1beta1/doc.go        |    2 +-
 .../typed/storage/v1beta1/generated_expansion.go   |    2 +-
 .../typed/storage/v1beta1/storage_client.go        |    2 +-
 .../typed/storage/v1beta1/storageclass.go          |    2 +-
 .../typed/storage/v1beta1/volumeattachment.go      |    2 +-
 .../pkg/apis/clientauthentication/types.go         |    7 +
 .../apis/clientauthentication/v1alpha1/types.go    |    8 +
 .../v1alpha1/zz_generated.conversion.go            |    6 +-
 .../v1alpha1/zz_generated.deepcopy.go              |    2 +-
 .../v1alpha1/zz_generated.defaults.go              |    2 +-
 .../clientauthentication/v1beta1/conversion.go}    |   15 +-
 .../{v1alpha1 => v1beta1}/doc.go                   |    2 +-
 .../{v1alpha1 => v1beta1}/register.go              |    4 +-
 .../{v1alpha1 => v1beta1}/types.go                 |   31 +-
 .../v1beta1/zz_generated.conversion.go             |  114 +
 .../{v1alpha1 => v1beta1}/zz_generated.deepcopy.go |   43 +-
 .../v1beta1/zz_generated.defaults.go               |    2 +-
 .../clientauthentication/zz_generated.deepcopy.go  |    2 +-
 vendor/k8s.io/client-go/pkg/version/base.go        |    2 +-
 .../client-go/plugin/pkg/client/auth/exec/exec.go  |  156 +-
 .../client-go/plugin/pkg/client/auth/gcp/gcp.go    |    2 +-
 vendor/k8s.io/client-go/rest/config.go             |    8 +-
 vendor/k8s.io/client-go/rest/request.go            |   37 +-
 vendor/k8s.io/client-go/rest/transport.go          |   65 +-
 .../k8s.io/client-go/rest/zz_generated.deepcopy.go |    2 +-
 .../client-go/restmapper/category_expansion.go     |  119 +
 .../restmapper.go => restmapper/discovery.go}      |   34 +-
 vendor/k8s.io/client-go/restmapper/shortcut.go     |  172 +
 vendor/k8s.io/client-go/tools/cache/listwatch.go   |    3 +-
 .../client-go/tools/cache/mutation_detector.go     |    3 +
 .../client-go/tools/clientcmd/api/v1/conversion.go |   25 +-
 .../clientcmd/api/v1/zz_generated.deepcopy.go      |    2 +-
 .../tools/clientcmd/api/zz_generated.deepcopy.go   |    2 +-
 .../client-go/tools/clientcmd/auth_loaders.go      |   11 +-
 .../client-go/tools/clientcmd/client_config.go     |   23 +-
 vendor/k8s.io/client-go/tools/clientcmd/config.go  |    4 +-
 vendor/k8s.io/client-go/tools/clientcmd/loader.go  |   18 +-
 .../tools/clientcmd/merged_client_builder.go       |    3 +-
 vendor/k8s.io/client-go/tools/pager/pager.go       |    3 +-
 vendor/k8s.io/client-go/tools/reference/ref.go     |    8 +-
 vendor/k8s.io/client-go/transport/cache.go         |   12 +-
 vendor/k8s.io/client-go/transport/config.go        |   13 +-
 .../k8s.io/client-go/transport/round_trippers.go   |    2 +-
 vendor/k8s.io/client-go/transport/transport.go     |   34 +-
 vendor/k8s.io/client-go/util/cert/io.go            |   37 +-
 .../client-go/util/connrotation/connrotation.go    |  105 +
 .../client-go/util/workqueue/delaying_queue.go     |   10 +-
 vendor/k8s.io/code-generator/Godeps/Godeps.json    |  536 +-
 vendor/k8s.io/code-generator/SECURITY_CONTACTS     |   17 +
 .../apiserver/apis/example/install/install.go      |   20 +-
 .../_examples/apiserver/apis/example/v1/doc.go     |    1 +
 .../apis/example/v1/zz_generated.conversion.go     |    2 +-
 .../apis/example/v1/zz_generated.deepcopy.go       |    2 +-
 .../apis/example/v1/zz_generated.defaults.go       |    2 +-
 .../apis/example/zz_generated.deepcopy.go          |    2 +-
 .../apiserver/apis/example2/install/install.go     |   20 +-
 .../_examples/apiserver/apis/example2/v1/doc.go    |    1 +
 .../apis/example2/v1/zz_generated.conversion.go    |    2 +-
 .../apis/example2/v1/zz_generated.deepcopy.go      |    2 +-
 .../apis/example2/v1/zz_generated.defaults.go      |    2 +-
 .../apis/example2/zz_generated.deepcopy.go         |    2 +-
 .../clientset/internalversion/clientset.go         |    4 +-
 .../apiserver/clientset/internalversion/doc.go     |    2 +-
 .../internalversion/fake/clientset_generated.go    |   11 +-
 .../clientset/internalversion/fake/doc.go          |    2 +-
 .../clientset/internalversion/fake/register.go     |    2 +-
 .../clientset/internalversion/scheme/doc.go        |    2 +-
 .../clientset/internalversion/scheme/register.go   |   17 +-
 .../typed/example/internalversion/doc.go           |    2 +-
 .../example/internalversion/example_client.go      |   11 +-
 .../typed/example/internalversion/fake/doc.go      |    2 +-
 .../internalversion/fake/fake_example_client.go    |    2 +-
 .../example/internalversion/fake/fake_testtype.go  |    4 +-
 .../example/internalversion/generated_expansion.go |    2 +-
 .../typed/example/internalversion/testtype.go      |    2 +-
 .../typed/example2/internalversion/doc.go          |    2 +-
 .../example2/internalversion/example2_client.go    |   11 +-
 .../typed/example2/internalversion/fake/doc.go     |    2 +-
 .../internalversion/fake/fake_example2_client.go   |    2 +-
 .../example2/internalversion/fake/fake_testtype.go |    4 +-
 .../internalversion/generated_expansion.go         |    2 +-
 .../typed/example2/internalversion/testtype.go     |    2 +-
 .../apiserver/clientset/versioned/clientset.go     |    4 +-
 .../_examples/apiserver/clientset/versioned/doc.go |    2 +-
 .../versioned/fake/clientset_generated.go          |   11 +-
 .../apiserver/clientset/versioned/fake/doc.go      |    2 +-
 .../apiserver/clientset/versioned/fake/register.go |    2 +-
 .../apiserver/clientset/versioned/scheme/doc.go    |    2 +-
 .../clientset/versioned/scheme/register.go         |    2 +-
 .../clientset/versioned/typed/example/v1/doc.go    |    2 +-
 .../versioned/typed/example/v1/example_client.go   |    2 +-
 .../versioned/typed/example/v1/fake/doc.go         |    2 +-
 .../typed/example/v1/fake/fake_example_client.go   |    2 +-
 .../typed/example/v1/fake/fake_testtype.go         |    4 +-
 .../typed/example/v1/generated_expansion.go        |    2 +-
 .../versioned/typed/example/v1/testtype.go         |    2 +-
 .../clientset/versioned/typed/example2/v1/doc.go   |    2 +-
 .../versioned/typed/example2/v1/example2_client.go |    2 +-
 .../versioned/typed/example2/v1/fake/doc.go        |    2 +-
 .../typed/example2/v1/fake/fake_example2_client.go |    2 +-
 .../typed/example2/v1/fake/fake_testtype.go        |    4 +-
 .../typed/example2/v1/generated_expansion.go       |    2 +-
 .../versioned/typed/example2/v1/testtype.go        |    2 +-
 .../externalversions/example/interface.go          |    2 +-
 .../externalversions/example/v1/interface.go       |    2 +-
 .../externalversions/example/v1/testtype.go        |    2 +-
 .../externalversions/example2/interface.go         |    2 +-
 .../externalversions/example2/v1/interface.go      |    2 +-
 .../externalversions/example2/v1/testtype.go       |    2 +-
 .../informers/externalversions/factory.go          |   63 +-
 .../informers/externalversions/generic.go          |    2 +-
 .../internalinterfaces/factory_interfaces.go       |    2 +-
 .../informers/internalversion/example/interface.go |    2 +-
 .../example/internalversion/interface.go           |    2 +-
 .../example/internalversion/testtype.go            |    2 +-
 .../internalversion/example2/interface.go          |    2 +-
 .../example2/internalversion/interface.go          |    2 +-
 .../example2/internalversion/testtype.go           |    2 +-
 .../apiserver/informers/internalversion/factory.go |   63 +-
 .../apiserver/informers/internalversion/generic.go |    2 +-
 .../internalinterfaces/factory_interfaces.go       |    2 +-
 .../example/internalversion/expansion_generated.go |    2 +-
 .../listers/example/internalversion/testtype.go    |    2 +-
 .../listers/example/v1/expansion_generated.go      |    2 +-
 .../apiserver/listers/example/v1/testtype.go       |    2 +-
 .../internalversion/expansion_generated.go         |    2 +-
 .../listers/example2/internalversion/testtype.go   |    2 +-
 .../listers/example2/v1/expansion_generated.go     |    2 +-
 .../apiserver/listers/example2/v1/testtype.go      |    2 +-
 .../_examples/crd/apis/example/v1/doc.go           |    1 +
 .../_examples/crd/apis/example/v1/types.go         |   27 +
 .../crd/apis/example/v1/zz_generated.deepcopy.go   |   78 +-
 .../apis/example/v1/zz_generated.defaults.go       |    2 +-
 .../_examples/crd/apis/example2/v1/doc.go          |    1 +
 .../crd/apis/example2/v1/zz_generated.deepcopy.go  |    2 +-
 .../apis/example2}/v1/zz_generated.defaults.go     |    2 +-
 .../_examples/crd/clientset/versioned/clientset.go |    4 +-
 .../_examples/crd/clientset/versioned/doc.go       |    2 +-
 .../versioned/fake/clientset_generated.go          |   11 +-
 .../_examples/crd/clientset/versioned/fake/doc.go  |    2 +-
 .../crd/clientset/versioned/fake/register.go       |    2 +-
 .../crd/clientset/versioned/scheme/doc.go          |    2 +-
 .../crd/clientset/versioned/scheme/register.go     |    2 +-
 .../versioned/typed/example/v1/clustertesttype.go  |  193 +
 .../clientset/versioned/typed/example/v1/doc.go    |    2 +-
 .../versioned/typed/example/v1/example_client.go   |    7 +-
 .../versioned/typed/example/v1/fake/doc.go         |    2 +-
 .../typed/example/v1/fake/fake_clustertesttype.go  |  152 +
 .../typed/example/v1/fake/fake_example_client.go   |    6 +-
 .../typed/example/v1/fake/fake_testtype.go         |    4 +-
 .../typed/example/v1/generated_expansion.go        |    4 +-
 .../versioned/typed/example/v1/testtype.go         |    2 +-
 .../clientset/versioned/typed/example2/v1/doc.go   |    2 +-
 .../versioned/typed/example2/v1/example2_client.go |    2 +-
 .../versioned/typed/example2/v1/fake/doc.go        |    2 +-
 .../typed/example2/v1/fake/fake_example2_client.go |    2 +-
 .../typed/example2/v1/fake/fake_testtype.go        |    4 +-
 .../typed/example2/v1/generated_expansion.go       |    2 +-
 .../versioned/typed/example2/v1/testtype.go        |    2 +-
 .../externalversions/example/interface.go          |    2 +-
 .../example/v1/{testtype.go => clustertesttype.go} |   41 +-
 .../externalversions/example/v1/interface.go       |    9 +-
 .../externalversions/example/v1/testtype.go        |    2 +-
 .../externalversions/example2/interface.go         |    2 +-
 .../externalversions/example2/v1/interface.go      |    2 +-
 .../externalversions/example2/v1/testtype.go       |    2 +-
 .../crd/informers/externalversions/factory.go      |   63 +-
 .../crd/informers/externalversions/generic.go      |    4 +-
 .../internalinterfaces/factory_interfaces.go       |    2 +-
 .../crd/listers/example/v1/clustertesttype.go      |   65 +
 .../crd/listers/example/v1/expansion_generated.go  |    6 +-
 .../_examples/crd/listers/example/v1/testtype.go   |    2 +-
 .../crd/listers/example2/v1/expansion_generated.go |    2 +-
 .../_examples/crd/listers/example2/v1/testtype.go  |    2 +-
 vendor/k8s.io/code-generator/cmd/client-gen/BUILD  |   45 -
 .../code-generator/cmd/client-gen/args/BUILD       |   46 -
 .../code-generator/cmd/client-gen/generators/BUILD |   50 -
 .../cmd/client-gen/generators/client_generator.go  |   24 +-
 .../cmd/client-gen/generators/fake/BUILD           |   40 -
 .../fake/generator_fake_for_clientset.go           |   11 +-
 .../generators/fake/generator_fake_for_type.go     |   31 +-
 .../generators/generator_for_clientset.go          |    4 +-
 .../client-gen/generators/generator_for_group.go   |    9 +-
 .../cmd/client-gen/generators/scheme/BUILD         |   32 -
 .../generators/scheme/generator_for_scheme.go      |   44 +-
 .../cmd/client-gen/generators/util/BUILD           |   33 -
 .../code-generator/cmd/client-gen/path/BUILD       |   25 -
 .../code-generator/cmd/client-gen/types/BUILD      |   36 -
 .../k8s.io/code-generator/cmd/conversion-gen/BUILD |   43 -
 .../code-generator/cmd/conversion-gen/args/BUILD   |   26 -
 .../code-generator/cmd/conversion-gen/args/args.go |    2 +-
 .../cmd/conversion-gen/generators/BUILD            |   33 -
 .../k8s.io/code-generator/cmd/deepcopy-gen/BUILD   |   42 -
 .../code-generator/cmd/deepcopy-gen/args/BUILD     |   27 -
 .../k8s.io/code-generator/cmd/defaulter-gen/BUILD  |   42 -
 .../code-generator/cmd/defaulter-gen/args/BUILD    |   27 -
 .../k8s.io/code-generator/cmd/go-to-protobuf/BUILD |   39 -
 .../cmd/go-to-protobuf/protobuf/BUILD              |   51 -
 .../cmd/go-to-protobuf/protoc-gen-gogo/BUILD       |   37 -
 vendor/k8s.io/code-generator/cmd/import-boss/BUILD |   37 -
 .../k8s.io/code-generator/cmd/informer-gen/BUILD   |   43 -
 .../code-generator/cmd/informer-gen/args/BUILD     |   27 -
 .../cmd/informer-gen/generators/BUILD              |   45 -
 .../cmd/informer-gen/generators/factory.go         |   72 +-
 .../cmd/informer-gen/generators/types.go           |    1 +
 vendor/k8s.io/code-generator/cmd/lister-gen/BUILD  |   43 -
 .../code-generator/cmd/lister-gen/args/BUILD       |   27 -
 .../code-generator/cmd/lister-gen/generators/BUILD |   38 -
 vendor/k8s.io/code-generator/cmd/openapi-gen/BUILD |   42 -
 .../code-generator/cmd/openapi-gen/args/BUILD      |   26 -
 vendor/k8s.io/code-generator/cmd/set-gen/BUILD     |   41 -
 vendor/k8s.io/code-generator/generate-groups.sh    |    8 +-
 vendor/k8s.io/code-generator/hack/BUILD            |   18 -
 .../k8s.io/code-generator/hack/boilerplate.go.txt  |    2 +-
 .../k8s.io/code-generator/hack/update-codegen.sh   |    2 +-
 .../k8s.io/code-generator/hack/verify-codegen.sh   |    2 +-
 vendor/k8s.io/code-generator/pkg/util/BUILD        |   22 -
 .../third_party/forked/golang/reflect/BUILD        |   25 -
 vendor/k8s.io/kube-openapi/pkg/common/common.go    |    6 +
 .../kube-openapi/pkg/generators/api_linter.go      |   94 +
 .../k8s.io/kube-openapi/pkg/generators/config.go   |  106 +
 .../k8s.io/kube-openapi/pkg/generators/openapi.go  |  129 +-
 .../pkg/generators/rules/omitempty_match_case.go   |   64 +
 743 files changed, 23822 insertions(+), 11752 deletions(-)
 copy vendor/github.com/go-openapi/{spec/license.go => swag/doc.go} (52%)
 copy vendor/{k8s.io/kube-openapi => github.com/google/btree}/LICENSE (100%)
 create mode 100644 vendor/github.com/google/btree/btree.go
 create mode 100644 vendor/github.com/google/btree/btree_mem.go
 create mode 100644 vendor/github.com/gregjones/httpcache/LICENSE.txt
 create mode 100644 vendor/github.com/gregjones/httpcache/diskcache/diskcache.go
 create mode 100644 vendor/github.com/gregjones/httpcache/httpcache.go
 delete mode 100644 vendor/github.com/howeyc/gopass/LICENSE.txt
 delete mode 100644 vendor/github.com/howeyc/gopass/pass.go
 delete mode 100644 vendor/github.com/howeyc/gopass/terminal.go
 delete mode 100644 vendor/github.com/howeyc/gopass/terminal_solaris.go
 rename vendor/github.com/konsorten/go-windows-terminal-sequences/{license => LICENSE} (100%)
 create mode 100644 vendor/github.com/petar/GoLLRB/AUTHORS
 create mode 100644 vendor/github.com/petar/GoLLRB/LICENSE
 create mode 100644 vendor/github.com/petar/GoLLRB/llrb/avgvar.go
 create mode 100644 vendor/github.com/petar/GoLLRB/llrb/iterator.go
 create mode 100644 vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go
 create mode 100644 vendor/github.com/petar/GoLLRB/llrb/llrb.go
 create mode 100644 vendor/github.com/petar/GoLLRB/llrb/util.go
 copy vendor/github.com/{sirupsen/logrus => peterbourgon/diskv}/LICENSE (94%)
 create mode 100644 vendor/github.com/peterbourgon/diskv/compression.go
 create mode 100644 vendor/github.com/peterbourgon/diskv/diskv.go
 create mode 100644 vendor/github.com/peterbourgon/diskv/index.go
 delete mode 100644 vendor/github.com/prometheus/client_golang/AUTHORS.md
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/wrap.go
 create mode 100644 vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go
 delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_bsd.go
 delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_linux.go
 create mode 100644 vendor/github.com/sirupsen/logrus/terminal_notwindows.go
 create mode 100644 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
 create mode 100644 vendor/golang.org/x/sys/unix/openbsd_unveil.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go
 rename vendor/{github.com/sirupsen/logrus/terminal_appengine.go => golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go} (51%)
 create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go
 create mode 100644 vendor/golang.org/x/tools/internal/gopathwalk/walk.go
 delete mode 100644 vendor/k8s.io/api/core/v1/meta.go
 copy vendor/k8s.io/api/scheduling/{v1alpha1 => v1beta1}/doc.go (86%)
 copy vendor/k8s.io/api/scheduling/{v1alpha1 => v1beta1}/generated.pb.go (81%)
 copy vendor/k8s.io/api/scheduling/{v1alpha1 => v1beta1}/register.go (95%)
 copy vendor/k8s.io/api/scheduling/{v1alpha1 => v1beta1}/types.go (97%)
 copy vendor/k8s.io/api/scheduling/{v1alpha1 => v1beta1}/types_swagger_doc_generated.go (96%)
 copy vendor/k8s.io/api/scheduling/{v1alpha1 => v1beta1}/zz_generated.deepcopy.go (97%)
 delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/version/helpers.go
 create mode 100644 vendor/k8s.io/client-go/discovery/cached_discovery.go
 create mode 100644 vendor/k8s.io/client-go/discovery/round_tripper.go
 delete mode 100644 vendor/k8s.io/client-go/dynamic/client.go
 delete mode 100644 vendor/k8s.io/client-go/dynamic/client_pool.go
 delete mode 100644 vendor/k8s.io/client-go/dynamic/dynamic_util.go
 create mode 100644 vendor/k8s.io/client-go/dynamic/interface.go
 copy {pkg/util/kubernetes/customclient => vendor/k8s.io/client-go/dynamic}/scheme.go (62%)
 create mode 100644 vendor/k8s.io/client-go/dynamic/simple.go
 copy vendor/k8s.io/client-go/kubernetes/typed/{apps => scheduling}/v1beta1/doc.go (94%)
 copy vendor/k8s.io/client-go/kubernetes/typed/scheduling/{v1alpha1 => v1beta1}/generated_expansion.go (91%)
 copy vendor/k8s.io/client-go/kubernetes/typed/scheduling/{v1alpha1 => v1beta1}/priorityclass.go (77%)
 copy vendor/k8s.io/client-go/kubernetes/typed/{events/v1beta1/events_client.go => scheduling/v1beta1/scheduling_client.go} (62%)
 copy vendor/k8s.io/client-go/{kubernetes/typed/apps/v1beta1/generated_expansion.go => pkg/apis/clientauthentication/v1beta1/conversion.go} (64%)
 copy vendor/k8s.io/client-go/pkg/apis/clientauthentication/{v1alpha1 => v1beta1}/doc.go (90%)
 copy vendor/k8s.io/client-go/pkg/apis/clientauthentication/{v1alpha1 => v1beta1}/register.go (97%)
 copy vendor/k8s.io/client-go/pkg/apis/clientauthentication/{v1alpha1 => v1beta1}/types.go (69%)
 create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go
 copy vendor/k8s.io/client-go/pkg/apis/clientauthentication/{v1alpha1 => v1beta1}/zz_generated.deepcopy.go (74%)
 copy vendor/k8s.io/{apimachinery/pkg/apis/meta => client-go/pkg/apis/clientauthentication}/v1beta1/zz_generated.defaults.go (96%)
 create mode 100644 vendor/k8s.io/client-go/restmapper/category_expansion.go
 rename vendor/k8s.io/client-go/{discovery/restmapper.go => restmapper/discovery.go} (92%)
 create mode 100644 vendor/k8s.io/client-go/restmapper/shortcut.go
 create mode 100644 vendor/k8s.io/client-go/util/connrotation/connrotation.go
 create mode 100644 vendor/k8s.io/code-generator/SECURITY_CONTACTS
 copy vendor/k8s.io/code-generator/_examples/{apiserver => crd}/apis/example/v1/zz_generated.defaults.go (96%)
 copy vendor/k8s.io/code-generator/_examples/{apiserver/apis/example => crd/apis/example2}/v1/zz_generated.defaults.go (96%)
 create mode 100644 vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/clustertesttype.go
 create mode 100644 vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go
 copy vendor/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/{testtype.go => clustertesttype.go} (56%)
 create mode 100644 vendor/k8s.io/code-generator/_examples/crd/listers/example/v1/clustertesttype.go
 delete mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/args/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/util/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/path/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/types/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/conversion-gen/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/conversion-gen/args/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/conversion-gen/generators/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/deepcopy-gen/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/deepcopy-gen/args/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/defaulter-gen/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/defaulter-gen/args/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/import-boss/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/args/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/args/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/generators/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/openapi-gen/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/openapi-gen/args/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/cmd/set-gen/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/hack/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/pkg/util/BUILD
 delete mode 100644 vendor/k8s.io/code-generator/third_party/forked/golang/reflect/BUILD
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/config.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go


[camel-k] 01/02: upgrade to operator-sdk 0.0.7

Posted by lb...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lburgazzoli pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel-k.git

commit 5618b0cbf3bae41c006369cd1c9f76b22a15435a
Author: nferraro <ni...@gmail.com>
AuthorDate: Fri Oct 26 09:45:07 2018 +0200

    upgrade to operator-sdk 0.0.7
---
 Gopkg.lock                                         |  146 +-
 Gopkg.toml                                         |   19 +-
 pkg/apis/camel/v1alpha1/types_support.go           |   14 +
 pkg/install/operator.go                            |   20 +
 vendor/github.com/go-openapi/swag/convert.go       |   26 +-
 vendor/github.com/go-openapi/swag/doc.go           |   33 +
 vendor/github.com/go-openapi/swag/json.go          |   15 +-
 vendor/github.com/go-openapi/swag/util.go          |   79 +-
 vendor/github.com/google/btree/LICENSE             |  202 +
 vendor/github.com/google/btree/btree.go            |  890 +++
 vendor/github.com/google/btree/btree_mem.go        |   76 +
 vendor/github.com/gregjones/httpcache/LICENSE.txt  |    7 +
 .../gregjones/httpcache/diskcache/diskcache.go     |   61 +
 vendor/github.com/gregjones/httpcache/httpcache.go |  551 ++
 vendor/github.com/howeyc/gopass/LICENSE.txt        |   15 -
 vendor/github.com/howeyc/gopass/pass.go            |  110 -
 vendor/github.com/howeyc/gopass/terminal.go        |   25 -
 .../github.com/howeyc/gopass/terminal_solaris.go   |   69 -
 .../{license => LICENSE}                           |    0
 .../operator-sdk/pkg/k8sclient/client.go           |   51 +-
 .../operator-sdk/version/version.go                |    2 +-
 vendor/github.com/petar/GoLLRB/AUTHORS             |    4 +
 vendor/github.com/petar/GoLLRB/LICENSE             |   27 +
 vendor/github.com/petar/GoLLRB/llrb/avgvar.go      |   39 +
 vendor/github.com/petar/GoLLRB/llrb/iterator.go    |   93 +
 vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go  |   46 +
 vendor/github.com/petar/GoLLRB/llrb/llrb.go        |  456 ++
 vendor/github.com/petar/GoLLRB/llrb/util.go        |   17 +
 vendor/github.com/peterbourgon/diskv/LICENSE       |   19 +
 .../github.com/peterbourgon/diskv/compression.go   |   64 +
 vendor/github.com/peterbourgon/diskv/diskv.go      |  624 ++
 vendor/github.com/peterbourgon/diskv/index.go      |  115 +
 .../github.com/prometheus/client_golang/AUTHORS.md |   18 -
 .../client_golang/prometheus/collector.go          |   73 +-
 .../prometheus/client_golang/prometheus/counter.go |  191 +-
 .../prometheus/client_golang/prometheus/desc.go    |   47 +-
 .../prometheus/client_golang/prometheus/doc.go     |   94 +-
 .../prometheus/client_golang/prometheus/fnv.go     |   13 +
 .../prometheus/client_golang/prometheus/gauge.go   |  204 +-
 .../client_golang/prometheus/go_collector.go       |   74 +-
 .../client_golang/prometheus/histogram.go          |  302 +-
 .../prometheus/client_golang/prometheus/http.go    |  151 +-
 .../client_golang/prometheus/internal/metric.go    |   85 +
 .../prometheus/client_golang/prometheus/labels.go  |   70 +
 .../prometheus/client_golang/prometheus/metric.go  |   90 +-
 .../client_golang/prometheus/observer.go           |   52 +
 .../client_golang/prometheus/process_collector.go  |  220 +-
 .../client_golang/prometheus/promhttp/delegator.go |  199 +
 .../prometheus/promhttp/delegator_1_8.go           |  181 +
 .../prometheus/promhttp/delegator_pre_1_8.go       |   44 +
 .../client_golang/prometheus/promhttp/http.go      |  162 +-
 .../prometheus/promhttp/instrument_client.go       |   97 +
 .../prometheus/promhttp/instrument_client_1_8.go   |  144 +
 .../prometheus/promhttp/instrument_server.go       |  447 ++
 .../client_golang/prometheus/registry.go           |  671 +-
 .../prometheus/client_golang/prometheus/summary.go |  192 +-
 .../prometheus/client_golang/prometheus/timer.go   |   51 +
 .../prometheus/client_golang/prometheus/untyped.go |  102 +-
 .../prometheus/client_golang/prometheus/value.go   |   94 +-
 .../prometheus/client_golang/prometheus/vec.go     |  494 +-
 .../prometheus/client_golang/prometheus/wrap.go    |  179 +
 .../prometheus/common/expfmt/text_create.go        |  357 +-
 vendor/github.com/prometheus/common/model/time.go  |    2 +-
 .../prometheus/procfs/internal/util/parse.go       |   15 +-
 .../procfs/internal/util/sysreadfile_linux.go      |   45 +
 vendor/github.com/sirupsen/logrus/entry.go         |   16 +-
 vendor/github.com/sirupsen/logrus/formatter.go     |   15 +-
 .../github.com/sirupsen/logrus/json_formatter.go   |   10 +-
 vendor/github.com/sirupsen/logrus/terminal_bsd.go  |   17 -
 .../github.com/sirupsen/logrus/terminal_linux.go   |   21 -
 .../sirupsen/logrus/terminal_notwindows.go         |    8 +
 .../github.com/sirupsen/logrus/text_formatter.go   |    7 +-
 vendor/golang.org/x/oauth2/internal/token.go       |    1 +
 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s       |   17 +
 vendor/golang.org/x/sys/unix/openbsd_pledge.go     |    6 +-
 vendor/golang.org/x/sys/unix/openbsd_unveil.go     |   44 +
 vendor/golang.org/x/sys/unix/sockcmsg_unix.go      |    2 +-
 vendor/golang.org/x/sys/unix/syscall_aix.go        |   31 +-
 vendor/golang.org/x/sys/unix/syscall_freebsd.go    |  310 +-
 vendor/golang.org/x/sys/unix/syscall_linux.go      |   32 +-
 .../golang.org/x/sys/unix/syscall_linux_amd64.go   |   13 +
 .../golang.org/x/sys/unix/syscall_linux_arm64.go   |    9 +-
 .../golang.org/x/sys/unix/syscall_linux_ppc64x.go  |   13 +
 .../golang.org/x/sys/unix/syscall_linux_riscv64.go |    9 +-
 .../golang.org/x/sys/unix/syscall_linux_s390x.go   |   13 +
 vendor/golang.org/x/sys/unix/syscall_openbsd.go    |   11 +-
 .../golang.org/x/sys/unix/syscall_openbsd_386.go   |    4 +
 .../golang.org/x/sys/unix/syscall_openbsd_arm.go   |    4 +
 vendor/golang.org/x/sys/unix/syscall_unix.go       |    8 +-
 vendor/golang.org/x/sys/unix/types_aix.go          |   12 +-
 vendor/golang.org/x/sys/unix/types_darwin.go       |   12 +-
 vendor/golang.org/x/sys/unix/types_dragonfly.go    |   12 +-
 vendor/golang.org/x/sys/unix/types_freebsd.go      |   77 +-
 vendor/golang.org/x/sys/unix/types_netbsd.go       |   12 +-
 vendor/golang.org/x/sys/unix/types_openbsd.go      |   16 +-
 vendor/golang.org/x/sys/unix/types_solaris.go      |   12 +-
 vendor/golang.org/x/sys/unix/zerrors_linux_386.go  |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_amd64.go   |   35 +-
 vendor/golang.org/x/sys/unix/zerrors_linux_arm.go  |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_arm64.go   |   35 +-
 vendor/golang.org/x/sys/unix/zerrors_linux_mips.go |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_mips64.go  |   35 +-
 .../x/sys/unix/zerrors_linux_mips64le.go           |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_mipsle.go  |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_ppc64.go   |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_ppc64le.go |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_riscv64.go |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_s390x.go   |   35 +-
 .../golang.org/x/sys/unix/zerrors_linux_sparc64.go |  350 +-
 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go   |   97 +-
 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go | 1073 ++-
 .../golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go | 1162 +++
 .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go         | 1042 +++
 .../golang.org/x/sys/unix/zsyscall_freebsd_386.go  |  102 +-
 .../x/sys/unix/zsyscall_freebsd_amd64.go           |  102 +-
 .../golang.org/x/sys/unix/zsyscall_freebsd_arm.go  |  102 +-
 vendor/golang.org/x/sys/unix/zsyscall_linux_386.go |   51 +
 .../golang.org/x/sys/unix/zsyscall_linux_amd64.go  |   66 +
 vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go |   51 +
 .../golang.org/x/sys/unix/zsyscall_linux_arm64.go  |   51 +
 .../golang.org/x/sys/unix/zsyscall_linux_mips.go   |   51 +
 .../golang.org/x/sys/unix/zsyscall_linux_mips64.go |   51 +
 .../x/sys/unix/zsyscall_linux_mips64le.go          |   51 +
 .../golang.org/x/sys/unix/zsyscall_linux_mipsle.go |   51 +
 .../golang.org/x/sys/unix/zsyscall_linux_ppc64.go  |   66 +
 .../x/sys/unix/zsyscall_linux_ppc64le.go           |   66 +
 .../x/sys/unix/zsyscall_linux_riscv64.go           |   51 +
 .../golang.org/x/sys/unix/zsyscall_linux_s390x.go  |   66 +
 .../golang.org/x/sys/unix/zsyscall_openbsd_386.go  |   27 +
 .../x/sys/unix/zsyscall_openbsd_amd64.go           |   27 +
 .../golang.org/x/sys/unix/zsyscall_openbsd_arm.go  |   27 +
 .../x/sys/unix/zsyscall_solaris_amd64.go           |  256 +
 vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go  |    1 +
 .../golang.org/x/sys/unix/zsysnum_linux_arm64.go   |    1 +
 .../golang.org/x/sys/unix/zsysnum_linux_riscv64.go |    1 +
 .../golang.org/x/sys/unix/zsysnum_openbsd_386.go   |   25 +-
 .../golang.org/x/sys/unix/zsysnum_openbsd_arm.go   |   13 +-
 vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go     |   10 +-
 vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go   |   10 +-
 vendor/golang.org/x/sys/unix/ztypes_darwin_386.go  |   10 +-
 .../golang.org/x/sys/unix/ztypes_darwin_amd64.go   |   10 +-
 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go  |   10 +-
 .../golang.org/x/sys/unix/ztypes_darwin_arm64.go   |   10 +-
 .../x/sys/unix/ztypes_dragonfly_amd64.go           |   10 +-
 vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go |  265 +-
 .../golang.org/x/sys/unix/ztypes_freebsd_amd64.go  |  283 +-
 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go |  287 +-
 vendor/golang.org/x/sys/unix/ztypes_linux_386.go   |   33 +-
 vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go |   33 +-
 vendor/golang.org/x/sys/unix/ztypes_linux_arm.go   |   33 +-
 vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go |   33 +-
 vendor/golang.org/x/sys/unix/ztypes_linux_mips.go  |   33 +-
 .../golang.org/x/sys/unix/ztypes_linux_mips64.go   |   33 +-
 .../golang.org/x/sys/unix/ztypes_linux_mips64le.go |   33 +-
 .../golang.org/x/sys/unix/ztypes_linux_mipsle.go   |   33 +-
 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go |   33 +-
 .../golang.org/x/sys/unix/ztypes_linux_ppc64le.go  |   33 +-
 .../golang.org/x/sys/unix/ztypes_linux_riscv64.go  |   33 +-
 vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go |   33 +-
 .../golang.org/x/sys/unix/ztypes_linux_sparc64.go  |   10 +-
 vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go  |   10 +-
 .../golang.org/x/sys/unix/ztypes_netbsd_amd64.go   |   10 +-
 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go  |   10 +-
 vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go |   12 +-
 .../golang.org/x/sys/unix/ztypes_openbsd_amd64.go  |   12 +-
 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go |   12 +-
 .../golang.org/x/sys/unix/ztypes_solaris_amd64.go  |   10 +-
 vendor/golang.org/x/tools/imports/fix.go           |  239 +-
 .../x/tools/internal/fastwalk/fastwalk.go          |    5 +
 .../fastwalk/fastwalk_dirent_namlen_bsd.go}        |   10 +-
 .../fastwalk/fastwalk_dirent_namlen_linux.go       |   24 +
 .../x/tools/internal/fastwalk/fastwalk_portable.go |    8 +
 .../x/tools/internal/fastwalk/fastwalk_unix.go     |   14 +-
 .../golang.org/x/tools/internal/gopathwalk/walk.go |  246 +
 .../admissionregistration/v1alpha1/generated.pb.go |    2 +-
 .../v1alpha1/types_swagger_doc_generated.go        |    4 +-
 .../v1alpha1/zz_generated.deepcopy.go              |    2 +-
 .../admissionregistration/v1beta1/generated.pb.go  |    2 +-
 .../api/admissionregistration/v1beta1/types.go     |    4 +-
 .../v1beta1/types_swagger_doc_generated.go         |    6 +-
 .../v1beta1/zz_generated.deepcopy.go               |    2 +-
 vendor/k8s.io/api/apps/v1/generated.pb.go          |    2 +-
 vendor/k8s.io/api/apps/v1/types.go                 |   16 +-
 .../api/apps/v1/types_swagger_doc_generated.go     |    8 +-
 vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go |    2 +-
 vendor/k8s.io/api/apps/v1beta1/generated.pb.go     |    2 +-
 vendor/k8s.io/api/apps/v1beta1/types.go            |   16 +-
 .../apps/v1beta1/types_swagger_doc_generated.go    |    8 +-
 .../api/apps/v1beta1/zz_generated.deepcopy.go      |    2 +-
 vendor/k8s.io/api/apps/v1beta2/generated.pb.go     |    2 +-
 vendor/k8s.io/api/apps/v1beta2/types.go            |   16 +-
 .../apps/v1beta2/types_swagger_doc_generated.go    |    8 +-
 .../api/apps/v1beta2/zz_generated.deepcopy.go      |    2 +-
 .../k8s.io/api/authentication/v1/generated.pb.go   |    2 +-
 .../v1/types_swagger_doc_generated.go              |    4 +-
 .../api/authentication/v1/zz_generated.deepcopy.go |    2 +-
 .../api/authentication/v1beta1/generated.pb.go     |    2 +-
 .../v1beta1/types_swagger_doc_generated.go         |    4 +-
 .../v1beta1/zz_generated.deepcopy.go               |    2 +-
 vendor/k8s.io/api/authorization/v1/generated.pb.go |    2 +-
 .../v1/types_swagger_doc_generated.go              |    4 +-
 .../api/authorization/v1/zz_generated.deepcopy.go  |    2 +-
 .../api/authorization/v1beta1/generated.pb.go      |    2 +-
 .../v1beta1/types_swagger_doc_generated.go         |    4 +-
 .../authorization/v1beta1/zz_generated.deepcopy.go |    2 +-
 vendor/k8s.io/api/autoscaling/v1/generated.pb.go   |    2 +-
 .../autoscaling/v1/types_swagger_doc_generated.go  |    4 +-
 .../api/autoscaling/v1/zz_generated.deepcopy.go    |    2 +-
 .../k8s.io/api/autoscaling/v2beta1/generated.pb.go |    2 +-
 .../v2beta1/types_swagger_doc_generated.go         |    4 +-
 .../autoscaling/v2beta1/zz_generated.deepcopy.go   |    2 +-
 vendor/k8s.io/api/batch/v1/generated.pb.go         |    2 +-
 .../api/batch/v1/types_swagger_doc_generated.go    |    4 +-
 .../k8s.io/api/batch/v1/zz_generated.deepcopy.go   |    2 +-
 vendor/k8s.io/api/batch/v1beta1/generated.pb.go    |    2 +-
 .../batch/v1beta1/types_swagger_doc_generated.go   |    4 +-
 .../api/batch/v1beta1/zz_generated.deepcopy.go     |    2 +-
 vendor/k8s.io/api/batch/v2alpha1/generated.pb.go   |    2 +-
 .../batch/v2alpha1/types_swagger_doc_generated.go  |    4 +-
 .../api/batch/v2alpha1/zz_generated.deepcopy.go    |    2 +-
 .../api/certificates/v1beta1/generated.pb.go       |    2 +-
 .../v1beta1/types_swagger_doc_generated.go         |    4 +-
 .../certificates/v1beta1/zz_generated.deepcopy.go  |    2 +-
 .../k8s.io/api/core/v1/annotation_key_constants.go |   21 +-
 vendor/k8s.io/api/core/v1/generated.pb.go          | 7398 +++++++++++---------
 vendor/k8s.io/api/core/v1/meta.go                  |  108 -
 vendor/k8s.io/api/core/v1/register.go              |    1 -
 vendor/k8s.io/api/core/v1/resource.go              |    7 -
 vendor/k8s.io/api/core/v1/types.go                 |  618 +-
 .../api/core/v1/types_swagger_doc_generated.go     |  212 +-
 vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go |  447 +-
 vendor/k8s.io/api/events/v1beta1/generated.pb.go   |    2 +-
 .../events/v1beta1/types_swagger_doc_generated.go  |    4 +-
 .../api/events/v1beta1/zz_generated.deepcopy.go    |    2 +-
 .../k8s.io/api/extensions/v1beta1/generated.pb.go  |  592 +-
 vendor/k8s.io/api/extensions/v1beta1/types.go      |  170 +-
 .../v1beta1/types_swagger_doc_generated.go         |   81 +-
 .../extensions/v1beta1/zz_generated.deepcopy.go    |   12 +-
 vendor/k8s.io/api/networking/v1/generated.pb.go    |    2 +-
 vendor/k8s.io/api/networking/v1/types.go           |   25 +-
 .../networking/v1/types_swagger_doc_generated.go   |   12 +-
 .../api/networking/v1/zz_generated.deepcopy.go     |    2 +-
 vendor/k8s.io/api/policy/v1beta1/generated.pb.go   |  346 +-
 vendor/k8s.io/api/policy/v1beta1/types.go          |  121 +-
 .../policy/v1beta1/types_swagger_doc_generated.go  |   69 +-
 .../api/policy/v1beta1/zz_generated.deepcopy.go    |   12 +-
 vendor/k8s.io/api/rbac/v1/generated.pb.go          |    2 +-
 vendor/k8s.io/api/rbac/v1/types.go                 |    6 +-
 .../api/rbac/v1/types_swagger_doc_generated.go     |    4 +-
 vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go |    2 +-
 vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go    |    2 +-
 vendor/k8s.io/api/rbac/v1alpha1/types.go           |    6 +-
 .../rbac/v1alpha1/types_swagger_doc_generated.go   |    4 +-
 .../api/rbac/v1alpha1/zz_generated.deepcopy.go     |    2 +-
 vendor/k8s.io/api/rbac/v1beta1/generated.pb.go     |    2 +-
 vendor/k8s.io/api/rbac/v1beta1/types.go            |    6 +-
 .../rbac/v1beta1/types_swagger_doc_generated.go    |    4 +-
 .../api/rbac/v1beta1/zz_generated.deepcopy.go      |    2 +-
 .../k8s.io/api/scheduling/v1alpha1/generated.pb.go |    2 +-
 .../v1alpha1/types_swagger_doc_generated.go        |    4 +-
 .../scheduling/v1alpha1/zz_generated.deepcopy.go   |    2 +-
 .../kubernetes => api/scheduling/v1beta1}/doc.go   |    7 +-
 .../{v1alpha1 => v1beta1}/generated.pb.go          |   76 +-
 vendor/k8s.io/api/scheduling/v1beta1/register.go   |   52 +
 vendor/k8s.io/api/scheduling/v1beta1/types.go      |   66 +
 .../types_swagger_doc_generated.go                 |    6 +-
 .../{v1alpha1 => v1beta1}/zz_generated.deepcopy.go |    4 +-
 .../k8s.io/api/settings/v1alpha1/generated.pb.go   |    2 +-
 .../v1alpha1/types_swagger_doc_generated.go        |    4 +-
 .../api/settings/v1alpha1/zz_generated.deepcopy.go |    2 +-
 vendor/k8s.io/api/storage/v1/generated.pb.go       |  136 +-
 vendor/k8s.io/api/storage/v1/types.go              |    8 +
 .../api/storage/v1/types_swagger_doc_generated.go  |    5 +-
 .../k8s.io/api/storage/v1/zz_generated.deepcopy.go |    9 +-
 vendor/k8s.io/api/storage/v1alpha1/generated.pb.go |    2 +-
 .../v1alpha1/types_swagger_doc_generated.go        |    4 +-
 .../api/storage/v1alpha1/zz_generated.deepcopy.go  |    2 +-
 vendor/k8s.io/api/storage/v1beta1/generated.pb.go  |  180 +-
 vendor/k8s.io/api/storage/v1beta1/types.go         |    8 +
 .../storage/v1beta1/types_swagger_doc_generated.go |    5 +-
 .../api/storage/v1beta1/zz_generated.deepcopy.go   |    9 +-
 .../k8s.io/apimachinery/pkg/api/meta/interfaces.go |   21 +-
 vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go    |   25 +-
 vendor/k8s.io/apimachinery/pkg/api/meta/meta.go    |    3 -
 .../k8s.io/apimachinery/pkg/api/meta/priority.go   |   30 +-
 .../k8s.io/apimachinery/pkg/api/meta/restmapper.go |   38 +-
 .../apimachinery/pkg/api/meta/unstructured.go      |   47 -
 .../apimachinery/pkg/api/resource/generated.pb.go  |    2 +-
 .../apimachinery/pkg/api/resource/quantity.go      |   42 -
 .../pkg/api/resource/zz_generated.deepcopy.go      |    2 +-
 .../internalversion/zz_generated.conversion.go     |    2 +-
 .../meta/internalversion/zz_generated.deepcopy.go  |    2 +-
 .../apimachinery/pkg/apis/meta/v1/conversion.go    |   12 +
 .../apimachinery/pkg/apis/meta/v1/duration.go      |    5 +-
 .../apimachinery/pkg/apis/meta/v1/generated.pb.go  |    2 +-
 .../apimachinery/pkg/apis/meta/v1/micro_time.go    |    5 +-
 .../k8s.io/apimachinery/pkg/apis/meta/v1/time.go   |    5 +-
 .../k8s.io/apimachinery/pkg/apis/meta/v1/types.go  |    3 +-
 .../apis/meta/v1/types_swagger_doc_generated.go    |    4 +-
 .../pkg/apis/meta/v1/unstructured/helpers.go       |   66 +-
 .../pkg/apis/meta/v1/unstructured/unstructured.go  |   24 +-
 .../apis/meta/v1/unstructured/unstructured_list.go |   17 +-
 .../meta/v1/unstructured/zz_generated.deepcopy.go  |    2 +-
 .../pkg/apis/meta/v1/zz_generated.deepcopy.go      |    2 +-
 .../pkg/apis/meta/v1/zz_generated.defaults.go      |    2 +-
 .../apimachinery/pkg/apis/meta/v1beta1/deepcopy.go |   23 +-
 .../pkg/apis/meta/v1beta1/generated.pb.go          |    2 +-
 .../apimachinery/pkg/apis/meta/v1beta1/types.go    |    4 +-
 .../meta/v1beta1/types_swagger_doc_generated.go    |    6 +-
 .../pkg/apis/meta/v1beta1/zz_generated.deepcopy.go |    2 +-
 .../pkg/apis/meta/v1beta1/zz_generated.defaults.go |    2 +-
 .../pkg/conversion/queryparams/convert.go          |    3 +
 vendor/k8s.io/apimachinery/pkg/fields/selector.go  |   21 +
 .../pkg/labels/zz_generated.deepcopy.go            |    2 +-
 .../k8s.io/apimachinery/pkg/runtime/converter.go   |   26 +-
 vendor/k8s.io/apimachinery/pkg/runtime/error.go    |    8 +
 .../apimachinery/pkg/runtime/generated.pb.go       |    2 +-
 .../k8s.io/apimachinery/pkg/runtime/interfaces.go  |   17 +-
 .../pkg/runtime/schema/generated.pb.go             |    2 +-
 vendor/k8s.io/apimachinery/pkg/runtime/scheme.go   |  146 +-
 .../pkg/runtime/serializer/json/json.go            |   27 +-
 .../runtime/serializer/versioning/versioning.go    |   23 +-
 .../pkg/runtime/zz_generated.deepcopy.go           |    2 +-
 .../apimachinery/pkg/types/namespacedname.go       |   17 -
 vendor/k8s.io/apimachinery/pkg/util/clock/clock.go |   63 +-
 vendor/k8s.io/apimachinery/pkg/util/diff/diff.go   |   44 +-
 .../apimachinery/pkg/util/intstr/generated.pb.go   |    2 +-
 vendor/k8s.io/apimachinery/pkg/util/net/http.go    |   12 +-
 .../k8s.io/apimachinery/pkg/util/net/port_range.go |   66 +-
 .../apimachinery/pkg/util/runtime/runtime.go       |   10 +-
 vendor/k8s.io/apimachinery/pkg/util/sets/byte.go   |    6 +-
 vendor/k8s.io/apimachinery/pkg/util/sets/doc.go    |    4 +-
 vendor/k8s.io/apimachinery/pkg/util/sets/empty.go  |    4 +-
 vendor/k8s.io/apimachinery/pkg/util/sets/int.go    |    6 +-
 vendor/k8s.io/apimachinery/pkg/util/sets/int64.go  |    6 +-
 vendor/k8s.io/apimachinery/pkg/util/sets/string.go |    6 +-
 vendor/k8s.io/apimachinery/pkg/util/wait/wait.go   |   22 +-
 vendor/k8s.io/apimachinery/pkg/version/helpers.go  |   88 +
 vendor/k8s.io/apimachinery/pkg/watch/filter.go     |    6 +-
 vendor/k8s.io/apimachinery/pkg/watch/mux.go        |    6 +-
 .../pkg/watch/zz_generated.deepcopy.go             |    2 +-
 .../forked/golang/reflect/deep_equal.go            |    2 +-
 .../k8s.io/client-go/discovery/cached/memcache.go  |   10 +-
 .../k8s.io/client-go/discovery/cached_discovery.go |  282 +
 .../k8s.io/client-go/discovery/discovery_client.go |  122 +-
 vendor/k8s.io/client-go/discovery/round_tripper.go |   51 +
 vendor/k8s.io/client-go/discovery/unstructured.go  |   24 +-
 vendor/k8s.io/client-go/dynamic/client.go          |  379 -
 vendor/k8s.io/client-go/dynamic/client_pool.go     |  122 -
 vendor/k8s.io/client-go/dynamic/dynamic_util.go    |   96 -
 vendor/k8s.io/client-go/dynamic/interface.go       |   59 +
 vendor/k8s.io/client-go/dynamic/scheme.go          |   98 +
 vendor/k8s.io/client-go/dynamic/simple.go          |  287 +
 vendor/k8s.io/client-go/kubernetes/clientset.go    |   24 +-
 vendor/k8s.io/client-go/kubernetes/doc.go          |    2 +-
 vendor/k8s.io/client-go/kubernetes/scheme/doc.go   |    2 +-
 .../k8s.io/client-go/kubernetes/scheme/register.go |    4 +-
 .../v1alpha1/admissionregistration_client.go       |    2 +-
 .../typed/admissionregistration/v1alpha1/doc.go    |    2 +-
 .../v1alpha1/generated_expansion.go                |    2 +-
 .../v1alpha1/initializerconfiguration.go           |    2 +-
 .../v1beta1/admissionregistration_client.go        |    2 +-
 .../typed/admissionregistration/v1beta1/doc.go     |    2 +-
 .../v1beta1/generated_expansion.go                 |    2 +-
 .../v1beta1/mutatingwebhookconfiguration.go        |    2 +-
 .../v1beta1/validatingwebhookconfiguration.go      |    2 +-
 .../kubernetes/typed/apps/v1/apps_client.go        |    2 +-
 .../kubernetes/typed/apps/v1/controllerrevision.go |    2 +-
 .../kubernetes/typed/apps/v1/daemonset.go          |    2 +-
 .../kubernetes/typed/apps/v1/deployment.go         |    2 +-
 .../client-go/kubernetes/typed/apps/v1/doc.go      |    2 +-
 .../typed/apps/v1/generated_expansion.go           |    2 +-
 .../kubernetes/typed/apps/v1/replicaset.go         |    2 +-
 .../kubernetes/typed/apps/v1/statefulset.go        |    2 +-
 .../kubernetes/typed/apps/v1beta1/apps_client.go   |    2 +-
 .../typed/apps/v1beta1/controllerrevision.go       |    2 +-
 .../kubernetes/typed/apps/v1beta1/deployment.go    |    2 +-
 .../client-go/kubernetes/typed/apps/v1beta1/doc.go |    2 +-
 .../typed/apps/v1beta1/generated_expansion.go      |    2 +-
 .../kubernetes/typed/apps/v1beta1/scale.go         |    2 +-
 .../kubernetes/typed/apps/v1beta1/statefulset.go   |    2 +-
 .../kubernetes/typed/apps/v1beta2/apps_client.go   |    2 +-
 .../typed/apps/v1beta2/controllerrevision.go       |    2 +-
 .../kubernetes/typed/apps/v1beta2/daemonset.go     |    2 +-
 .../kubernetes/typed/apps/v1beta2/deployment.go    |    2 +-
 .../client-go/kubernetes/typed/apps/v1beta2/doc.go |    2 +-
 .../typed/apps/v1beta2/generated_expansion.go      |    2 +-
 .../kubernetes/typed/apps/v1beta2/replicaset.go    |    2 +-
 .../kubernetes/typed/apps/v1beta2/scale.go         |    2 +-
 .../kubernetes/typed/apps/v1beta2/statefulset.go   |    2 +-
 .../authentication/v1/authentication_client.go     |    2 +-
 .../kubernetes/typed/authentication/v1/doc.go      |    2 +-
 .../typed/authentication/v1/generated_expansion.go |    2 +-
 .../typed/authentication/v1/tokenreview.go         |    2 +-
 .../v1beta1/authentication_client.go               |    2 +-
 .../kubernetes/typed/authentication/v1beta1/doc.go |    2 +-
 .../authentication/v1beta1/generated_expansion.go  |    2 +-
 .../typed/authentication/v1beta1/tokenreview.go    |    2 +-
 .../typed/authorization/v1/authorization_client.go |    2 +-
 .../kubernetes/typed/authorization/v1/doc.go       |    2 +-
 .../typed/authorization/v1/generated_expansion.go  |    2 +-
 .../authorization/v1/localsubjectaccessreview.go   |    2 +-
 .../authorization/v1/selfsubjectaccessreview.go    |    2 +-
 .../authorization/v1/selfsubjectrulesreview.go     |    2 +-
 .../typed/authorization/v1/subjectaccessreview.go  |    2 +-
 .../authorization/v1beta1/authorization_client.go  |    2 +-
 .../kubernetes/typed/authorization/v1beta1/doc.go  |    2 +-
 .../authorization/v1beta1/generated_expansion.go   |    2 +-
 .../v1beta1/localsubjectaccessreview.go            |    2 +-
 .../v1beta1/selfsubjectaccessreview.go             |    2 +-
 .../v1beta1/selfsubjectrulesreview.go              |    2 +-
 .../authorization/v1beta1/subjectaccessreview.go   |    2 +-
 .../typed/autoscaling/v1/autoscaling_client.go     |    2 +-
 .../kubernetes/typed/autoscaling/v1/doc.go         |    2 +-
 .../typed/autoscaling/v1/generated_expansion.go    |    2 +-
 .../autoscaling/v1/horizontalpodautoscaler.go      |    2 +-
 .../autoscaling/v2beta1/autoscaling_client.go      |    2 +-
 .../kubernetes/typed/autoscaling/v2beta1/doc.go    |    2 +-
 .../autoscaling/v2beta1/generated_expansion.go     |    2 +-
 .../autoscaling/v2beta1/horizontalpodautoscaler.go |    2 +-
 .../kubernetes/typed/batch/v1/batch_client.go      |    2 +-
 .../client-go/kubernetes/typed/batch/v1/doc.go     |    2 +-
 .../typed/batch/v1/generated_expansion.go          |    2 +-
 .../client-go/kubernetes/typed/batch/v1/job.go     |    2 +-
 .../kubernetes/typed/batch/v1beta1/batch_client.go |    2 +-
 .../kubernetes/typed/batch/v1beta1/cronjob.go      |    2 +-
 .../kubernetes/typed/batch/v1beta1/doc.go          |    2 +-
 .../typed/batch/v1beta1/generated_expansion.go     |    2 +-
 .../typed/batch/v2alpha1/batch_client.go           |    2 +-
 .../kubernetes/typed/batch/v2alpha1/cronjob.go     |    2 +-
 .../kubernetes/typed/batch/v2alpha1/doc.go         |    2 +-
 .../typed/batch/v2alpha1/generated_expansion.go    |    2 +-
 .../certificates/v1beta1/certificates_client.go    |    2 +-
 .../v1beta1/certificatesigningrequest.go           |    2 +-
 .../kubernetes/typed/certificates/v1beta1/doc.go   |    2 +-
 .../certificates/v1beta1/generated_expansion.go    |    2 +-
 .../kubernetes/typed/core/v1/componentstatus.go    |    2 +-
 .../kubernetes/typed/core/v1/configmap.go          |    2 +-
 .../kubernetes/typed/core/v1/core_client.go        |    2 +-
 .../client-go/kubernetes/typed/core/v1/doc.go      |    2 +-
 .../kubernetes/typed/core/v1/endpoints.go          |    2 +-
 .../client-go/kubernetes/typed/core/v1/event.go    |    2 +-
 .../typed/core/v1/generated_expansion.go           |    2 +-
 .../kubernetes/typed/core/v1/limitrange.go         |    2 +-
 .../kubernetes/typed/core/v1/namespace.go          |   13 +-
 .../client-go/kubernetes/typed/core/v1/node.go     |    2 +-
 .../kubernetes/typed/core/v1/persistentvolume.go   |    2 +-
 .../typed/core/v1/persistentvolumeclaim.go         |    2 +-
 .../client-go/kubernetes/typed/core/v1/pod.go      |    2 +-
 .../kubernetes/typed/core/v1/podtemplate.go        |    2 +-
 .../typed/core/v1/replicationcontroller.go         |    2 +-
 .../kubernetes/typed/core/v1/resourcequota.go      |    2 +-
 .../client-go/kubernetes/typed/core/v1/secret.go   |    2 +-
 .../client-go/kubernetes/typed/core/v1/service.go  |   14 +-
 .../kubernetes/typed/core/v1/serviceaccount.go     |    2 +-
 .../kubernetes/typed/events/v1beta1/doc.go         |    2 +-
 .../kubernetes/typed/events/v1beta1/event.go       |    2 +-
 .../typed/events/v1beta1/events_client.go          |    2 +-
 .../typed/events/v1beta1/generated_expansion.go    |    2 +-
 .../typed/extensions/v1beta1/daemonset.go          |    2 +-
 .../typed/extensions/v1beta1/deployment.go         |    2 +-
 .../kubernetes/typed/extensions/v1beta1/doc.go     |    2 +-
 .../typed/extensions/v1beta1/extensions_client.go  |    2 +-
 .../extensions/v1beta1/generated_expansion.go      |    2 +-
 .../kubernetes/typed/extensions/v1beta1/ingress.go |    2 +-
 .../typed/extensions/v1beta1/podsecuritypolicy.go  |    2 +-
 .../typed/extensions/v1beta1/replicaset.go         |    2 +-
 .../kubernetes/typed/extensions/v1beta1/scale.go   |    2 +-
 .../kubernetes/typed/networking/v1/doc.go          |    2 +-
 .../typed/networking/v1/generated_expansion.go     |    2 +-
 .../typed/networking/v1/networking_client.go       |    2 +-
 .../typed/networking/v1/networkpolicy.go           |    2 +-
 .../kubernetes/typed/policy/v1beta1/doc.go         |    2 +-
 .../kubernetes/typed/policy/v1beta1/eviction.go    |    2 +-
 .../typed/policy/v1beta1/generated_expansion.go    |    2 +-
 .../typed/policy/v1beta1/poddisruptionbudget.go    |    2 +-
 .../typed/policy/v1beta1/podsecuritypolicy.go      |    2 +-
 .../typed/policy/v1beta1/policy_client.go          |    2 +-
 .../kubernetes/typed/rbac/v1/clusterrole.go        |    2 +-
 .../kubernetes/typed/rbac/v1/clusterrolebinding.go |    2 +-
 .../client-go/kubernetes/typed/rbac/v1/doc.go      |    2 +-
 .../typed/rbac/v1/generated_expansion.go           |    2 +-
 .../kubernetes/typed/rbac/v1/rbac_client.go        |    2 +-
 .../client-go/kubernetes/typed/rbac/v1/role.go     |    2 +-
 .../kubernetes/typed/rbac/v1/rolebinding.go        |    2 +-
 .../kubernetes/typed/rbac/v1alpha1/clusterrole.go  |    2 +-
 .../typed/rbac/v1alpha1/clusterrolebinding.go      |    2 +-
 .../kubernetes/typed/rbac/v1alpha1/doc.go          |    2 +-
 .../typed/rbac/v1alpha1/generated_expansion.go     |    2 +-
 .../kubernetes/typed/rbac/v1alpha1/rbac_client.go  |    2 +-
 .../kubernetes/typed/rbac/v1alpha1/role.go         |    2 +-
 .../kubernetes/typed/rbac/v1alpha1/rolebinding.go  |    2 +-
 .../kubernetes/typed/rbac/v1beta1/clusterrole.go   |    2 +-
 .../typed/rbac/v1beta1/clusterrolebinding.go       |    2 +-
 .../client-go/kubernetes/typed/rbac/v1beta1/doc.go |    2 +-
 .../typed/rbac/v1beta1/generated_expansion.go      |    2 +-
 .../kubernetes/typed/rbac/v1beta1/rbac_client.go   |    2 +-
 .../kubernetes/typed/rbac/v1beta1/role.go          |    2 +-
 .../kubernetes/typed/rbac/v1beta1/rolebinding.go   |    2 +-
 .../kubernetes/typed/scheduling/v1alpha1/doc.go    |    2 +-
 .../scheduling/v1alpha1/generated_expansion.go     |    2 +-
 .../typed/scheduling/v1alpha1/priorityclass.go     |    2 +-
 .../typed/scheduling/v1alpha1/scheduling_client.go |    2 +-
 .../{authentication => scheduling}/v1beta1/doc.go  |    2 +-
 .../{v1alpha1 => v1beta1}/generated_expansion.go   |    4 +-
 .../{v1alpha1 => v1beta1}/priorityclass.go         |   38 +-
 .../v1beta1/scheduling_client.go}                  |   34 +-
 .../kubernetes/typed/settings/v1alpha1/doc.go      |    2 +-
 .../typed/settings/v1alpha1/generated_expansion.go |    2 +-
 .../typed/settings/v1alpha1/podpreset.go           |    2 +-
 .../typed/settings/v1alpha1/settings_client.go     |    2 +-
 .../client-go/kubernetes/typed/storage/v1/doc.go   |    2 +-
 .../typed/storage/v1/generated_expansion.go        |    2 +-
 .../kubernetes/typed/storage/v1/storage_client.go  |    2 +-
 .../kubernetes/typed/storage/v1/storageclass.go    |    2 +-
 .../kubernetes/typed/storage/v1alpha1/doc.go       |    2 +-
 .../typed/storage/v1alpha1/generated_expansion.go  |    2 +-
 .../typed/storage/v1alpha1/storage_client.go       |    2 +-
 .../typed/storage/v1alpha1/volumeattachment.go     |    2 +-
 .../kubernetes/typed/storage/v1beta1/doc.go        |    2 +-
 .../typed/storage/v1beta1/generated_expansion.go   |    2 +-
 .../typed/storage/v1beta1/storage_client.go        |    2 +-
 .../typed/storage/v1beta1/storageclass.go          |    2 +-
 .../typed/storage/v1beta1/volumeattachment.go      |    2 +-
 .../pkg/apis/clientauthentication/types.go         |    7 +
 .../apis/clientauthentication/v1alpha1/types.go    |    8 +
 .../v1alpha1/zz_generated.conversion.go            |    6 +-
 .../v1alpha1/zz_generated.deepcopy.go              |    2 +-
 .../v1alpha1/zz_generated.defaults.go              |    2 +-
 .../clientauthentication/v1beta1/conversion.go}    |   11 +-
 .../apis/clientauthentication/v1beta1}/doc.go      |    9 +-
 .../apis/clientauthentication/v1beta1}/register.go |   71 +-
 .../{v1alpha1 => v1beta1}/types.go                 |   31 +-
 .../v1beta1/zz_generated.conversion.go             |  114 +
 .../{v1alpha1 => v1beta1}/zz_generated.deepcopy.go |   43 +-
 .../v1beta1/zz_generated.defaults.go               |    2 +-
 .../clientauthentication/zz_generated.deepcopy.go  |    2 +-
 vendor/k8s.io/client-go/pkg/version/base.go        |    2 +-
 .../client-go/plugin/pkg/client/auth/exec/exec.go  |  156 +-
 .../client-go/plugin/pkg/client/auth/gcp/gcp.go    |    2 +-
 vendor/k8s.io/client-go/rest/config.go             |    8 +-
 vendor/k8s.io/client-go/rest/request.go            |   37 +-
 vendor/k8s.io/client-go/rest/transport.go          |   65 +-
 .../k8s.io/client-go/rest/zz_generated.deepcopy.go |    2 +-
 .../client-go/restmapper/category_expansion.go     |  119 +
 .../restmapper.go => restmapper/discovery.go}      |   34 +-
 vendor/k8s.io/client-go/restmapper/shortcut.go     |  172 +
 vendor/k8s.io/client-go/tools/cache/listwatch.go   |    3 +-
 .../client-go/tools/cache/mutation_detector.go     |    3 +
 .../client-go/tools/clientcmd/api/v1/conversion.go |   25 +-
 .../clientcmd/api/v1/zz_generated.deepcopy.go      |    2 +-
 .../tools/clientcmd/api/zz_generated.deepcopy.go   |    2 +-
 .../client-go/tools/clientcmd/auth_loaders.go      |   11 +-
 .../client-go/tools/clientcmd/client_config.go     |   23 +-
 vendor/k8s.io/client-go/tools/clientcmd/config.go  |    4 +-
 vendor/k8s.io/client-go/tools/clientcmd/loader.go  |   18 +-
 .../tools/clientcmd/merged_client_builder.go       |    3 +-
 vendor/k8s.io/client-go/tools/pager/pager.go       |    3 +-
 vendor/k8s.io/client-go/tools/reference/ref.go     |    8 +-
 vendor/k8s.io/client-go/transport/cache.go         |   12 +-
 vendor/k8s.io/client-go/transport/config.go        |   13 +-
 .../k8s.io/client-go/transport/round_trippers.go   |    2 +-
 vendor/k8s.io/client-go/transport/transport.go     |   34 +-
 vendor/k8s.io/client-go/util/cert/io.go            |   37 +-
 .../client-go/util/connrotation/connrotation.go    |  105 +
 .../client-go/util/workqueue/delaying_queue.go     |   10 +-
 vendor/k8s.io/code-generator/Godeps/Godeps.json    |  536 +-
 vendor/k8s.io/code-generator/SECURITY_CONTACTS     |   17 +
 .../apiserver/apis/example/install/install.go      |   20 +-
 .../_examples/apiserver/apis/example/v1/doc.go     |    1 +
 .../apis/example/v1/zz_generated.conversion.go     |    2 +-
 .../apis/example/v1/zz_generated.deepcopy.go       |    2 +-
 .../apis/example/v1/zz_generated.defaults.go       |    2 +-
 .../apis/example/zz_generated.deepcopy.go          |    2 +-
 .../apiserver/apis/example2/install/install.go     |   20 +-
 .../_examples/apiserver/apis/example2/v1/doc.go    |    1 +
 .../apis/example2/v1/zz_generated.conversion.go    |    2 +-
 .../apis/example2/v1/zz_generated.deepcopy.go      |    2 +-
 .../apis/example2/v1/zz_generated.defaults.go      |    2 +-
 .../apis/example2/zz_generated.deepcopy.go         |    2 +-
 .../clientset/internalversion/clientset.go         |    4 +-
 .../apiserver/clientset/internalversion/doc.go     |    2 +-
 .../internalversion/fake/clientset_generated.go    |   11 +-
 .../clientset/internalversion/fake/doc.go          |    2 +-
 .../clientset/internalversion/fake/register.go     |    2 +-
 .../clientset/internalversion/scheme/doc.go        |    2 +-
 .../clientset/internalversion/scheme/register.go   |   17 +-
 .../typed/example/internalversion/doc.go           |    2 +-
 .../example/internalversion/example_client.go      |   11 +-
 .../typed/example/internalversion/fake/doc.go      |    2 +-
 .../internalversion/fake/fake_example_client.go    |    2 +-
 .../example/internalversion/fake/fake_testtype.go  |    4 +-
 .../example/internalversion/generated_expansion.go |    2 +-
 .../typed/example/internalversion/testtype.go      |    2 +-
 .../typed/example2/internalversion/doc.go          |    2 +-
 .../example2/internalversion/example2_client.go    |   11 +-
 .../typed/example2/internalversion/fake/doc.go     |    2 +-
 .../internalversion/fake/fake_example2_client.go   |    2 +-
 .../example2/internalversion/fake/fake_testtype.go |    4 +-
 .../internalversion/generated_expansion.go         |    2 +-
 .../typed/example2/internalversion/testtype.go     |    2 +-
 .../apiserver/clientset/versioned/clientset.go     |    4 +-
 .../_examples/apiserver/clientset/versioned/doc.go |    2 +-
 .../versioned/fake/clientset_generated.go          |   11 +-
 .../apiserver/clientset/versioned/fake/doc.go      |    2 +-
 .../apiserver/clientset/versioned/fake/register.go |    2 +-
 .../apiserver/clientset/versioned/scheme/doc.go    |    2 +-
 .../clientset/versioned/scheme/register.go         |    2 +-
 .../clientset/versioned/typed/example/v1/doc.go    |    2 +-
 .../versioned/typed/example/v1/example_client.go   |    2 +-
 .../versioned/typed/example/v1/fake/doc.go         |    2 +-
 .../typed/example/v1/fake/fake_example_client.go   |    2 +-
 .../typed/example/v1/fake/fake_testtype.go         |    4 +-
 .../typed/example/v1/generated_expansion.go        |    2 +-
 .../versioned/typed/example/v1/testtype.go         |    2 +-
 .../clientset/versioned/typed/example2/v1/doc.go   |    2 +-
 .../versioned/typed/example2/v1/example2_client.go |    2 +-
 .../versioned/typed/example2/v1/fake/doc.go        |    2 +-
 .../typed/example2/v1/fake/fake_example2_client.go |    2 +-
 .../typed/example2/v1/fake/fake_testtype.go        |    4 +-
 .../typed/example2/v1/generated_expansion.go       |    2 +-
 .../versioned/typed/example2/v1/testtype.go        |    2 +-
 .../externalversions/example/interface.go          |    2 +-
 .../externalversions/example/v1/interface.go       |    2 +-
 .../externalversions/example/v1/testtype.go        |    2 +-
 .../externalversions/example2/interface.go         |    2 +-
 .../externalversions/example2/v1/interface.go      |    2 +-
 .../externalversions/example2/v1/testtype.go       |    2 +-
 .../informers/externalversions/factory.go          |   63 +-
 .../informers/externalversions/generic.go          |    2 +-
 .../internalinterfaces/factory_interfaces.go       |    2 +-
 .../informers/internalversion/example/interface.go |    2 +-
 .../example/internalversion/interface.go           |    2 +-
 .../example/internalversion/testtype.go            |    2 +-
 .../internalversion/example2/interface.go          |    2 +-
 .../example2/internalversion/interface.go          |    2 +-
 .../example2/internalversion/testtype.go           |    2 +-
 .../apiserver/informers/internalversion/factory.go |   63 +-
 .../apiserver/informers/internalversion/generic.go |    2 +-
 .../internalinterfaces/factory_interfaces.go       |    2 +-
 .../example/internalversion/expansion_generated.go |    2 +-
 .../listers/example/internalversion/testtype.go    |    2 +-
 .../listers/example/v1/expansion_generated.go      |    2 +-
 .../apiserver/listers/example/v1/testtype.go       |    2 +-
 .../internalversion/expansion_generated.go         |    2 +-
 .../listers/example2/internalversion/testtype.go   |    2 +-
 .../listers/example2/v1/expansion_generated.go     |    2 +-
 .../apiserver/listers/example2/v1/testtype.go      |    2 +-
 .../_examples/crd/apis/example/v1/doc.go           |    1 +
 .../_examples/crd/apis/example/v1/types.go         |   27 +
 .../crd/apis/example/v1/zz_generated.deepcopy.go   |   78 +-
 .../crd/apis/example}/v1/zz_generated.defaults.go  |    2 +-
 .../_examples/crd/apis/example2/v1/doc.go          |    1 +
 .../crd/apis/example2/v1/zz_generated.deepcopy.go  |    2 +-
 .../crd/apis/example2}/v1/zz_generated.defaults.go |    2 +-
 .../_examples/crd/clientset/versioned/clientset.go |    4 +-
 .../_examples/crd/clientset/versioned/doc.go       |    2 +-
 .../versioned/fake/clientset_generated.go          |   11 +-
 .../_examples/crd/clientset/versioned/fake/doc.go  |    2 +-
 .../crd/clientset/versioned/fake/register.go       |    2 +-
 .../crd/clientset/versioned/scheme/doc.go          |    2 +-
 .../crd/clientset/versioned/scheme/register.go     |    2 +-
 .../versioned/typed/example/v1/clustertesttype.go  |  193 +
 .../clientset/versioned/typed/example/v1/doc.go    |    2 +-
 .../versioned/typed/example/v1/example_client.go   |    7 +-
 .../versioned/typed/example/v1/fake/doc.go         |    2 +-
 .../typed/example/v1/fake/fake_clustertesttype.go  |  152 +
 .../typed/example/v1/fake/fake_example_client.go   |    6 +-
 .../typed/example/v1/fake/fake_testtype.go         |    4 +-
 .../typed/example/v1/generated_expansion.go        |    4 +-
 .../versioned/typed/example/v1/testtype.go         |    2 +-
 .../clientset/versioned/typed/example2/v1/doc.go   |    2 +-
 .../versioned/typed/example2/v1/example2_client.go |    2 +-
 .../versioned/typed/example2/v1/fake/doc.go        |    2 +-
 .../typed/example2/v1/fake/fake_example2_client.go |    2 +-
 .../typed/example2/v1/fake/fake_testtype.go        |    4 +-
 .../typed/example2/v1/generated_expansion.go       |    2 +-
 .../versioned/typed/example2/v1/testtype.go        |    2 +-
 .../externalversions/example/interface.go          |    2 +-
 .../example/v1/{testtype.go => clustertesttype.go} |   41 +-
 .../externalversions/example/v1/interface.go       |    9 +-
 .../externalversions/example/v1/testtype.go        |    2 +-
 .../externalversions/example2/interface.go         |    2 +-
 .../externalversions/example2/v1/interface.go      |    2 +-
 .../externalversions/example2/v1/testtype.go       |    2 +-
 .../crd/informers/externalversions/factory.go      |   63 +-
 .../crd/informers/externalversions/generic.go      |    4 +-
 .../internalinterfaces/factory_interfaces.go       |    2 +-
 .../crd/listers/example/v1/clustertesttype.go      |   65 +
 .../crd/listers/example/v1/expansion_generated.go  |    6 +-
 .../_examples/crd/listers/example/v1/testtype.go   |    2 +-
 .../crd/listers/example2/v1/expansion_generated.go |    2 +-
 .../_examples/crd/listers/example2/v1/testtype.go  |    2 +-
 vendor/k8s.io/code-generator/cmd/client-gen/BUILD  |   45 -
 .../code-generator/cmd/client-gen/args/BUILD       |   46 -
 .../code-generator/cmd/client-gen/generators/BUILD |   50 -
 .../cmd/client-gen/generators/client_generator.go  |   24 +-
 .../cmd/client-gen/generators/fake/BUILD           |   40 -
 .../fake/generator_fake_for_clientset.go           |   11 +-
 .../generators/fake/generator_fake_for_type.go     |   31 +-
 .../generators/generator_for_clientset.go          |    4 +-
 .../client-gen/generators/generator_for_group.go   |    9 +-
 .../cmd/client-gen/generators/scheme/BUILD         |   32 -
 .../generators/scheme/generator_for_scheme.go      |   44 +-
 .../cmd/client-gen/generators/util/BUILD           |   33 -
 .../code-generator/cmd/client-gen/path/BUILD       |   25 -
 .../code-generator/cmd/client-gen/types/BUILD      |   36 -
 .../k8s.io/code-generator/cmd/conversion-gen/BUILD |   43 -
 .../code-generator/cmd/conversion-gen/args/BUILD   |   26 -
 .../code-generator/cmd/conversion-gen/args/args.go |    2 +-
 .../cmd/conversion-gen/generators/BUILD            |   33 -
 .../k8s.io/code-generator/cmd/deepcopy-gen/BUILD   |   42 -
 .../code-generator/cmd/deepcopy-gen/args/BUILD     |   27 -
 .../k8s.io/code-generator/cmd/defaulter-gen/BUILD  |   42 -
 .../code-generator/cmd/defaulter-gen/args/BUILD    |   27 -
 .../k8s.io/code-generator/cmd/go-to-protobuf/BUILD |   39 -
 .../cmd/go-to-protobuf/protobuf/BUILD              |   51 -
 .../cmd/go-to-protobuf/protoc-gen-gogo/BUILD       |   37 -
 vendor/k8s.io/code-generator/cmd/import-boss/BUILD |   37 -
 .../k8s.io/code-generator/cmd/informer-gen/BUILD   |   43 -
 .../code-generator/cmd/informer-gen/args/BUILD     |   27 -
 .../cmd/informer-gen/generators/BUILD              |   45 -
 .../cmd/informer-gen/generators/factory.go         |   72 +-
 .../cmd/informer-gen/generators/types.go           |    1 +
 vendor/k8s.io/code-generator/cmd/lister-gen/BUILD  |   43 -
 .../code-generator/cmd/lister-gen/args/BUILD       |   27 -
 .../code-generator/cmd/lister-gen/generators/BUILD |   38 -
 vendor/k8s.io/code-generator/cmd/openapi-gen/BUILD |   42 -
 .../code-generator/cmd/openapi-gen/args/BUILD      |   26 -
 vendor/k8s.io/code-generator/cmd/set-gen/BUILD     |   41 -
 vendor/k8s.io/code-generator/generate-groups.sh    |    8 +-
 vendor/k8s.io/code-generator/hack/BUILD            |   18 -
 .../k8s.io/code-generator/hack/boilerplate.go.txt  |    2 +-
 .../k8s.io/code-generator/hack/update-codegen.sh   |    2 +-
 .../k8s.io/code-generator/hack/verify-codegen.sh   |    2 +-
 vendor/k8s.io/code-generator/pkg/util/BUILD        |   22 -
 .../third_party/forked/golang/reflect/BUILD        |   25 -
 vendor/k8s.io/kube-openapi/pkg/common/common.go    |    6 +
 .../kube-openapi/pkg/generators/api_linter.go      |   94 +
 .../k8s.io/kube-openapi/pkg/generators/config.go   |  106 +
 .../k8s.io/kube-openapi/pkg/generators/openapi.go  |  129 +-
 .../pkg/generators/rules/omitempty_match_case.go   |   64 +
 742 files changed, 24235 insertions(+), 11782 deletions(-)

diff --git a/Gopkg.lock b/Gopkg.lock
index 0c22817..d82a832 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -6,8 +6,8 @@
   name = "cloud.google.com/go"
   packages = ["compute/metadata"]
   pruneopts = "NUT"
-  revision = "6cb29e61d96723a38dcac44d4c15c36744d96d07"
-  version = "v0.29.0"
+  revision = "debcad1964693daf8ef4bc06292d7e828e075130"
+  version = "v0.31.0"
 
 [[projects]]
   digest = "1:d8ebbd207f3d3266d4423ce4860c9f3794956306ded6c7ba312ecc69cdfbf04c"
@@ -73,32 +73,32 @@
   name = "github.com/go-openapi/jsonpointer"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "3a0015ad55fa9873f41605d3e8f28cd279c32ab2"
-  version = "0.16.0"
+  revision = "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004"
+  version = "v0.17.2"
 
 [[projects]]
   digest = "1:98abd61947ff5c7c6fcfec5473d02a4821ed3a2dd99a4fbfdb7925b0dd745546"
   name = "github.com/go-openapi/jsonreference"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "3fb327e6747da3043567ee86abd02bb6376b6be2"
-  version = "0.16.0"
+  revision = "8483a886a90412cd6858df4ea3483dce9c8e35a3"
+  version = "v0.17.2"
 
 [[projects]]
   digest = "1:dfab391de021809e0041f0ab5648da6b74dd16a685472a1b8c3dc06b3dca1ee2"
   name = "github.com/go-openapi/spec"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "384415f06ee238aae1df5caad877de6ceac3a5c4"
-  version = "0.16.0"
+  revision = "5bae59e25b21498baea7f9d46e9c147ec106a42e"
+  version = "v0.17.2"
 
 [[projects]]
-  digest = "1:44d7c281784896656e8c802c13a2e38d0f440904bdf6bdf968bb29f032df6a36"
+  digest = "1:983f95b2fae6fe8fdd361738325ed6090f4f3bd15ce4db745e899fb5b0fdfc46"
   name = "github.com/go-openapi/swag"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "becd2f08beafcca035645a8a101e0e3e18140458"
-  version = "0.16.0"
+  revision = "5899d5c5e619fda5fa86e14795a835f473ca284c"
+  version = "v0.17.2"
 
 [[projects]]
   digest = "1:8679b8a64f3613e9749c5640c3535c83399b8e69f67ce54d91dc73f6d77373af"
@@ -135,6 +135,14 @@
 
 [[projects]]
   branch = "master"
+  digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107"
+  name = "github.com/google/btree"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
+
+[[projects]]
+  branch = "master"
   digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"
   name = "github.com/google/gofuzz"
   packages = ["."]
@@ -154,6 +162,17 @@
   version = "v0.2.0"
 
 [[projects]]
+  branch = "master"
+  digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621"
+  name = "github.com/gregjones/httpcache"
+  packages = [
+    ".",
+    "diskcache",
+  ]
+  pruneopts = "NUT"
+  revision = "9cad4c3443a7200dd6400aef47183728de563a38"
+
+[[projects]]
   digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6"
   name = "github.com/hashicorp/golang-lru"
   packages = [
@@ -165,14 +184,6 @@
   version = "v0.5.0"
 
 [[projects]]
-  branch = "master"
-  digest = "1:b7f860847a1d71f925ba9385ed95f1ebc0abfeb418a78e219ab61f48fdfeffad"
-  name = "github.com/howeyc/gopass"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8"
-
-[[projects]]
   digest = "1:9a52adf44086cead3b384e5d0dbf7a1c1cce65e67552ee3383a8561c42a18cd3"
   name = "github.com/imdario/mergo"
   packages = ["."]
@@ -197,12 +208,12 @@
   version = "v1.1.5"
 
 [[projects]]
-  branch = "master"
-  digest = "1:c8a452cc8dd4ef9f857570ce2be31ca257a0928bf3c2b08cd7e11972b985c6d7"
+  digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed"
   name = "github.com/konsorten/go-windows-terminal-sequences"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "b729f2633dfe35f4d1d8a32385f6685610ce1cb5"
+  revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242"
+  version = "v1.0.1"
 
 [[projects]]
   branch = "master"
@@ -267,7 +278,7 @@
   version = "v3.9.0"
 
 [[projects]]
-  digest = "1:8809516dcbfea012372b937388ec89bc929b97824840f43a3cd5c168fea1076e"
+  digest = "1:ce6ed8da0816d327c1d586722a13c05a3fd26cf9b8649cdcda300c16f026feca"
   name = "github.com/operator-framework/operator-sdk"
   packages = [
     "pkg/k8sclient",
@@ -277,8 +288,24 @@
     "version",
   ]
   pruneopts = "NUT"
-  revision = "cb89692cdd145f084798749c16baaf44a6e62cac"
-  version = "v0.0.6"
+  revision = "e5a0ab096e1a7c0e6b937d2b41707eccb82c3c77"
+  version = "v0.0.7"
+
+[[projects]]
+  branch = "master"
+  digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2"
+  name = "github.com/petar/GoLLRB"
+  packages = ["llrb"]
+  pruneopts = "NUT"
+  revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
+
+[[projects]]
+  digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6"
+  name = "github.com/peterbourgon/diskv"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
+  version = "v2.0.1"
 
 [[projects]]
   digest = "1:5cf3f025cbee5951a4ee961de067c8a89fc95a5adabead774f82822efabab121"
@@ -297,15 +324,16 @@
   version = "v1.0.0"
 
 [[projects]]
-  digest = "1:03bca087b180bf24c4f9060775f137775550a0834e18f0bca0520a868679dbd7"
+  digest = "1:aa2da1df3327c3a338bb42f978407c07de74cd0a5bef35e9411881dffd444214"
   name = "github.com/prometheus/client_golang"
   packages = [
     "prometheus",
+    "prometheus/internal",
     "prometheus/promhttp",
   ]
   pruneopts = "NUT"
-  revision = "c5b7fccd204277076155f10851dad72b76a49317"
-  version = "v0.8.0"
+  revision = "1cafe34db7fdec6022e17e00e1c1ea501022f3e4"
+  version = "v0.9.0"
 
 [[projects]]
   branch = "master"
@@ -317,7 +345,7 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:fad5a35eea6a1a33d6c8f949fbc146f24275ca809ece854248187683f52cc30b"
+  digest = "1:06375f3b602de9c99fa99b8484f0e949fd5273e6e9c6592b5a0dd4cd9085f3ea"
   name = "github.com/prometheus/common"
   packages = [
     "expfmt",
@@ -325,11 +353,11 @@
     "model",
   ]
   pruneopts = "NUT"
-  revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"
+  revision = "7e9e6cabbd393fc208072eedef99188d0ce788b6"
 
 [[projects]]
   branch = "master"
-  digest = "1:26a2f5e891cc4d2321f18a0caa84c8e788663c17bed6a487f3cbe2c4295292d0"
+  digest = "1:102dea0c03a915acfc634b7c67f2662012b5483b56d9025e33f5188e112759b6"
   name = "github.com/prometheus/procfs"
   packages = [
     ".",
@@ -338,7 +366,7 @@
     "xfs",
   ]
   pruneopts = "NUT"
-  revision = "418d78d0b9a7b7de3a6bbc8a23def624cc977bb2"
+  revision = "185b4288413d2a0dd0806f78c90dde719829e5ae"
 
 [[projects]]
   digest = "1:669828a2363f1ecad15fff9f008dd1d07d449fb25c9060998b15f83fec896458"
@@ -357,12 +385,12 @@
   version = "v1.2.1"
 
 [[projects]]
-  digest = "1:01252cd79aac70f16cac02a72a1067dd136e0ad6d5b597d0129cf74c739fd8d1"
+  digest = "1:ecf78eacf406c42f07f66d6b79fda24d2b92dc711bfd0760d0c931678f9621fe"
   name = "github.com/sirupsen/logrus"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "a67f783a3814b8729bd2dac5780b5f78f8dbd64d"
-  version = "v1.1.0"
+  revision = "ad15b42461921f1fb3529b058c6786c6a45d5162"
+  version = "v1.1.1"
 
 [[projects]]
   digest = "1:343d44e06621142ab09ae0c76c1799104cdfddd3ffb445d78b1adf8dc3ffaf3d"
@@ -402,7 +430,7 @@
   name = "golang.org/x/crypto"
   packages = ["ssh/terminal"]
   pruneopts = "NUT"
-  revision = "e3636079e1a4c1f337f212cc5cd2aca108f6c900"
+  revision = "e84da0312774c21d64ee2317962ef669b27ffb41"
 
 [[projects]]
   branch = "master"
@@ -417,11 +445,11 @@
     "idna",
   ]
   pruneopts = "NUT"
-  revision = "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f"
+  revision = "9b4f9f5ad5197c79fd623a3638e70d8b26cef344"
 
 [[projects]]
   branch = "master"
-  digest = "1:dcb89c032286a9c3c5118a1496f8e0e237c1437f5356ac9602f6fdef560a5c21"
+  digest = "1:b0fef33b00740f7eeb5198f67ee1642d8d2560e9b428df7fb5f69fb140f5c4d0"
   name = "golang.org/x/oauth2"
   packages = [
     ".",
@@ -431,18 +459,18 @@
     "jwt",
   ]
   pruneopts = "NUT"
-  revision = "c57b0facaced709681d9f90397429b9430a74754"
+  revision = "9dcd33a902f40452422c2367fefcb95b54f9f8f8"
 
 [[projects]]
   branch = "master"
-  digest = "1:847ffe30bb2463c17bf12bac2558b63f5dc104fd5920c8adf8c150a08cd176fb"
+  digest = "1:f9eb1fd707210fde0db4076dd3bfdb401d3770f929b8dbd49171ef3e9cece2a6"
   name = "golang.org/x/sys"
   packages = [
     "unix",
     "windows",
   ]
   pruneopts = "NUT"
-  revision = "af653ce8b74f808d092db8ca9741fbb63d2a469d"
+  revision = "731415f00dce967a133e841b3079eda31c996761"
 
 [[projects]]
   digest = "1:e33513a825fcd765e97b5de639a2f7547542d1a8245df0cef18e1fd390b778a9"
@@ -478,15 +506,16 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:45751dc3302c90ea55913674261b2d74286b05cdd8e3ae9606e02e4e77f4353f"
+  digest = "1:3a04778e417b28bba5d30f0af919206b4869f57a1d5e152c4c2f29bf18889dce"
   name = "golang.org/x/tools"
   packages = [
     "go/ast/astutil",
     "imports",
     "internal/fastwalk",
+    "internal/gopathwalk",
   ]
   pruneopts = "NUT"
-  revision = "140737fa61ecd083e4967b36551743e4d3c4600f"
+  revision = "a2dc47679d30b6c496245bafc6a166b46c5fe318"
 
 [[projects]]
   digest = "1:e2da54c7866453ac5831c61c7ec5d887f39328cac088c806553303bff4048e6f"
@@ -524,7 +553,7 @@
   version = "v2.2.1"
 
 [[projects]]
-  digest = "1:f541c95b242bf41dc42ba035d66e7d240c1e8c7ccd45480f81c6526237d0b940"
+  digest = "1:ef716a2116d8a040e16fbcd7fca71d3354915a94720de6af22c7a09970234296"
   name = "k8s.io/api"
   packages = [
     "admissionregistration/v1alpha1",
@@ -551,16 +580,18 @@
     "rbac/v1alpha1",
     "rbac/v1beta1",
     "scheduling/v1alpha1",
+    "scheduling/v1beta1",
     "settings/v1alpha1",
     "storage/v1",
     "storage/v1alpha1",
     "storage/v1beta1",
   ]
   pruneopts = "NUT"
-  revision = "73d903622b7391f3312dcbac6483fed484e185f8"
+  revision = "2d6f90ab1293a1fb871cf149423ebb72aa7423aa"
+  version = "kubernetes-1.11.2"
 
 [[projects]]
-  digest = "1:912a2bed08850f9bb0f2078dd8381e162f2e393bf0ee5650ff79073f9fa9fac5"
+  digest = "1:69b102c3ee60ab3704fac6e46bac1e8894e20b11498ec832846a229f21946200"
   name = "k8s.io/apimachinery"
   packages = [
     "pkg/api/errors",
@@ -603,10 +634,11 @@
     "third_party/forked/golang/reflect",
   ]
   pruneopts = "NUT"
-  revision = "302974c03f7e50f16561ba237db776ab93594ef6"
+  revision = "103fd098999dc9c0c88536f5c9ad2e5da39373ae"
+  version = "kubernetes-1.11.2"
 
 [[projects]]
-  digest = "1:21c27c3f27815953e7e167c3e8b9f657af6bdfbaaccf639d2bafd64178adbe08"
+  digest = "1:6cca3c9f626aeb165dad88de9db9b71585a92514134f466268dd83fd449df9a7"
   name = "k8s.io/client-go"
   packages = [
     "discovery",
@@ -638,17 +670,20 @@
     "kubernetes/typed/rbac/v1alpha1",
     "kubernetes/typed/rbac/v1beta1",
     "kubernetes/typed/scheduling/v1alpha1",
+    "kubernetes/typed/scheduling/v1beta1",
     "kubernetes/typed/settings/v1alpha1",
     "kubernetes/typed/storage/v1",
     "kubernetes/typed/storage/v1alpha1",
     "kubernetes/typed/storage/v1beta1",
     "pkg/apis/clientauthentication",
     "pkg/apis/clientauthentication/v1alpha1",
+    "pkg/apis/clientauthentication/v1beta1",
     "pkg/version",
     "plugin/pkg/client/auth/exec",
     "plugin/pkg/client/auth/gcp",
     "rest",
     "rest/watch",
+    "restmapper",
     "third_party/forked/golang/template",
     "tools/auth",
     "tools/cache",
@@ -662,6 +697,7 @@
     "transport",
     "util/buffer",
     "util/cert",
+    "util/connrotation",
     "util/flowcontrol",
     "util/homedir",
     "util/integer",
@@ -670,10 +706,11 @@
     "util/workqueue",
   ]
   pruneopts = "NUT"
-  revision = "989be4278f353e42f26c416c53757d16fcff77db"
+  revision = "1f13a808da65775f22cbf47862c4e5898d8f4ca1"
+  version = "kubernetes-1.11.2"
 
 [[projects]]
-  digest = "1:b6d40560bfc8ef1300dbec6fae898810b59f26b1fd62d16692ab9befe8694dfc"
+  digest = "1:8ab487a323486c8bbbaa3b689850487fdccc6cbea8690620e083b2d230a4447e"
   name = "k8s.io/code-generator"
   packages = [
     "cmd/client-gen",
@@ -702,7 +739,8 @@
     "pkg/util",
   ]
   pruneopts = "T"
-  revision = "7ead8f38b01cf8653249f5af80ce7b2c8aba12e2"
+  revision = "6702109cc68eb6fe6350b83e14407c8d7309fd1a"
+  version = "kubernetes-1.11.2"
 
 [[projects]]
   branch = "master"
@@ -719,11 +757,11 @@
     "types",
   ]
   pruneopts = "NUT"
-  revision = "4242d8e6c5dba56827bb7bcf14ad11cda38f3991"
+  revision = "7338e4bfd6915369a1375890db1bbda0158c9863"
 
 [[projects]]
   branch = "master"
-  digest = "1:11a6835553568158414400041e840b68e1128453e65240a986f197605b0345eb"
+  digest = "1:e8451187fe9d2b9bf86a44495959c391e831355fb835a63e117ff49b69bc70f9"
   name = "k8s.io/kube-openapi"
   packages = [
     "cmd/openapi-gen/args",
@@ -733,7 +771,7 @@
     "pkg/util/sets",
   ]
   pruneopts = "NUT"
-  revision = "9dfdf9be683f61f82cda12362c44c784e0778b56"
+  revision = "3a9b63ab1e397dc12a9764df998f99bc59dfd9ae"
 
 [solve-meta]
   analyzer-name = "dep"
diff --git a/Gopkg.toml b/Gopkg.toml
index 801fb4e..4102eb3 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -14,32 +14,27 @@ required = [
 
 [[override]]
   name = "k8s.io/code-generator"
-  # revision for tag "kubernetes-1.10.1"
-  revision = "7ead8f38b01cf8653249f5af80ce7b2c8aba12e2"
+  version = "kubernetes-1.11.2"
 
 [[override]]
   name = "k8s.io/api"
-  # revision for tag "kubernetes-1.10.1"
-  revision = "73d903622b7391f3312dcbac6483fed484e185f8"
+  version = "kubernetes-1.11.2"
 
 [[override]]
   name = "k8s.io/apiextensions-apiserver"
-  # revision for tag "kubernetes-1.10.1"
-  revision = "4347b330d0ff094db860f2f75fa725b4f4b53618"
+  version = "kubernetes-1.11.2"
 
 [[override]]
   name = "k8s.io/apimachinery"
-  # revision for tag "kubernetes-1.10.1"
-  revision = "302974c03f7e50f16561ba237db776ab93594ef6"
+  version = "kubernetes-1.11.2"
 
 [[override]]
   name = "k8s.io/client-go"
-  # revision for tag "kubernetes-1.10.1"
-  revision = "989be4278f353e42f26c416c53757d16fcff77db"
+  version = "kubernetes-1.11.2"
 
 [[override]]
   name = "sigs.k8s.io/controller-runtime"
-  revision = "60bb251ad86f9b313653618aad0c2c53f41a6625"
+  version = "v0.1.4"
 
 [prune]
   go-tests = true
@@ -58,4 +53,4 @@ required = [
   #branch = "custom-init"
   # The version rule is used for a specific release and the master branch for in between releases.
   #branch = "master"
-  version = "=v0.0.6"
+  version = "=v0.0.7"
diff --git a/pkg/apis/camel/v1alpha1/types_support.go b/pkg/apis/camel/v1alpha1/types_support.go
index fa4cceb..32b1961 100644
--- a/pkg/apis/camel/v1alpha1/types_support.go
+++ b/pkg/apis/camel/v1alpha1/types_support.go
@@ -49,6 +49,20 @@ func NewIntegrationPlatformList() IntegrationPlatformList {
 	}
 }
 
+// NewIntegrationPlatform --
+func NewIntegrationPlatform(namespace string, name string) IntegrationPlatform {
+	return IntegrationPlatform{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: SchemeGroupVersion.String(),
+			Kind:       IntegrationPlatformKind,
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace,
+			Name:      name,
+		},
+	}
+}
+
 // NewIntegrationList --
 func NewIntegrationList() IntegrationList {
 	return IntegrationList{
diff --git a/pkg/install/operator.go b/pkg/install/operator.go
index 59d5a13..c4856e2 100644
--- a/pkg/install/operator.go
+++ b/pkg/install/operator.go
@@ -24,6 +24,9 @@ import (
 	"github.com/apache/camel-k/pkg/util/kubernetes"
 	"github.com/apache/camel-k/pkg/util/minishift"
 	"github.com/apache/camel-k/pkg/util/openshift"
+	"github.com/operator-framework/operator-sdk/pkg/sdk"
+	"strconv"
+	"time"
 )
 
 // Operator --
@@ -61,6 +64,9 @@ func installKubernetes(namespace string) error {
 
 // Platform installs the platform custom resource
 func Platform(namespace string, registry string) error {
+	if err := waitForPlatformCRDAvailable(namespace, 15*time.Second); err != nil {
+		return err
+	}
 	isOpenshift, err := openshift.IsOpenShift()
 	if err != nil {
 		return err
@@ -92,6 +98,20 @@ func Platform(namespace string, registry string) error {
 	}
 }
 
+func waitForPlatformCRDAvailable(namespace string, timeout time.Duration) error {
+	deadline := time.Now().Add(timeout)
+	for {
+		pla := v1alpha1.NewIntegrationPlatformList()
+		if err := sdk.List(namespace, &pla); err == nil {
+			return nil
+		}
+		if time.Now().After(deadline) {
+			return errors.New("cannot list integration platforms after " + strconv.FormatInt(timeout.Nanoseconds()/1000000000, 10) + " seconds")
+		}
+		time.Sleep(2 * time.Second)
+	}
+}
+
 // Example --
 func Example(namespace string) error {
 	return Resources(namespace,
diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go
index 378253e..4e446ff 100644
--- a/vendor/github.com/go-openapi/swag/convert.go
+++ b/vendor/github.com/go-openapi/swag/convert.go
@@ -50,17 +50,21 @@ func IsFloat64AJSONInteger(f float64) bool {
 	return diff/math.Min(fa+ga, math.MaxFloat64) < epsilon
 }
 
-var evaluatesAsTrue = map[string]struct{}{
-	"true":     {},
-	"1":        {},
-	"yes":      {},
-	"ok":       {},
-	"y":        {},
-	"on":       {},
-	"selected": {},
-	"checked":  {},
-	"t":        {},
-	"enabled":  {},
+var evaluatesAsTrue map[string]struct{}
+
+func init() {
+	evaluatesAsTrue = map[string]struct{}{
+		"true":     {},
+		"1":        {},
+		"yes":      {},
+		"ok":       {},
+		"y":        {},
+		"on":       {},
+		"selected": {},
+		"checked":  {},
+		"t":        {},
+		"enabled":  {},
+	}
 }
 
 // ConvertBool turn a string into a boolean
diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go
new file mode 100644
index 0000000..e01e1a0
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/doc.go
@@ -0,0 +1,33 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package swag contains a bunch of helper functions for go-openapi and go-swagger projects.
+
+You may also use it standalone for your projects.
+
+  * convert between value and pointers for builtin types
+  * convert from string to builtin types (wraps strconv)
+  * fast json concatenation
+  * search in path
+  * load from file or http
+  * name mangling
+
+
+This repo has only few dependencies outside of the standard library:
+
+  * JSON utilities depend on github.com/mailru/easyjson
+  * YAML utilities depend on gopkg.in/yaml.v2
+*/
+package swag
diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go
index 60e55ec..33da5e4 100644
--- a/vendor/github.com/go-openapi/swag/json.go
+++ b/vendor/github.com/go-openapi/swag/json.go
@@ -21,7 +21,6 @@ import (
 	"reflect"
 	"strings"
 	"sync"
-	"sync/atomic"
 
 	"github.com/mailru/easyjson/jlexer"
 	"github.com/mailru/easyjson/jwriter"
@@ -35,14 +34,13 @@ var DefaultJSONNameProvider = NewNameProvider()
 
 const comma = byte(',')
 
-var atomicClosers atomic.Value
+var closers map[byte]byte
 
 func init() {
-	atomicClosers.Store(
-		map[byte]byte{
-			'{': '}',
-			'[': ']',
-		})
+	closers = map[byte]byte{
+		'{': '}',
+		'[': ']',
+	}
 }
 
 type ejMarshaler interface {
@@ -113,7 +111,6 @@ func ConcatJSON(blobs ...[]byte) []byte {
 	var opening, closing byte
 	var idx, a int
 	buf := bytes.NewBuffer(nil)
-	closers := atomicClosers.Load().(map[byte]byte)
 
 	for i, b := range blobs[:last+1] {
 		if b == nil || bytes.Equal(b, nullJSON) {
@@ -264,7 +261,7 @@ func (n *NameProvider) GetJSONNames(subject interface{}) []string {
 		names = n.makeNameIndex(tpe)
 	}
 
-	var res []string
+	res := make([]string, 0, len(names.jsonNames))
 	for k := range names.jsonNames {
 		res = append(res, k)
 	}
diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go
index 6eba19c..e659968 100644
--- a/vendor/github.com/go-openapi/swag/util.go
+++ b/vendor/github.com/go-openapi/swag/util.go
@@ -88,7 +88,15 @@ func ensureSorted() {
 	initialisms = commonInitialisms.sorted()
 }
 
-// JoinByFormat joins a string array by a known format:
+const (
+	//collectionFormatComma = "csv"
+	collectionFormatSpace = "ssv"
+	collectionFormatTab   = "tsv"
+	collectionFormatPipe  = "pipes"
+	collectionFormatMulti = "multi"
+)
+
+// JoinByFormat joins a string array by a known format (e.g. swagger's collectionFormat attribute):
 //		ssv: space separated value
 //		tsv: tab separated value
 //		pipes: pipe (|) separated value
@@ -99,13 +107,13 @@ func JoinByFormat(data []string, format string) []string {
 	}
 	var sep string
 	switch format {
-	case "ssv":
+	case collectionFormatSpace:
 		sep = " "
-	case "tsv":
+	case collectionFormatTab:
 		sep = "\t"
-	case "pipes":
+	case collectionFormatPipe:
 		sep = "|"
-	case "multi":
+	case collectionFormatMulti:
 		return data
 	default:
 		sep = ","
@@ -118,19 +126,20 @@ func JoinByFormat(data []string, format string) []string {
 //		tsv: tab separated value
 //		pipes: pipe (|) separated value
 //		csv: comma separated value (default)
+//
 func SplitByFormat(data, format string) []string {
 	if data == "" {
 		return nil
 	}
 	var sep string
 	switch format {
-	case "ssv":
+	case collectionFormatSpace:
 		sep = " "
-	case "tsv":
+	case collectionFormatTab:
 		sep = "\t"
-	case "pipes":
+	case collectionFormatPipe:
 		sep = "|"
-	case "multi":
+	case collectionFormatMulti:
 		return nil
 	default:
 		sep = ","
@@ -157,7 +166,7 @@ func (s byLength) Less(i, j int) bool {
 }
 
 // Prepares strings by splitting by caps, spaces, dashes, and underscore
-func split(str string) (words []string) {
+func split(str string) []string {
 	repl := strings.NewReplacer(
 		"@", "At ",
 		"&", "And ",
@@ -185,9 +194,8 @@ func split(str string) (words []string) {
 		str = strings.Replace(str, rex1.ReplaceAllString(k, " $1"), " "+k, -1)
 	}
 	// Get the final list of words
-	words = rex2.FindAllString(str, -1)
-
-	return
+	//words = rex2.FindAllString(str, -1)
+	return rex2.FindAllString(str, -1)
 }
 
 // Removes leading whitespaces
@@ -219,9 +227,10 @@ func Camelize(word string) (camelized string) {
 
 // ToFileName lowercases and underscores a go type name
 func ToFileName(name string) string {
-	var out []string
+	in := split(name)
+	out := make([]string, 0, len(in))
 
-	for _, w := range split(name) {
+	for _, w := range in {
 		out = append(out, lower(w))
 	}
 
@@ -230,8 +239,10 @@ func ToFileName(name string) string {
 
 // ToCommandName lowercases and underscores a go type name
 func ToCommandName(name string) string {
-	var out []string
-	for _, w := range split(name) {
+	in := split(name)
+	out := make([]string, 0, len(in))
+
+	for _, w := range in {
 		out = append(out, lower(w))
 	}
 	return strings.Join(out, "-")
@@ -239,8 +250,10 @@ func ToCommandName(name string) string {
 
 // ToHumanNameLower represents a code name as a human series of words
 func ToHumanNameLower(name string) string {
-	var out []string
-	for _, w := range split(name) {
+	in := split(name)
+	out := make([]string, 0, len(in))
+
+	for _, w := range in {
 		if !isInitialism(upper(w)) {
 			out = append(out, lower(w))
 		} else {
@@ -252,8 +265,10 @@ func ToHumanNameLower(name string) string {
 
 // ToHumanNameTitle represents a code name as a human series of words with the first letters titleized
 func ToHumanNameTitle(name string) string {
-	var out []string
-	for _, w := range split(name) {
+	in := split(name)
+	out := make([]string, 0, len(in))
+
+	for _, w := range in {
 		uw := upper(w)
 		if !isInitialism(uw) {
 			out = append(out, upper(w[:1])+lower(w[1:]))
@@ -266,8 +281,10 @@ func ToHumanNameTitle(name string) string {
 
 // ToJSONName camelcases a name which can be underscored or pascal cased
 func ToJSONName(name string) string {
-	var out []string
-	for i, w := range split(name) {
+	in := split(name)
+	out := make([]string, 0, len(in))
+
+	for i, w := range in {
 		if i == 0 {
 			out = append(out, lower(w))
 			continue
@@ -291,8 +308,10 @@ func ToVarName(name string) string {
 
 // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes
 func ToGoName(name string) string {
-	var out []string
-	for _, w := range split(name) {
+	in := split(name)
+	out := make([]string, 0, len(in))
+
+	for _, w := range in {
 		uw := upper(w)
 		mod := int(math.Min(float64(len(uw)), 2))
 		if !isInitialism(uw) && !isInitialism(uw[:len(uw)-mod]) {
@@ -314,6 +333,16 @@ func ToGoName(name string) string {
 	return result
 }
 
+// ContainsStrings searches a slice of strings for a case-sensitive match
+func ContainsStrings(coll []string, item string) bool {
+	for _, a := range coll {
+		if a == item {
+			return true
+		}
+	}
+	return false
+}
+
 // ContainsStringsCI searches a slice of strings for a case-insensitive match
 func ContainsStringsCI(coll []string, item string) bool {
 	for _, a := range coll {
diff --git a/vendor/github.com/google/btree/LICENSE b/vendor/github.com/google/btree/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/google/btree/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go
new file mode 100644
index 0000000..6ff062f
--- /dev/null
+++ b/vendor/github.com/google/btree/btree.go
@@ -0,0 +1,890 @@
+// Copyright 2014 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package btree implements in-memory B-Trees of arbitrary degree.
+//
+// btree implements an in-memory B-Tree for use as an ordered data structure.
+// It is not meant for persistent storage solutions.
+//
+// It has a flatter structure than an equivalent red-black or other binary tree,
+// which in some cases yields better memory usage and/or performance.
+// See some discussion on the matter here:
+//   http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
+// Note, though, that this project is in no way related to the C++ B-Tree
+// implementation written about there.
+//
+// Within this tree, each node contains a slice of items and a (possibly nil)
+// slice of children.  For basic numeric values or raw structs, this can cause
+// efficiency differences when compared to equivalent C++ template code that
+// stores values in arrays within the node:
+//   * Due to the overhead of storing values as interfaces (each
+//     value needs to be stored as the value itself, then 2 words for the
+//     interface pointing to that value and its type), resulting in higher
+//     memory use.
+//   * Since interfaces can point to values anywhere in memory, values are
+//     most likely not stored in contiguous blocks, resulting in a higher
+//     number of cache misses.
+// These issues don't tend to matter, though, when working with strings or other
+// heap-allocated structures, since C++-equivalent structures also must store
+// pointers and also distribute their values across the heap.
+//
+// This implementation is designed to be a drop-in replacement to gollrb.LLRB
+// trees, (http://github.com/petar/gollrb), an excellent and probably the most
+// widely used ordered tree implementation in the Go ecosystem currently.
+// Its functions, therefore, exactly mirror those of
+// llrb.LLRB where possible.  Unlike gollrb, though, we currently don't
+// support storing multiple equivalent values.
+package btree
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+	"sync"
+)
+
+// Item represents a single object in the tree.
+type Item interface {
+	// Less tests whether the current item is less than the given argument.
+	//
+	// This must provide a strict weak ordering.
+	// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
+	// hold one of either a or b in the tree).
+	Less(than Item) bool
+}
+
+const (
+	DefaultFreeListSize = 32
+)
+
+var (
+	nilItems    = make(items, 16)
+	nilChildren = make(children, 16)
+)
+
+// FreeList represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList.
+// Two Btrees using the same freelist are safe for concurrent write access.
+type FreeList struct {
+	mu       sync.Mutex
+	freelist []*node
+}
+
+// NewFreeList creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeList(size int) *FreeList {
+	return &FreeList{freelist: make([]*node, 0, size)}
+}
+
+func (f *FreeList) newNode() (n *node) {
+	f.mu.Lock()
+	index := len(f.freelist) - 1
+	if index < 0 {
+		f.mu.Unlock()
+		return new(node)
+	}
+	n = f.freelist[index]
+	f.freelist[index] = nil
+	f.freelist = f.freelist[:index]
+	f.mu.Unlock()
+	return
+}
+
+// freeNode adds the given node to the list, returning true if it was added
+// and false if it was discarded.
+func (f *FreeList) freeNode(n *node) (out bool) {
+	f.mu.Lock()
+	if len(f.freelist) < cap(f.freelist) {
+		f.freelist = append(f.freelist, n)
+		out = true
+	}
+	f.mu.Unlock()
+	return
+}
+
+// ItemIterator allows callers of Ascend* to iterate in-order over portions of
+// the tree.  When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIterator func(i Item) bool
+
+// New creates a new B-Tree with the given degree.
+//
+// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+func New(degree int) *BTree {
+	return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
+}
+
+// NewWithFreeList creates a new B-Tree that uses the given node free list.
+func NewWithFreeList(degree int, f *FreeList) *BTree {
+	if degree <= 1 {
+		panic("bad degree")
+	}
+	return &BTree{
+		degree: degree,
+		cow:    &copyOnWriteContext{freelist: f},
+	}
+}
+
+// items stores items in a node.
+type items []Item
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *items) insertAt(index int, item Item) {
+	*s = append(*s, nil)
+	if index < len(*s) {
+		copy((*s)[index+1:], (*s)[index:])
+	}
+	(*s)[index] = item
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *items) removeAt(index int) Item {
+	item := (*s)[index]
+	copy((*s)[index:], (*s)[index+1:])
+	(*s)[len(*s)-1] = nil
+	*s = (*s)[:len(*s)-1]
+	return item
+}
+
+// pop removes and returns the last element in the list.
+func (s *items) pop() (out Item) {
+	index := len(*s) - 1
+	out = (*s)[index]
+	(*s)[index] = nil
+	*s = (*s)[:index]
+	return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index items. index must be less than or equal to length.
+func (s *items) truncate(index int) {
+	var toClear items
+	*s, toClear = (*s)[:index], (*s)[index:]
+	for len(toClear) > 0 {
+		toClear = toClear[copy(toClear, nilItems):]
+	}
+}
+
+// find returns the index where the given item should be inserted into this
+// list.  'found' is true if the item already exists in the list at the given
+// index.
+func (s items) find(item Item) (index int, found bool) {
+	i := sort.Search(len(s), func(i int) bool {
+		return item.Less(s[i])
+	})
+	if i > 0 && !s[i-1].Less(item) {
+		return i - 1, true
+	}
+	return i, false
+}
+
+// children stores child nodes in a node.
+type children []*node
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *children) insertAt(index int, n *node) {
+	*s = append(*s, nil)
+	if index < len(*s) {
+		copy((*s)[index+1:], (*s)[index:])
+	}
+	(*s)[index] = n
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *children) removeAt(index int) *node {
+	n := (*s)[index]
+	copy((*s)[index:], (*s)[index+1:])
+	(*s)[len(*s)-1] = nil
+	*s = (*s)[:len(*s)-1]
+	return n
+}
+
+// pop removes and returns the last element in the list.
+func (s *children) pop() (out *node) {
+	index := len(*s) - 1
+	out = (*s)[index]
+	(*s)[index] = nil
+	*s = (*s)[:index]
+	return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index children. index must be less than or equal to length.
+func (s *children) truncate(index int) {
+	var toClear children
+	*s, toClear = (*s)[:index], (*s)[index:]
+	for len(toClear) > 0 {
+		toClear = toClear[copy(toClear, nilChildren):]
+	}
+}
+
+// node is an internal node in a tree.
+//
+// It must at all times maintain the invariant that either
+//   * len(children) == 0, len(items) unconstrained
+//   * len(children) == len(items) + 1
+type node struct {
+	items    items
+	children children
+	cow      *copyOnWriteContext
+}
+
+func (n *node) mutableFor(cow *copyOnWriteContext) *node {
+	if n.cow == cow {
+		return n
+	}
+	out := cow.newNode()
+	if cap(out.items) >= len(n.items) {
+		out.items = out.items[:len(n.items)]
+	} else {
+		out.items = make(items, len(n.items), cap(n.items))
+	}
+	copy(out.items, n.items)
+	// Copy children
+	if cap(out.children) >= len(n.children) {
+		out.children = out.children[:len(n.children)]
+	} else {
+		out.children = make(children, len(n.children), cap(n.children))
+	}
+	copy(out.children, n.children)
+	return out
+}
+
+func (n *node) mutableChild(i int) *node {
+	c := n.children[i].mutableFor(n.cow)
+	n.children[i] = c
+	return c
+}
+
+// split splits the given node at the given index.  The current node shrinks,
+// and this function returns the item that existed at that index and a new node
+// containing all items/children after it.
+func (n *node) split(i int) (Item, *node) {
+	item := n.items[i]
+	next := n.cow.newNode()
+	next.items = append(next.items, n.items[i+1:]...)
+	n.items.truncate(i)
+	if len(n.children) > 0 {
+		next.children = append(next.children, n.children[i+1:]...)
+		n.children.truncate(i + 1)
+	}
+	return item, next
+}
+
+// maybeSplitChild checks if a child should be split, and if so splits it.
+// Returns whether or not a split occurred.
+func (n *node) maybeSplitChild(i, maxItems int) bool {
+	if len(n.children[i].items) < maxItems {
+		return false
+	}
+	first := n.mutableChild(i)
+	item, second := first.split(maxItems / 2)
+	n.items.insertAt(i, item)
+	n.children.insertAt(i+1, second)
+	return true
+}
+
+// insert inserts an item into the subtree rooted at this node, making sure
+// no nodes in the subtree exceed maxItems items.  Should an equivalent item be
+// be found/replaced by insert, it will be returned.
+func (n *node) insert(item Item, maxItems int) Item {
+	i, found := n.items.find(item)
+	if found {
+		out := n.items[i]
+		n.items[i] = item
+		return out
+	}
+	if len(n.children) == 0 {
+		n.items.insertAt(i, item)
+		return nil
+	}
+	if n.maybeSplitChild(i, maxItems) {
+		inTree := n.items[i]
+		switch {
+		case item.Less(inTree):
+			// no change, we want first split node
+		case inTree.Less(item):
+			i++ // we want second split node
+		default:
+			out := n.items[i]
+			n.items[i] = item
+			return out
+		}
+	}
+	return n.mutableChild(i).insert(item, maxItems)
+}
+
+// get finds the given key in the subtree and returns it.
+func (n *node) get(key Item) Item {
+	i, found := n.items.find(key)
+	if found {
+		return n.items[i]
+	} else if len(n.children) > 0 {
+		return n.children[i].get(key)
+	}
+	return nil
+}
+
+// min returns the first item in the subtree.
+func min(n *node) Item {
+	if n == nil {
+		return nil
+	}
+	for len(n.children) > 0 {
+		n = n.children[0]
+	}
+	if len(n.items) == 0 {
+		return nil
+	}
+	return n.items[0]
+}
+
+// max returns the last item in the subtree.
+func max(n *node) Item {
+	if n == nil {
+		return nil
+	}
+	for len(n.children) > 0 {
+		n = n.children[len(n.children)-1]
+	}
+	if len(n.items) == 0 {
+		return nil
+	}
+	return n.items[len(n.items)-1]
+}
+
+// toRemove details what item to remove in a node.remove call.
+type toRemove int
+
+const (
+	removeItem toRemove = iota // removes the given item
+	removeMin                  // removes smallest item in the subtree
+	removeMax                  // removes largest item in the subtree
+)
+
+// remove removes an item from the subtree rooted at this node.
+func (n *node) remove(item Item, minItems int, typ toRemove) Item {
+	var i int
+	var found bool
+	switch typ {
+	case removeMax:
+		if len(n.children) == 0 {
+			return n.items.pop()
+		}
+		i = len(n.items)
+	case removeMin:
+		if len(n.children) == 0 {
+			return n.items.removeAt(0)
+		}
+		i = 0
+	case removeItem:
+		i, found = n.items.find(item)
+		if len(n.children) == 0 {
+			if found {
+				return n.items.removeAt(i)
+			}
+			return nil
+		}
+	default:
+		panic("invalid type")
+	}
+	// If we get to here, we have children.
+	if len(n.children[i].items) <= minItems {
+		return n.growChildAndRemove(i, item, minItems, typ)
+	}
+	child := n.mutableChild(i)
+	// Either we had enough items to begin with, or we've done some
+	// merging/stealing, because we've got enough now and we're ready to return
+	// stuff.
+	if found {
+		// The item exists at index 'i', and the child we've selected can give us a
+		// predecessor, since if we've gotten here it's got > minItems items in it.
+		out := n.items[i]
+		// We use our special-case 'remove' call with typ=maxItem to pull the
+		// predecessor of item i (the rightmost leaf of our immediate left child)
+		// and set it into where we pulled the item from.
+		n.items[i] = child.remove(nil, minItems, removeMax)
+		return out
+	}
+	// Final recursive call.  Once we're here, we know that the item isn't in this
+	// node and that the child is big enough to remove from.
+	return child.remove(item, minItems, typ)
+}
+
+// growChildAndRemove grows child 'i' to make sure it's possible to remove an
+// item from it while keeping it at minItems, then calls remove to actually
+// remove it.
+//
+// Most documentation says we have to do two sets of special casing:
+//   1) item is in this node
+//   2) item is in child
+// In both cases, we need to handle the two subcases:
+//   A) node has enough values that it can spare one
+//   B) node doesn't have enough values
+// For the latter, we have to check:
+//   a) left sibling has node to spare
+//   b) right sibling has node to spare
+//   c) we must merge
+// To simplify our code here, we handle cases #1 and #2 the same:
+// If a node doesn't have enough items, we make sure it does (using a,b,c).
+// We then simply redo our remove call, and the second time (regardless of
+// whether we're in case 1 or 2), we'll have enough items and can guarantee
+// that we hit case A.
+func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
+	if i > 0 && len(n.children[i-1].items) > minItems {
+		// Steal from left child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i - 1)
+		stolenItem := stealFrom.items.pop()
+		child.items.insertAt(0, n.items[i-1])
+		n.items[i-1] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children.insertAt(0, stealFrom.children.pop())
+		}
+	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
+		// steal from right child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i + 1)
+		stolenItem := stealFrom.items.removeAt(0)
+		child.items = append(child.items, n.items[i])
+		n.items[i] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children = append(child.children, stealFrom.children.removeAt(0))
+		}
+	} else {
+		if i >= len(n.items) {
+			i--
+		}
+		child := n.mutableChild(i)
+		// merge with right child
+		mergeItem := n.items.removeAt(i)
+		mergeChild := n.children.removeAt(i + 1)
+		child.items = append(child.items, mergeItem)
+		child.items = append(child.items, mergeChild.items...)
+		child.children = append(child.children, mergeChild.children...)
+		n.cow.freeNode(mergeChild)
+	}
+	return n.remove(item, minItems, typ)
+}
+
+type direction int
+
+const (
+	descend = direction(-1)
+	ascend  = direction(+1)
+)
+
+// iterate provides a simple method for iterating over elements in the tree.
+//
+// When ascending, the 'start' should be less than 'stop' and when descending,
+// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
+// will force the iterator to include the first item when it equals 'start',
+// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a
+// "greaterThan" or "lessThan" queries.
+func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) {
+	var ok, found bool
+	var index int
+	switch dir {
+	case ascend:
+		if start != nil {
+			index, _ = n.items.find(start)
+		}
+		for i := index; i < len(n.items); i++ {
+			if len(n.children) > 0 {
+				if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+					return hit, false
+				}
+			}
+			if !includeStart && !hit && start != nil && !start.Less(n.items[i]) {
+				hit = true
+				continue
+			}
+			hit = true
+			if stop != nil && !n.items[i].Less(stop) {
+				return hit, false
+			}
+			if !iter(n.items[i]) {
+				return hit, false
+			}
+		}
+		if len(n.children) > 0 {
+			if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+				return hit, false
+			}
+		}
+	case descend:
+		if start != nil {
+			index, found = n.items.find(start)
+			if !found {
+				index = index - 1
+			}
+		} else {
+			index = len(n.items) - 1
+		}
+		for i := index; i >= 0; i-- {
+			if start != nil && !n.items[i].Less(start) {
+				if !includeStart || hit || start.Less(n.items[i]) {
+					continue
+				}
+			}
+			if len(n.children) > 0 {
+				if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+					return hit, false
+				}
+			}
+			if stop != nil && !stop.Less(n.items[i]) {
+				return hit, false //	continue
+			}
+			hit = true
+			if !iter(n.items[i]) {
+				return hit, false
+			}
+		}
+		if len(n.children) > 0 {
+			if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+				return hit, false
+			}
+		}
+	}
+	return hit, true
+}
+
+// Used for testing/debugging purposes.
+func (n *node) print(w io.Writer, level int) {
+	fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat("  ", level), n.items)
+	for _, c := range n.children {
+		c.print(w, level+1)
+	}
+}
+
+// BTree is an implementation of a B-Tree.
+//
+// BTree stores Item instances in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTree struct {
+	degree int
+	length int
+	root   *node
+	cow    *copyOnWriteContext
+}
+
+// copyOnWriteContext pointers determine node ownership... a tree with a write
+// context equivalent to a node's write context is allowed to modify that node.
+// A tree whose write context does not match a node's is not allowed to modify
+// it, and must create a new, writable copy (IE: it's a Clone).
+//
+// When doing any write operation, we maintain the invariant that the current
+// node's context is equal to the context of the tree that requested the write.
+// We do this by, before we descend into any node, creating a copy with the
+// correct context if the contexts don't match.
+//
+// Since the node we're currently visiting on any write has the requesting
+// tree's context, that node is modifiable in place.  Children of that node may
+// not share context, but before we descend into them, we'll make a mutable
+// copy.
+type copyOnWriteContext struct {
+	freelist *FreeList
+}
+
+// Clone clones the btree, lazily.  Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of b is marked read-only and shared between t and
+// t2.  Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of b's original nodes would have been modified.  Read operations
+// should have no performance degredation.  Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the original performance characteristics of the original tree.
+func (t *BTree) Clone() (t2 *BTree) {
+	// Create two entirely new copy-on-write contexts.
+	// This operation effectively creates three trees:
+	//   the original, shared nodes (old b.cow)
+	//   the new b.cow nodes
+	//   the new out.cow nodes
+	cow1, cow2 := *t.cow, *t.cow
+	out := *t
+	t.cow = &cow1
+	out.cow = &cow2
+	return &out
+}
+
+// maxItems returns the max number of items to allow per node.
+func (t *BTree) maxItems() int {
+	return t.degree*2 - 1
+}
+
+// minItems returns the min number of items to allow per node (ignored for the
+// root node).
+func (t *BTree) minItems() int {
+	return t.degree - 1
+}
+
+func (c *copyOnWriteContext) newNode() (n *node) {
+	n = c.freelist.newNode()
+	n.cow = c
+	return
+}
+
// freeType describes the outcome of freeNode (see the constants below).
type freeType int

const (
	ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
	ftStored                       // node was stored in the freelist for later use
	ftNotOwned                     // node was ignored by COW, since it's owned by another one
)
+
+// freeNode frees a node within a given COW context, if it's owned by that
+// context.  It returns what happened to the node (see freeType const
+// documentation).
+func (c *copyOnWriteContext) freeNode(n *node) freeType {
+	if n.cow == c {
+		// clear to allow GC
+		n.items.truncate(0)
+		n.children.truncate(0)
+		n.cow = nil
+		if c.freelist.freeNode(n) {
+			return ftStored
+		} else {
+			return ftFreelistFull
+		}
+	} else {
+		return ftNotOwned
+	}
+}
+
+// ReplaceOrInsert adds the given item to the tree.  If an item in the tree
+// already equals the given one, it is removed from the tree and returned.
+// Otherwise, nil is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTree) ReplaceOrInsert(item Item) Item {
+	if item == nil {
+		panic("nil item being added to BTree")
+	}
+	if t.root == nil {
+		t.root = t.cow.newNode()
+		t.root.items = append(t.root.items, item)
+		t.length++
+		return nil
+	} else {
+		t.root = t.root.mutableFor(t.cow)
+		if len(t.root.items) >= t.maxItems() {
+			item2, second := t.root.split(t.maxItems() / 2)
+			oldroot := t.root
+			t.root = t.cow.newNode()
+			t.root.items = append(t.root.items, item2)
+			t.root.children = append(t.root.children, oldroot, second)
+		}
+	}
+	out := t.root.insert(item, t.maxItems())
+	if out == nil {
+		t.length++
+	}
+	return out
+}
+
// Delete removes an item equal to the passed in item from the tree, returning
// it.  If no such item exists, returns nil.
func (t *BTree) Delete(item Item) Item {
	return t.deleteItem(item, removeItem)
}
+
// DeleteMin removes the smallest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMin() Item {
	// item is ignored for removeMin, so nil is passed.
	return t.deleteItem(nil, removeMin)
}
+
// DeleteMax removes the largest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMax() Item {
	// item is ignored for removeMax, so nil is passed.
	return t.deleteItem(nil, removeMax)
}
+
// deleteItem removes an item (selected by typ: exact match, minimum, or
// maximum) from the tree, returning it, or nil if the tree is empty or the
// item is absent.
func (t *BTree) deleteItem(item Item, typ toRemove) Item {
	if t.root == nil || len(t.root.items) == 0 {
		return nil
	}
	t.root = t.root.mutableFor(t.cow)
	out := t.root.remove(item, t.minItems(), typ)
	if len(t.root.items) == 0 && len(t.root.children) > 0 {
		// The root drained to zero items but still has a child: shrink the
		// tree by one level and recycle the old root.
		oldroot := t.root
		t.root = t.root.children[0]
		t.cow.freeNode(oldroot)
	}
	if out != nil {
		t.length--
	}
	return out
}
+
// AscendRange calls the iterator for every value in the tree within the range
// [greaterOrEqual, lessThan), until iterator returns false.
func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
	if t.root == nil {
		// Empty tree: nothing to visit.
		return
	}
	t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator)
}
+
// AscendLessThan calls the iterator for every value in the tree within the range
// [first, pivot), until iterator returns false.
func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
	if t.root == nil {
		// Empty tree: nothing to visit.
		return
	}
	t.root.iterate(ascend, nil, pivot, false, false, iterator)
}
+
// AscendGreaterOrEqual calls the iterator for every value in the tree within
// the range [pivot, last], until iterator returns false.
func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
	if t.root == nil {
		// Empty tree: nothing to visit.
		return
	}
	t.root.iterate(ascend, pivot, nil, true, false, iterator)
}
+
// Ascend calls the iterator for every value in the tree within the range
// [first, last], until iterator returns false.
func (t *BTree) Ascend(iterator ItemIterator) {
	if t.root == nil {
		// Empty tree: nothing to visit.
		return
	}
	t.root.iterate(ascend, nil, nil, false, false, iterator)
}
+
// DescendRange calls the iterator for every value in the tree within the range
// [lessOrEqual, greaterThan), until iterator returns false.
func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
	if t.root == nil {
		// Empty tree: nothing to visit.
		return
	}
	t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator)
}
+
// DescendLessOrEqual calls the iterator for every value in the tree within the range
// [pivot, first], until iterator returns false.
func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
	if t.root == nil {
		// Empty tree: nothing to visit.
		return
	}
	t.root.iterate(descend, pivot, nil, true, false, iterator)
}
+
// DescendGreaterThan calls the iterator for every value in the tree within
// the range (pivot, last], until iterator returns false.
func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
	if t.root == nil {
		// Empty tree: nothing to visit.
		return
	}
	t.root.iterate(descend, nil, pivot, false, false, iterator)
}
+
// Descend calls the iterator for every value in the tree within the range
// [last, first], until iterator returns false.
func (t *BTree) Descend(iterator ItemIterator) {
	if t.root == nil {
		// Empty tree: nothing to visit.
		return
	}
	t.root.iterate(descend, nil, nil, false, false, iterator)
}
+
+// Get looks for the key item in the tree, returning it.  It returns nil if
+// unable to find that item.
+func (t *BTree) Get(key Item) Item {
+	if t.root == nil {
+		return nil
+	}
+	return t.root.get(key)
+}
+
// Min returns the smallest item in the tree, or nil if the tree is empty.
// Delegates to the package-level min helper (defined elsewhere in this file).
func (t *BTree) Min() Item {
	return min(t.root)
}
+
// Max returns the largest item in the tree, or nil if the tree is empty.
// Delegates to the package-level max helper (defined elsewhere in this file).
func (t *BTree) Max() Item {
	return max(t.root)
}
+
// Has returns true if the given key is in the tree.
func (t *BTree) Has(key Item) bool {
	return t.Get(key) != nil
}
+
// Len returns the number of items currently in the tree.
// length is maintained incrementally by ReplaceOrInsert/deleteItem, so this is O(1).
func (t *BTree) Len() int {
	return t.length
}
+
// Clear removes all items from the btree.  If addNodesToFreelist is true,
// t's nodes are added to its freelist as part of this call, until the freelist
// is full.  Otherwise, the root node is simply dereferenced and the subtree
// left to Go's normal GC processes.
//
// This can be much faster than calling Delete on all elements, because that
// requires finding/removing each element in the tree and updating the tree
// accordingly.  It also is somewhat faster than creating a new tree to
// replace the old one, because nodes from the old tree are reclaimed into the
// freelist for use by the new one, instead of being lost to the garbage
// collector.
//
// This call takes:
//   O(1): when addNodesToFreelist is false, this is a single operation.
//   O(1): when the freelist is already full, it breaks out immediately
//   O(freelist size):  when the freelist is empty and the nodes are all owned
//       by this tree, nodes are added to the freelist until full.
//   O(tree size):  when all nodes are owned by another tree, all nodes are
//       iterated over looking for nodes to add to the freelist, and due to
//       ownership, none are.
func (t *BTree) Clear(addNodesToFreelist bool) {
	if t.root != nil && addNodesToFreelist {
		t.root.reset(t.cow)
	}
	t.root, t.length = nil, 0
}
+
// reset returns a subtree to the freelist.  It breaks out immediately if the
// freelist is full, since the only benefit of iterating is to fill that
// freelist up.  Returns true if parent reset call should continue.
func (n *node) reset(c *copyOnWriteContext) bool {
	for _, child := range n.children {
		if !child.reset(c) {
			return false
		}
	}
	// Stop the walk as soon as freeNode reports the freelist is full.
	return c.freeNode(n) != ftFreelistFull
}
+
+// Int implements the Item interface for integers.
+type Int int
+
+// Less returns true if int(a) < int(b).
+func (a Int) Less(b Item) bool {
+	return a < b.(Int)
+}
diff --git a/vendor/github.com/google/btree/btree_mem.go b/vendor/github.com/google/btree/btree_mem.go
new file mode 100644
index 0000000..cb95b7f
--- /dev/null
+++ b/vendor/github.com/google/btree/btree_mem.go
@@ -0,0 +1,76 @@
+// Copyright 2014 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build ignore
+
+// This binary compares memory usage between btree and gollrb.
+package main
+
+import (
+	"flag"
+	"fmt"
+	"math/rand"
+	"runtime"
+	"time"
+
+	"github.com/google/btree"
+	"github.com/petar/GoLLRB/llrb"
+)
+
+var (
+	size   = flag.Int("size", 1000000, "size of the tree to build")
+	degree = flag.Int("degree", 8, "degree of btree")
+	gollrb = flag.Bool("llrb", false, "use llrb instead of btree")
+)
+
// main builds a tree of -size random ints using either google/btree (default)
// or petar/GoLLRB (-llrb), printing runtime.MemStats before the build, after
// it, and after forced GC runs, so the memory footprints can be compared.
func main() {
	flag.Parse()
	vals := rand.Perm(*size)
	// t holds the built tree and v the input slice; keeping both referenced
	// (see the comparison at the bottom) prevents the GC from collecting them
	// before the final measurement.
	var t, v interface{}
	v = vals
	var stats runtime.MemStats
	for i := 0; i < 10; i++ {
		runtime.GC()
	}
	fmt.Println("-------- BEFORE ----------")
	runtime.ReadMemStats(&stats)
	fmt.Printf("%+v\n", stats)
	start := time.Now()
	if *gollrb {
		tr := llrb.New()
		for _, v := range vals {
			tr.ReplaceOrInsert(llrb.Int(v))
		}
		t = tr // keep it around
	} else {
		tr := btree.New(*degree)
		for _, v := range vals {
			tr.ReplaceOrInsert(btree.Int(v))
		}
		t = tr // keep it around
	}
	fmt.Printf("%v inserts in %v\n", *size, time.Since(start))
	fmt.Println("-------- AFTER ----------")
	runtime.ReadMemStats(&stats)
	fmt.Printf("%+v\n", stats)
	for i := 0; i < 10; i++ {
		runtime.GC()
	}
	fmt.Println("-------- AFTER GC ----------")
	runtime.ReadMemStats(&stats)
	fmt.Printf("%+v\n", stats)
	if t == v {
		fmt.Println("to make sure vals and tree aren't GC'd")
	}
}
diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt
new file mode 100644
index 0000000..81316be
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/LICENSE.txt
@@ -0,0 +1,7 @@
+Copyright © 2012 Greg Jones (greg.jones@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go
new file mode 100644
index 0000000..42e3129
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go
@@ -0,0 +1,61 @@
+// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
+// to supplement an in-memory map with persistent storage
+//
+package diskcache
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/hex"
+	"github.com/peterbourgon/diskv"
+	"io"
+)
+
// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage
type Cache struct {
	d *diskv.Diskv // backing store: disk files plus diskv's bounded in-memory layer
}
+
+// Get returns the response corresponding to key if present
+func (c *Cache) Get(key string) (resp []byte, ok bool) {
+	key = keyToFilename(key)
+	resp, err := c.d.Read(key)
+	if err != nil {
+		return []byte{}, false
+	}
+	return resp, true
+}
+
// Set saves a response to the cache as key
func (c *Cache) Set(key string, resp []byte) {
	key = keyToFilename(key)
	// NOTE(review): WriteStream's error is discarded — a failed write silently
	// leaves the entry uncached. The Cache interface offers no error path, so
	// this is best-effort by design.
	c.d.WriteStream(key, bytes.NewReader(resp), true)
}
+
// Delete removes the response with key from the cache
func (c *Cache) Delete(key string) {
	key = keyToFilename(key)
	// Erase errors are discarded (best-effort; the interface has no error path).
	c.d.Erase(key)
}
+
// keyToFilename maps an arbitrary cache key to a fixed-length,
// filesystem-safe name: the hex encoding of the key's MD5 digest.
func keyToFilename(key string) string {
	sum := md5.Sum([]byte(key))
	return hex.EncodeToString(sum[:])
}
+
// New returns a new Cache that will store files in basePath
func New(basePath string) *Cache {
	return &Cache{
		d: diskv.New(diskv.Options{
			BasePath:     basePath,
			CacheSizeMax: 100 * 1024 * 1024, // 100MB cap for diskv's in-memory layer
		}),
	}
}
+
// NewWithDiskv returns a new Cache using the provided Diskv as underlying
// storage.
func NewWithDiskv(d *diskv.Diskv) *Cache {
	return &Cache{d}
}
diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go
new file mode 100644
index 0000000..f6a2ec4
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/httpcache.go
@@ -0,0 +1,551 @@
+// Package httpcache provides a http.RoundTripper implementation that works as a
+// mostly RFC-compliant cache for http responses.
+//
+// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
+// and not for a shared proxy).
+//
+package httpcache
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/httputil"
+	"strings"
+	"sync"
+	"time"
+)
+
const (
	// Freshness levels returned by getFreshness.
	stale = iota
	fresh
	transparent
	// XFromCache is the header added to responses that are returned from the cache
	XFromCache = "X-From-Cache"
)
+
// A Cache interface is used by the Transport to store and retrieve responses.
//
// NOTE(review): Transport calls these from RoundTrip, which http.Client may
// invoke concurrently — implementations presumably must be goroutine-safe
// (MemoryCache locks internally); confirm for custom backends.
type Cache interface {
	// Get returns the []byte representation of a cached response and a bool
	// set to true if the value isn't empty
	Get(key string) (responseBytes []byte, ok bool)
	// Set stores the []byte representation of a response against a key
	Set(key string, responseBytes []byte)
	// Delete removes the value associated with the key
	Delete(key string)
}
+
+// cacheKey returns the cache key for req.
+func cacheKey(req *http.Request) string {
+	if req.Method == http.MethodGet {
+		return req.URL.String()
+	} else {
+		return req.Method + " " + req.URL.String()
+	}
+}
+
// CachedResponse returns the cached http.Response for req if present, and nil
// otherwise.
func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
	cachedVal, ok := c.Get(cacheKey(req))
	if !ok {
		// Cache miss: both named returns stay nil.
		return
	}

	b := bytes.NewBuffer(cachedVal)
	return http.ReadResponse(bufio.NewReader(b), req)
}
+
// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
type MemoryCache struct {
	mu    sync.RWMutex // guards items for concurrent use
	items map[string][]byte
}
+
+// Get returns the []byte representation of the response and true if present, false if not
+func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
+	c.mu.RLock()
+	resp, ok = c.items[key]
+	c.mu.RUnlock()
+	return resp, ok
+}
+
+// Set saves response resp to the cache with key
+func (c *MemoryCache) Set(key string, resp []byte) {
+	c.mu.Lock()
+	c.items[key] = resp
+	c.mu.Unlock()
+}
+
+// Delete removes key from the cache
+func (c *MemoryCache) Delete(key string) {
+	c.mu.Lock()
+	delete(c.items, key)
+	c.mu.Unlock()
+}
+
+// NewMemoryCache returns a new Cache that will store items in an in-memory map
+func NewMemoryCache() *MemoryCache {
+	c := &MemoryCache{items: map[string][]byte{}}
+	return c
+}
+
// Transport is an implementation of http.RoundTripper that will return values from a cache
// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
// to repeated requests allowing servers to return 304 / Not Modified
//
// NOTE(review): Cache must be non-nil — RoundTrip calls t.Cache methods
// unconditionally and would panic otherwise.
type Transport struct {
	// The RoundTripper interface actually used to make requests
	// If nil, http.DefaultTransport is used
	Transport http.RoundTripper
	Cache     Cache
	// If true, responses returned from the cache will be given an extra header, X-From-Cache
	MarkCachedResponses bool
}
+
// NewTransport returns a new Transport with the
// provided Cache implementation and MarkCachedResponses set to true
func NewTransport(c Cache) *Transport {
	return &Transport{Cache: c, MarkCachedResponses: true}
}
+
// Client returns an *http.Client that caches responses.
func (t *Transport) Client() *http.Client {
	return &http.Client{Transport: t}
}
+
+// varyMatches will return false unless all of the cached values for the headers listed in Vary
+// match the new request
+func varyMatches(cachedResp *http.Response, req *http.Request) bool {
+	for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
+		header = http.CanonicalHeaderKey(header)
+		if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
+			return false
+		}
+	}
+	return true
+}
+
// RoundTrip takes a Request and returns a Response
//
// If there is a fresh Response already in cache, then it will be returned without connecting to
// the server.
//
// If there is a stale Response, then any validators it contains will be set on the new request
// to give the server a chance to respond with NotModified. If this happens, then the cached Response
// will be returned.
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
	cacheKey := cacheKey(req)
	// Only GET/HEAD requests without a Range header are candidates for caching.
	cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
	var cachedResp *http.Response
	if cacheable {
		cachedResp, err = CachedResponse(t.Cache, req)
	} else {
		// Need to invalidate an existing value
		t.Cache.Delete(cacheKey)
	}

	transport := t.Transport
	if transport == nil {
		transport = http.DefaultTransport
	}

	if cacheable && cachedResp != nil && err == nil {
		if t.MarkCachedResponses {
			cachedResp.Header.Set(XFromCache, "1")
		}

		if varyMatches(cachedResp, req) {
			// Can only use cached value if the new request doesn't Vary significantly
			freshness := getFreshness(cachedResp.Header, req.Header)
			if freshness == fresh {
				return cachedResp, nil
			}

			if freshness == stale {
				var req2 *http.Request
				// Add validators if caller hasn't already done so
				etag := cachedResp.Header.Get("etag")
				if etag != "" && req.Header.Get("etag") == "" {
					req2 = cloneRequest(req)
					req2.Header.Set("if-none-match", etag)
				}
				lastModified := cachedResp.Header.Get("last-modified")
				if lastModified != "" && req.Header.Get("last-modified") == "" {
					if req2 == nil {
						req2 = cloneRequest(req)
					}
					req2.Header.Set("if-modified-since", lastModified)
				}
				if req2 != nil {
					req = req2
				}
			}
		}

		resp, err = transport.RoundTrip(req)
		if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
			// Replace the 304 response with the one from cache, but update with some new headers
			endToEndHeaders := getEndToEndHeaders(resp.Header)
			for _, header := range endToEndHeaders {
				cachedResp.Header[header] = resp.Header[header]
			}
			resp = cachedResp
		} else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
			req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
			// In case of transport failure and stale-if-error activated, returns cached content
			// when available
			return cachedResp, nil
		} else {
			if err != nil || resp.StatusCode != http.StatusOK {
				t.Cache.Delete(cacheKey)
			}
			if err != nil {
				return nil, err
			}
		}
	} else {
		reqCacheControl := parseCacheControl(req.Header)
		if _, ok := reqCacheControl["only-if-cached"]; ok {
			// RFC 7234: only-if-cached with no cached entry yields 504.
			resp = newGatewayTimeoutResponse(req)
		} else {
			resp, err = transport.RoundTrip(req)
			if err != nil {
				return nil, err
			}
		}
	}

	if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
		// Stash the request's values for each Vary-listed header so a later
		// lookup (varyMatches) can verify the cached entry still applies.
		for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
			varyKey = http.CanonicalHeaderKey(varyKey)
			fakeHeader := "X-Varied-" + varyKey
			reqValue := req.Header.Get(varyKey)
			if reqValue != "" {
				resp.Header.Set(fakeHeader, reqValue)
			}
		}
		switch req.Method {
		case "GET":
			// Delay caching until EOF is reached.
			resp.Body = &cachingReadCloser{
				R: resp.Body,
				OnEOF: func(r io.Reader) {
					resp := *resp
					resp.Body = ioutil.NopCloser(r)
					respBytes, err := httputil.DumpResponse(&resp, true)
					if err == nil {
						t.Cache.Set(cacheKey, respBytes)
					}
				},
			}
		default:
			respBytes, err := httputil.DumpResponse(resp, true)
			if err == nil {
				t.Cache.Set(cacheKey, respBytes)
			}
		}
	} else {
		t.Cache.Delete(cacheKey)
	}
	return resp, nil
}
+
+// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
+var ErrNoDateHeader = errors.New("no Date header")
+
+// Date parses and returns the value of the Date header.
+func Date(respHeaders http.Header) (date time.Time, err error) {
+	dateHeader := respHeaders.Get("date")
+	if dateHeader == "" {
+		err = ErrNoDateHeader
+		return
+	}
+
+	return time.Parse(time.RFC1123, dateHeader)
+}
+
// realClock implements timer using the real wall clock.
type realClock struct{}

func (c *realClock) since(d time.Time) time.Duration {
	return time.Since(d)
}

// timer abstracts elapsed-time measurement so tests can substitute a fake clock.
type timer interface {
	since(d time.Time) time.Duration
}

// clock is the package-wide time source used by freshness calculations.
var clock timer = &realClock{}
+
// getFreshness will return one of fresh/stale/transparent based on the cache-control
// values of the request and the response
//
// fresh indicates the response can be returned
// stale indicates that the response needs validating before it is returned
// transparent indicates the response should not be used to fulfil the request
//
// Because this is only a private cache, 'public' and 'private' in cache-control aren't
// significant. Similarly, smax-age isn't used.
func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
	respCacheControl := parseCacheControl(respHeaders)
	reqCacheControl := parseCacheControl(reqHeaders)
	if _, ok := reqCacheControl["no-cache"]; ok {
		return transparent
	}
	if _, ok := respCacheControl["no-cache"]; ok {
		return stale
	}
	if _, ok := reqCacheControl["only-if-cached"]; ok {
		return fresh
	}

	// Without a Date header the age cannot be computed; treat as stale.
	date, err := Date(respHeaders)
	if err != nil {
		return stale
	}
	currentAge := clock.since(date)

	var lifetime time.Duration
	var zeroDuration time.Duration

	// If a response includes both an Expires header and a max-age directive,
	// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
	if maxAge, ok := respCacheControl["max-age"]; ok {
		// Unparsable max-age falls back to a zero lifetime (i.e. stale).
		lifetime, err = time.ParseDuration(maxAge + "s")
		if err != nil {
			lifetime = zeroDuration
		}
	} else {
		expiresHeader := respHeaders.Get("Expires")
		if expiresHeader != "" {
			expires, err := time.Parse(time.RFC1123, expiresHeader)
			if err != nil {
				lifetime = zeroDuration
			} else {
				lifetime = expires.Sub(date)
			}
		}
	}

	if maxAge, ok := reqCacheControl["max-age"]; ok {
		// the client is willing to accept a response whose age is no greater than the specified time in seconds
		lifetime, err = time.ParseDuration(maxAge + "s")
		if err != nil {
			lifetime = zeroDuration
		}
	}
	if minfresh, ok := reqCacheControl["min-fresh"]; ok {
		//  the client wants a response that will still be fresh for at least the specified number of seconds.
		minfreshDuration, err := time.ParseDuration(minfresh + "s")
		if err == nil {
			currentAge = time.Duration(currentAge + minfreshDuration)
		}
	}

	if maxstale, ok := reqCacheControl["max-stale"]; ok {
		// Indicates that the client is willing to accept a response that has exceeded its expiration time.
		// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
		// its expiration time by no more than the specified number of seconds.
		// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
		//
		// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
		// but that seems like a  hassle, and is it actually useful? If so, then there needs to be a different
		// return-value available here.
		if maxstale == "" {
			return fresh
		}
		maxstaleDuration, err := time.ParseDuration(maxstale + "s")
		if err == nil {
			currentAge = time.Duration(currentAge - maxstaleDuration)
		}
	}

	if lifetime > currentAge {
		return fresh
	}

	return stale
}
+
+// Returns true if either the request or the response includes the stale-if-error
+// cache control extension: https://tools.ietf.org/html/rfc5861
+func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
+	respCacheControl := parseCacheControl(respHeaders)
+	reqCacheControl := parseCacheControl(reqHeaders)
+
+	var err error
+	lifetime := time.Duration(-1)
+
+	if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
+		if staleMaxAge != "" {
+			lifetime, err = time.ParseDuration(staleMaxAge + "s")
+			if err != nil {
+				return false
+			}
+		} else {
+			return true
+		}
+	}
+	if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
+		if staleMaxAge != "" {
+			lifetime, err = time.ParseDuration(staleMaxAge + "s")
+			if err != nil {
+				return false
+			}
+		} else {
+			return true
+		}
+	}
+
+	if lifetime >= 0 {
+		date, err := Date(respHeaders)
+		if err != nil {
+			return false
+		}
+		currentAge := clock.since(date)
+		if lifetime > currentAge {
+			return true
+		}
+	}
+
+	return false
+}
+
+func getEndToEndHeaders(respHeaders http.Header) []string {
+	// These headers are always hop-by-hop
+	hopByHopHeaders := map[string]struct{}{
+		"Connection":          struct{}{},
+		"Keep-Alive":          struct{}{},
+		"Proxy-Authenticate":  struct{}{},
+		"Proxy-Authorization": struct{}{},
+		"Te":                struct{}{},
+		"Trailers":          struct{}{},
+		"Transfer-Encoding": struct{}{},
+		"Upgrade":           struct{}{},
+	}
+
+	for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
+		// any header listed in connection, if present, is also considered hop-by-hop
+		if strings.Trim(extra, " ") != "" {
+			hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
+		}
+	}
+	endToEndHeaders := []string{}
+	for respHeader, _ := range respHeaders {
+		if _, ok := hopByHopHeaders[respHeader]; !ok {
+			endToEndHeaders = append(endToEndHeaders, respHeader)
+		}
+	}
+	return endToEndHeaders
+}
+
+func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
+	if _, ok := respCacheControl["no-store"]; ok {
+		return false
+	}
+	if _, ok := reqCacheControl["no-store"]; ok {
+		return false
+	}
+	return true
+}
+
+func newGatewayTimeoutResponse(req *http.Request) *http.Response {
+	var braw bytes.Buffer
+	braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
+	resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
+	if err != nil {
+		panic(err)
+	}
+	return resp
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header)
+	for k, s := range r.Header {
+		r2.Header[k] = s
+	}
+	return r2
+}
+
+type cacheControl map[string]string
+
+func parseCacheControl(headers http.Header) cacheControl {
+	cc := cacheControl{}
+	ccHeader := headers.Get("Cache-Control")
+	for _, part := range strings.Split(ccHeader, ",") {
+		part = strings.Trim(part, " ")
+		if part == "" {
+			continue
+		}
+		if strings.ContainsRune(part, '=') {
+			keyval := strings.Split(part, "=")
+			cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
+		} else {
+			cc[part] = ""
+		}
+	}
+	return cc
+}
+
+// headerAllCommaSepValues returns all comma-separated values (each
+// with whitespace trimmed) for header name in headers. According to
+// Section 4.2 of the HTTP/1.1 spec
+// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
+// values from multiple occurrences of a header should be concatenated, if
+// the header's value is a comma-separated list.
+func headerAllCommaSepValues(headers http.Header, name string) []string {
+	var vals []string
+	for _, val := range headers[http.CanonicalHeaderKey(name)] {
+		fields := strings.Split(val, ",")
+		for i, f := range fields {
+			fields[i] = strings.TrimSpace(f)
+		}
+		vals = append(vals, fields...)
+	}
+	return vals
+}
+
// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF
// handler with a full copy of the content read from R when EOF is
// reached.
//
// NOTE: buf accumulates everything read from R, so memory use grows with the
// body size until EOF.
type cachingReadCloser struct {
	// Underlying ReadCloser.
	R io.ReadCloser
	// OnEOF is called with a copy of the content of R when EOF is reached.
	OnEOF func(io.Reader)

	buf bytes.Buffer // buf stores a copy of the content of R.
}
+
// Read reads the next len(p) bytes from R or until R is drained. The
// return value n is the number of bytes read. If R has no data to
// return, err is io.EOF and OnEOF is called with a full copy of what
// has been read so far.
//
// OnEOF fires only on io.EOF; other read errors are passed through without
// triggering the callback.
func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
	n, err = r.R.Read(p)
	r.buf.Write(p[:n])
	if err == io.EOF {
		r.OnEOF(bytes.NewReader(r.buf.Bytes()))
	}
	return n, err
}
+
// Close closes the underlying ReadCloser.
func (r *cachingReadCloser) Close() error {
	return r.R.Close()
}
+
+// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
+func NewMemoryCacheTransport() *Transport {
+	c := NewMemoryCache()
+	t := NewTransport(c)
+	return t
+}
diff --git a/vendor/github.com/howeyc/gopass/LICENSE.txt b/vendor/github.com/howeyc/gopass/LICENSE.txt
deleted file mode 100644
index 14f7470..0000000
--- a/vendor/github.com/howeyc/gopass/LICENSE.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-ISC License
-
-Copyright (c) 2012 Chris Howey
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/howeyc/gopass/pass.go b/vendor/github.com/howeyc/gopass/pass.go
deleted file mode 100644
index f5bd5a5..0000000
--- a/vendor/github.com/howeyc/gopass/pass.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package gopass
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"os"
-)
-
-type FdReader interface {
-	io.Reader
-	Fd() uintptr
-}
-
-var defaultGetCh = func(r io.Reader) (byte, error) {
-	buf := make([]byte, 1)
-	if n, err := r.Read(buf); n == 0 || err != nil {
-		if err != nil {
-			return 0, err
-		}
-		return 0, io.EOF
-	}
-	return buf[0], nil
-}
-
-var (
-	maxLength            = 512
-	ErrInterrupted       = errors.New("interrupted")
-	ErrMaxLengthExceeded = fmt.Errorf("maximum byte limit (%v) exceeded", maxLength)
-
-	// Provide variable so that tests can provide a mock implementation.
-	getch = defaultGetCh
-)
-
-// getPasswd returns the input read from terminal.
-// If prompt is not empty, it will be output as a prompt to the user
-// If masked is true, typing will be matched by asterisks on the screen.
-// Otherwise, typing will echo nothing.
-func getPasswd(prompt string, masked bool, r FdReader, w io.Writer) ([]byte, error) {
-	var err error
-	var pass, bs, mask []byte
-	if masked {
-		bs = []byte("\b \b")
-		mask = []byte("*")
-	}
-
-	if isTerminal(r.Fd()) {
-		if oldState, err := makeRaw(r.Fd()); err != nil {
-			return pass, err
-		} else {
-			defer func() {
-				restore(r.Fd(), oldState)
-				fmt.Fprintln(w)
-			}()
-		}
-	}
-
-	if prompt != "" {
-		fmt.Fprint(w, prompt)
-	}
-
-	// Track total bytes read, not just bytes in the password.  This ensures any
-	// errors that might flood the console with nil or -1 bytes infinitely are
-	// capped.
-	var counter int
-	for counter = 0; counter <= maxLength; counter++ {
-		if v, e := getch(r); e != nil {
-			err = e
-			break
-		} else if v == 127 || v == 8 {
-			if l := len(pass); l > 0 {
-				pass = pass[:l-1]
-				fmt.Fprint(w, string(bs))
-			}
-		} else if v == 13 || v == 10 {
-			break
-		} else if v == 3 {
-			err = ErrInterrupted
-			break
-		} else if v != 0 {
-			pass = append(pass, v)
-			fmt.Fprint(w, string(mask))
-		}
-	}
-
-	if counter > maxLength {
-		err = ErrMaxLengthExceeded
-	}
-
-	return pass, err
-}
-
-// GetPasswd returns the password read from the terminal without echoing input.
-// The returned byte array does not include end-of-line characters.
-func GetPasswd() ([]byte, error) {
-	return getPasswd("", false, os.Stdin, os.Stdout)
-}
-
-// GetPasswdMasked returns the password read from the terminal, echoing asterisks.
-// The returned byte array does not include end-of-line characters.
-func GetPasswdMasked() ([]byte, error) {
-	return getPasswd("", true, os.Stdin, os.Stdout)
-}
-
-// GetPasswdPrompt prompts the user and returns the password read from the terminal.
-// If mask is true, then asterisks are echoed.
-// The returned byte array does not include end-of-line characters.
-func GetPasswdPrompt(prompt string, mask bool, r FdReader, w io.Writer) ([]byte, error) {
-	return getPasswd(prompt, mask, r, w)
-}
diff --git a/vendor/github.com/howeyc/gopass/terminal.go b/vendor/github.com/howeyc/gopass/terminal.go
deleted file mode 100644
index 0835641..0000000
--- a/vendor/github.com/howeyc/gopass/terminal.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// +build !solaris
-
-package gopass
-
-import "golang.org/x/crypto/ssh/terminal"
-
-type terminalState struct {
-	state *terminal.State
-}
-
-func isTerminal(fd uintptr) bool {
-	return terminal.IsTerminal(int(fd))
-}
-
-func makeRaw(fd uintptr) (*terminalState, error) {
-	state, err := terminal.MakeRaw(int(fd))
-
-	return &terminalState{
-		state: state,
-	}, err
-}
-
-func restore(fd uintptr, oldState *terminalState) error {
-	return terminal.Restore(int(fd), oldState.state)
-}
diff --git a/vendor/github.com/howeyc/gopass/terminal_solaris.go b/vendor/github.com/howeyc/gopass/terminal_solaris.go
deleted file mode 100644
index 257e1b4..0000000
--- a/vendor/github.com/howeyc/gopass/terminal_solaris.go
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License").  You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-// Below is derived from Solaris source, so CDDL license is included.
-
-package gopass
-
-import (
-	"syscall"
-
-	"golang.org/x/sys/unix"
-)
-
-type terminalState struct {
-	state *unix.Termios
-}
-
-// isTerminal returns true if there is a terminal attached to the given
-// file descriptor.
-// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
-func isTerminal(fd uintptr) bool {
-	var termio unix.Termio
-	err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
-	return err == nil
-}
-
-// makeRaw puts the terminal connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be
-// restored.
-// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c
-func makeRaw(fd uintptr) (*terminalState, error) {
-	oldTermiosPtr, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
-	if err != nil {
-		return nil, err
-	}
-	oldTermios := *oldTermiosPtr
-
-	newTermios := oldTermios
-	newTermios.Lflag &^= syscall.ECHO | syscall.ECHOE | syscall.ECHOK | syscall.ECHONL
-	if err := unix.IoctlSetTermios(int(fd), unix.TCSETS, &newTermios); err != nil {
-		return nil, err
-	}
-
-	return &terminalState{
-		state: oldTermiosPtr,
-	}, nil
-}
-
-func restore(fd uintptr, oldState *terminalState) error {
-	return unix.IoctlSetTermios(int(fd), unix.TCSETS, oldState.state)
-}
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/license b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
similarity index 100%
rename from vendor/github.com/konsorten/go-windows-terminal-sequences/license
rename to vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/k8sclient/client.go b/vendor/github.com/operator-framework/operator-sdk/pkg/k8sclient/client.go
index fc78f67..466bd3b 100644
--- a/vendor/github.com/operator-framework/operator-sdk/pkg/k8sclient/client.go
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/k8sclient/client.go
@@ -23,22 +23,20 @@ import (
 
 	"github.com/operator-framework/operator-sdk/pkg/util/k8sutil"
 
-	"k8s.io/apimachinery/pkg/api/meta"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/client-go/discovery"
 	"k8s.io/client-go/discovery/cached"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
+	"k8s.io/client-go/restmapper"
 	"k8s.io/client-go/tools/clientcmd"
 )
 
 type resourceClientFactory struct {
-	restMapper *discovery.DeferredDiscoveryRESTMapper
-	clientPool dynamic.ClientPool
-	kubeClient kubernetes.Interface
-	kubeConfig *rest.Config
+	dynamicClient dynamic.Interface
+	restMapper    *restmapper.DeferredDiscoveryRESTMapper
+	kubeClient    kubernetes.Interface
+	kubeConfig    *rest.Config
 }
 
 var (
@@ -51,16 +49,19 @@ var (
 func newSingletonFactory() {
 	kubeClient, kubeConfig := mustNewKubeClientAndConfig()
 	cachedDiscoveryClient := cached.NewMemCacheClient(kubeClient.Discovery())
-	restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscoveryClient, meta.InterfacesForUnstructured)
+	restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoveryClient)
 	restMapper.Reset()
-	kubeConfig.ContentConfig = dynamic.ContentConfig()
-	clientPool := dynamic.NewClientPool(kubeConfig, restMapper, dynamic.LegacyAPIPathResolverFunc)
+
+	dynamicClient, err := dynamic.NewForConfig(kubeConfig)
+	if err != nil {
+		panic(err)
+	}
 
 	singletonFactory = &resourceClientFactory{
-		kubeClient: kubeClient,
-		kubeConfig: kubeConfig,
-		restMapper: restMapper,
-		clientPool: clientPool,
+		kubeClient:    kubeClient,
+		kubeConfig:    kubeConfig,
+		dynamicClient: dynamicClient,
+		restMapper:    restMapper,
 	}
 	singletonFactory.runBackgroundCacheReset(1 * time.Minute)
 }
@@ -95,31 +96,23 @@ func (c *resourceClientFactory) GetResourceClient(apiVersion, kind, namespace st
 		Kind:    kind,
 	}
 
-	client, err := c.clientPool.ClientForGroupVersionKind(gvk)
-	if err != nil {
-		return nil, "", fmt.Errorf("failed to get client for GroupVersionKind(%s): %v", gvk.String(), err)
-	}
-	resource, err := apiResource(gvk, c.restMapper)
+	gvr, err := gvkToGVR(gvk, c.restMapper)
 	if err != nil {
 		return nil, "", fmt.Errorf("failed to get resource type: %v", err)
 	}
-	pluralName := resource.Name
-	resourceClient := client.Resource(resource, namespace)
+	pluralName := gvr.Resource
+
+	resourceClient := c.dynamicClient.Resource(*gvr).Namespace(namespace)
 	return resourceClient, pluralName, nil
 }
 
-// apiResource consults the REST mapper to translate an <apiVersion, kind, namespace> tuple to a metav1.APIResource struct.
-func apiResource(gvk schema.GroupVersionKind, restMapper *discovery.DeferredDiscoveryRESTMapper) (*metav1.APIResource, error) {
+// apiResource consults the REST mapper to translate an <apiVersion, kind, namespace> tuple to a GroupVersionResource
+func gvkToGVR(gvk schema.GroupVersionKind, restMapper *restmapper.DeferredDiscoveryRESTMapper) (*schema.GroupVersionResource, error) {
 	mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
 	if err != nil {
 		return nil, fmt.Errorf("failed to get the resource REST mapping for GroupVersionKind(%s): %v", gvk.String(), err)
 	}
-	resource := &metav1.APIResource{
-		Name:       mapping.Resource,
-		Namespaced: mapping.Scope == meta.RESTScopeNamespace,
-		Kind:       gvk.Kind,
-	}
-	return resource, nil
+	return &mapping.Resource, nil
 }
 
 // mustNewKubeClientAndConfig returns the in-cluster config and kubernetes client
diff --git a/vendor/github.com/operator-framework/operator-sdk/version/version.go b/vendor/github.com/operator-framework/operator-sdk/version/version.go
index 6f7a8f0..617efb3 100644
--- a/vendor/github.com/operator-framework/operator-sdk/version/version.go
+++ b/vendor/github.com/operator-framework/operator-sdk/version/version.go
@@ -15,5 +15,5 @@
 package version
 
 var (
-	Version = "0.0.6"
+	Version = "0.0.7"
 )
diff --git a/vendor/github.com/petar/GoLLRB/AUTHORS b/vendor/github.com/petar/GoLLRB/AUTHORS
new file mode 100644
index 0000000..78d1de4
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/AUTHORS
@@ -0,0 +1,4 @@
+Petar Maymounkov <pe...@5ttt.org>
+Vadim Vygonets <va...@vygo.net>
+Ian Smith <ia...@acm.org>
+Martin Bruse
diff --git a/vendor/github.com/petar/GoLLRB/LICENSE b/vendor/github.com/petar/GoLLRB/LICENSE
new file mode 100644
index 0000000..b75312c
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2010, Petar Maymounkov
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+(*) Redistributions of source code must retain the above copyright notice, this list
+of conditions and the following disclaimer.
+
+(*) Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+(*) Neither the name of Petar Maymounkov nor the names of its contributors may be
+used to endorse or promote products derived from this software without specific
+prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/petar/GoLLRB/llrb/avgvar.go b/vendor/github.com/petar/GoLLRB/llrb/avgvar.go
new file mode 100644
index 0000000..2d7e2a3
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/llrb/avgvar.go
@@ -0,0 +1,39 @@
+// Copyright 2010 Petar Maymounkov. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package llrb
+
+import "math"
+
+// avgVar maintains the average and variance of a stream of numbers
+// in a space-efficient manner.
+type avgVar struct {
+	count      int64
+	sum, sumsq float64
+}
+
+func (av *avgVar) Init() {
+	av.count = 0
+	av.sum = 0.0
+	av.sumsq = 0.0
+}
+
+func (av *avgVar) Add(sample float64) {
+	av.count++
+	av.sum += sample
+	av.sumsq += sample * sample
+}
+
+func (av *avgVar) GetCount() int64 { return av.count }
+
+func (av *avgVar) GetAvg() float64 { return av.sum / float64(av.count) }
+
+func (av *avgVar) GetTotal() float64 { return av.sum }
+
+func (av *avgVar) GetVar() float64 {
+	a := av.GetAvg()
+	return av.sumsq/float64(av.count) - a*a
+}
+
+func (av *avgVar) GetStdDev() float64 { return math.Sqrt(av.GetVar()) }
diff --git a/vendor/github.com/petar/GoLLRB/llrb/iterator.go b/vendor/github.com/petar/GoLLRB/llrb/iterator.go
new file mode 100644
index 0000000..ee7b27f
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/llrb/iterator.go
@@ -0,0 +1,93 @@
+package llrb
+
+type ItemIterator func(i Item) bool
+
+//func (t *Tree) Ascend(iterator ItemIterator) {
+//	t.AscendGreaterOrEqual(Inf(-1), iterator)
+//}
+
+func (t *LLRB) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
+	t.ascendRange(t.root, greaterOrEqual, lessThan, iterator)
+}
+
+func (t *LLRB) ascendRange(h *Node, inf, sup Item, iterator ItemIterator) bool {
+	if h == nil {
+		return true
+	}
+	if !less(h.Item, sup) {
+		return t.ascendRange(h.Left, inf, sup, iterator)
+	}
+	if less(h.Item, inf) {
+		return t.ascendRange(h.Right, inf, sup, iterator)
+	}
+
+	if !t.ascendRange(h.Left, inf, sup, iterator) {
+		return false
+	}
+	if !iterator(h.Item) {
+		return false
+	}
+	return t.ascendRange(h.Right, inf, sup, iterator)
+}
+
+// AscendGreaterOrEqual will call iterator once for each element greater or equal to
+// pivot in ascending order. It will stop whenever the iterator returns false.
+func (t *LLRB) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
+	t.ascendGreaterOrEqual(t.root, pivot, iterator)
+}
+
+func (t *LLRB) ascendGreaterOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
+	if h == nil {
+		return true
+	}
+	if !less(h.Item, pivot) {
+		if !t.ascendGreaterOrEqual(h.Left, pivot, iterator) {
+			return false
+		}
+		if !iterator(h.Item) {
+			return false
+		}
+	}
+	return t.ascendGreaterOrEqual(h.Right, pivot, iterator)
+}
+
+func (t *LLRB) AscendLessThan(pivot Item, iterator ItemIterator) {
+	t.ascendLessThan(t.root, pivot, iterator)
+}
+
+func (t *LLRB) ascendLessThan(h *Node, pivot Item, iterator ItemIterator) bool {
+	if h == nil {
+		return true
+	}
+	if !t.ascendLessThan(h.Left, pivot, iterator) {
+		return false
+	}
+	if !iterator(h.Item) {
+		return false
+	}
+	if less(h.Item, pivot) {
+		return t.ascendLessThan(h.Left, pivot, iterator)
+	}
+	return true
+}
+
+// DescendLessOrEqual will call iterator once for each element less than the
+// pivot in descending order. It will stop whenever the iterator returns false.
+func (t *LLRB) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
+	t.descendLessOrEqual(t.root, pivot, iterator)
+}
+
+func (t *LLRB) descendLessOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
+	if h == nil {
+		return true
+	}
+	if less(h.Item, pivot) || !less(pivot, h.Item) {
+		if !t.descendLessOrEqual(h.Right, pivot, iterator) {
+			return false
+		}
+		if !iterator(h.Item) {
+			return false
+		}
+	}
+	return t.descendLessOrEqual(h.Left, pivot, iterator)
+}
diff --git a/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go b/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go
new file mode 100644
index 0000000..47126a3
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go
@@ -0,0 +1,46 @@
+// Copyright 2010 Petar Maymounkov. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package llrb
+
+// GetHeight() returns an item in the tree with key @key, and it's height in the tree
+func (t *LLRB) GetHeight(key Item) (result Item, depth int) {
+	return t.getHeight(t.root, key)
+}
+
+func (t *LLRB) getHeight(h *Node, item Item) (Item, int) {
+	if h == nil {
+		return nil, 0
+	}
+	if less(item, h.Item) {
+		result, depth := t.getHeight(h.Left, item)
+		return result, depth + 1
+	}
+	if less(h.Item, item) {
+		result, depth := t.getHeight(h.Right, item)
+		return result, depth + 1
+	}
+	return h.Item, 0
+}
+
+// HeightStats() returns the average and standard deviation of the height
+// of elements in the tree
+func (t *LLRB) HeightStats() (avg, stddev float64) {
+	av := &avgVar{}
+	heightStats(t.root, 0, av)
+	return av.GetAvg(), av.GetStdDev()
+}
+
+func heightStats(h *Node, d int, av *avgVar) {
+	if h == nil {
+		return
+	}
+	av.Add(float64(d))
+	if h.Left != nil {
+		heightStats(h.Left, d+1, av)
+	}
+	if h.Right != nil {
+		heightStats(h.Right, d+1, av)
+	}
+}
diff --git a/vendor/github.com/petar/GoLLRB/llrb/llrb.go b/vendor/github.com/petar/GoLLRB/llrb/llrb.go
new file mode 100644
index 0000000..81373fb
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/llrb/llrb.go
@@ -0,0 +1,456 @@
+// Copyright 2010 Petar Maymounkov. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// A Left-Leaning Red-Black (LLRB) implementation of 2-3 balanced binary search trees,
+// based on the following work:
+//
+//   http://www.cs.princeton.edu/~rs/talks/LLRB/08Penn.pdf
+//   http://www.cs.princeton.edu/~rs/talks/LLRB/LLRB.pdf
+//   http://www.cs.princeton.edu/~rs/talks/LLRB/Java/RedBlackBST.java
+//
+//  2-3 trees (and the run-time equivalent 2-3-4 trees) are the de facto standard BST
+//  algoritms found in implementations of Python, Java, and other libraries. The LLRB
+//  implementation of 2-3 trees is a recent improvement on the traditional implementation,
+//  observed and documented by Robert Sedgewick.
+//
+package llrb
+
+// Tree is a Left-Leaning Red-Black (LLRB) implementation of 2-3 trees
+type LLRB struct {
+	count int
+	root  *Node
+}
+
+type Node struct {
+	Item
+	Left, Right *Node // Pointers to left and right child nodes
+	Black       bool  // If set, the color of the link (incoming from the parent) is black
+	// In the LLRB, new nodes are always red, hence the zero-value for node
+}
+
+type Item interface {
+	Less(than Item) bool
+}
+
+//
+func less(x, y Item) bool {
+	if x == pinf {
+		return false
+	}
+	if x == ninf {
+		return true
+	}
+	return x.Less(y)
+}
+
+// Inf returns an Item that is "bigger than" any other item, if sign is positive.
+// Otherwise  it returns an Item that is "smaller than" any other item.
+func Inf(sign int) Item {
+	if sign == 0 {
+		panic("sign")
+	}
+	if sign > 0 {
+		return pinf
+	}
+	return ninf
+}
+
+var (
+	ninf = nInf{}
+	pinf = pInf{}
+)
+
+type nInf struct{}
+
+func (nInf) Less(Item) bool {
+	return true
+}
+
+type pInf struct{}
+
+func (pInf) Less(Item) bool {
+	return false
+}
+
+// New() allocates a new tree
+func New() *LLRB {
+	return &LLRB{}
+}
+
+// SetRoot sets the root node of the tree.
+// It is intended to be used by functions that deserialize the tree.
+func (t *LLRB) SetRoot(r *Node) {
+	t.root = r
+}
+
+// Root returns the root node of the tree.
+// It is intended to be used by functions that serialize the tree.
+func (t *LLRB) Root() *Node {
+	return t.root
+}
+
+// Len returns the number of nodes in the tree.
+func (t *LLRB) Len() int { return t.count }
+
+// Has returns true if the tree contains an element whose order is the same as that of key.
+func (t *LLRB) Has(key Item) bool {
+	return t.Get(key) != nil
+}
+
+// Get retrieves an element from the tree whose order is the same as that of key.
+func (t *LLRB) Get(key Item) Item {
+	h := t.root
+	for h != nil {
+		switch {
+		case less(key, h.Item):
+			h = h.Left
+		case less(h.Item, key):
+			h = h.Right
+		default:
+			return h.Item
+		}
+	}
+	return nil
+}
+
+// Min returns the minimum element in the tree.
+func (t *LLRB) Min() Item {
+	h := t.root
+	if h == nil {
+		return nil
+	}
+	for h.Left != nil {
+		h = h.Left
+	}
+	return h.Item
+}
+
+// Max returns the maximum element in the tree.
+func (t *LLRB) Max() Item {
+	h := t.root
+	if h == nil {
+		return nil
+	}
+	for h.Right != nil {
+		h = h.Right
+	}
+	return h.Item
+}
+
+func (t *LLRB) ReplaceOrInsertBulk(items ...Item) {
+	for _, i := range items {
+		t.ReplaceOrInsert(i)
+	}
+}
+
+func (t *LLRB) InsertNoReplaceBulk(items ...Item) {
+	for _, i := range items {
+		t.InsertNoReplace(i)
+	}
+}
+
+// ReplaceOrInsert inserts item into the tree. If an existing
+// element has the same order, it is removed from the tree and returned.
+func (t *LLRB) ReplaceOrInsert(item Item) Item {
+	if item == nil {
+		panic("inserting nil item")
+	}
+	var replaced Item
+	t.root, replaced = t.replaceOrInsert(t.root, item)
+	t.root.Black = true
+	if replaced == nil {
+		t.count++
+	}
+	return replaced
+}
+
+func (t *LLRB) replaceOrInsert(h *Node, item Item) (*Node, Item) {
+	if h == nil {
+		return newNode(item), nil
+	}
+
+	h = walkDownRot23(h)
+
+	var replaced Item
+	if less(item, h.Item) { // BUG
+		h.Left, replaced = t.replaceOrInsert(h.Left, item)
+	} else if less(h.Item, item) {
+		h.Right, replaced = t.replaceOrInsert(h.Right, item)
+	} else {
+		replaced, h.Item = h.Item, item
+	}
+
+	h = walkUpRot23(h)
+
+	return h, replaced
+}
+
+// InsertNoReplace inserts item into the tree. If an existing
+// element has the same order, both elements remain in the tree.
+func (t *LLRB) InsertNoReplace(item Item) {
+	if item == nil {
+		panic("inserting nil item")
+	}
+	t.root = t.insertNoReplace(t.root, item)
+	t.root.Black = true
+	t.count++
+}
+
+func (t *LLRB) insertNoReplace(h *Node, item Item) *Node {
+	if h == nil {
+		return newNode(item)
+	}
+
+	h = walkDownRot23(h)
+
+	if less(item, h.Item) {
+		h.Left = t.insertNoReplace(h.Left, item)
+	} else {
+		h.Right = t.insertNoReplace(h.Right, item)
+	}
+
+	return walkUpRot23(h)
+}
+
+// Rotation driver routines for 2-3 algorithm
+
+func walkDownRot23(h *Node) *Node { return h }
+
+func walkUpRot23(h *Node) *Node {
+	if isRed(h.Right) && !isRed(h.Left) {
+		h = rotateLeft(h)
+	}
+
+	if isRed(h.Left) && isRed(h.Left.Left) {
+		h = rotateRight(h)
+	}
+
+	if isRed(h.Left) && isRed(h.Right) {
+		flip(h)
+	}
+
+	return h
+}
+
+// Rotation driver routines for 2-3-4 algorithm
+
+func walkDownRot234(h *Node) *Node {
+	if isRed(h.Left) && isRed(h.Right) {
+		flip(h)
+	}
+
+	return h
+}
+
+func walkUpRot234(h *Node) *Node {
+	if isRed(h.Right) && !isRed(h.Left) {
+		h = rotateLeft(h)
+	}
+
+	if isRed(h.Left) && isRed(h.Left.Left) {
+		h = rotateRight(h)
+	}
+
+	return h
+}
+
+// DeleteMin deletes the minimum element in the tree and returns the
+// deleted item or nil otherwise.
+func (t *LLRB) DeleteMin() Item {
+	var deleted Item
+	t.root, deleted = deleteMin(t.root)
+	if t.root != nil {
+		t.root.Black = true
+	}
+	if deleted != nil {
+		t.count--
+	}
+	return deleted
+}
+
+// deleteMin code for LLRB 2-3 trees
+func deleteMin(h *Node) (*Node, Item) {
+	if h == nil {
+		return nil, nil
+	}
+	if h.Left == nil {
+		return nil, h.Item
+	}
+
+	if !isRed(h.Left) && !isRed(h.Left.Left) {
+		h = moveRedLeft(h)
+	}
+
+	var deleted Item
+	h.Left, deleted = deleteMin(h.Left)
+
+	return fixUp(h), deleted
+}
+
+// DeleteMax deletes the maximum element in the tree and returns
+// the deleted item or nil otherwise
+func (t *LLRB) DeleteMax() Item {
+	var deleted Item
+	t.root, deleted = deleteMax(t.root)
+	if t.root != nil {
+		t.root.Black = true
+	}
+	if deleted != nil {
+		t.count--
+	}
+	return deleted
+}
+
+func deleteMax(h *Node) (*Node, Item) {
+	if h == nil {
+		return nil, nil
+	}
+	if isRed(h.Left) {
+		h = rotateRight(h)
+	}
+	if h.Right == nil {
+		return nil, h.Item
+	}
+	if !isRed(h.Right) && !isRed(h.Right.Left) {
+		h = moveRedRight(h)
+	}
+	var deleted Item
+	h.Right, deleted = deleteMax(h.Right)
+
+	return fixUp(h), deleted
+}
+
+// Delete deletes an item from the tree whose key equals key.
+// The deleted item is return, otherwise nil is returned.
+func (t *LLRB) Delete(key Item) Item {
+	var deleted Item
+	t.root, deleted = t.delete(t.root, key)
+	if t.root != nil {
+		t.root.Black = true
+	}
+	if deleted != nil {
+		t.count--
+	}
+	return deleted
+}
+
+func (t *LLRB) delete(h *Node, item Item) (*Node, Item) {
+	var deleted Item
+	if h == nil {
+		return nil, nil
+	}
+	if less(item, h.Item) {
+		if h.Left == nil { // item not present. Nothing to delete
+			return h, nil
+		}
+		if !isRed(h.Left) && !isRed(h.Left.Left) {
+			h = moveRedLeft(h)
+		}
+		h.Left, deleted = t.delete(h.Left, item)
+	} else {
+		if isRed(h.Left) {
+			h = rotateRight(h)
+		}
+		// If @item equals @h.Item and no right children at @h
+		if !less(h.Item, item) && h.Right == nil {
+			return nil, h.Item
+		}
+		// PETAR: Added 'h.Right != nil' below
+		if h.Right != nil && !isRed(h.Right) && !isRed(h.Right.Left) {
+			h = moveRedRight(h)
+		}
+		// If @item equals @h.Item, and (from above) 'h.Right != nil'
+		if !less(h.Item, item) {
+			var subDeleted Item
+			h.Right, subDeleted = deleteMin(h.Right)
+			if subDeleted == nil {
+				panic("logic")
+			}
+			deleted, h.Item = h.Item, subDeleted
+		} else { // Else, @item is bigger than @h.Item
+			h.Right, deleted = t.delete(h.Right, item)
+		}
+	}
+
+	return fixUp(h), deleted
+}
+
+// Internal node manipulation routines
+
+func newNode(item Item) *Node { return &Node{Item: item} }
+
+func isRed(h *Node) bool {
+	if h == nil {
+		return false
+	}
+	return !h.Black
+}
+
+func rotateLeft(h *Node) *Node {
+	x := h.Right
+	if x.Black {
+		panic("rotating a black link")
+	}
+	h.Right = x.Left
+	x.Left = h
+	x.Black = h.Black
+	h.Black = false
+	return x
+}
+
+func rotateRight(h *Node) *Node {
+	x := h.Left
+	if x.Black {
+		panic("rotating a black link")
+	}
+	h.Left = x.Right
+	x.Right = h
+	x.Black = h.Black
+	h.Black = false
+	return x
+}
+
+// REQUIRE: Left and Right children must be present
+func flip(h *Node) {
+	h.Black = !h.Black
+	h.Left.Black = !h.Left.Black
+	h.Right.Black = !h.Right.Black
+}
+
+// REQUIRE: Left and Right children must be present
+func moveRedLeft(h *Node) *Node {
+	flip(h)
+	if isRed(h.Right.Left) {
+		h.Right = rotateRight(h.Right)
+		h = rotateLeft(h)
+		flip(h)
+	}
+	return h
+}
+
+// REQUIRE: Left and Right children must be present
+func moveRedRight(h *Node) *Node {
+	flip(h)
+	if isRed(h.Left.Left) {
+		h = rotateRight(h)
+		flip(h)
+	}
+	return h
+}
+
+func fixUp(h *Node) *Node {
+	if isRed(h.Right) {
+		h = rotateLeft(h)
+	}
+
+	if isRed(h.Left) && isRed(h.Left.Left) {
+		h = rotateRight(h)
+	}
+
+	if isRed(h.Left) && isRed(h.Right) {
+		flip(h)
+	}
+
+	return h
+}
diff --git a/vendor/github.com/petar/GoLLRB/llrb/util.go b/vendor/github.com/petar/GoLLRB/llrb/util.go
new file mode 100644
index 0000000..63dbdb2
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/llrb/util.go
@@ -0,0 +1,17 @@
+// Copyright 2010 Petar Maymounkov. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package llrb
+
+type Int int
+
+func (x Int) Less(than Item) bool {
+	return x < than.(Int)
+}
+
+type String string
+
+func (x String) Less(than Item) bool {
+	return x < than.(String)
+}
diff --git a/vendor/github.com/peterbourgon/diskv/LICENSE b/vendor/github.com/peterbourgon/diskv/LICENSE
new file mode 100644
index 0000000..41ce7f1
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2011-2012 Peter Bourgon
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/peterbourgon/diskv/compression.go b/vendor/github.com/peterbourgon/diskv/compression.go
new file mode 100644
index 0000000..5192b02
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/compression.go
@@ -0,0 +1,64 @@
+package diskv
+
+import (
+	"compress/flate"
+	"compress/gzip"
+	"compress/zlib"
+	"io"
+)
+
+// Compression is an interface that Diskv uses to implement compression of
+// data. Writer takes a destination io.Writer and returns a WriteCloser that
+// compresses all data written through it. Reader takes a source io.Reader and
+// returns a ReadCloser that decompresses all data read through it. You may
+// define these methods on your own type, or use one of the NewCompression
+// helpers.
+type Compression interface {
+	Writer(dst io.Writer) (io.WriteCloser, error)
+	Reader(src io.Reader) (io.ReadCloser, error)
+}
+
+// NewGzipCompression returns a Gzip-based Compression.
+func NewGzipCompression() Compression {
+	return NewGzipCompressionLevel(flate.DefaultCompression)
+}
+
+// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
+func NewGzipCompressionLevel(level int) Compression {
+	return &genericCompression{
+		wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
+		rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
+	}
+}
+
+// NewZlibCompression returns a Zlib-based Compression.
+func NewZlibCompression() Compression {
+	return NewZlibCompressionLevel(flate.DefaultCompression)
+}
+
+// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
+func NewZlibCompressionLevel(level int) Compression {
+	return NewZlibCompressionLevelDict(level, nil)
+}
+
+// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
+// level, based on the given dictionary.
+func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
+	return &genericCompression{
+		func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
+		func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
+	}
+}
+
+type genericCompression struct {
+	wf func(w io.Writer) (io.WriteCloser, error)
+	rf func(r io.Reader) (io.ReadCloser, error)
+}
+
+func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
+	return g.wf(dst)
+}
+
+func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
+	return g.rf(src)
+}
diff --git a/vendor/github.com/peterbourgon/diskv/diskv.go b/vendor/github.com/peterbourgon/diskv/diskv.go
new file mode 100644
index 0000000..524dc0a
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/diskv.go
@@ -0,0 +1,624 @@
+// Diskv (disk-vee) is a simple, persistent, key-value store.
+// It stores all data flatly on the filesystem.
+
+package diskv
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"syscall"
+)
+
+const (
+	defaultBasePath             = "diskv"
+	defaultFilePerm os.FileMode = 0666
+	defaultPathPerm os.FileMode = 0777
+)
+
+var (
+	defaultTransform   = func(s string) []string { return []string{} }
+	errCanceled        = errors.New("canceled")
+	errEmptyKey        = errors.New("empty key")
+	errBadKey          = errors.New("bad key")
+	errImportDirectory = errors.New("can't import a directory")
+)
+
+// TransformFunction transforms a key into a slice of strings, with each
+// element in the slice representing a directory in the file path where the
+// key's entry will eventually be stored.
+//
+// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
+// the final location of the data file will be <basedir>/ab/cde/f/abcdef
+type TransformFunction func(s string) []string
+
+// Options define a set of properties that dictate Diskv behavior.
+// All values are optional.
+type Options struct {
+	BasePath     string
+	Transform    TransformFunction
+	CacheSizeMax uint64 // bytes
+	PathPerm     os.FileMode
+	FilePerm     os.FileMode
+	// If TempDir is set, it will enable filesystem atomic writes by
+	// writing temporary files to that location before being moved
+	// to BasePath.
+	// Note that TempDir MUST be on the same device/partition as
+	// BasePath.
+	TempDir string
+
+	Index     Index
+	IndexLess LessFunction
+
+	Compression Compression
+}
+
+// Diskv implements the Diskv interface. You shouldn't construct Diskv
+// structures directly; instead, use the New constructor.
+type Diskv struct {
+	Options
+	mu        sync.RWMutex
+	cache     map[string][]byte
+	cacheSize uint64
+}
+
+// New returns an initialized Diskv structure, ready to use.
+// If the path identified by baseDir already contains data,
+// it will be accessible, but not yet cached.
+func New(o Options) *Diskv {
+	if o.BasePath == "" {
+		o.BasePath = defaultBasePath
+	}
+	if o.Transform == nil {
+		o.Transform = defaultTransform
+	}
+	if o.PathPerm == 0 {
+		o.PathPerm = defaultPathPerm
+	}
+	if o.FilePerm == 0 {
+		o.FilePerm = defaultFilePerm
+	}
+
+	d := &Diskv{
+		Options:   o,
+		cache:     map[string][]byte{},
+		cacheSize: 0,
+	}
+
+	if d.Index != nil && d.IndexLess != nil {
+		d.Index.Initialize(d.IndexLess, d.Keys(nil))
+	}
+
+	return d
+}
+
+// Write synchronously writes the key-value pair to disk, making it immediately
+// available for reads. Write relies on the filesystem to perform an eventual
+// sync to physical media. If you need stronger guarantees, see WriteStream.
+func (d *Diskv) Write(key string, val []byte) error {
+	return d.WriteStream(key, bytes.NewBuffer(val), false)
+}
+
+// WriteStream writes the data represented by the io.Reader to the disk, under
+// the provided key. If sync is true, WriteStream performs an explicit sync on
+// the file as soon as it's written.
+//
+// bytes.Buffer provides io.Reader semantics for basic data types.
+func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
+	if len(key) <= 0 {
+		return errEmptyKey
+	}
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	return d.writeStreamWithLock(key, r, sync)
+}
+
+// createKeyFileWithLock either creates the key file directly, or
+// creates a temporary file in TempDir if it is set.
+func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) {
+	if d.TempDir != "" {
+		if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
+			return nil, fmt.Errorf("temp mkdir: %s", err)
+		}
+		f, err := ioutil.TempFile(d.TempDir, "")
+		if err != nil {
+			return nil, fmt.Errorf("temp file: %s", err)
+		}
+
+		if err := f.Chmod(d.FilePerm); err != nil {
+			f.Close()           // error deliberately ignored
+			os.Remove(f.Name()) // error deliberately ignored
+			return nil, fmt.Errorf("chmod: %s", err)
+		}
+		return f, nil
+	}
+
+	mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
+	f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)
+	if err != nil {
+		return nil, fmt.Errorf("open file: %s", err)
+	}
+	return f, nil
+}
+
+// writeStream does no input validation checking.
+func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error {
+	if err := d.ensurePathWithLock(key); err != nil {
+		return fmt.Errorf("ensure path: %s", err)
+	}
+
+	f, err := d.createKeyFileWithLock(key)
+	if err != nil {
+		return fmt.Errorf("create key file: %s", err)
+	}
+
+	wc := io.WriteCloser(&nopWriteCloser{f})
+	if d.Compression != nil {
+		wc, err = d.Compression.Writer(f)
+		if err != nil {
+			f.Close()           // error deliberately ignored
+			os.Remove(f.Name()) // error deliberately ignored
+			return fmt.Errorf("compression writer: %s", err)
+		}
+	}
+
+	if _, err := io.Copy(wc, r); err != nil {
+		f.Close()           // error deliberately ignored
+		os.Remove(f.Name()) // error deliberately ignored
+		return fmt.Errorf("i/o copy: %s", err)
+	}
+
+	if err := wc.Close(); err != nil {
+		f.Close()           // error deliberately ignored
+		os.Remove(f.Name()) // error deliberately ignored
+		return fmt.Errorf("compression close: %s", err)
+	}
+
+	if sync {
+		if err := f.Sync(); err != nil {
+			f.Close()           // error deliberately ignored
+			os.Remove(f.Name()) // error deliberately ignored
+			return fmt.Errorf("file sync: %s", err)
+		}
+	}
+
+	if err := f.Close(); err != nil {
+		return fmt.Errorf("file close: %s", err)
+	}
+
+	if f.Name() != d.completeFilename(key) {
+		if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil {
+			os.Remove(f.Name()) // error deliberately ignored
+			return fmt.Errorf("rename: %s", err)
+		}
+	}
+
+	if d.Index != nil {
+		d.Index.Insert(key)
+	}
+
+	d.bustCacheWithLock(key) // cache only on read
+
+	return nil
+}
+
+// Import imports the source file into diskv under the destination key. If the
+// destination key already exists, it's overwritten. If move is true, the
+// source file is removed after a successful import.
+func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
+	if dstKey == "" {
+		return errEmptyKey
+	}
+
+	if fi, err := os.Stat(srcFilename); err != nil {
+		return err
+	} else if fi.IsDir() {
+		return errImportDirectory
+	}
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	if err := d.ensurePathWithLock(dstKey); err != nil {
+		return fmt.Errorf("ensure path: %s", err)
+	}
+
+	if move {
+		if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil {
+			d.bustCacheWithLock(dstKey)
+			return nil
+		} else if err != syscall.EXDEV {
+			// If it failed due to being on a different device, fall back to copying
+			return err
+		}
+	}
+
+	f, err := os.Open(srcFilename)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	err = d.writeStreamWithLock(dstKey, f, false)
+	if err == nil && move {
+		err = os.Remove(srcFilename)
+	}
+	return err
+}
+
+// Read reads the key and returns the value.
+// If the key is available in the cache, Read won't touch the disk.
+// If the key is not in the cache, Read will have the side-effect of
+// lazily caching the value.
+func (d *Diskv) Read(key string) ([]byte, error) {
+	rc, err := d.ReadStream(key, false)
+	if err != nil {
+		return []byte{}, err
+	}
+	defer rc.Close()
+	return ioutil.ReadAll(rc)
+}
+
+// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
+// If the value is cached from a previous read, and direct is false,
+// ReadStream will use the cached value. Otherwise, it will return a handle to
+// the file on disk, and cache the data on read.
+//
+// If direct is true, ReadStream will lazily delete any cached value for the
+// key, and return a direct handle to the file on disk.
+//
+// If compression is enabled, ReadStream taps into the io.Reader stream prior
+// to decompression, and caches the compressed data.
+func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
+	d.mu.RLock()
+	defer d.mu.RUnlock()
+
+	if val, ok := d.cache[key]; ok {
+		if !direct {
+			buf := bytes.NewBuffer(val)
+			if d.Compression != nil {
+				return d.Compression.Reader(buf)
+			}
+			return ioutil.NopCloser(buf), nil
+		}
+
+		go func() {
+			d.mu.Lock()
+			defer d.mu.Unlock()
+			d.uncacheWithLock(key, uint64(len(val)))
+		}()
+	}
+
+	return d.readWithRLock(key)
+}
+
+// read ignores the cache, and returns an io.ReadCloser representing the
+// decompressed data for the given key, streamed from the disk. Clients should
+// acquire a read lock on the Diskv and check the cache themselves before
+// calling read.
+func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) {
+	filename := d.completeFilename(key)
+
+	fi, err := os.Stat(filename)
+	if err != nil {
+		return nil, err
+	}
+	if fi.IsDir() {
+		return nil, os.ErrNotExist
+	}
+
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	var r io.Reader
+	if d.CacheSizeMax > 0 {
+		r = newSiphon(f, d, key)
+	} else {
+		r = &closingReader{f}
+	}
+
+	var rc = io.ReadCloser(ioutil.NopCloser(r))
+	if d.Compression != nil {
+		rc, err = d.Compression.Reader(r)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return rc, nil
+}
+
+// closingReader provides a Reader that automatically closes the
+// embedded ReadCloser when it reaches EOF
+type closingReader struct {
+	rc io.ReadCloser
+}
+
+func (cr closingReader) Read(p []byte) (int, error) {
+	n, err := cr.rc.Read(p)
+	if err == io.EOF {
+		if closeErr := cr.rc.Close(); closeErr != nil {
+			return n, closeErr // close must succeed for Read to succeed
+		}
+	}
+	return n, err
+}
+
+// siphon is like a TeeReader: it copies all data read through it to an
+// internal buffer, and moves that buffer to the cache at EOF.
+type siphon struct {
+	f   *os.File
+	d   *Diskv
+	key string
+	buf *bytes.Buffer
+}
+
+// newSiphon constructs a siphoning reader that represents the passed file.
+// When a successful series of reads ends in an EOF, the siphon will write
+// the buffered data to Diskv's cache under the given key.
+func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
+	return &siphon{
+		f:   f,
+		d:   d,
+		key: key,
+		buf: &bytes.Buffer{},
+	}
+}
+
+// Read implements the io.Reader interface for siphon.
+func (s *siphon) Read(p []byte) (int, error) {
+	n, err := s.f.Read(p)
+
+	if err == nil {
+		return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
+	}
+
+	if err == io.EOF {
+		s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
+		if closeErr := s.f.Close(); closeErr != nil {
+			return n, closeErr // close must succeed for Read to succeed
+		}
+		return n, err
+	}
+
+	return n, err
+}
+
+// Erase synchronously erases the given key from the disk and the cache.
+func (d *Diskv) Erase(key string) error {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	d.bustCacheWithLock(key)
+
+	// erase from index
+	if d.Index != nil {
+		d.Index.Delete(key)
+	}
+
+	// erase from disk
+	filename := d.completeFilename(key)
+	if s, err := os.Stat(filename); err == nil {
+		if s.IsDir() {
+			return errBadKey
+		}
+		if err = os.Remove(filename); err != nil {
+			return err
+		}
+	} else {
+		// Return err as-is so caller can do os.IsNotExist(err).
+		return err
+	}
+
+	// clean up and return
+	d.pruneDirsWithLock(key)
+	return nil
+}
+
+// EraseAll will delete all of the data from the store, both in the cache and on
+// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
+// diskv-related data. Care should be taken to always specify a diskv base
+// directory that is exclusively for diskv data.
+func (d *Diskv) EraseAll() error {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	d.cache = make(map[string][]byte)
+	d.cacheSize = 0
+	if d.TempDir != "" {
+		os.RemoveAll(d.TempDir) // errors ignored
+	}
+	return os.RemoveAll(d.BasePath)
+}
+
+// Has returns true if the given key exists.
+func (d *Diskv) Has(key string) bool {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	if _, ok := d.cache[key]; ok {
+		return true
+	}
+
+	filename := d.completeFilename(key)
+	s, err := os.Stat(filename)
+	if err != nil {
+		return false
+	}
+	if s.IsDir() {
+		return false
+	}
+
+	return true
+}
+
+// Keys returns a channel that will yield every key accessible by the store,
+// in undefined order. If a cancel channel is provided, closing it will
+// terminate and close the keys channel.
+func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
+	return d.KeysPrefix("", cancel)
+}
+
+// KeysPrefix returns a channel that will yield every key accessible by the
+// store with the given prefix, in undefined order. If a cancel channel is
+// provided, closing it will terminate and close the keys channel. If the
+// provided prefix is the empty string, all keys will be yielded.
+func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
+	var prepath string
+	if prefix == "" {
+		prepath = d.BasePath
+	} else {
+		prepath = d.pathFor(prefix)
+	}
+	c := make(chan string)
+	go func() {
+		filepath.Walk(prepath, walker(c, prefix, cancel))
+		close(c)
+	}()
+	return c
+}
+
+// walker returns a function which satisfies the filepath.WalkFunc interface.
+// It sends every non-directory file entry down the channel c.
+func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
+	return func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) {
+			return nil // "pass"
+		}
+
+		select {
+		case c <- info.Name():
+		case <-cancel:
+			return errCanceled
+		}
+
+		return nil
+	}
+}
+
+// pathFor returns the absolute path for location on the filesystem where the
+// data for the given key will be stored.
+func (d *Diskv) pathFor(key string) string {
+	return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...))
+}
+
+// ensurePathWithLock is a helper function that generates all necessary
+// directories on the filesystem for the given key.
+func (d *Diskv) ensurePathWithLock(key string) error {
+	return os.MkdirAll(d.pathFor(key), d.PathPerm)
+}
+
+// completeFilename returns the absolute path to the file for the given key.
+func (d *Diskv) completeFilename(key string) string {
+	return filepath.Join(d.pathFor(key), key)
+}
+
+// cacheWithLock attempts to cache the given key-value pair in the store's
+// cache. It can fail if the value is larger than the cache's maximum size.
+func (d *Diskv) cacheWithLock(key string, val []byte) error {
+	valueSize := uint64(len(val))
+	if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
+		return fmt.Errorf("%s; not caching", err)
+	}
+
+	// be very strict about memory guarantees
+	if (d.cacheSize + valueSize) > d.CacheSizeMax {
+		panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
+	}
+
+	d.cache[key] = val
+	d.cacheSize += valueSize
+	return nil
+}
+
+// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
+func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	return d.cacheWithLock(key, val)
+}
+
+func (d *Diskv) bustCacheWithLock(key string) {
+	if val, ok := d.cache[key]; ok {
+		d.uncacheWithLock(key, uint64(len(val)))
+	}
+}
+
+func (d *Diskv) uncacheWithLock(key string, sz uint64) {
+	d.cacheSize -= sz
+	delete(d.cache, key)
+}
+
+// pruneDirsWithLock deletes empty directories in the path walk leading to the
+// key k. Typically this function is called after an Erase is made.
+func (d *Diskv) pruneDirsWithLock(key string) error {
+	pathlist := d.Transform(key)
+	for i := range pathlist {
+		dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))
+
+		// thanks to Steven Blenkinsop for this snippet
+		switch fi, err := os.Stat(dir); true {
+		case err != nil:
+			return err
+		case !fi.IsDir():
+			panic(fmt.Sprintf("corrupt dirstate at %s", dir))
+		}
+
+		nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
+		if err != nil {
+			return err
+		} else if len(nlinks) > 0 {
+			return nil // has subdirs -- do not prune
+		}
+		if err = os.Remove(dir); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
+// until the cache has at least valueSize bytes available.
+func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
+	if valueSize > d.CacheSizeMax {
+		return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
+	}
+
+	safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }
+
+	for key, val := range d.cache {
+		if safe() {
+			break
+		}
+
+		d.uncacheWithLock(key, uint64(len(val)))
+	}
+
+	if !safe() {
+		panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
+	}
+
+	return nil
+}
+
+// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
+// satisfy the io.WriteCloser interface.
+type nopWriteCloser struct {
+	io.Writer
+}
+
+func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
+func (wc *nopWriteCloser) Close() error                { return nil }
diff --git a/vendor/github.com/peterbourgon/diskv/index.go b/vendor/github.com/peterbourgon/diskv/index.go
new file mode 100644
index 0000000..96fee51
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/index.go
@@ -0,0 +1,115 @@
+package diskv
+
+import (
+	"sync"
+
+	"github.com/google/btree"
+)
+
+// Index is a generic interface for things that can
+// provide an ordered list of keys.
+type Index interface {
+	Initialize(less LessFunction, keys <-chan string)
+	Insert(key string)
+	Delete(key string)
+	Keys(from string, n int) []string
+}
+
+// LessFunction is used to initialize an Index of keys in a specific order.
+type LessFunction func(string, string) bool
+
+// btreeString is a custom data type that satisfies the BTree Less interface,
+// making the strings it wraps sortable by the BTree package.
+type btreeString struct {
+	s string
+	l LessFunction
+}
+
+// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
+func (s btreeString) Less(i btree.Item) bool {
+	return s.l(s.s, i.(btreeString).s)
+}
+
+// BTreeIndex is an implementation of the Index interface using google/btree.
+type BTreeIndex struct {
+	sync.RWMutex
+	LessFunction
+	*btree.BTree
+}
+
+// Initialize populates the BTree tree with data from the keys channel,
+// according to the passed less function. It's destructive to the BTreeIndex.
+func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
+	i.Lock()
+	defer i.Unlock()
+	i.LessFunction = less
+	i.BTree = rebuild(less, keys)
+}
+
+// Insert inserts the given key (only) into the BTree tree.
+func (i *BTreeIndex) Insert(key string) {
+	i.Lock()
+	defer i.Unlock()
+	if i.BTree == nil || i.LessFunction == nil {
+		panic("uninitialized index")
+	}
+	i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
+}
+
+// Delete removes the given key (only) from the BTree tree.
+func (i *BTreeIndex) Delete(key string) {
+	i.Lock()
+	defer i.Unlock()
+	if i.BTree == nil || i.LessFunction == nil {
+		panic("uninitialized index")
+	}
+	i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
+}
+
+// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
+// Keys will return the first n keys. If the passed 'from' key is non-empty, the
+// first key in the returned slice will be the key that immediately follows the
+// passed key, in key order.
+func (i *BTreeIndex) Keys(from string, n int) []string {
+	i.RLock()
+	defer i.RUnlock()
+
+	if i.BTree == nil || i.LessFunction == nil {
+		panic("uninitialized index")
+	}
+
+	if i.BTree.Len() <= 0 {
+		return []string{}
+	}
+
+	btreeFrom := btreeString{s: from, l: i.LessFunction}
+	skipFirst := true
+	if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
+		// no such key, so fabricate an always-smallest item
+		btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
+		skipFirst = false
+	}
+
+	keys := []string{}
+	iterator := func(i btree.Item) bool {
+		keys = append(keys, i.(btreeString).s)
+		return len(keys) < n
+	}
+	i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)
+
+	if skipFirst && len(keys) > 0 {
+		keys = keys[1:]
+	}
+
+	return keys
+}
+
+// rebuildIndex does the work of regenerating the index
+// with the given keys.
+func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
+	tree := btree.New(2)
+	for key := range keys {
+		tree.ReplaceOrInsert(btreeString{s: key, l: less})
+	}
+	return tree
+}
diff --git a/vendor/github.com/prometheus/client_golang/AUTHORS.md b/vendor/github.com/prometheus/client_golang/AUTHORS.md
deleted file mode 100644
index c5275d5..0000000
--- a/vendor/github.com/prometheus/client_golang/AUTHORS.md
+++ /dev/null
@@ -1,18 +0,0 @@
-The Prometheus project was started by Matt T. Proud (emeritus) and
-Julius Volz in 2012.
-
-Maintainers of this repository:
-
-* Björn Rabenstein <be...@soundcloud.com>
-
-The following individuals have contributed code to this repository
-(listed in alphabetical order):
-
-* Bernerd Schaefer <bj...@gmail.com>
-* Björn Rabenstein <be...@soundcloud.com>
-* Daniel Bornkessel <da...@soundcloud.com>
-* Jeff Younker <je...@drinktomi.com>
-* Julius Volz <ju...@gmail.com>
-* Matt T. Proud <ma...@gmail.com>
-* Tobias Schmidt <ts...@soundcloud.com>
-
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
index 623d3d8..c0d70b2 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -29,27 +29,72 @@ type Collector interface {
 	// collected by this Collector to the provided channel and returns once
 	// the last descriptor has been sent. The sent descriptors fulfill the
 	// consistency and uniqueness requirements described in the Desc
-	// documentation. (It is valid if one and the same Collector sends
-	// duplicate descriptors. Those duplicates are simply ignored. However,
-	// two different Collectors must not send duplicate descriptors.) This
-	// method idempotently sends the same descriptors throughout the
-	// lifetime of the Collector. If a Collector encounters an error while
-	// executing this method, it must send an invalid descriptor (created
-	// with NewInvalidDesc) to signal the error to the registry.
+	// documentation.
+	//
+	// It is valid if one and the same Collector sends duplicate
+	// descriptors. Those duplicates are simply ignored. However, two
+	// different Collectors must not send duplicate descriptors.
+	//
+	// Sending no descriptor at all marks the Collector as “unchecked”,
+	// i.e. no checks will be performed at registration time, and the
+	// Collector may yield any Metric it sees fit in its Collect method.
+	//
+	// This method idempotently sends the same descriptors throughout the
+	// lifetime of the Collector. It may be called concurrently and
+	// therefore must be implemented in a concurrency safe way.
+	//
+	// If a Collector encounters an error while executing this method, it
+	// must send an invalid descriptor (created with NewInvalidDesc) to
+	// signal the error to the registry.
 	Describe(chan<- *Desc)
 	// Collect is called by the Prometheus registry when collecting
 	// metrics. The implementation sends each collected metric via the
 	// provided channel and returns once the last metric has been sent. The
-	// descriptor of each sent metric is one of those returned by
-	// Describe. Returned metrics that share the same descriptor must differ
-	// in their variable label values. This method may be called
-	// concurrently and must therefore be implemented in a concurrency safe
-	// way. Blocking occurs at the expense of total performance of rendering
-	// all registered metrics. Ideally, Collector implementations support
-	// concurrent readers.
+	// descriptor of each sent metric is one of those returned by Describe
+	// (unless the Collector is unchecked, see above). Returned metrics that
+	// share the same descriptor must differ in their variable label
+	// values.
+	//
+	// This method may be called concurrently and must therefore be
+	// implemented in a concurrency safe way. Blocking occurs at the expense
+	// of total performance of rendering all registered metrics. Ideally,
+	// Collector implementations support concurrent readers.
 	Collect(chan<- Metric)
 }
 
+// DescribeByCollect is a helper to implement the Describe method of a custom
+// Collector. It collects the metrics from the provided Collector and sends
+// their descriptors to the provided channel.
+//
+// If a Collector collects the same metrics throughout its lifetime, its
+// Describe method can simply be implemented as:
+//
+//   func (c customCollector) Describe(ch chan<- *Desc) {
+//   	DescribeByCollect(c, ch)
+//   }
+//
+// However, this will not work if the metrics collected change dynamically over
+// the lifetime of the Collector in a way that their combined set of descriptors
+// changes as well. The shortcut implementation will then violate the contract
+// of the Describe method. If a Collector sometimes collects no metrics at all
+// (for example vectors like CounterVec, GaugeVec, etc., which only collect
+// metrics after a metric with a fully specified label set has been accessed),
+// it might even get registered as an unchecked Collecter (cf. the Register
+// method of the Registerer interface). Hence, only use this shortcut
+// implementation of Describe if you are certain to fulfill the contract.
+//
+// The Collector example demonstrates a use of DescribeByCollect.
+func DescribeByCollect(c Collector, descs chan<- *Desc) {
+	metrics := make(chan Metric)
+	go func() {
+		c.Collect(metrics)
+		close(metrics)
+	}()
+	for m := range metrics {
+		descs <- m.Desc()
+	}
+}
+
 // selfCollector implements Collector for a single Metric so that the Metric
 // collects itself. Add it as an anonymous field to a struct that implements
 // Metric, and call init with the Metric itself as an argument.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
index ee37949..765e455 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -15,6 +15,10 @@ package prometheus
 
 import (
 	"errors"
+	"math"
+	"sync/atomic"
+
+	dto "github.com/prometheus/client_model/go"
 )
 
 // Counter is a Metric that represents a single numerical value that only ever
@@ -30,16 +34,8 @@ type Counter interface {
 	Metric
 	Collector
 
-	// Set is used to set the Counter to an arbitrary value. It is only used
-	// if you have to transfer a value from an external counter into this
-	// Prometheus metric. Do not use it for regular handling of a
-	// Prometheus counter (as it can be used to break the contract of
-	// monotonically increasing values).
-	//
-	// Deprecated: Use NewConstMetric to create a counter for an external
-	// value. A Counter should never be set.
-	Set(float64)
-	// Inc increments the counter by 1.
+	// Inc increments the counter by 1. Use Add to increment it by arbitrary
+	// non-negative values.
 	Inc()
 	// Add adds the given value to the counter. It panics if the value is <
 	// 0.
@@ -50,6 +46,14 @@ type Counter interface {
 type CounterOpts Opts
 
 // NewCounter creates a new Counter based on the provided CounterOpts.
+//
+// The returned implementation tracks the counter value in two separate
+// variables, a float64 and a uint64. The latter is used to track calls of the
+// Inc method and calls of the Add method with a value that can be represented
+// as a uint64. This allows atomic increments of the counter with optimal
+// performance. (It is common to have an Inc call in very hot execution paths.)
+// Both internal tracking values are added up in the Write method. This has to
+// be taken into account when it comes to precision and overflow behavior.
 func NewCounter(opts CounterOpts) Counter {
 	desc := NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -57,20 +61,58 @@ func NewCounter(opts CounterOpts) Counter {
 		nil,
 		opts.ConstLabels,
 	)
-	result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+	result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
 	result.init(result) // Init self-collection.
 	return result
 }
 
 type counter struct {
-	value
+	// valBits contains the bits of the represented float64 value, while
+	// valInt stores values that are exact integers. Both have to go first
+	// in the struct to guarantee alignment for atomic operations.
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	valBits uint64
+	valInt  uint64
+
+	selfCollector
+	desc *Desc
+
+	labelPairs []*dto.LabelPair
+}
+
+func (c *counter) Desc() *Desc {
+	return c.desc
 }
 
 func (c *counter) Add(v float64) {
 	if v < 0 {
 		panic(errors.New("counter cannot decrease in value"))
 	}
-	c.value.Add(v)
+	ival := uint64(v)
+	if float64(ival) == v {
+		atomic.AddUint64(&c.valInt, ival)
+		return
+	}
+
+	for {
+		oldBits := atomic.LoadUint64(&c.valBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
+			return
+		}
+	}
+}
+
+func (c *counter) Inc() {
+	atomic.AddUint64(&c.valInt, 1)
+}
+
+func (c *counter) Write(out *dto.Metric) error {
+	fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
+	ival := atomic.LoadUint64(&c.valInt)
+	val := fval + float64(ival)
+
+	return populateMetric(CounterValue, val, c.labelPairs, out)
 }
 
 // CounterVec is a Collector that bundles a set of Counters that all share the
@@ -78,16 +120,12 @@ func (c *counter) Add(v float64) {
 // if you want to count the same thing partitioned by various dimensions
 // (e.g. number of HTTP requests, partitioned by response code and
 // method). Create instances with NewCounterVec.
-//
-// CounterVec embeds MetricVec. See there for a full list of methods with
-// detailed documentation.
 type CounterVec struct {
-	*MetricVec
+	*metricVec
 }
 
 // NewCounterVec creates a new CounterVec based on the provided CounterOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
 func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
 	desc := NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -96,34 +134,62 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
 		opts.ConstLabels,
 	)
 	return &CounterVec{
-		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
-			result := &counter{value: value{
-				desc:       desc,
-				valType:    CounterValue,
-				labelPairs: makeLabelPairs(desc, lvs),
-			}}
+		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			if len(lvs) != len(desc.variableLabels) {
+				panic(errInconsistentCardinality)
+			}
+			result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
 			result.init(result) // Init self-collection.
 			return result
 		}),
 	}
 }
 
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Counter and not a
-// Metric so that no type conversion is required.
-func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
-	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Counter for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Counter is created.
+//
+// It is possible to call this method without using the returned Counter to only
+// create the new Counter but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Counter for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Counter from the CounterVec. In that case,
+// the Counter will still exist, but it will not be exported anymore, even if a
+// Counter with the same label values is created later.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
 	if metric != nil {
 		return metric.(Counter), err
 	}
 	return nil, err
 }
 
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Counter and not a Metric so that no
-// type conversion is required.
-func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
-	metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Counter for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Counter is created. Implications of
+// creating a Counter without using it and keeping the Counter for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
 	if metric != nil {
 		return metric.(Counter), err
 	}
@@ -131,18 +197,57 @@ func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
 }
 
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
 //     myVec.WithLabelValues("404", "GET").Add(42)
-func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
-	return m.MetricVec.WithLabelValues(lvs...).(Counter)
+func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
+	c, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return c
 }
 
 // With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *CounterVec) With(labels Labels) Counter {
-	return m.MetricVec.With(labels).(Counter)
+// returned an error. Not returning an error allows shortcuts like
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *CounterVec) With(labels Labels) Counter {
+	c, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the CounterVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &CounterVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
 }
 
 // CounterFunc is a Counter whose value is determined at collect time by calling a
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 77f4b30..7b8827f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -16,33 +16,15 @@ package prometheus
 import (
 	"errors"
 	"fmt"
-	"regexp"
 	"sort"
 	"strings"
 
 	"github.com/golang/protobuf/proto"
+	"github.com/prometheus/common/model"
 
 	dto "github.com/prometheus/client_model/go"
 )
 
-var (
-	metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
-	labelNameRE  = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
-)
-
-// reservedLabelPrefix is a prefix which is not legal in user-supplied
-// label names.
-const reservedLabelPrefix = "__"
-
-// Labels represents a collection of label name -> value mappings. This type is
-// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
-// metric vector Collectors, e.g.:
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-//
-// The other use-case is the specification of constant label pairs in Opts or to
-// create a Desc.
-type Labels map[string]string
-
 // Desc is the descriptor used by every Prometheus Metric. It is essentially
 // the immutable meta-data of a Metric. The normal Metric implementations
 // included in this package manage their Desc under the hood. Users only have to
@@ -78,32 +60,27 @@ type Desc struct {
 	// Help string. Each Desc with the same fqName must have the same
 	// dimHash.
 	dimHash uint64
-	// err is an error that occured during construction. It is reported on
+	// err is an error that occurred during construction. It is reported on
 	// registration time.
 	err error
 }
 
 // NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
 // and will be reported on registration time. variableLabels and constLabels can
-// be nil if no such labels should be set. fqName and help must not be empty.
+// be nil if no such labels should be set. fqName must not be empty.
 //
 // variableLabels only contain the label names. Their label values are variable
 // and therefore not part of the Desc. (They are managed within the Metric.)
 //
 // For constLabels, the label values are constant. Therefore, they are fully
-// specified in the Desc. See the Opts documentation for the implications of
-// constant labels.
+// specified in the Desc. See the Collector example for a usage pattern.
 func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
 	d := &Desc{
 		fqName:         fqName,
 		help:           help,
 		variableLabels: variableLabels,
 	}
-	if help == "" {
-		d.err = errors.New("empty help string")
-		return d
-	}
-	if !metricNameRE.MatchString(fqName) {
+	if !model.IsValidMetricName(model.LabelValue(fqName)) {
 		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
 		return d
 	}
@@ -127,6 +104,12 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
 	for _, labelName := range labelNames {
 		labelValues = append(labelValues, constLabels[labelName])
 	}
+	// Validate the const label values. They can't have a wrong cardinality, so
+	// use in len(labelValues) as expectedNumberOfValues.
+	if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
+		d.err = err
+		return d
+	}
 	// Now add the variable label names, but prefix them with something that
 	// cannot be in a regular label name. That prevents matching the label
 	// dimension with a different mix between preset and variable labels.
@@ -142,6 +125,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
 		d.err = errors.New("duplicate label names")
 		return d
 	}
+
 	vh := hashNew()
 	for _, val := range labelValues {
 		vh = hashAdd(vh, val)
@@ -168,7 +152,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
 			Value: proto.String(v),
 		})
 	}
-	sort.Sort(LabelPairSorter(d.constLabelPairs))
+	sort.Sort(labelPairSorter(d.constLabelPairs))
 	return d
 }
 
@@ -198,8 +182,3 @@ func (d *Desc) String() string {
 		d.variableLabels,
 	)
 }
-
-func checkLabelName(l string) bool {
-	return labelNameRE.MatchString(l) &&
-		!strings.HasPrefix(l, reservedLabelPrefix)
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
index b15a2d3..5d9525d 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -11,13 +11,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package prometheus provides metrics primitives to instrument code for
-// monitoring. It also offers a registry for metrics. Sub-packages allow to
-// expose the registered metrics via HTTP (package promhttp) or push them to a
-// Pushgateway (package push).
+// Package prometheus is the core instrumentation package. It provides metrics
+// primitives to instrument code for monitoring. It also offers a registry for
+// metrics. Sub-packages allow to expose the registered metrics via HTTP
+// (package promhttp) or push them to a Pushgateway (package push). There is
+// also a sub-package promauto, which provides metrics constructors with
+// automatic registration.
 //
 // All exported functions and methods are safe to be used concurrently unless
-//specified otherwise.
+// specified otherwise.
 //
 // A Basic Example
 //
@@ -26,6 +28,7 @@
 //    package main
 //
 //    import (
+//    	"log"
 //    	"net/http"
 //
 //    	"github.com/prometheus/client_golang/prometheus"
@@ -59,7 +62,7 @@
 //    	// The Handler function provides a default handler to expose metrics
 //    	// via an HTTP server. "/metrics" is the usual endpoint for that.
 //    	http.Handle("/metrics", promhttp.Handler())
-//    	http.ListenAndServe(":8080", nil)
+//    	log.Fatal(http.ListenAndServe(":8080", nil))
 //    }
 //
 //
@@ -69,9 +72,12 @@
 // Metrics
 //
 // The number of exported identifiers in this package might appear a bit
-// overwhelming. Hovever, in addition to the basic plumbing shown in the example
+// overwhelming. However, in addition to the basic plumbing shown in the example
 // above, you only need to understand the different metric types and their
-// vector versions for basic usage.
+// vector versions for basic usage. Furthermore, if you are not concerned with
+// fine-grained control of when and how to register metrics with the registry,
+// have a look at the promauto package, which will effectively allow you to
+// ignore registration altogether in simple cases.
 //
 // Above, you have already touched the Counter and the Gauge. There are two more
 // advanced metric types: the Summary and Histogram. A more thorough description
@@ -95,8 +101,8 @@
 // SummaryVec, HistogramVec, and UntypedVec are not.
 //
 // To create instances of Metrics and their vector versions, you need a suitable
-// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
-// HistogramOpts, or UntypedOpts.
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
+// UntypedOpts.
 //
 // Custom Collectors and constant Metrics
 //
@@ -114,8 +120,18 @@
 // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
 // NewConstSummary (and their respective Must… versions). That will happen in
 // the Collect method. The Describe method has to return separate Desc
-// instances, representative of the “throw-away” metrics to be created
-// later. NewDesc comes in handy to create those Desc instances.
+// instances, representative of the “throw-away” metrics to be created later.
+// NewDesc comes in handy to create those Desc instances. Alternatively, you
+// could return no Desc at all, which will marke the Collector “unchecked”.  No
+// checks are porformed at registration time, but metric consistency will still
+// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
+// errors. Thus, with unchecked Collectors, the responsibility to not collect
+// metrics that lead to inconsistencies in the total scrape result lies with the
+// implementer of the Collector. While this is not a desirable state, it is
+// sometimes necessary. The typical use case is a situatios where the exact
+// metrics to be returned by a Collector cannot be predicted at registration
+// time, but the implementer has sufficient knowledge of the whole system to
+// guarantee metric consistency.
 //
 // The Collector example illustrates the use case. You can also look at the
 // source code of the processCollector (mirroring process metrics), the
@@ -129,34 +145,34 @@
 // Advanced Uses of the Registry
 //
 // While MustRegister is the by far most common way of registering a Collector,
-// sometimes you might want to handle the errors the registration might
-// cause. As suggested by the name, MustRegister panics if an error occurs. With
-// the Register function, the error is returned and can be handled.
+// sometimes you might want to handle the errors the registration might cause.
+// As suggested by the name, MustRegister panics if an error occurs. With the
+// Register function, the error is returned and can be handled.
 //
 // An error is returned if the registered Collector is incompatible or
 // inconsistent with already registered metrics. The registry aims for
-// consistency of the collected metrics according to the Prometheus data
-// model. Inconsistencies are ideally detected at registration time, not at
-// collect time. The former will usually be detected at start-up time of a
-// program, while the latter will only happen at scrape time, possibly not even
-// on the first scrape if the inconsistency only becomes relevant later. That is
-// the main reason why a Collector and a Metric have to describe themselves to
-// the registry.
+// consistency of the collected metrics according to the Prometheus data model.
+// Inconsistencies are ideally detected at registration time, not at collect
+// time. The former will usually be detected at start-up time of a program,
+// while the latter will only happen at scrape time, possibly not even on the
+// first scrape if the inconsistency only becomes relevant later. That is the
+// main reason why a Collector and a Metric have to describe themselves to the
+// registry.
 //
 // So far, everything we did operated on the so-called default registry, as it
-// can be found in the global DefaultRegistry variable. With NewRegistry, you
+// can be found in the global DefaultRegisterer variable. With NewRegistry, you
 // can create a custom registry, or you can even implement the Registerer or
-// Gatherer interfaces yourself. The methods Register and Unregister work in
-// the same way on a custom registry as the global functions Register and
-// Unregister on the default registry.
-//
-// There are a number of uses for custom registries: You can use registries
-// with special properties, see NewPedanticRegistry. You can avoid global state,
-// as it is imposed by the DefaultRegistry. You can use multiple registries at
-// the same time to expose different metrics in different ways. You can use
+// Gatherer interfaces yourself. The methods Register and Unregister work in the
+// same way on a custom registry as the global functions Register and Unregister
+// on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries with
+// special properties, see NewPedanticRegistry. You can avoid global state, as
+// it is imposed by the DefaultRegisterer. You can use multiple registries at
+// the same time to expose different metrics in different ways.  You can use
 // separate registries for testing purposes.
 //
-// Also note that the DefaultRegistry comes registered with a Collector for Go
+// Also note that the DefaultRegisterer comes registered with a Collector for Go
 // runtime metrics (via NewGoCollector) and a Collector for process metrics (via
 // NewProcessCollector). With a custom registry, you are in control and decide
 // yourself about the Collectors to register.
@@ -166,16 +182,20 @@
 // The Registry implements the Gatherer interface. The caller of the Gather
 // method can then expose the gathered metrics in some way. Usually, the metrics
 // are served via HTTP on the /metrics endpoint. That's happening in the example
-// above. The tools to expose metrics via HTTP are in the promhttp
-// sub-package. (The top-level functions in the prometheus package are
-// deprecated.)
+// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
+// (The top-level functions in the prometheus package are deprecated.)
 //
 // Pushing to the Pushgateway
 //
 // Function for pushing to the Pushgateway can be found in the push sub-package.
 //
+// Graphite Bridge
+//
+// Functions and examples to push metrics from a Gatherer to Graphite can be
+// found in the graphite sub-package.
+//
 // Other Means of Exposition
 //
-// More ways of exposing metrics can easily be added. Sending metrics to
-// Graphite would be an example that will soon be implemented.
+// More ways of exposing metrics can easily be added by following the approaches
+// of the existing implementations.
 package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
index e3b67df..3d383a7 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package prometheus
 
 // Inline and byte-free variant of hash/fnv's fnv64a.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
index 8b70e51..17c72d7 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -13,6 +13,14 @@
 
 package prometheus
 
+import (
+	"math"
+	"sync/atomic"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
 // Gauge is a Metric that represents a single numerical value that can
 // arbitrarily go up and down.
 //
@@ -27,29 +35,95 @@ type Gauge interface {
 
 	// Set sets the Gauge to an arbitrary value.
 	Set(float64)
-	// Inc increments the Gauge by 1.
+	// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+	// values.
 	Inc()
-	// Dec decrements the Gauge by 1.
+	// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+	// values.
 	Dec()
-	// Add adds the given value to the Gauge. (The value can be
-	// negative, resulting in a decrease of the Gauge.)
+	// Add adds the given value to the Gauge. (The value can be negative,
+	// resulting in a decrease of the Gauge.)
 	Add(float64)
 	// Sub subtracts the given value from the Gauge. (The value can be
 	// negative, resulting in an increase of the Gauge.)
 	Sub(float64)
+
+	// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+	SetToCurrentTime()
 }
 
 // GaugeOpts is an alias for Opts. See there for doc comments.
 type GaugeOpts Opts
 
 // NewGauge creates a new Gauge based on the provided GaugeOpts.
+//
+// The returned implementation is optimized for a fast Set method. If you have a
+// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
+// the former. For example, the Inc method of the returned Gauge is slower than
+// the Inc method of a Counter returned by NewCounter. This matches the typical
+// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
+// the latter Inc-heavy.
 func NewGauge(opts GaugeOpts) Gauge {
-	return newValue(NewDesc(
+	desc := NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
 		opts.Help,
 		nil,
 		opts.ConstLabels,
-	), GaugeValue, 0)
+	)
+	result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
+	result.init(result) // Init self-collection.
+	return result
+}
+
+type gauge struct {
+	// valBits contains the bits of the represented float64 value. It has
+	// to go first in the struct to guarantee alignment for atomic
+	// operations.  http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	valBits uint64
+
+	selfCollector
+
+	desc       *Desc
+	labelPairs []*dto.LabelPair
+}
+
+func (g *gauge) Desc() *Desc {
+	return g.desc
+}
+
+func (g *gauge) Set(val float64) {
+	atomic.StoreUint64(&g.valBits, math.Float64bits(val))
+}
+
+func (g *gauge) SetToCurrentTime() {
+	g.Set(float64(time.Now().UnixNano()) / 1e9)
+}
+
+func (g *gauge) Inc() {
+	g.Add(1)
+}
+
+func (g *gauge) Dec() {
+	g.Add(-1)
+}
+
+func (g *gauge) Add(val float64) {
+	for {
+		oldBits := atomic.LoadUint64(&g.valBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+		if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
+			return
+		}
+	}
+}
+
+func (g *gauge) Sub(val float64) {
+	g.Add(val * -1)
+}
+
+func (g *gauge) Write(out *dto.Metric) error {
+	val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
+	return populateMetric(GaugeValue, val, g.labelPairs, out)
 }
 
 // GaugeVec is a Collector that bundles a set of Gauges that all share the same
@@ -58,12 +132,11 @@ func NewGauge(opts GaugeOpts) Gauge {
 // (e.g. number of operations queued, partitioned by user and operation
 // type). Create instances with NewGaugeVec.
 type GaugeVec struct {
-	*MetricVec
+	*metricVec
 }
 
 // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
 func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
 	desc := NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -72,28 +145,62 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
 		opts.ConstLabels,
 	)
 	return &GaugeVec{
-		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
-			return newValue(desc, GaugeValue, 0, lvs...)
+		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			if len(lvs) != len(desc.variableLabels) {
+				panic(errInconsistentCardinality)
+			}
+			result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
+			result.init(result) // Init self-collection.
+			return result
 		}),
 	}
 }
 
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Gauge and not a
-// Metric so that no type conversion is required.
-func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
-	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Gauge for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Gauge is created.
+//
+// It is possible to call this method without using the returned Gauge to only
+// create the new Gauge but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Gauge for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
+// Gauge will still exist, but it will not be exported anymore, even if a
+// Gauge with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
 	if metric != nil {
 		return metric.(Gauge), err
 	}
 	return nil, err
 }
 
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Gauge and not a Metric so that no
-// type conversion is required.
-func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
-	metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Gauge for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Gauge is created. Implications of
+// creating a Gauge without using it and keeping the Gauge for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
 	if metric != nil {
 		return metric.(Gauge), err
 	}
@@ -101,18 +208,57 @@ func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
 }
 
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
 //     myVec.WithLabelValues("404", "GET").Add(42)
-func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
-	return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+	g, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return g
 }
 
 // With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *GaugeVec) With(labels Labels) Gauge {
-	return m.MetricVec.With(labels).(Gauge)
+// returned an error. Not returning an error allows shortcuts like
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *GaugeVec) With(labels Labels) Gauge {
+	g, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return g
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the GaugeVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &GaugeVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
 }
 
 // GaugeFunc is a Gauge whose value is determined at collect time by calling a
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index abc9d4e..ba3b933 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package prometheus
 
 import (
@@ -8,26 +21,39 @@ import (
 )
 
 type goCollector struct {
-	goroutines Gauge
-	gcDesc     *Desc
+	goroutinesDesc *Desc
+	threadsDesc    *Desc
+	gcDesc         *Desc
+	goInfoDesc     *Desc
 
 	// metrics to describe and collect
 	metrics memStatsMetrics
 }
 
-// NewGoCollector returns a collector which exports metrics about the current
-// go process.
+// NewGoCollector returns a collector which exports metrics about the current Go
+// process. This includes memory stats. To collect those, runtime.ReadMemStats
+// is called. This causes a stop-the-world, which is very short with Go1.9+
+// (~25µs). However, with older Go versions, the stop-the-world duration depends
+// on the heap size and can be quite significant (~1.7 ms/GiB as per
+// https://go-review.googlesource.com/c/go/+/34937).
 func NewGoCollector() Collector {
 	return &goCollector{
-		goroutines: NewGauge(GaugeOpts{
-			Namespace: "go",
-			Name:      "goroutines",
-			Help:      "Number of goroutines that currently exist.",
-		}),
+		goroutinesDesc: NewDesc(
+			"go_goroutines",
+			"Number of goroutines that currently exist.",
+			nil, nil),
+		threadsDesc: NewDesc(
+			"go_threads",
+			"Number of OS threads created.",
+			nil, nil),
 		gcDesc: NewDesc(
 			"go_gc_duration_seconds",
 			"A summary of the GC invocation durations.",
 			nil, nil),
+		goInfoDesc: NewDesc(
+			"go_info",
+			"Information about the Go environment.",
+			nil, Labels{"version": runtime.Version()}),
 		metrics: memStatsMetrics{
 			{
 				desc: NewDesc(
@@ -48,7 +74,7 @@ func NewGoCollector() Collector {
 			}, {
 				desc: NewDesc(
 					memstatNamespace("sys_bytes"),
-					"Number of bytes obtained by system. Sum of all system allocations.",
+					"Number of bytes obtained from system.",
 					nil, nil,
 				),
 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
@@ -111,12 +137,12 @@ func NewGoCollector() Collector {
 				valType: GaugeValue,
 			}, {
 				desc: NewDesc(
-					memstatNamespace("heap_released_bytes_total"),
-					"Total number of heap bytes released to OS.",
+					memstatNamespace("heap_released_bytes"),
+					"Number of heap bytes released to OS.",
 					nil, nil,
 				),
 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
-				valType: CounterValue,
+				valType: GaugeValue,
 			}, {
 				desc: NewDesc(
 					memstatNamespace("heap_objects"),
@@ -213,6 +239,14 @@ func NewGoCollector() Collector {
 				),
 				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
 				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("gc_cpu_fraction"),
+					"The fraction of this program's available CPU time used by the GC since the program started.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+				valType: GaugeValue,
 			},
 		},
 	}
@@ -224,9 +258,10 @@ func memstatNamespace(s string) string {
 
 // Describe returns all descriptions of the collector.
 func (c *goCollector) Describe(ch chan<- *Desc) {
-	ch <- c.goroutines.Desc()
+	ch <- c.goroutinesDesc
+	ch <- c.threadsDesc
 	ch <- c.gcDesc
-
+	ch <- c.goInfoDesc
 	for _, i := range c.metrics {
 		ch <- i.desc
 	}
@@ -234,8 +269,9 @@ func (c *goCollector) Describe(ch chan<- *Desc) {
 
 // Collect returns the current state of all metrics of the collector.
 func (c *goCollector) Collect(ch chan<- Metric) {
-	c.goroutines.Set(float64(runtime.NumGoroutine()))
-	ch <- c.goroutines
+	ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
+	n, _ := runtime.ThreadCreateProfile(nil)
+	ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
 
 	var stats debug.GCStats
 	stats.PauseQuantiles = make([]time.Duration, 5)
@@ -246,7 +282,9 @@ func (c *goCollector) Collect(ch chan<- Metric) {
 		quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
 	}
 	quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
-	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
+	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
+
+	ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
 
 	ms := &runtime.MemStats{}
 	runtime.ReadMemStats(ms)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 9719e8f..4d7fa97 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -16,7 +16,9 @@ package prometheus
 import (
 	"fmt"
 	"math"
+	"runtime"
 	"sort"
+	"sync"
 	"sync/atomic"
 
 	"github.com/golang/protobuf/proto"
@@ -108,8 +110,9 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
 }
 
 // HistogramOpts bundles the options for creating a Histogram metric. It is
-// mandatory to set Name and Help to a non-empty string. All other fields are
-// optional and can safely be left at their zero value.
+// mandatory to set Name to a non-empty string. All other fields are optional
+// and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
 type HistogramOpts struct {
 	// Namespace, Subsystem, and Name are components of the fully-qualified
 	// name of the Histogram (created by joining these components with
@@ -120,29 +123,22 @@ type HistogramOpts struct {
 	Subsystem string
 	Name      string
 
-	// Help provides information about this Histogram. Mandatory!
+	// Help provides information about this Histogram.
 	//
 	// Metrics with the same fully-qualified name must have the same Help
 	// string.
 	Help string
 
-	// ConstLabels are used to attach fixed labels to this
-	// Histogram. Histograms with the same fully-qualified name must have the
-	// same label names in their ConstLabels.
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
 	//
-	// Note that in most cases, labels have a value that varies during the
-	// lifetime of a process. Those labels are usually managed with a
-	// HistogramVec. ConstLabels serve only special purposes. One is for the
-	// special case where the value of a label does not change during the
-	// lifetime of a process, e.g. if the revision of the running binary is
-	// put into a label. Another, more advanced purpose is if more than one
-	// Collector needs to collect Histograms with the same fully-qualified
-	// name. In that case, those Summaries must differ in the values of
-	// their ConstLabels. See the Collector examples.
-	//
-	// If the value of a label never changes (not even between binaries),
-	// that label most likely should not be a label at all (but part of the
-	// metric name).
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
 	ConstLabels Labels
 
 	// Buckets defines the buckets into which observations are counted. Each
@@ -191,6 +187,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 		desc:        desc,
 		upperBounds: opts.Buckets,
 		labelPairs:  makeLabelPairs(desc, labelValues),
+		counts:      [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
 	}
 	for i, upperBound := range h.upperBounds {
 		if i < len(h.upperBounds)-1 {
@@ -207,28 +204,53 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 			}
 		}
 	}
-	// Finally we know the final length of h.upperBounds and can make counts.
-	h.counts = make([]uint64, len(h.upperBounds))
+	// Finally we know the final length of h.upperBounds and can make counts
+	// for both states:
+	h.counts[0].buckets = make([]uint64, len(h.upperBounds))
+	h.counts[1].buckets = make([]uint64, len(h.upperBounds))
 
 	h.init(h) // Init self-collection.
 	return h
 }
 
-type histogram struct {
+type histogramCounts struct {
 	// sumBits contains the bits of the float64 representing the sum of all
 	// observations. sumBits and count have to go first in the struct to
 	// guarantee alignment for atomic operations.
 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
 	sumBits uint64
 	count   uint64
+	buckets []uint64
+}
 
-	selfCollector
-	// Note that there is no mutex required.
+type histogram struct {
+	// countAndHotIdx is a complicated one. For lock-free yet atomic
+	// observations, we need to save the total count of observations again,
+	// combined with the index of the currently-hot counts struct, so that
+	// we can perform the operation on both values atomically. The least
+	// significant bit defines the hot counts struct. The remaining 63 bits
+	// represent the total count of observations. This happens under the
+	// assumption that the 63bit count will never overflow. Rationale: An
+	// observations takes about 30ns. Let's assume it could happen in
+	// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
+	// which is about 3000 years.
+	//
+	// This has to be first in the struct for 64bit alignment. See
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	countAndHotIdx uint64
 
-	desc *Desc
+	selfCollector
+	desc     *Desc
+	writeMtx sync.Mutex // Only used in the Write method.
 
 	upperBounds []float64
-	counts      []uint64
+
+	// Two counts, one is "hot" for lock-free observations, the other is
+	// "cold" for writing out a dto.Metric. It has to be an array of
+	// pointers to guarantee 64bit alignment of the histogramCounts, see
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+	counts [2]*histogramCounts
+	hotIdx int // Index of currently-hot counts. Only used within Write.
 
 	labelPairs []*dto.LabelPair
 }
@@ -248,36 +270,113 @@ func (h *histogram) Observe(v float64) {
 	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
 	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
 	i := sort.SearchFloat64s(h.upperBounds, v)
-	if i < len(h.counts) {
-		atomic.AddUint64(&h.counts[i], 1)
+
+	// We increment h.countAndHotIdx by 2 so that the counter in the upper
+	// 63 bits gets incremented by 1. At the same time, we get the new value
+	// back, which we can use to find the currently-hot counts.
+	n := atomic.AddUint64(&h.countAndHotIdx, 2)
+	hotCounts := h.counts[n%2]
+
+	if i < len(h.upperBounds) {
+		atomic.AddUint64(&hotCounts.buckets[i], 1)
 	}
-	atomic.AddUint64(&h.count, 1)
 	for {
-		oldBits := atomic.LoadUint64(&h.sumBits)
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
 		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
-		if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
 			break
 		}
 	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hotCounts.count, 1)
 }
 
 func (h *histogram) Write(out *dto.Metric) error {
-	his := &dto.Histogram{}
-	buckets := make([]*dto.Bucket, len(h.upperBounds))
+	var (
+		his                   = &dto.Histogram{}
+		buckets               = make([]*dto.Bucket, len(h.upperBounds))
+		hotCounts, coldCounts *histogramCounts
+		count                 uint64
+	)
+
+	// For simplicity, we mutex the rest of this method. It is not in the
+	// hot path, i.e.  Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it.
+	h.writeMtx.Lock()
+	defer h.writeMtx.Unlock()
 
-	his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
-	his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
-	var count uint64
+	// This is a bit arcane, which is why the following spells out this if
+	// clause in English:
+	//
+	// If the currently-hot counts struct is #0, we atomically increment
+	// h.countAndHotIdx by 1 so that from now on Observe will use the counts
+	// struct #1. Furthermore, the atomic increment gives us the new value,
+	// which, in its most significant 63 bits, tells us the count of
+	// observations done so far up to and including currently ongoing
+	// observations still using the counts struct just changed from hot to
+	// cold. To have a normal uint64 for the count, we bitshift by 1 and
+	// save the result in count. We also set h.hotIdx to 1 for the next
+	// Write call, and we will refer to counts #1 as hotCounts and to counts
+	// #0 as coldCounts.
+	//
+	// If the currently-hot counts struct is #1, we do the corresponding
+	// things the other way round. We have to _decrement_ h.countAndHotIdx
+	// (which is a bit arcane in itself, as we have to express -1 with an
+	// unsigned int...).
+	if h.hotIdx == 0 {
+		count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
+		h.hotIdx = 1
+		hotCounts = h.counts[1]
+		coldCounts = h.counts[0]
+	} else {
+		count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
+		h.hotIdx = 0
+		hotCounts = h.counts[0]
+		coldCounts = h.counts[1]
+	}
+
+	// Now we have to wait for the now-declared-cold counts to actually cool
+	// down, i.e. wait for all observations still using it to finish. That's
+	// the case once the count in the cold counts struct is the same as the
+	// one atomically retrieved from the upper 63bits of h.countAndHotIdx.
+	for {
+		if count == atomic.LoadUint64(&coldCounts.count) {
+			break
+		}
+		runtime.Gosched() // Let observations get work done.
+	}
+
+	his.SampleCount = proto.Uint64(count)
+	his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
+	var cumCount uint64
 	for i, upperBound := range h.upperBounds {
-		count += atomic.LoadUint64(&h.counts[i])
+		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
 		buckets[i] = &dto.Bucket{
-			CumulativeCount: proto.Uint64(count),
+			CumulativeCount: proto.Uint64(cumCount),
 			UpperBound:      proto.Float64(upperBound),
 		}
 	}
+
 	his.Bucket = buckets
 	out.Histogram = his
 	out.Label = h.labelPairs
+
+	// Finally add all the cold counts to the new hot counts and reset the cold counts.
+	atomic.AddUint64(&hotCounts.count, count)
+	atomic.StoreUint64(&coldCounts.count, 0)
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			atomic.StoreUint64(&coldCounts.sumBits, 0)
+			break
+		}
+	}
+	for i := range h.upperBounds {
+		atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
+		atomic.StoreUint64(&coldCounts.buckets[i], 0)
+	}
 	return nil
 }
 
@@ -287,12 +386,11 @@ func (h *histogram) Write(out *dto.Metric) error {
 // (e.g. HTTP request latencies, partitioned by status code and method). Create
 // instances with NewHistogramVec.
 type HistogramVec struct {
-	*MetricVec
+	*metricVec
 }
 
 // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
 func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
 	desc := NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -301,47 +399,116 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
 		opts.ConstLabels,
 	)
 	return &HistogramVec{
-		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
 			return newHistogram(desc, opts, lvs...)
 		}),
 	}
 }
 
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Histogram and not a
-// Metric so that no type conversion is required.
-func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
-	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Histogram for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Histogram is created.
+//
+// It is possible to call this method without using the returned Histogram to only
+// create the new Histogram but leave it at its starting value, a Histogram without
+// any observations.
+//
+// Keeping the Histogram for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
+// Histogram will still exist, but it will not be exported anymore, even if a
+// Histogram with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
 	if metric != nil {
-		return metric.(Histogram), err
+		return metric.(Observer), err
 	}
 	return nil, err
 }
 
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Histogram and not a Metric so that no
-// type conversion is required.
-func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
-	metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Histogram for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Histogram is created. Implications of
+// creating a Histogram without using it and keeping the Histogram for later use
+// are the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
 	if metric != nil {
-		return metric.(Histogram), err
+		return metric.(Observer), err
 	}
 	return nil, err
 }
 
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
 //     myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
-	return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
+	h, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+// With works as GetMetricWith but panics where GetMetricWithLabels would have
+// returned an error. Not returning an error allows shortcuts like
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *HistogramVec) With(labels Labels) Observer {
+	h, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the HistogramVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &HistogramVec{vec}, err
+	}
+	return nil, err
 }
 
-// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (m *HistogramVec) With(labels Labels) Histogram {
-	return m.MetricVec.With(labels).(Histogram)
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
 }
 
 type constHistogram struct {
@@ -393,7 +560,7 @@ func (h *constHistogram) Write(out *dto.Metric) error {
 // bucket.
 //
 // NewConstHistogram returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc.
+// consistent with the variable labels in Desc or if Desc is invalid.
 func NewConstHistogram(
 	desc *Desc,
 	count uint64,
@@ -401,8 +568,11 @@ func NewConstHistogram(
 	buckets map[float64]uint64,
 	labelValues ...string,
 ) (Metric, error) {
-	if len(desc.variableLabels) != len(labelValues) {
-		return nil, errInconsistentCardinality
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
 	}
 	return &constHistogram{
 		desc:       desc,
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
index 67ee5ac..4b8e602 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -61,15 +61,15 @@ func giveBuf(buf *bytes.Buffer) {
 // name).
 //
 // Deprecated: Please note the issues described in the doc comment of
-// InstrumentHandler. You might want to consider using promhttp.Handler instead
-// (which is non instrumented).
+// InstrumentHandler. You might want to consider using promhttp.Handler instead.
 func Handler() http.Handler {
 	return InstrumentHandler("prometheus", UninstrumentedHandler())
 }
 
 // UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
 //
-// Deprecated: Use promhttp.Handler instead. See there for further documentation.
+// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{})
+// instead. See there for further documentation.
 func UninstrumentedHandler() http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
 		mfs, err := DefaultGatherer.Gather()
@@ -95,7 +95,7 @@ func UninstrumentedHandler() http.Handler {
 			closer.Close()
 		}
 		if lastErr != nil && buf.Len() == 0 {
-			http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+			http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
 			return
 		}
 		header := w.Header()
@@ -115,7 +115,7 @@ func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string)
 	header := request.Header.Get(acceptEncodingHeader)
 	parts := strings.Split(header, ",")
 	for _, part := range parts {
-		part := strings.TrimSpace(part)
+		part = strings.TrimSpace(part)
 		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
 			return gzip.NewWriter(writer), "gzip"
 		}
@@ -139,16 +139,6 @@ var now nower = nowFunc(func() time.Time {
 	return time.Now()
 })
 
-func nowSeries(t ...time.Time) nower {
-	return nowFunc(func() time.Time {
-		defer func() {
-			t = t[1:]
-		}()
-
-		return t[0]
-	})
-}
-
 // InstrumentHandler wraps the given HTTP handler for instrumentation. It
 // registers four metric collectors (if not already done) and reports HTTP
 // metrics to the (newly or already) registered collectors: http_requests_total
@@ -158,23 +148,16 @@ func nowSeries(t ...time.Time) nower {
 // value. http_requests_total is a metric vector partitioned by HTTP method
 // (label name "method") and HTTP status code (label name "code").
 //
-// Deprecated: InstrumentHandler has several issues:
-//
-// - It uses Summaries rather than Histograms. Summaries are not useful if
-// aggregation across multiple instances is required.
-//
-// - It uses microseconds as unit, which is deprecated and should be replaced by
-// seconds.
-//
-// - The size of the request is calculated in a separate goroutine. Since this
-// calculator requires access to the request header, it creates a race with
-// any writes to the header performed during request handling.
-// httputil.ReverseProxy is a prominent example for a handler
-// performing such writes.
-//
-// Upcoming versions of this package will provide ways of instrumenting HTTP
-// handlers that are more flexible and have fewer issues. Please prefer direct
-// instrumentation in the meantime.
+// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
+// package promhttp instead. The issues are the following: (1) It uses Summaries
+// rather than Histograms. Summaries are not useful if aggregation across
+// multiple instances is required. (2) It uses microseconds as unit, which is
+// deprecated and should be replaced by seconds. (3) The size of the request is
+// calculated in a separate goroutine. Since this calculator requires access to
+// the request header, it creates a race with any writes to the header performed
+// during request handling.  httputil.ReverseProxy is a prominent example for a
+// handler performing such writes. (4) It has additional issues with HTTP/2, cf.
+// https://github.com/prometheus/client_golang/issues/272.
 func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
 	return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
 }
@@ -184,12 +167,13 @@ func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFun
 // issues).
 //
 // Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
-// InstrumentHandler is.
+// InstrumentHandler is. Use the tooling provided in package promhttp instead.
 func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
 	return InstrumentHandlerFuncWithOpts(
 		SummaryOpts{
 			Subsystem:   "http",
 			ConstLabels: Labels{"handler": handlerName},
+			Objectives:  map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
 		},
 		handlerFunc,
 	)
@@ -222,7 +206,7 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
 // SummaryOpts.
 //
 // Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
-// InstrumentHandler is.
+// InstrumentHandler is. Use the tooling provided in package promhttp instead.
 func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
 	return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
 }
@@ -233,7 +217,7 @@ func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.Hand
 // SummaryOpts are used.
 //
 // Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
-// as InstrumentHandler is.
+// as InstrumentHandler is. Use the tooling provided in package promhttp instead.
 func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
 	reqCnt := NewCounterVec(
 		CounterOpts{
@@ -245,34 +229,52 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
 		},
 		instLabels,
 	)
+	if err := Register(reqCnt); err != nil {
+		if are, ok := err.(AlreadyRegisteredError); ok {
+			reqCnt = are.ExistingCollector.(*CounterVec)
+		} else {
+			panic(err)
+		}
+	}
 
 	opts.Name = "request_duration_microseconds"
 	opts.Help = "The HTTP request latencies in microseconds."
 	reqDur := NewSummary(opts)
+	if err := Register(reqDur); err != nil {
+		if are, ok := err.(AlreadyRegisteredError); ok {
+			reqDur = are.ExistingCollector.(Summary)
+		} else {
+			panic(err)
+		}
+	}
 
 	opts.Name = "request_size_bytes"
 	opts.Help = "The HTTP request sizes in bytes."
 	reqSz := NewSummary(opts)
+	if err := Register(reqSz); err != nil {
+		if are, ok := err.(AlreadyRegisteredError); ok {
+			reqSz = are.ExistingCollector.(Summary)
+		} else {
+			panic(err)
+		}
+	}
 
 	opts.Name = "response_size_bytes"
 	opts.Help = "The HTTP response sizes in bytes."
 	resSz := NewSummary(opts)
-
-	regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
-	regReqDur := MustRegisterOrGet(reqDur).(Summary)
-	regReqSz := MustRegisterOrGet(reqSz).(Summary)
-	regResSz := MustRegisterOrGet(resSz).(Summary)
+	if err := Register(resSz); err != nil {
+		if are, ok := err.(AlreadyRegisteredError); ok {
+			resSz = are.ExistingCollector.(Summary)
+		} else {
+			panic(err)
+		}
+	}
 
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		now := time.Now()
 
 		delegate := &responseWriterDelegator{ResponseWriter: w}
-		out := make(chan int)
-		urlLen := 0
-		if r.URL != nil {
-			urlLen = len(r.URL.String())
-		}
-		go computeApproximateRequestSize(r, out, urlLen)
+		out := computeApproximateRequestSize(r)
 
 		_, cn := w.(http.CloseNotifier)
 		_, fl := w.(http.Flusher)
@@ -290,39 +292,52 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
 
 		method := sanitizeMethod(r.Method)
 		code := sanitizeCode(delegate.status)
-		regReqCnt.WithLabelValues(method, code).Inc()
-		regReqDur.Observe(elapsed)
-		regResSz.Observe(float64(delegate.written))
-		regReqSz.Observe(float64(<-out))
+		reqCnt.WithLabelValues(method, code).Inc()
+		reqDur.Observe(elapsed)
+		resSz.Observe(float64(delegate.written))
+		reqSz.Observe(float64(<-out))
 	})
 }
 
-func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
-	s += len(r.Method)
-	s += len(r.Proto)
-	for name, values := range r.Header {
-		s += len(name)
-		for _, value := range values {
-			s += len(value)
-		}
+func computeApproximateRequestSize(r *http.Request) <-chan int {
+	// Get URL length in current goroutine for avoiding a race condition.
+	// HandlerFunc that runs in parallel may modify the URL.
+	s := 0
+	if r.URL != nil {
+		s += len(r.URL.String())
 	}
-	s += len(r.Host)
 
-	// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+	out := make(chan int, 1)
 
-	if r.ContentLength != -1 {
-		s += int(r.ContentLength)
-	}
-	out <- s
+	go func() {
+		s += len(r.Method)
+		s += len(r.Proto)
+		for name, values := range r.Header {
+			s += len(name)
+			for _, value := range values {
+				s += len(value)
+			}
+		}
+		s += len(r.Host)
+
+		// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+		if r.ContentLength != -1 {
+			s += int(r.ContentLength)
+		}
+		out <- s
+		close(out)
+	}()
+
+	return out
 }
 
 type responseWriterDelegator struct {
 	http.ResponseWriter
 
-	handler, method string
-	status          int
-	written         int64
-	wroteHeader     bool
+	status      int
+	written     int64
+	wroteHeader bool
 }
 
 func (r *responseWriterDelegator) WriteHeader(code int) {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
new file mode 100644
index 0000000..351c26e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
@@ -0,0 +1,85 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"sort"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// metricSorter is a sortable slice of *dto.Metric.
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+	return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+	if len(s[i].Label) != len(s[j].Label) {
+		// This should not happen. The metrics are
+		// inconsistent. However, we have to deal with the fact, as
+		// people might use custom collectors or metric family injection
+		// to create inconsistent metrics. So let's simply compare the
+		// number of labels in this case. That will still yield
+		// reproducible sorting.
+		return len(s[i].Label) < len(s[j].Label)
+	}
+	for n, lp := range s[i].Label {
+		vi := lp.GetValue()
+		vj := s[j].Label[n].GetValue()
+		if vi != vj {
+			return vi < vj
+		}
+	}
+
+	// We should never arrive here. Multiple metrics with the same
+	// label set in the same scrape will lead to undefined ingestion
+	// behavior. However, as above, we have to provide stable sorting
+	// here, even for inconsistent metrics. So sort equal metrics
+	// by their timestamp, with missing timestamps (implying "now")
+	// coming last.
+	if s[i].TimestampMs == nil {
+		return false
+	}
+	if s[j].TimestampMs == nil {
+		return true
+	}
+	return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// NormalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+	for _, mf := range metricFamiliesByName {
+		sort.Sort(metricSorter(mf.Metric))
+	}
+	names := make([]string, 0, len(metricFamiliesByName))
+	for name, mf := range metricFamiliesByName {
+		if len(mf.Metric) > 0 {
+			names = append(names, name)
+		}
+	}
+	sort.Strings(names)
+	result := make([]*dto.MetricFamily, 0, len(names))
+	for _, name := range names {
+		result = append(result, metricFamiliesByName[name])
+	}
+	return result
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
new file mode 100644
index 0000000..e68f132
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -0,0 +1,70 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/prometheus/common/model"
+)
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
+	if len(labels) != expectedNumberOfValues {
+		return errInconsistentCardinality
+	}
+
+	for name, val := range labels {
+		if !utf8.ValidString(val) {
+			return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
+		}
+	}
+
+	return nil
+}
+
+func validateLabelValues(vals []string, expectedNumberOfValues int) error {
+	if len(vals) != expectedNumberOfValues {
+		return errInconsistentCardinality
+	}
+
+	for _, val := range vals {
+		if !utf8.ValidString(val) {
+			return fmt.Errorf("label value %q is not valid UTF-8", val)
+		}
+	}
+
+	return nil
+}
+
+func checkLabelName(l string) bool {
+	return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index d4063d9..55e6d86 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -15,6 +15,9 @@ package prometheus
 
 import (
 	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
 
 	dto "github.com/prometheus/client_model/go"
 )
@@ -43,9 +46,8 @@ type Metric interface {
 	// While populating dto.Metric, it is the responsibility of the
 	// implementation to ensure validity of the Metric protobuf (like valid
 	// UTF-8 strings or syntactically valid metric and label names). It is
-	// recommended to sort labels lexicographically. (Implementers may find
-	// LabelPairSorter useful for that.) Callers of Write should still make
-	// sure of sorting if they depend on it.
+	// recommended to sort labels lexicographically. Callers of Write should
+	// still make sure of sorting if they depend on it.
 	Write(*dto.Metric) error
 	// TODO(beorn7): The original rationale of passing in a pre-allocated
 	// dto.Metric protobuf to save allocations has disappeared. The
@@ -57,8 +59,9 @@ type Metric interface {
 // implementation XXX has its own XXXOpts type, but in most cases, it is just be
 // an alias of this type (which might change when the requirement arises.)
 //
-// It is mandatory to set Name and Help to a non-empty string. All other fields
-// are optional and can safely be left at their zero value.
+// It is mandatory to set Name to a non-empty string. All other fields are
+// optional and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
 type Opts struct {
 	// Namespace, Subsystem, and Name are components of the fully-qualified
 	// name of the Metric (created by joining these components with
@@ -69,7 +72,7 @@ type Opts struct {
 	Subsystem string
 	Name      string
 
-	// Help provides information about this metric. Mandatory!
+	// Help provides information about this metric.
 	//
 	// Metrics with the same fully-qualified name must have the same Help
 	// string.
@@ -79,20 +82,12 @@ type Opts struct {
 	// with the same fully-qualified name must have the same label names in
 	// their ConstLabels.
 	//
-	// Note that in most cases, labels have a value that varies during the
-	// lifetime of a process. Those labels are usually managed with a metric
-	// vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
-	// serve only special purposes. One is for the special case where the
-	// value of a label does not change during the lifetime of a process,
-	// e.g. if the revision of the running binary is put into a
-	// label. Another, more advanced purpose is if more than one Collector
-	// needs to collect Metrics with the same fully-qualified name. In that
-	// case, those Metrics must differ in the values of their
-	// ConstLabels. See the Collector examples.
-	//
-	// If the value of a label never changes (not even between binaries),
-	// that label most likely should not be a label at all (but part of the
-	// metric name).
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
 	ConstLabels Labels
 }
 
@@ -118,37 +113,22 @@ func BuildFQName(namespace, subsystem, name string) string {
 	return name
 }
 
-// LabelPairSorter implements sort.Interface. It is used to sort a slice of
-// dto.LabelPair pointers. This is useful for implementing the Write method of
-// custom metrics.
-type LabelPairSorter []*dto.LabelPair
+// labelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type labelPairSorter []*dto.LabelPair
 
-func (s LabelPairSorter) Len() int {
+func (s labelPairSorter) Len() int {
 	return len(s)
 }
 
-func (s LabelPairSorter) Swap(i, j int) {
+func (s labelPairSorter) Swap(i, j int) {
 	s[i], s[j] = s[j], s[i]
 }
 
-func (s LabelPairSorter) Less(i, j int) bool {
+func (s labelPairSorter) Less(i, j int) bool {
 	return s[i].GetName() < s[j].GetName()
 }
 
-type hashSorter []uint64
-
-func (s hashSorter) Len() int {
-	return len(s)
-}
-
-func (s hashSorter) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
-
-func (s hashSorter) Less(i, j int) bool {
-	return s[i] < s[j]
-}
-
 type invalidMetric struct {
 	desc *Desc
 	err  error
@@ -164,3 +144,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric {
 func (m *invalidMetric) Desc() *Desc { return m.desc }
 
 func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
+
+type timestampedMetric struct {
+	Metric
+	t time.Time
+}
+
+func (m timestampedMetric) Write(pb *dto.Metric) error {
+	e := m.Metric.Write(pb)
+	pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
+	return e
+}
+
+// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
+// way that it has an explicit timestamp set to the provided Time. This is only
+// useful in rare cases as the timestamp of a Prometheus metric should usually
+// be set by the Prometheus server during scraping. Exceptions include mirroring
+// metrics with given timestamps from other metric
+// sources.
+//
+// NewMetricWithTimestamp works best with MustNewConstMetric,
+// MustNewConstHistogram, and MustNewConstSummary, see example.
+//
+// Currently, the exposition formats used by Prometheus are limited to
+// millisecond resolution. Thus, the provided time will be rounded down to the
+// next full millisecond value.
+func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
+	return timestampedMetric{Metric: m, t: t}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
new file mode 100644
index 0000000..5806cd0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
@@ -0,0 +1,52 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Observer is the interface that wraps the Observe method, which is used by
+// Histogram and Summary to add observations.
+type Observer interface {
+	Observe(float64)
+}
+
+// The ObserverFunc type is an adapter to allow the use of ordinary
+// functions as Observers. If f is a function with the appropriate
+// signature, ObserverFunc(f) is an Observer that calls f.
+//
+// This adapter is usually used in connection with the Timer type, and there are
+// two general use cases:
+//
+// The most common one is to use a Gauge as the Observer for a Timer.
+// See the "Gauge" Timer example.
+//
+// The more advanced use case is to create a function that dynamically decides
+// which Observer to use for observing the duration. See the "Complex" Timer
+// example.
+type ObserverFunc func(float64)
+
+// Observe calls f(value). It implements Observer.
+func (f ObserverFunc) Observe(value float64) {
+	f(value)
+}
+
+// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
+type ObserverVec interface {
+	GetMetricWith(Labels) (Observer, error)
+	GetMetricWithLabelValues(lvs ...string) (Observer, error)
+	With(Labels) Observer
+	WithLabelValues(...string) Observer
+	CurryWith(Labels) (ObserverVec, error)
+	MustCurryWith(Labels) ObserverVec
+
+	Collector
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
index e31e62e..55176d5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -13,89 +13,139 @@
 
 package prometheus
 
-import "github.com/prometheus/procfs"
+import (
+	"errors"
+	"os"
+
+	"github.com/prometheus/procfs"
+)
 
 type processCollector struct {
-	pid             int
 	collectFn       func(chan<- Metric)
 	pidFn           func() (int, error)
-	cpuTotal        Counter
-	openFDs, maxFDs Gauge
-	vsize, rss      Gauge
-	startTime       Gauge
+	reportErrors    bool
+	cpuTotal        *Desc
+	openFDs, maxFDs *Desc
+	vsize, maxVsize *Desc
+	rss             *Desc
+	startTime       *Desc
 }
 
-// NewProcessCollector returns a collector which exports the current state of
-// process metrics including cpu, memory and file descriptor usage as well as
-// the process start time for the given process id under the given namespace.
-func NewProcessCollector(pid int, namespace string) Collector {
-	return NewProcessCollectorPIDFn(
-		func() (int, error) { return pid, nil },
-		namespace,
-	)
+// ProcessCollectorOpts defines the behavior of a process metrics collector
+// created with NewProcessCollector.
+type ProcessCollectorOpts struct {
+	// PidFn returns the PID of the process the collector collects metrics
+	// for. It is called upon each collection. By default, the PID of the
+	// current process is used, as determined on construction time by
+	// calling os.Getpid().
+	PidFn func() (int, error)
+	// If non-empty, each of the collected metrics is prefixed by the
+	// provided string and an underscore ("_").
+	Namespace string
+	// If true, any error encountered during collection is reported as an
+	// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
+	// and the collected metrics will be incomplete. (Possibly, no metrics
+	// will be collected at all.) While that's usually not desired, it is
+	// appropriate for the common "mix-in" of process metrics, where process
+	// metrics are nice to have, but failing to collect them should not
+	// disrupt the collection of the remaining metrics.
+	ReportErrors bool
 }
 
-// NewProcessCollectorPIDFn returns a collector which exports the current state
-// of process metrics including cpu, memory and file descriptor usage as well
-// as the process start time under the given namespace. The given pidFn is
-// called on each collect and is used to determine the process to export
-// metrics for.
-func NewProcessCollectorPIDFn(
-	pidFn func() (int, error),
-	namespace string,
-) Collector {
-	c := processCollector{
-		pidFn:     pidFn,
-		collectFn: func(chan<- Metric) {},
-
-		cpuTotal: NewCounter(CounterOpts{
-			Namespace: namespace,
-			Name:      "process_cpu_seconds_total",
-			Help:      "Total user and system CPU time spent in seconds.",
-		}),
-		openFDs: NewGauge(GaugeOpts{
-			Namespace: namespace,
-			Name:      "process_open_fds",
-			Help:      "Number of open file descriptors.",
-		}),
-		maxFDs: NewGauge(GaugeOpts{
-			Namespace: namespace,
-			Name:      "process_max_fds",
-			Help:      "Maximum number of open file descriptors.",
-		}),
-		vsize: NewGauge(GaugeOpts{
-			Namespace: namespace,
-			Name:      "process_virtual_memory_bytes",
-			Help:      "Virtual memory size in bytes.",
-		}),
-		rss: NewGauge(GaugeOpts{
-			Namespace: namespace,
-			Name:      "process_resident_memory_bytes",
-			Help:      "Resident memory size in bytes.",
-		}),
-		startTime: NewGauge(GaugeOpts{
-			Namespace: namespace,
-			Name:      "process_start_time_seconds",
-			Help:      "Start time of the process since unix epoch in seconds.",
-		}),
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including CPU, memory and file descriptor usage as well as
+// the process start time. The detailed behavior is defined by the provided
+// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
+// collector for the current process with an empty namespace string and no error
+// reporting.
+//
+// Currently, the collector depends on a Linux-style proc filesystem and
+// therefore only exports metrics for Linux.
+//
+// Note: An older version of this function had the following signature:
+//
+//     NewProcessCollector(pid int, namespace string) Collector
+//
+// Most commonly, it was called as
+//
+//     NewProcessCollector(os.Getpid(), "")
+//
+// The following call of the current version is equivalent to the above:
+//
+//     NewProcessCollector(ProcessCollectorOpts{})
+func NewProcessCollector(opts ProcessCollectorOpts) Collector {
+	ns := ""
+	if len(opts.Namespace) > 0 {
+		ns = opts.Namespace + "_"
+	}
+
+	c := &processCollector{
+		reportErrors: opts.ReportErrors,
+		cpuTotal: NewDesc(
+			ns+"process_cpu_seconds_total",
+			"Total user and system CPU time spent in seconds.",
+			nil, nil,
+		),
+		openFDs: NewDesc(
+			ns+"process_open_fds",
+			"Number of open file descriptors.",
+			nil, nil,
+		),
+		maxFDs: NewDesc(
+			ns+"process_max_fds",
+			"Maximum number of open file descriptors.",
+			nil, nil,
+		),
+		vsize: NewDesc(
+			ns+"process_virtual_memory_bytes",
+			"Virtual memory size in bytes.",
+			nil, nil,
+		),
+		maxVsize: NewDesc(
+			ns+"process_virtual_memory_max_bytes",
+			"Maximum amount of virtual memory available in bytes.",
+			nil, nil,
+		),
+		rss: NewDesc(
+			ns+"process_resident_memory_bytes",
+			"Resident memory size in bytes.",
+			nil, nil,
+		),
+		startTime: NewDesc(
+			ns+"process_start_time_seconds",
+			"Start time of the process since unix epoch in seconds.",
+			nil, nil,
+		),
+	}
+
+	if opts.PidFn == nil {
+		pid := os.Getpid()
+		c.pidFn = func() (int, error) { return pid, nil }
+	} else {
+		c.pidFn = opts.PidFn
 	}
 
 	// Set up process metric collection if supported by the runtime.
 	if _, err := procfs.NewStat(); err == nil {
 		c.collectFn = c.processCollect
+	} else {
+		c.collectFn = func(ch chan<- Metric) {
+			c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
+		}
 	}
 
-	return &c
+	return c
 }
 
 // Describe returns all descriptions of the collector.
 func (c *processCollector) Describe(ch chan<- *Desc) {
-	ch <- c.cpuTotal.Desc()
-	ch <- c.openFDs.Desc()
-	ch <- c.maxFDs.Desc()
-	ch <- c.vsize.Desc()
-	ch <- c.rss.Desc()
-	ch <- c.startTime.Desc()
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.vsize
+	ch <- c.maxVsize
+	ch <- c.rss
+	ch <- c.startTime
 }
 
 // Collect returns the current state of all metrics of the collector.
@@ -103,40 +153,52 @@ func (c *processCollector) Collect(ch chan<- Metric) {
 	c.collectFn(ch)
 }
 
-// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
-// client allows users to configure the error behavior.
 func (c *processCollector) processCollect(ch chan<- Metric) {
 	pid, err := c.pidFn()
 	if err != nil {
+		c.reportError(ch, nil, err)
 		return
 	}
 
 	p, err := procfs.NewProc(pid)
 	if err != nil {
+		c.reportError(ch, nil, err)
 		return
 	}
 
 	if stat, err := p.NewStat(); err == nil {
-		c.cpuTotal.Set(stat.CPUTime())
-		ch <- c.cpuTotal
-		c.vsize.Set(float64(stat.VirtualMemory()))
-		ch <- c.vsize
-		c.rss.Set(float64(stat.ResidentMemory()))
-		ch <- c.rss
-
+		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
 		if startTime, err := stat.StartTime(); err == nil {
-			c.startTime.Set(startTime)
-			ch <- c.startTime
+			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+		} else {
+			c.reportError(ch, c.startTime, err)
 		}
+	} else {
+		c.reportError(ch, nil, err)
 	}
 
 	if fds, err := p.FileDescriptorsLen(); err == nil {
-		c.openFDs.Set(float64(fds))
-		ch <- c.openFDs
+		ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
+	} else {
+		c.reportError(ch, c.openFDs, err)
 	}
 
 	if limits, err := p.NewLimits(); err == nil {
-		c.maxFDs.Set(float64(limits.OpenFiles))
-		ch <- c.maxFDs
+		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
+	} else {
+		c.reportError(ch, nil, err)
+	}
+}
+
+func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
+	if !c.reportErrors {
+		return
+	}
+	if desc == nil {
+		desc = NewInvalidDesc(err)
 	}
+	ch <- NewInvalidMetric(desc, err)
 }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
new file mode 100644
index 0000000..67b56d3
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -0,0 +1,199 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"bufio"
+	"io"
+	"net"
+	"net/http"
+)
+
+const (
+	closeNotifier = 1 << iota
+	flusher
+	hijacker
+	readerFrom
+	pusher
+)
+
+type delegator interface {
+	http.ResponseWriter
+
+	Status() int
+	Written() int64
+}
+
+type responseWriterDelegator struct {
+	http.ResponseWriter
+
+	handler, method    string
+	status             int
+	written            int64
+	wroteHeader        bool
+	observeWriteHeader func(int)
+}
+
+func (r *responseWriterDelegator) Status() int {
+	return r.status
+}
+
+func (r *responseWriterDelegator) Written() int64 {
+	return r.written
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+	r.status = code
+	r.wroteHeader = true
+	r.ResponseWriter.WriteHeader(code)
+	if r.observeWriteHeader != nil {
+		r.observeWriteHeader(code)
+	}
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+	if !r.wroteHeader {
+		r.WriteHeader(http.StatusOK)
+	}
+	n, err := r.ResponseWriter.Write(b)
+	r.written += int64(n)
+	return n, err
+}
+
+type closeNotifierDelegator struct{ *responseWriterDelegator }
+type flusherDelegator struct{ *responseWriterDelegator }
+type hijackerDelegator struct{ *responseWriterDelegator }
+type readerFromDelegator struct{ *responseWriterDelegator }
+
+func (d closeNotifierDelegator) CloseNotify() <-chan bool {
+	return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+func (d flusherDelegator) Flush() {
+	d.ResponseWriter.(http.Flusher).Flush()
+}
+func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	return d.ResponseWriter.(http.Hijacker).Hijack()
+}
+func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
+	if !d.wroteHeader {
+		d.WriteHeader(http.StatusOK)
+	}
+	n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
+	d.written += n
+	return n, err
+}
+
+var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
+
+func init() {
+	// TODO(beorn7): Code generation would help here.
+	pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
+		return d
+	}
+	pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
+		return closeNotifierDelegator{d}
+	}
+	pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
+		return flusherDelegator{d}
+	}
+	pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
+		return struct {
+			*responseWriterDelegator
+			http.Flusher
+			http.CloseNotifier
+		}{d, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
+		return hijackerDelegator{d}
+	}
+	pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
+		return struct {
+			*responseWriterDelegator
+			http.Hijacker
+			http.CloseNotifier
+		}{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
+		return struct {
+			*responseWriterDelegator
+			http.Hijacker
+			http.Flusher
+		}{d, hijackerDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
+		return struct {
+			*responseWriterDelegator
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
+		return readerFromDelegator{d}
+	}
+	pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.CloseNotifier
+		}{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Flusher
+		}{d, readerFromDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Flusher
+			http.CloseNotifier
+		}{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Hijacker
+		}{d, readerFromDelegator{d}, hijackerDelegator{d}}
+	}
+	pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Hijacker
+			http.CloseNotifier
+		}{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+		}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
+		return struct {
+			*responseWriterDelegator
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
new file mode 100644
index 0000000..31a7069
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
@@ -0,0 +1,181 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+	"io"
+	"net/http"
+)
+
+type pusherDelegator struct{ *responseWriterDelegator }
+
+func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
+	return d.ResponseWriter.(http.Pusher).Push(target, opts)
+}
+
+func init() {
+	pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
+		return pusherDelegator{d}
+	}
+	pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Flusher
+		}{d, pusherDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+		}{d, pusherDelegator{d}, hijackerDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.Flusher
+		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+		}{d, pusherDelegator{d}, readerFromDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Flusher
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+}
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+	d := &responseWriterDelegator{
+		ResponseWriter:     w,
+		observeWriteHeader: observeWriteHeaderFunc,
+	}
+
+	id := 0
+	if _, ok := w.(http.CloseNotifier); ok {
+		id += closeNotifier
+	}
+	if _, ok := w.(http.Flusher); ok {
+		id += flusher
+	}
+	if _, ok := w.(http.Hijacker); ok {
+		id += hijacker
+	}
+	if _, ok := w.(io.ReaderFrom); ok {
+		id += readerFrom
+	}
+	if _, ok := w.(http.Pusher); ok {
+		id += pusher
+	}
+
+	return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
new file mode 100644
index 0000000..8bb9b8b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
@@ -0,0 +1,44 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.8
+
+package promhttp
+
+import (
+	"io"
+	"net/http"
+)
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+	d := &responseWriterDelegator{
+		ResponseWriter:     w,
+		observeWriteHeader: observeWriteHeaderFunc,
+	}
+
+	id := 0
+	if _, ok := w.(http.CloseNotifier); ok {
+		id += closeNotifier
+	}
+	if _, ok := w.(http.Flusher); ok {
+		id += flusher
+	}
+	if _, ok := w.(http.Hijacker); ok {
+		id += hijacker
+	}
+	if _, ok := w.(io.ReaderFrom); ok {
+		id += readerFrom
+	}
+
+	return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index b6dd5a2..0135737 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -11,21 +11,24 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Copyright (c) 2013, The Prometheus Authors
-// All rights reserved.
+// Package promhttp provides tooling around HTTP servers and clients.
 //
-// Use of this source code is governed by a BSD-style license that can be found
-// in the LICENSE file.
-
-// Package promhttp contains functions to create http.Handler instances to
-// expose Prometheus metrics via HTTP. In later versions of this package, it
-// will also contain tooling to instrument instances of http.Handler and
-// http.RoundTripper.
+// First, the package allows the creation of http.Handler instances to expose
+// Prometheus metrics via HTTP. promhttp.Handler acts on the
+// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
+// custom registry or anything that implements the Gatherer interface. It also
+// allows the creation of handlers that act differently on errors or allow to
+// log errors.
+//
+// Second, the package provides tooling to instrument instances of http.Handler
+// via middleware. Middleware wrappers follow the naming scheme
+// InstrumentHandlerX, where X describes the intended use of the middleware.
+// See each function's doc comment for specific details.
 //
-// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor,
-// you can create a handler for a custom registry or anything that implements
-// the Gatherer interface. It also allows to create handlers that act
-// differently on errors or allow to log errors.
+// Finally, the package allows for an http.RoundTripper to be instrumented via
+// middleware. Middleware wrappers follow the naming scheme
+// InstrumentRoundTripperX, where X describes the intended use of the
+// middleware. See each function's doc comment for specific details.
 package promhttp
 
 import (
@@ -36,6 +39,7 @@ import (
 	"net/http"
 	"strings"
 	"sync"
+	"time"
 
 	"github.com/prometheus/common/expfmt"
 
@@ -64,21 +68,51 @@ func giveBuf(buf *bytes.Buffer) {
 	bufPool.Put(buf)
 }
 
-// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
-// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
-// error, no error logging, and compression if requested by the client.
+// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
+// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
+// no error logging, and it applies compression if requested by the client.
+//
+// The returned http.Handler is already instrumented using the
+// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
+// create multiple http.Handlers by separate calls of the Handler function, the
+// metrics used for instrumentation will be shared between them, providing
+// global scrape counts.
 //
-// If you want to create a Handler for the DefaultGatherer with different
-// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
-// your desired HandlerOpts.
+// This function is meant to cover the bulk of basic use cases. If you are doing
+// anything that requires more customization (including using a non-default
+// Gatherer, different instrumentation, and non-default HandlerOpts), use the
+// HandlerFor function. See there for details.
 func Handler() http.Handler {
-	return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
+	return InstrumentMetricHandler(
+		prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
+	)
 }
 
-// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
-// of the Handler is defined by the provided HandlerOpts.
+// HandlerFor returns an uninstrumented http.Handler for the provided
+// Gatherer. The behavior of the Handler is defined by the provided
+// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
+// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
+// instrumentation. Use the InstrumentMetricHandler function to apply the same
+// kind of instrumentation as it is used by the Handler function.
 func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+	var inFlightSem chan struct{}
+	if opts.MaxRequestsInFlight > 0 {
+		inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
+	}
+
+	h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		if inFlightSem != nil {
+			select {
+			case inFlightSem <- struct{}{}: // All good, carry on.
+				defer func() { <-inFlightSem }()
+			default:
+				http.Error(w, fmt.Sprintf(
+					"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
+				), http.StatusServiceUnavailable)
+				return
+			}
+		}
+
 		mfs, err := reg.Gather()
 		if err != nil {
 			if opts.ErrorLog != nil {
@@ -125,7 +159,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 			closer.Close()
 		}
 		if lastErr != nil && buf.Len() == 0 {
-			http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+			http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
 			return
 		}
 		header := w.Header()
@@ -134,9 +168,70 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 		if encoding != "" {
 			header.Set(contentEncodingHeader, encoding)
 		}
-		w.Write(buf.Bytes())
+		if _, err := w.Write(buf.Bytes()); err != nil && opts.ErrorLog != nil {
+			opts.ErrorLog.Println("error while sending encoded metrics:", err)
+		}
 		// TODO(beorn7): Consider streaming serving of metrics.
 	})
+
+	if opts.Timeout <= 0 {
+		return h
+	}
+	return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
+		"Exceeded configured timeout of %v.\n",
+		opts.Timeout,
+	))
+}
+
+// InstrumentMetricHandler is usually used with an http.Handler returned by the
+// HandlerFor function. It instruments the provided http.Handler with two
+// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
+// scrapes partitioned by HTTP status code, and a gauge
+// "promhttp_metric_handler_requests_in_flight" to track the number of
+// simultaneous scrapes. This function idempotently registers collectors for
+// both metrics with the provided Registerer. It panics if the registration
+// fails. The provided metrics are useful to see how many scrapes hit the
+// monitored target (which could be from different Prometheus servers or other
+// scrapers), and how often they overlap (which would result in more than one
+// scrape in flight at the same time). Note that the scrapes-in-flight gauge
+// will contain the scrape by which it is exposed, while the scrape counter will
+// only get incremented after the scrape is complete (as only then the status
+// code is known). For tracking scrape durations, use the
+// "scrape_duration_seconds" gauge created by the Prometheus server upon each
+// scrape.
+func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
+	cnt := prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "promhttp_metric_handler_requests_total",
+			Help: "Total number of scrapes by HTTP status code.",
+		},
+		[]string{"code"},
+	)
+	// Initialize the most likely HTTP status codes.
+	cnt.WithLabelValues("200")
+	cnt.WithLabelValues("500")
+	cnt.WithLabelValues("503")
+	if err := reg.Register(cnt); err != nil {
+		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+			cnt = are.ExistingCollector.(*prometheus.CounterVec)
+		} else {
+			panic(err)
+		}
+	}
+
+	gge := prometheus.NewGauge(prometheus.GaugeOpts{
+		Name: "promhttp_metric_handler_requests_in_flight",
+		Help: "Current number of scrapes being served.",
+	})
+	if err := reg.Register(gge); err != nil {
+		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+			gge = are.ExistingCollector.(prometheus.Gauge)
+		} else {
+			panic(err)
+		}
+	}
+
+	return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
 }
 
 // HandlerErrorHandling defines how a Handler serving metrics will handle
@@ -180,6 +275,21 @@ type HandlerOpts struct {
 	// If DisableCompression is true, the handler will never compress the
 	// response, even if requested by the client.
 	DisableCompression bool
+	// The number of concurrent HTTP requests is limited to
+	// MaxRequestsInFlight. Additional requests are responded to with 503
+	// Service Unavailable and a suitable message in the body. If
+	// MaxRequestsInFlight is 0 or negative, no limit is applied.
+	MaxRequestsInFlight int
+	// If handling a request takes longer than Timeout, it is responded to
+	// with 503 ServiceUnavailable and a suitable Message. No timeout is
+	// applied if Timeout is 0 or negative. Note that with the current
+	// implementation, reaching the timeout simply ends the HTTP requests as
+	// described above (and even that only if sending of the body hasn't
+	// started yet), while the bulk work of gathering all the metrics keeps
+	// running in the background (with the eventual result to be thrown
+	// away). Until the implementation is improved, it is recommended to
+	// implement a separate timeout in potentially slow Collectors.
+	Timeout time.Duration
 }
 
 // decorateWriter wraps a writer to handle gzip compression if requested.  It
@@ -192,7 +302,7 @@ func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled
 	header := request.Header.Get(acceptEncodingHeader)
 	parts := strings.Split(header, ",")
 	for _, part := range parts {
-		part := strings.TrimSpace(part)
+		part = strings.TrimSpace(part)
 		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
 			return gzip.NewWriter(writer), "gzip"
 		}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
new file mode 100644
index 0000000..86fd564
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -0,0 +1,97 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+	return rt(r)
+}
+
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.RoundTripper.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		gauge.Inc()
+		defer gauge.Dec()
+		return next.RoundTrip(r)
+	})
+}
+
+// InstrumentRoundTripperCounter is a middleware that wraps the provided
+// http.RoundTripper to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
+// and/or HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
+// is not incremented.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+	code, method := checkLabels(counter)
+
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+		}
+		return resp, err
+	})
+}
+
+// InstrumentRoundTripperDuration is a middleware that wraps the provided
+// http.RoundTripper to observe the request duration with the provided
+// ObserverVec.  The ObserverVec must have zero, one, or two non-const
+// non-curried labels. For those, the only allowed label names are "code" and
+// "method". The function panics otherwise. The Observe method of the Observer
+// in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, no values are
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+	code, method := checkLabels(obs)
+
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+		}
+		return resp, err
+	})
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
new file mode 100644
index 0000000..a034d1e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
@@ -0,0 +1,144 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+	"context"
+	"crypto/tls"
+	"net/http"
+	"net/http/httptrace"
+	"time"
+)
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the http request. A user
+// may choose to use Histograms with separate buckets, or implement custom
+// instance labels on a per-function basis.
+type InstrumentTrace struct {
+	GotConn              func(float64)
+	PutIdleConn          func(float64)
+	GotFirstResponseByte func(float64)
+	Got100Continue       func(float64)
+	DNSStart             func(float64)
+	DNSDone              func(float64)
+	ConnectStart         func(float64)
+	ConnectDone          func(float64)
+	TLSHandshakeStart    func(float64)
+	TLSHandshakeDone     func(float64)
+	WroteHeaders         func(float64)
+	Wait100Continue      func(float64)
+	WroteRequest         func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative. (Earlier Go versions are not using a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+
+		trace := &httptrace.ClientTrace{
+			GotConn: func(_ httptrace.GotConnInfo) {
+				if it.GotConn != nil {
+					it.GotConn(time.Since(start).Seconds())
+				}
+			},
+			PutIdleConn: func(err error) {
+				if err != nil {
+					return
+				}
+				if it.PutIdleConn != nil {
+					it.PutIdleConn(time.Since(start).Seconds())
+				}
+			},
+			DNSStart: func(_ httptrace.DNSStartInfo) {
+				if it.DNSStart != nil {
+					it.DNSStart(time.Since(start).Seconds())
+				}
+			},
+			DNSDone: func(_ httptrace.DNSDoneInfo) {
+				if it.DNSDone != nil {
+					it.DNSDone(time.Since(start).Seconds())
+				}
+			},
+			ConnectStart: func(_, _ string) {
+				if it.ConnectStart != nil {
+					it.ConnectStart(time.Since(start).Seconds())
+				}
+			},
+			ConnectDone: func(_, _ string, err error) {
+				if err != nil {
+					return
+				}
+				if it.ConnectDone != nil {
+					it.ConnectDone(time.Since(start).Seconds())
+				}
+			},
+			GotFirstResponseByte: func() {
+				if it.GotFirstResponseByte != nil {
+					it.GotFirstResponseByte(time.Since(start).Seconds())
+				}
+			},
+			Got100Continue: func() {
+				if it.Got100Continue != nil {
+					it.Got100Continue(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeStart: func() {
+				if it.TLSHandshakeStart != nil {
+					it.TLSHandshakeStart(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+				if err != nil {
+					return
+				}
+				if it.TLSHandshakeDone != nil {
+					it.TLSHandshakeDone(time.Since(start).Seconds())
+				}
+			},
+			WroteHeaders: func() {
+				if it.WroteHeaders != nil {
+					it.WroteHeaders(time.Since(start).Seconds())
+				}
+			},
+			Wait100Continue: func() {
+				if it.Wait100Continue != nil {
+					it.Wait100Continue(time.Since(start).Seconds())
+				}
+			},
+			WroteRequest: func(_ httptrace.WroteRequestInfo) {
+				if it.WroteRequest != nil {
+					it.WroteRequest(time.Since(start).Seconds())
+				}
+			},
+		}
+		r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))
+
+		return next.RoundTrip(r)
+	})
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
new file mode 100644
index 0000000..9db2438
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -0,0 +1,447 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"errors"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// magicString is used for the hacky label test in checkLabels. Remove once fixed.
+const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
+
+// InstrumentHandlerInFlight is a middleware that wraps the provided
+// http.Handler. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.Handler.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		g.Inc()
+		defer g.Dec()
+		next.ServeHTTP(w, r)
+	})
+}
+
+// InstrumentHandlerDuration is a middleware that wraps the provided
+// http.Handler to observe the request duration with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request duration in seconds. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+	code, method := checkLabels(obs)
+
+	if code {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			now := time.Now()
+			d := newDelegator(w, nil)
+			next.ServeHTTP(d, r)
+
+			obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
+		})
+	}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		now := time.Now()
+		next.ServeHTTP(w, r)
+		obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
+	})
+}
+
+// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
+// to observe the request result with the provided CounterVec.  The CounterVec
+// must have zero, one, or two non-const non-curried labels. For those, the only
+// allowed label names are "code" and "method". The function panics
+// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
+// HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, the Counter is not incremented.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
+	code, method := checkLabels(counter)
+
+	if code {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			d := newDelegator(w, nil)
+			next.ServeHTTP(d, r)
+			counter.With(labels(code, method, r.Method, d.Status())).Inc()
+		})
+	}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		next.ServeHTTP(w, r)
+		counter.With(labels(code, method, r.Method, 0)).Inc()
+	})
+}
+
+// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
+// http.Handler to observe with the provided ObserverVec the request duration
+// until the response headers are written. The ObserverVec must have zero, one,
+// or two non-const non-curried labels. For those, the only allowed label names
+// are "code" and "method". The function panics otherwise. The Observe method of
+// the Observer in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped Handler panics before calling WriteHeader, no value is
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+	code, method := checkLabels(obs)
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		now := time.Now()
+		d := newDelegator(w, func(status int) {
+			obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
+		})
+		next.ServeHTTP(d, r)
+	})
+}
+
+// InstrumentHandlerRequestSize is a middleware that wraps the provided
+// http.Handler to observe the request size with the provided ObserverVec.  The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+	code, method := checkLabels(obs)
+
+	if code {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			d := newDelegator(w, nil)
+			next.ServeHTTP(d, r)
+			size := computeApproximateRequestSize(r)
+			obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
+		})
+	}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		next.ServeHTTP(w, r)
+		size := computeApproximateRequestSize(r)
+		obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
+	})
+}
+
+// InstrumentHandlerResponseSize is a middleware that wraps the provided
+// http.Handler to observe the response size with the provided ObserverVec.  The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the response size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
+	code, method := checkLabels(obs)
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		d := newDelegator(w, nil)
+		next.ServeHTTP(d, r)
+		obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
+	})
+}
+
+func checkLabels(c prometheus.Collector) (code bool, method bool) {
+	// TODO(beorn7): Remove this hacky way to check for instance labels
+	// once Descriptors can have their dimensionality queried.
+	var (
+		desc *prometheus.Desc
+		m    prometheus.Metric
+		pm   dto.Metric
+		lvs  []string
+	)
+
+	// Get the Desc from the Collector.
+	descc := make(chan *prometheus.Desc, 1)
+	c.Describe(descc)
+
+	select {
+	case desc = <-descc:
+	default:
+		panic("no description provided by collector")
+	}
+	select {
+	case <-descc:
+		panic("more than one description provided by collector")
+	default:
+	}
+
+	close(descc)
+
+	// Create a ConstMetric with the Desc. Since we don't know how many
+	// variable labels there are, try for as long as it needs.
+	for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
+		m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
+	}
+
+	// Write out the metric into a proto message and look at the labels.
+	// If the value is not the magicString, it is a constLabel, which doesn't interest us.
+	// If the label is curried, it doesn't interest us.
+	// In all other cases, only "code" or "method" is allowed.
+	if err := m.Write(&pm); err != nil {
+		panic("error checking metric for labels")
+	}
+	for _, label := range pm.Label {
+		name, value := label.GetName(), label.GetValue()
+		if value != magicString || isLabelCurried(c, name) {
+			continue
+		}
+		switch name {
+		case "code":
+			code = true
+		case "method":
+			method = true
+		default:
+			panic("metric partitioned with non-supported labels")
+		}
+	}
+	return
+}
+
+func isLabelCurried(c prometheus.Collector, label string) bool {
+	// This is even hackier than the label test above.
+	// We essentially try to curry again and see if it works.
+	// But for that, we need to type-convert to the two
+	// types we use here, ObserverVec or *CounterVec.
+	switch v := c.(type) {
+	case *prometheus.CounterVec:
+		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+			return false
+		}
+	case prometheus.ObserverVec:
+		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+			return false
+		}
+	default:
+		panic("unsupported metric vec type")
+	}
+	return true
+}
+
+// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
+// unnecessary allocations on each request.
+var emptyLabels = prometheus.Labels{}
+
+func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+	if !(code || method) {
+		return emptyLabels
+	}
+	labels := prometheus.Labels{}
+
+	if code {
+		labels["code"] = sanitizeCode(status)
+	}
+	if method {
+		labels["method"] = sanitizeMethod(reqMethod)
+	}
+
+	return labels
+}
+
+func computeApproximateRequestSize(r *http.Request) int {
+	s := 0
+	if r.URL != nil {
+		s += len(r.URL.String())
+	}
+
+	s += len(r.Method)
+	s += len(r.Proto)
+	for name, values := range r.Header {
+		s += len(name)
+		for _, value := range values {
+			s += len(value)
+		}
+	}
+	s += len(r.Host)
+
+	// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+	if r.ContentLength != -1 {
+		s += int(r.ContentLength)
+	}
+	return s
+}
+
+func sanitizeMethod(m string) string {
+	switch m {
+	case "GET", "get":
+		return "get"
+	case "PUT", "put":
+		return "put"
+	case "HEAD", "head":
+		return "head"
+	case "POST", "post":
+		return "post"
+	case "DELETE", "delete":
+		return "delete"
+	case "CONNECT", "connect":
+		return "connect"
+	case "OPTIONS", "options":
+		return "options"
+	case "NOTIFY", "notify":
+		return "notify"
+	default:
+		return strings.ToLower(m)
+	}
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, santizeCode will return 200, for consistency with behavior in
+// the stdlib.
+func sanitizeCode(s int) string {
+	switch s {
+	case 100:
+		return "100"
+	case 101:
+		return "101"
+
+	case 200, 0:
+		return "200"
+	case 201:
+		return "201"
+	case 202:
+		return "202"
+	case 203:
+		return "203"
+	case 204:
+		return "204"
+	case 205:
+		return "205"
+	case 206:
+		return "206"
+
+	case 300:
+		return "300"
+	case 301:
+		return "301"
+	case 302:
+		return "302"
+	case 304:
+		return "304"
+	case 305:
+		return "305"
+	case 307:
+		return "307"
+
+	case 400:
+		return "400"
+	case 401:
+		return "401"
+	case 402:
+		return "402"
+	case 403:
+		return "403"
+	case 404:
+		return "404"
+	case 405:
+		return "405"
+	case 406:
+		return "406"
+	case 407:
+		return "407"
+	case 408:
+		return "408"
+	case 409:
+		return "409"
+	case 410:
+		return "410"
+	case 411:
+		return "411"
+	case 412:
+		return "412"
+	case 413:
+		return "413"
+	case 414:
+		return "414"
+	case 415:
+		return "415"
+	case 416:
+		return "416"
+	case 417:
+		return "417"
+	case 418:
+		return "418"
+
+	case 500:
+		return "500"
+	case 501:
+		return "501"
+	case 502:
+		return "502"
+	case 503:
+		return "503"
+	case 504:
+		return "504"
+	case 505:
+		return "505"
+
+	case 428:
+		return "428"
+	case 429:
+		return "429"
+	case 431:
+		return "431"
+	case 511:
+		return "511"
+
+	default:
+		return strconv.Itoa(s)
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 32a3986..e422ef3 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -15,15 +15,18 @@ package prometheus
 
 import (
 	"bytes"
-	"errors"
 	"fmt"
-	"os"
+	"runtime"
 	"sort"
+	"strings"
 	"sync"
+	"unicode/utf8"
 
 	"github.com/golang/protobuf/proto"
 
 	dto "github.com/prometheus/client_model/go"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
 )
 
 const (
@@ -35,13 +38,14 @@ const (
 // DefaultRegisterer and DefaultGatherer are the implementations of the
 // Registerer and Gatherer interface a number of convenience functions in this
 // package act on. Initially, both variables point to the same Registry, which
-// has a process collector (see NewProcessCollector) and a Go collector (see
-// NewGoCollector) already registered. This approach to keep default instances
-// as global state mirrors the approach of other packages in the Go standard
-// library. Note that there are caveats. Change the variables with caution and
-// only if you understand the consequences. Users who want to avoid global state
-// altogether should not use the convenience function and act on custom
-// instances instead.
+// has a process collector (currently on Linux only, see NewProcessCollector)
+// and a Go collector (see NewGoCollector, in particular the note about
+// stop-the-world implication with Go versions older than 1.9) already
+// registered. This approach to keep default instances as global state mirrors
+// the approach of other packages in the Go standard library. Note that there
+// are caveats. Change the variables with caution and only if you understand the
+// consequences. Users who want to avoid global state altogether should not use
+// the convenience functions and act on custom instances instead.
 var (
 	defaultRegistry              = NewRegistry()
 	DefaultRegisterer Registerer = defaultRegistry
@@ -49,7 +53,7 @@ var (
 )
 
 func init() {
-	MustRegister(NewProcessCollector(os.Getpid(), ""))
+	MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
 	MustRegister(NewGoCollector())
 }
 
@@ -65,7 +69,8 @@ func NewRegistry() *Registry {
 
 // NewPedanticRegistry returns a registry that checks during collection if each
 // collected Metric is consistent with its reported Desc, and if the Desc has
-// actually been registered with the registry.
+// actually been registered with the registry. Unchecked Collectors (those whose
+// Describe methed does not yield any descriptors) are excluded from the check.
 //
 // Usually, a Registry will be happy as long as the union of all collected
 // Metrics is consistent and valid even if some metrics are not consistent with
@@ -80,7 +85,7 @@ func NewPedanticRegistry() *Registry {
 
 // Registerer is the interface for the part of a registry in charge of
 // registering and unregistering. Users of custom registries should use
-// Registerer as type for registration purposes (rather then the Registry type
+// Registerer as type for registration purposes (rather than the Registry type
 // directly). In that way, they are free to use custom Registerer implementation
 // (e.g. for testing purposes).
 type Registerer interface {
@@ -95,8 +100,13 @@ type Registerer interface {
 	// returned error is an instance of AlreadyRegisteredError, which
 	// contains the previously registered Collector.
 	//
-	// It is in general not safe to register the same Collector multiple
-	// times concurrently.
+	// A Collector whose Describe method does not yield any Desc is treated
+	// as unchecked. Registration will always succeed. No check for
+	// re-registering (see previous paragraph) is performed. Thus, the
+	// caller is responsible for not double-registering the same unchecked
+	// Collector, and for providing a Collector that will not cause
+	// inconsistent metrics on collection. (This would lead to scrape
+	// errors.)
 	Register(Collector) error
 	// MustRegister works like Register but registers any number of
 	// Collectors and panics upon the first registration that causes an
@@ -105,7 +115,9 @@ type Registerer interface {
 	// Unregister unregisters the Collector that equals the Collector passed
 	// in as an argument.  (Two Collectors are considered equal if their
 	// Describe method yields the same set of descriptors.) The function
-	// returns whether a Collector was unregistered.
+	// returns whether a Collector was unregistered. Note that an unchecked
+	// Collector cannot be unregistered (as its Describe method does not
+	// yield any descriptor).
 	//
 	// Note that even after unregistering, it will not be possible to
 	// register a new Collector that is inconsistent with the unregistered
@@ -123,15 +135,23 @@ type Registerer interface {
 type Gatherer interface {
 	// Gather calls the Collect method of the registered Collectors and then
 	// gathers the collected metrics into a lexicographically sorted slice
-	// of MetricFamily protobufs. Even if an error occurs, Gather attempts
-	// to gather as many metrics as possible. Hence, if a non-nil error is
-	// returned, the returned MetricFamily slice could be nil (in case of a
-	// fatal error that prevented any meaningful metric collection) or
-	// contain a number of MetricFamily protobufs, some of which might be
-	// incomplete, and some might be missing altogether. The returned error
-	// (which might be a MultiError) explains the details. In scenarios
-	// where complete collection is critical, the returned MetricFamily
-	// protobufs should be disregarded if the returned error is non-nil.
+	// of uniquely named MetricFamily protobufs. Gather ensures that the
+	// returned slice is valid and self-consistent so that it can be used
+	// for valid exposition. As an exception to the strict consistency
+	// requirements described for metric.Desc, Gather will tolerate
+	// different sets of label names for metrics of the same metric family.
+	//
+	// Even if an error occurs, Gather attempts to gather as many metrics as
+	// possible. Hence, if a non-nil error is returned, the returned
+	// MetricFamily slice could be nil (in case of a fatal error that
+	// prevented any meaningful metric collection) or contain a number of
+	// MetricFamily protobufs, some of which might be incomplete, and some
+	// might be missing altogether. The returned error (which might be a
+	// MultiError) explains the details. Note that this is mostly useful for
+	// debugging purposes. If the gathered protobufs are to be used for
+	// exposition in actual monitoring, it is almost always better to not
+	// expose an incomplete result and instead disregard the returned
+	// MetricFamily protobufs in case the returned error is non-nil.
 	Gather() ([]*dto.MetricFamily, error)
 }
 
@@ -152,38 +172,6 @@ func MustRegister(cs ...Collector) {
 	DefaultRegisterer.MustRegister(cs...)
 }
 
-// RegisterOrGet registers the provided Collector with the DefaultRegisterer and
-// returns the Collector, unless an equal Collector was registered before, in
-// which case that Collector is returned.
-//
-// Deprecated: RegisterOrGet is merely a convenience function for the
-// implementation as described in the documentation for
-// AlreadyRegisteredError. As the use case is relatively rare, this function
-// will be removed in a future version of this package to clean up the
-// namespace.
-func RegisterOrGet(c Collector) (Collector, error) {
-	if err := Register(c); err != nil {
-		if are, ok := err.(AlreadyRegisteredError); ok {
-			return are.ExistingCollector, nil
-		}
-		return nil, err
-	}
-	return c, nil
-}
-
-// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning
-// an error.
-//
-// Deprecated: This is deprecated for the same reason RegisterOrGet is. See
-// there for details.
-func MustRegisterOrGet(c Collector) Collector {
-	c, err := RegisterOrGet(c)
-	if err != nil {
-		panic(err)
-	}
-	return c
-}
-
 // Unregister removes the registration of the provided Collector from the
 // DefaultRegisterer.
 //
@@ -201,25 +189,6 @@ func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
 	return gf()
 }
 
-// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that
-// gathers from the previous DefaultGatherers but then merges the MetricFamily
-// protobufs returned from the provided hook function with the MetricFamily
-// protobufs returned from the original DefaultGatherer.
-//
-// Deprecated: This function manipulates the DefaultGatherer variable. Consider
-// the implications, i.e. don't do this concurrently with any uses of the
-// DefaultGatherer. In the rare cases where you need to inject MetricFamily
-// protobufs directly, it is recommended to use a custom Registry and combine it
-// with a custom Gatherer using the Gatherers type (see
-// there). SetMetricFamilyInjectionHook only exists for compatibility reasons
-// with previous versions of this package.
-func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
-	DefaultGatherer = Gatherers{
-		DefaultGatherer,
-		GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }),
-	}
-}
-
 // AlreadyRegisteredError is returned by the Register method if the Collector to
 // be registered has already been registered before, or a different Collector
 // that collects the same metrics has been registered before. Registration fails
@@ -252,6 +221,13 @@ func (errs MultiError) Error() string {
 	return buf.String()
 }
 
+// Append appends the provided error if it is not nil.
+func (errs *MultiError) Append(err error) {
+	if err != nil {
+		*errs = append(*errs, err)
+	}
+}
+
 // MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
 // contained error as error if len(errs is 1). In all other cases, it returns
 // the MultiError directly. This is helpful for returning a MultiError in a way
@@ -276,6 +252,7 @@ type Registry struct {
 	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
 	descIDs               map[uint64]struct{}
 	dimHashesByName       map[string]uint64
+	uncheckedCollectors   []Collector
 	pedanticChecksEnabled bool
 }
 
@@ -293,8 +270,13 @@ func (r *Registry) Register(c Collector) error {
 		close(descChan)
 	}()
 	r.mtx.Lock()
-	defer r.mtx.Unlock()
-	// Coduct various tests...
+	defer func() {
+		// Drain channel in case of premature return to not leak a goroutine.
+		for range descChan {
+		}
+		r.mtx.Unlock()
+	}()
+	// Conduct various tests...
 	for desc := range descChan {
 
 		// Is the descriptor valid at all?
@@ -333,9 +315,10 @@ func (r *Registry) Register(c Collector) error {
 			}
 		}
 	}
-	// Did anything happen at all?
+	// A Collector yielding no Desc at all is considered unchecked.
 	if len(newDescIDs) == 0 {
-		return errors.New("collector has no descriptors")
+		r.uncheckedCollectors = append(r.uncheckedCollectors, c)
+		return nil
 	}
 	if existing, exists := r.collectorsByID[collectorID]; exists {
 		return AlreadyRegisteredError{
@@ -409,31 +392,25 @@ func (r *Registry) MustRegister(cs ...Collector) {
 // Gather implements Gatherer.
 func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
 	var (
-		metricChan        = make(chan Metric, capMetricChan)
-		metricHashes      = map[uint64]struct{}{}
-		dimHashes         = map[string]uint64{}
-		wg                sync.WaitGroup
-		errs              MultiError          // The collected errors to return in the end.
-		registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+		checkedMetricChan   = make(chan Metric, capMetricChan)
+		uncheckedMetricChan = make(chan Metric, capMetricChan)
+		metricHashes        = map[uint64]struct{}{}
+		wg                  sync.WaitGroup
+		errs                MultiError          // The collected errors to return in the end.
+		registeredDescIDs   map[uint64]struct{} // Only used for pedantic checks
 	)
 
 	r.mtx.RLock()
+	goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
 	metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
-
-	// Scatter.
-	// (Collectors could be complex and slow, so we call them all at once.)
-	wg.Add(len(r.collectorsByID))
-	go func() {
-		wg.Wait()
-		close(metricChan)
-	}()
+	checkedCollectors := make(chan Collector, len(r.collectorsByID))
+	uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
 	for _, collector := range r.collectorsByID {
-		go func(collector Collector) {
-			defer wg.Done()
-			collector.Collect(metricChan)
-		}(collector)
+		checkedCollectors <- collector
+	}
+	for _, collector := range r.uncheckedCollectors {
+		uncheckedCollectors <- collector
 	}
-
 	// In case pedantic checks are enabled, we have to copy the map before
 	// giving up the RLock.
 	if r.pedanticChecksEnabled {
@@ -442,127 +419,226 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
 			registeredDescIDs[id] = struct{}{}
 		}
 	}
-
 	r.mtx.RUnlock()
 
-	// Drain metricChan in case of premature return.
+	wg.Add(goroutineBudget)
+
+	collectWorker := func() {
+		for {
+			select {
+			case collector := <-checkedCollectors:
+				collector.Collect(checkedMetricChan)
+			case collector := <-uncheckedCollectors:
+				collector.Collect(uncheckedMetricChan)
+			default:
+				return
+			}
+			wg.Done()
+		}
+	}
+
+	// Start the first worker now to make sure at least one is running.
+	go collectWorker()
+	goroutineBudget--
+
+	// Close checkedMetricChan and uncheckedMetricChan once all collectors
+	// are collected.
+	go func() {
+		wg.Wait()
+		close(checkedMetricChan)
+		close(uncheckedMetricChan)
+	}()
+
+	// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
 	defer func() {
-		for _ = range metricChan {
+		if checkedMetricChan != nil {
+			for range checkedMetricChan {
+			}
+		}
+		if uncheckedMetricChan != nil {
+			for range uncheckedMetricChan {
+			}
 		}
 	}()
 
-	// Gather.
-	for metric := range metricChan {
-		// This could be done concurrently, too, but it required locking
-		// of metricFamiliesByName (and of metricHashes if checks are
-		// enabled). Most likely not worth it.
-		desc := metric.Desc()
-		dtoMetric := &dto.Metric{}
-		if err := metric.Write(dtoMetric); err != nil {
-			errs = append(errs, fmt.Errorf(
-				"error collecting metric %v: %s", desc, err,
+	// Copy the channel references so we can nil them out later to remove
+	// them from the select statements below.
+	cmc := checkedMetricChan
+	umc := uncheckedMetricChan
+
+	for {
+		select {
+		case metric, ok := <-cmc:
+			if !ok {
+				cmc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				registeredDescIDs,
 			))
-			continue
-		}
-		metricFamily, ok := metricFamiliesByName[desc.fqName]
-		if ok {
-			if metricFamily.GetHelp() != desc.help {
-				errs = append(errs, fmt.Errorf(
-					"collected metric %s %s has help %q but should have %q",
-					desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
-				))
-				continue
+		case metric, ok := <-umc:
+			if !ok {
+				umc = nil
+				break
 			}
-			// TODO(beorn7): Simplify switch once Desc has type.
-			switch metricFamily.GetType() {
-			case dto.MetricType_COUNTER:
-				if dtoMetric.Counter == nil {
-					errs = append(errs, fmt.Errorf(
-						"collected metric %s %s should be a Counter",
-						desc.fqName, dtoMetric,
-					))
-					continue
-				}
-			case dto.MetricType_GAUGE:
-				if dtoMetric.Gauge == nil {
-					errs = append(errs, fmt.Errorf(
-						"collected metric %s %s should be a Gauge",
-						desc.fqName, dtoMetric,
-					))
-					continue
-				}
-			case dto.MetricType_SUMMARY:
-				if dtoMetric.Summary == nil {
-					errs = append(errs, fmt.Errorf(
-						"collected metric %s %s should be a Summary",
-						desc.fqName, dtoMetric,
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				nil,
+			))
+		default:
+			if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
+				// All collectors are already being worked on or
+				// we have already as many goroutines started as
+				// there are collectors. Do the same as above,
+				// just without the default.
+				select {
+				case metric, ok := <-cmc:
+					if !ok {
+						cmc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						registeredDescIDs,
 					))
-					continue
-				}
-			case dto.MetricType_UNTYPED:
-				if dtoMetric.Untyped == nil {
-					errs = append(errs, fmt.Errorf(
-						"collected metric %s %s should be Untyped",
-						desc.fqName, dtoMetric,
+				case metric, ok := <-umc:
+					if !ok {
+						umc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						nil,
 					))
-					continue
 				}
-			case dto.MetricType_HISTOGRAM:
-				if dtoMetric.Histogram == nil {
-					errs = append(errs, fmt.Errorf(
-						"collected metric %s %s should be a Histogram",
-						desc.fqName, dtoMetric,
-					))
-					continue
-				}
-			default:
-				panic("encountered MetricFamily with invalid type")
+				break
 			}
-		} else {
-			metricFamily = &dto.MetricFamily{}
-			metricFamily.Name = proto.String(desc.fqName)
-			metricFamily.Help = proto.String(desc.help)
-			// TODO(beorn7): Simplify switch once Desc has type.
-			switch {
-			case dtoMetric.Gauge != nil:
-				metricFamily.Type = dto.MetricType_GAUGE.Enum()
-			case dtoMetric.Counter != nil:
-				metricFamily.Type = dto.MetricType_COUNTER.Enum()
-			case dtoMetric.Summary != nil:
-				metricFamily.Type = dto.MetricType_SUMMARY.Enum()
-			case dtoMetric.Untyped != nil:
-				metricFamily.Type = dto.MetricType_UNTYPED.Enum()
-			case dtoMetric.Histogram != nil:
-				metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
-			default:
-				errs = append(errs, fmt.Errorf(
-					"empty metric collected: %s", dtoMetric,
-				))
-				continue
-			}
-			metricFamiliesByName[desc.fqName] = metricFamily
+			// Start more workers.
+			go collectWorker()
+			goroutineBudget--
+			runtime.Gosched()
+		}
+		// Once both checkedMetricChan and uncheckdMetricChan are closed
+		// and drained, the contraption above will nil out cmc and umc,
+		// and then we can leave the collect loop here.
+		if cmc == nil && umc == nil {
+			break
 		}
-		if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
-			errs = append(errs, err)
-			continue
+	}
+	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// processMetric is an internal helper method only used by the Gather method.
+func processMetric(
+	metric Metric,
+	metricFamiliesByName map[string]*dto.MetricFamily,
+	metricHashes map[uint64]struct{},
+	registeredDescIDs map[uint64]struct{},
+) error {
+	desc := metric.Desc()
+	// Wrapped metrics collected by an unchecked Collector can have an
+	// invalid Desc.
+	if desc.err != nil {
+		return desc.err
+	}
+	dtoMetric := &dto.Metric{}
+	if err := metric.Write(dtoMetric); err != nil {
+		return fmt.Errorf("error collecting metric %v: %s", desc, err)
+	}
+	metricFamily, ok := metricFamiliesByName[desc.fqName]
+	if ok { // Existing name.
+		if metricFamily.GetHelp() != desc.help {
+			return fmt.Errorf(
+				"collected metric %s %s has help %q but should have %q",
+				desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+			)
 		}
-		if r.pedanticChecksEnabled {
-			// Is the desc registered at all?
-			if _, exist := registeredDescIDs[desc.id]; !exist {
-				errs = append(errs, fmt.Errorf(
-					"collected metric %s %s with unregistered descriptor %s",
-					metricFamily.GetName(), dtoMetric, desc,
-				))
-				continue
+		// TODO(beorn7): Simplify switch once Desc has type.
+		switch metricFamily.GetType() {
+		case dto.MetricType_COUNTER:
+			if dtoMetric.Counter == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Counter",
+					desc.fqName, dtoMetric,
+				)
 			}
-			if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
-				errs = append(errs, err)
-				continue
+		case dto.MetricType_GAUGE:
+			if dtoMetric.Gauge == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Gauge",
+					desc.fqName, dtoMetric,
+				)
 			}
+		case dto.MetricType_SUMMARY:
+			if dtoMetric.Summary == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Summary",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_UNTYPED:
+			if dtoMetric.Untyped == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be Untyped",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_HISTOGRAM:
+			if dtoMetric.Histogram == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Histogram",
+					desc.fqName, dtoMetric,
+				)
+			}
+		default:
+			panic("encountered MetricFamily with invalid type")
+		}
+	} else { // New name.
+		metricFamily = &dto.MetricFamily{}
+		metricFamily.Name = proto.String(desc.fqName)
+		metricFamily.Help = proto.String(desc.help)
+		// TODO(beorn7): Simplify switch once Desc has type.
+		switch {
+		case dtoMetric.Gauge != nil:
+			metricFamily.Type = dto.MetricType_GAUGE.Enum()
+		case dtoMetric.Counter != nil:
+			metricFamily.Type = dto.MetricType_COUNTER.Enum()
+		case dtoMetric.Summary != nil:
+			metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+		case dtoMetric.Untyped != nil:
+			metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+		case dtoMetric.Histogram != nil:
+			metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+		default:
+			return fmt.Errorf("empty metric collected: %s", dtoMetric)
+		}
+		if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
+			return err
 		}
-		metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+		metricFamiliesByName[desc.fqName] = metricFamily
 	}
-	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+	if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
+		return err
+	}
+	if registeredDescIDs != nil {
+		// Is the desc registered at all?
+		if _, exist := registeredDescIDs[desc.id]; !exist {
+			return fmt.Errorf(
+				"collected metric %s %s with unregistered descriptor %s",
+				metricFamily.GetName(), dtoMetric, desc,
+			)
+		}
+		if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+			return err
+		}
+	}
+	metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+	return nil
 }
 
 // Gatherers is a slice of Gatherer instances that implements the Gatherer
@@ -588,7 +664,6 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
 	var (
 		metricFamiliesByName = map[string]*dto.MetricFamily{}
 		metricHashes         = map[uint64]struct{}{}
-		dimHashes            = map[string]uint64{}
 		errs                 MultiError // The collected errors to return in the end.
 	)
 
@@ -625,10 +700,14 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
 				existingMF.Name = mf.Name
 				existingMF.Help = mf.Help
 				existingMF.Type = mf.Type
+				if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
+					errs = append(errs, err)
+					continue
+				}
 				metricFamiliesByName[mf.GetName()] = existingMF
 			}
 			for _, m := range mf.Metric {
-				if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
+				if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
 					errs = append(errs, err)
 					continue
 				}
@@ -636,88 +715,80 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
 			}
 		}
 	}
-	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
 }
 
-// metricSorter is a sortable slice of *dto.Metric.
-type metricSorter []*dto.Metric
-
-func (s metricSorter) Len() int {
-	return len(s)
-}
-
-func (s metricSorter) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
-
-func (s metricSorter) Less(i, j int) bool {
-	if len(s[i].Label) != len(s[j].Label) {
-		// This should not happen. The metrics are
-		// inconsistent. However, we have to deal with the fact, as
-		// people might use custom collectors or metric family injection
-		// to create inconsistent metrics. So let's simply compare the
-		// number of labels in this case. That will still yield
-		// reproducible sorting.
-		return len(s[i].Label) < len(s[j].Label)
-	}
-	for n, lp := range s[i].Label {
-		vi := lp.GetValue()
-		vj := s[j].Label[n].GetValue()
-		if vi != vj {
-			return vi < vj
+// checkSuffixCollisions checks for collisions with the “magic” suffixes the
+// Prometheus text format and the internal metric representation of the
+// Prometheus server add while flattening Summaries and Histograms.
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
+	var (
+		newName              = mf.GetName()
+		newType              = mf.GetType()
+		newNameWithoutSuffix = ""
+	)
+	switch {
+	case strings.HasSuffix(newName, "_count"):
+		newNameWithoutSuffix = newName[:len(newName)-6]
+	case strings.HasSuffix(newName, "_sum"):
+		newNameWithoutSuffix = newName[:len(newName)-4]
+	case strings.HasSuffix(newName, "_bucket"):
+		newNameWithoutSuffix = newName[:len(newName)-7]
+	}
+	if newNameWithoutSuffix != "" {
+		if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
+			switch existingMF.GetType() {
+			case dto.MetricType_SUMMARY:
+				if !strings.HasSuffix(newName, "_bucket") {
+					return fmt.Errorf(
+						"collected metric named %q collides with previously collected summary named %q",
+						newName, newNameWithoutSuffix,
+					)
+				}
+			case dto.MetricType_HISTOGRAM:
+				return fmt.Errorf(
+					"collected metric named %q collides with previously collected histogram named %q",
+					newName, newNameWithoutSuffix,
+				)
+			}
 		}
 	}
-
-	// We should never arrive here. Multiple metrics with the same
-	// label set in the same scrape will lead to undefined ingestion
-	// behavior. However, as above, we have to provide stable sorting
-	// here, even for inconsistent metrics. So sort equal metrics
-	// by their timestamp, with missing timestamps (implying "now")
-	// coming last.
-	if s[i].TimestampMs == nil {
-		return false
-	}
-	if s[j].TimestampMs == nil {
-		return true
-	}
-	return s[i].GetTimestampMs() < s[j].GetTimestampMs()
-}
-
-// normalizeMetricFamilies returns a MetricFamily slice whith empty
-// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
-// the slice, with the contained Metrics sorted within each MetricFamily.
-func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
-	for _, mf := range metricFamiliesByName {
-		sort.Sort(metricSorter(mf.Metric))
-	}
-	names := make([]string, 0, len(metricFamiliesByName))
-	for name, mf := range metricFamiliesByName {
-		if len(mf.Metric) > 0 {
-			names = append(names, name)
+	if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
+		if _, ok := mfs[newName+"_count"]; ok {
+			return fmt.Errorf(
+				"collected histogram or summary named %q collides with previously collected metric named %q",
+				newName, newName+"_count",
+			)
+		}
+		if _, ok := mfs[newName+"_sum"]; ok {
+			return fmt.Errorf(
+				"collected histogram or summary named %q collides with previously collected metric named %q",
+				newName, newName+"_sum",
+			)
 		}
 	}
-	sort.Strings(names)
-	result := make([]*dto.MetricFamily, 0, len(names))
-	for _, name := range names {
-		result = append(result, metricFamiliesByName[name])
+	if newType == dto.MetricType_HISTOGRAM {
+		if _, ok := mfs[newName+"_bucket"]; ok {
+			return fmt.Errorf(
+				"collected histogram named %q collides with previously collected metric named %q",
+				newName, newName+"_bucket",
+			)
+		}
 	}
-	return result
+	return nil
 }
 
 // checkMetricConsistency checks if the provided Metric is consistent with the
-// provided MetricFamily. It also hashed the Metric labels and the MetricFamily
-// name. If the resulting hash is alread in the provided metricHashes, an error
-// is returned. If not, it is added to metricHashes. The provided dimHashes maps
-// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
-// doesn't yet contain a hash for the provided MetricFamily, it is
-// added. Otherwise, an error is returned if the existing dimHashes in not equal
-// the calculated dimHash.
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes.
 func checkMetricConsistency(
 	metricFamily *dto.MetricFamily,
 	dtoMetric *dto.Metric,
 	metricHashes map[uint64]struct{},
-	dimHashes map[string]uint64,
 ) error {
+	name := metricFamily.GetName()
+
 	// Type consistency with metric family.
 	if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
 		metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
@@ -725,41 +796,59 @@ func checkMetricConsistency(
 		metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
 		metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
 		return fmt.Errorf(
-			"collected metric %s %s is not a %s",
-			metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+			"collected metric %q { %s} is not a %s",
+			name, dtoMetric, metricFamily.GetType(),
 		)
 	}
 
-	// Is the metric unique (i.e. no other metric with the same name and the same label values)?
+	previousLabelName := ""
+	for _, labelPair := range dtoMetric.GetLabel() {
+		labelName := labelPair.GetName()
+		if labelName == previousLabelName {
+			return fmt.Errorf(
+				"collected metric %q { %s} has two or more labels with the same name: %s",
+				name, dtoMetric, labelName,
+			)
+		}
+		if !checkLabelName(labelName) {
+			return fmt.Errorf(
+				"collected metric %q { %s} has a label with an invalid name: %s",
+				name, dtoMetric, labelName,
... 46584 lines suppressed ...


[camel-k] 02/02: use a shorter refresh period in the CLI

Posted by lb...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

lburgazzoli pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel-k.git

commit 980baca145d7169853c1247472b2cb39ee110a72
Author: nferraro <ni...@gmail.com>
AuthorDate: Fri Oct 26 23:37:35 2018 +0200

    use a shorter refresh period in the CLI
---
 Gopkg.lock                                                     |  7 ++++---
 Gopkg.toml                                                     | 10 +++++-----
 pkg/client/cmd/root.go                                         |  8 ++++++--
 .../operator-framework/operator-sdk/pkg/k8sclient/client.go    |  8 +++++++-
 4 files changed, 22 insertions(+), 11 deletions(-)

diff --git a/Gopkg.lock b/Gopkg.lock
index d82a832..b96c088 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -278,7 +278,8 @@
   version = "v3.9.0"
 
 [[projects]]
-  digest = "1:ce6ed8da0816d327c1d586722a13c05a3fd26cf9b8649cdcda300c16f026feca"
+  branch = "v0.0.7-custom"
+  digest = "1:8f0bbe43f11ba62deaae454383b93f9b8b58a1e4b68f9b39ee93f4a9633ec52a"
   name = "github.com/operator-framework/operator-sdk"
   packages = [
     "pkg/k8sclient",
@@ -288,8 +289,8 @@
     "version",
   ]
   pruneopts = "NUT"
-  revision = "e5a0ab096e1a7c0e6b937d2b41707eccb82c3c77"
-  version = "v0.0.7"
+  revision = "450f0742059bb08f2f0c041154435e340a7829a2"
+  source = "https://github.com/nicolaferraro/operator-sdk.git"
 
 [[projects]]
   branch = "master"
diff --git a/Gopkg.toml b/Gopkg.toml
index 4102eb3..c627513 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -48,9 +48,9 @@ required = [
 
 [[constraint]]
   name = "github.com/operator-framework/operator-sdk"
-  ## Using fork to customize the Kubernetes rest config
-  #source = "https://github.com/nicolaferraro/operator-sdk.git"
-  #branch = "custom-init"
-  # The version rule is used for a specific release and the master branch for in between releases.
+  # Using fork to customize the Kubernetes rest config
+  source = "https://github.com/nicolaferraro/operator-sdk.git"
+  branch = "v0.0.7-custom"
+  ## The version rule is used for a specific release and the master branch for in between releases.
   #branch = "master"
-  version = "=v0.0.7"
+  #version = "=v0.0.7"
diff --git a/pkg/client/cmd/root.go b/pkg/client/cmd/root.go
index 69920e7..58cc58a 100644
--- a/pkg/client/cmd/root.go
+++ b/pkg/client/cmd/root.go
@@ -18,9 +18,10 @@ limitations under the License.
 package cmd
 
 import (
-	"os"
-
 	"context"
+	"github.com/operator-framework/operator-sdk/pkg/k8sclient"
+	"os"
+	"time"
 
 	"github.com/apache/camel-k/pkg/util/kubernetes"
 	"github.com/pkg/errors"
@@ -66,6 +67,9 @@ func NewKamelCommand(ctx context.Context) (*cobra.Command, error) {
 		cmd.Flag("namespace").Value.Set(current)
 	}
 
+	// Let's use a fast refresh period when running with the CLI
+	k8sclient.ResetCacheEvery(2 * time.Second)
+
 	// Initialize the Kubernetes client to allow using the operator-sdk
 	err := kubernetes.InitKubeClient(options.KubeConfig)
 	if err != nil {
diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/k8sclient/client.go b/vendor/github.com/operator-framework/operator-sdk/pkg/k8sclient/client.go
index 466bd3b..d9f9b0b 100644
--- a/vendor/github.com/operator-framework/operator-sdk/pkg/k8sclient/client.go
+++ b/vendor/github.com/operator-framework/operator-sdk/pkg/k8sclient/client.go
@@ -43,6 +43,7 @@ var (
 	// this stores the singleton in a package local
 	singletonFactory *resourceClientFactory
 	once             sync.Once
+	cacheResetPeriod = 1 * time.Minute
 )
 
 // Private constructor for once.Do
@@ -63,7 +64,7 @@ func newSingletonFactory() {
 		dynamicClient: dynamicClient,
 		restMapper:    restMapper,
 	}
-	singletonFactory.runBackgroundCacheReset(1 * time.Minute)
+	singletonFactory.runBackgroundCacheReset(cacheResetPeriod)
 }
 
 // GetResourceClient returns the resource client using a singleton factory
@@ -106,6 +107,11 @@ func (c *resourceClientFactory) GetResourceClient(apiVersion, kind, namespace st
 	return resourceClient, pluralName, nil
 }
 
+// ResetCacheEvery sets the period of refresh of the client caches
+func ResetCacheEvery(duration time.Duration) {
+	cacheResetPeriod = duration
+}
+
 // apiResource consults the REST mapper to translate an <apiVersion, kind, namespace> tuple to a GroupVersionResource
 func gvkToGVR(gvk schema.GroupVersionKind, restMapper *restmapper.DeferredDiscoveryRESTMapper) (*schema.GroupVersionResource, error) {
 	mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)