From 79b03e910475bdaa7ac4470645665053bd55ec1e Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Tue, 29 Oct 2019 10:31:02 +0100 Subject: [PATCH] update to csi-test v3.0.0 v3.0.0 fixed "go mod" support by adding the /v3 suffix to import paths (https://github.com/kubernetes-csi/csi-test/pull/232). dep currently does not support that (https://github.com/golang/dep/issues/1962#issuecomment-540079542) and can only import that with symlink workarounds in csi-test and by disabling pruning. --- Gopkg.lock | 20 +- Gopkg.toml | 7 +- test/e2e/storage/sanity.go | 8 +- .../csi-test/.github/PULL_REQUEST_TEMPLATE.md | 40 + .../kubernetes-csi/csi-test/.gitignore | 4 + .../kubernetes-csi/csi-test/.prow.sh | 25 + .../kubernetes-csi/csi-test/CHANGELOG-2.1.md | 16 + .../kubernetes-csi/csi-test/CHANGELOG-2.2.md | 7 + .../kubernetes-csi/csi-test/CHANGELOG-2.3.md | 18 + .../kubernetes-csi/csi-test/CONTRIBUTING.md | 22 + .../kubernetes-csi/csi-test/Makefile | 34 + .../github.com/kubernetes-csi/csi-test/OWNERS | 6 + .../kubernetes-csi/csi-test/README.md | 47 + .../kubernetes-csi/csi-test/SECURITY_CONTACTS | 14 + .../csi-test/cmd/csi-sanity/Makefile | 61 + .../csi-test/cmd/csi-sanity/README.md | 60 + .../csi-test/cmd/mock-driver/Dockerfile | 9 + .../csi-test/cmd/mock-driver/main.go | 184 +++ .../csi-test/code-of-conduct.md | 3 + .../csi-test/driver/driver-controller.go | 110 ++ .../csi-test/driver/driver-node.go | 109 ++ .../kubernetes-csi/csi-test/driver/driver.go | 305 +++++ .../csi-test/driver/driver.mock.go | 380 ++++++ .../kubernetes-csi/csi-test/driver/mock.go | 89 ++ .../github.com/kubernetes-csi/csi-test/go.mod | 20 + .../github.com/kubernetes-csi/csi-test/go.sum | 80 ++ .../kubernetes-csi/csi-test/hack/e2e.sh | 116 ++ .../kubernetes-csi/csi-test/mock/README.md | 22 + .../csi-test/mock/cache/SnapshotCache.go | 89 ++ .../csi-test/mock/mocksecret.yaml | 18 + .../csi-test/mock/service/controller.go | 678 +++++++++++ .../csi-test/mock/service/identity.go | 61 + .../csi-test/mock/service/node.go | 392 ++++++ .../csi-test/mock/service/service.go | 187 +++ .../csi-test/pkg/sanity/controller.go | 16 +- .../csi-test/pkg/sanity/sanity.go | 42 +- .../csi-test/release-tools/.prow.sh | 7 + .../csi-test/release-tools/CONTRIBUTING.md | 31 + .../csi-test/release-tools/OWNERS | 11 + .../csi-test/release-tools/README.md | 162 +++ .../csi-test/release-tools/RELEASE.md | 5 + .../csi-test/release-tools/SECURITY_CONTACTS | 14 + .../csi-test/release-tools/build.make | 218 ++++ .../csi-test/release-tools/code-of-conduct.md | 3 + .../csi-test/release-tools/filter-junit.go | 133 ++ .../release-tools/go-get-kubernetes.sh | 104 ++ .../csi-test/release-tools/prow.sh | 1071 +++++++++++++++++ .../csi-test/release-tools/travis.yml | 19 + .../csi-test/release-tools/util.sh | 148 +++ .../release-tools/verify-shellcheck.sh | 146 +++ .../csi-test/release-tools/verify-subtree.sh | 41 + .../csi-test/v3/driver/driver-controller.go | 1 + .../csi-test/v3/driver/driver-node.go | 1 + .../csi-test/v3/driver/driver.go | 1 + .../csi-test/v3/driver/driver.mock.go | 1 + .../kubernetes-csi/csi-test/v3/driver/mock.go | 1 + .../csi-test/v3/mock/cache/SnapshotCache.go | 1 + .../csi-test/v3/mock/service/controller.go | 1 + .../csi-test/v3/mock/service/identity.go | 1 + .../csi-test/v3/mock/service/node.go | 1 + .../csi-test/v3/mock/service/service.go | 1 + .../csi-test/v3/pkg/sanity/cleanup.go | 1 + .../csi-test/v3/pkg/sanity/controller.go | 1 + .../csi-test/v3/pkg/sanity/identity.go | 1 + .../csi-test/v3/pkg/sanity/node.go | 1 + 
.../csi-test/v3/pkg/sanity/sanity.go | 1 + .../csi-test/v3/pkg/sanity/tests.go | 1 + .../csi-test/v3/pkg/sanity/util.go | 1 + .../csi-test/v3/utils/grpcutil.go | 1 + .../csi-test/v3/utils/safegoroutinetester.go | 1 + 70 files changed, 5400 insertions(+), 31 deletions(-) create mode 100644 vendor/github.com/kubernetes-csi/csi-test/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 vendor/github.com/kubernetes-csi/csi-test/.gitignore create mode 100755 vendor/github.com/kubernetes-csi/csi-test/.prow.sh create mode 100644 vendor/github.com/kubernetes-csi/csi-test/CHANGELOG-2.1.md create mode 100644 vendor/github.com/kubernetes-csi/csi-test/CHANGELOG-2.2.md create mode 100644 vendor/github.com/kubernetes-csi/csi-test/CHANGELOG-2.3.md create mode 100644 vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md create mode 100644 vendor/github.com/kubernetes-csi/csi-test/Makefile create mode 100644 vendor/github.com/kubernetes-csi/csi-test/OWNERS create mode 100644 vendor/github.com/kubernetes-csi/csi-test/README.md create mode 100644 vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS create mode 100644 vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/Makefile create mode 100644 vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/README.md create mode 100644 vendor/github.com/kubernetes-csi/csi-test/cmd/mock-driver/Dockerfile create mode 100644 vendor/github.com/kubernetes-csi/csi-test/cmd/mock-driver/main.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/code-of-conduct.md create mode 100644 vendor/github.com/kubernetes-csi/csi-test/driver/driver-controller.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/driver/driver-node.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/driver/driver.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/driver/mock.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/go.mod create mode 100644 vendor/github.com/kubernetes-csi/csi-test/go.sum create mode 100755 vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/README.md create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go create mode 100755 vendor/github.com/kubernetes-csi/csi-test/release-tools/.prow.sh create mode 100644 vendor/github.com/kubernetes-csi/csi-test/release-tools/CONTRIBUTING.md create mode 100644 vendor/github.com/kubernetes-csi/csi-test/release-tools/OWNERS create mode 100644 vendor/github.com/kubernetes-csi/csi-test/release-tools/README.md create mode 100644 vendor/github.com/kubernetes-csi/csi-test/release-tools/RELEASE.md create mode 100644 vendor/github.com/kubernetes-csi/csi-test/release-tools/SECURITY_CONTACTS create mode 100644 vendor/github.com/kubernetes-csi/csi-test/release-tools/build.make create mode 100644 vendor/github.com/kubernetes-csi/csi-test/release-tools/code-of-conduct.md create mode 100644 vendor/github.com/kubernetes-csi/csi-test/release-tools/filter-junit.go create mode 100755 
vendor/github.com/kubernetes-csi/csi-test/release-tools/go-get-kubernetes.sh create mode 100755 vendor/github.com/kubernetes-csi/csi-test/release-tools/prow.sh create mode 100644 vendor/github.com/kubernetes-csi/csi-test/release-tools/travis.yml create mode 100755 vendor/github.com/kubernetes-csi/csi-test/release-tools/util.sh create mode 100755 vendor/github.com/kubernetes-csi/csi-test/release-tools/verify-shellcheck.sh create mode 100755 vendor/github.com/kubernetes-csi/csi-test/release-tools/verify-subtree.sh create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/driver/driver-controller.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/driver/driver-node.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/driver/driver.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/driver/driver.mock.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/driver/mock.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/mock/cache/SnapshotCache.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/mock/service/controller.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/mock/service/identity.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/mock/service/node.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/mock/service/service.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/cleanup.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/controller.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/identity.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/node.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/sanity.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/tests.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/util.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/utils/grpcutil.go create mode 120000 vendor/github.com/kubernetes-csi/csi-test/v3/utils/safegoroutinetester.go diff --git a/Gopkg.lock b/Gopkg.lock index f57045ba9d..731e70c027 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -252,15 +252,16 @@ version = "v0.6.1" [[projects]] - digest = "1:652c938b1510b21f775fcea8cadd523a9f7706f117e08338e9efdba7edd6662f" + branch = "fix-semver" + digest = "1:327fe26f8ea607baab0abf487b33ad9a7b3c91d08480f908f3738ea4636d3d89" name = "github.com/kubernetes-csi/csi-test" packages = [ - "pkg/sanity", - "utils", + "v3/pkg/sanity", + "v3/utils", ] - pruneopts = "UT" - revision = "82b05190c167c52bb6d5aaf2e1d7c833fa539783" - version = "v2.2.0" + pruneopts = "T" + revision = "2bc889d3e5e8a5eed24aec80be9359fe4e36919c" + source = "github.com/pohly/csi-test" [[projects]] digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" @@ -1265,9 +1266,11 @@ "github.com/golang/protobuf/proto", "github.com/google/uuid", "github.com/kubernetes-csi/csi-lib-utils/protosanitizer", - "github.com/kubernetes-csi/csi-test/pkg/sanity", - "github.com/kubernetes-csi/csi-test/utils", + "github.com/kubernetes-csi/csi-test/v3/pkg/sanity", + "github.com/kubernetes-csi/csi-test/v3/utils", "github.com/onsi/ginkgo", + "github.com/onsi/ginkgo/config", + "github.com/onsi/ginkgo/reporters", "github.com/onsi/gomega", "github.com/pkg/errors", "golang.org/x/net/context", @@ -1299,6 +1302,7 @@ "k8s.io/kubernetes/pkg/volume/util/hostutil", "k8s.io/kubernetes/test/e2e/framework", 
"k8s.io/kubernetes/test/e2e/framework/config", + "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper", "k8s.io/kubernetes/test/e2e/framework/pod", "k8s.io/kubernetes/test/e2e/framework/podlogs", "k8s.io/kubernetes/test/e2e/framework/ssh", diff --git a/Gopkg.toml b/Gopkg.toml index 22c86e6bc3..1d3f44c1ee 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -24,6 +24,10 @@ go-tests = true unused-packages = true + [[prune.project]] + name = "github.com/kubernetes-csi/csi-test" + unused-packages = false + [[constraint]] name = "github.com/container-storage-interface/spec" version = "1.0.0" @@ -42,7 +46,8 @@ [[constraint]] name = "github.com/kubernetes-csi/csi-test" - version = "2.1.0" + source = "github.com/pohly/csi-test" + branch = "fix-semver" # We have to select the right version by name because of the "kubernetes-" prefix, dep doesn't # recognize these as normal releases. Upstream is considering to change the tagging, diff --git a/test/e2e/storage/sanity.go b/test/e2e/storage/sanity.go index 1d7779b7b7..2390e7e14e 100644 --- a/test/e2e/storage/sanity.go +++ b/test/e2e/storage/sanity.go @@ -31,8 +31,8 @@ import ( "time" "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/kubernetes-csi/csi-test/pkg/sanity" - sanityutils "github.com/kubernetes-csi/csi-test/utils" + "github.com/kubernetes-csi/csi-test/v3/pkg/sanity" + sanityutils "github.com/kubernetes-csi/csi-test/v3/utils" "google.golang.org/grpc" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" @@ -43,9 +43,9 @@ import ( clientset "k8s.io/client-go/kubernetes" clientexec "k8s.io/client-go/util/exec" "k8s.io/kubernetes/test/e2e/framework" - testutils "k8s.io/kubernetes/test/utils" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" + testutils "k8s.io/kubernetes/test/utils" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -67,7 +67,7 @@ var _ = Describe("sanity", func() { // and deletes all extra entries that it does not know about. 
TargetPath: "/var/lib/kubelet/plugins/kubernetes.io/csi/pv/pmem-sanity-target.XXXXXX", StagingPath: "/var/lib/kubelet/plugins/kubernetes.io/csi/pv/pmem-sanity-staging.XXXXXX", - IDGen: &sanity.DefaultIDGenerator{}, + IDGen: &sanity.DefaultIDGenerator{}, } f := framework.NewDefaultFramework("pmem") diff --git a/vendor/github.com/kubernetes-csi/csi-test/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/kubernetes-csi/csi-test/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000..d70526403f --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,40 @@ + + +**What type of PR is this?** +> Uncomment only one ` /kind <>` line, hit enter to put that in a new line, and remove leading whitespaces from that line: +> +> /kind api-change +> /kind bug +> /kind cleanup +> /kind design +> /kind documentation +> /kind failing-test +> /kind feature +> /kind flake + +**What this PR does / why we need it**: + +**Which issue(s) this PR fixes**: + +Fixes # + +**Special notes for your reviewer**: + +**Does this PR introduce a user-facing change?**: + +```release-note + +``` diff --git a/vendor/github.com/kubernetes-csi/csi-test/.gitignore b/vendor/github.com/kubernetes-csi/csi-test/.gitignore new file mode 100644 index 0000000000..4b6d3bc377 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/.gitignore @@ -0,0 +1,4 @@ + +#ignore files specific to csi-test +bin/mock-driver +cmd/csi-sanity/csi-sanity diff --git a/vendor/github.com/kubernetes-csi/csi-test/.prow.sh b/vendor/github.com/kubernetes-csi/csi-test/.prow.sh new file mode 100755 index 0000000000..2afa36f7a5 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/.prow.sh @@ -0,0 +1,25 @@ +#! /bin/bash -e + +# A Prow job can override these defaults, but this shouldn't be necessary. + +# Only these tests make sense for csi-sanity. +: ${CSI_PROW_TESTS:="unit sanity"} + +# What matters for csi-sanity is the version of the hostpath driver +# that we test against, not the version of Kubernetes that it runs on. +# We pick the latest stable Kubernetes here because the corresponding +# deployment has the current driver. v1.0.1 (from the current 1.13 +# deployment) does not pass csi-sanity testing. +: ${CSI_PROW_KUBERNETES_VERSION:=1.14.0} + +# This repo supports and wants sanity testing. +CSI_PROW_TESTS_SANITY=sanity + +. release-tools/prow.sh + +# Here we override "install_sanity" to use the pre-built one. +install_sanity () { + cp -a cmd/csi-sanity/csi-sanity "${CSI_PROW_WORK}/csi-sanity" +} + +main diff --git a/vendor/github.com/kubernetes-csi/csi-test/CHANGELOG-2.1.md b/vendor/github.com/kubernetes-csi/csi-test/CHANGELOG-2.1.md new file mode 100644 index 0000000000..9d576aba24 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/CHANGELOG-2.1.md @@ -0,0 +1,16 @@ +# Changelog since v2.0.1 + +## New Features + +- the sanity volume code can be used concurrently and avoids a potential volume leak because of an incorrect target path ([#210](https://github.com/kubernetes-csi/csi-test/pull/210), [@pohly](https://github.com/pohly)) +- csi-mock driver now supports relaxing the target path check (--permissive-target-path) and ephemeral inline volumes ([#209](https://github.com/kubernetes-csi/csi-test/pull/209), [@pohly](https://github.com/pohly)) + + +## Bug Fixes + +- Remove volume listing tests that assumed tokens were integers and replace them with one that confirms pagination works when adding/deleting volumes between page requests. 
([#205](https://github.com/kubernetes-csi/csi-test/pull/205), [@Akrog](https://github.com/Akrog)) + + +## Other Notable Changes + +- Added tests for ControllerExpandVolume ([#203](https://github.com/kubernetes-csi/csi-test/pull/203), [@Ntr0](https://github.com/Ntr0)) diff --git a/vendor/github.com/kubernetes-csi/csi-test/CHANGELOG-2.2.md b/vendor/github.com/kubernetes-csi/csi-test/CHANGELOG-2.2.md new file mode 100644 index 0000000000..6d02fb9612 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/CHANGELOG-2.2.md @@ -0,0 +1,7 @@ +# Changelog since v2.1.0 + +## New Features + +- Adds an interface for generating Volume/Node IDs that users can plug in for ID's that must conform to valid format ([#212](https://github.com/kubernetes-csi/csi-test/pull/212), [@davidz627](https://github.com/davidz627)) + + diff --git a/vendor/github.com/kubernetes-csi/csi-test/CHANGELOG-2.3.md b/vendor/github.com/kubernetes-csi/csi-test/CHANGELOG-2.3.md new file mode 100644 index 0000000000..08696e437e --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/CHANGELOG-2.3.md @@ -0,0 +1,18 @@ +# Changelog since v2.2.0 + +## New Features + +- add '--csi.testvolumeexpandsize` flag ([#219](https://github.com/kubernetes-csi/csi-test/pull/219), [@wnxn](https://github.com/wnxn)) +- pass ControllerExpandVolumeSecret to volume expansion tests ([#217](https://github.com/kubernetes-csi/csi-test/pull/217), [@suneeth51](https://github.com/suneeth51)) + + +## Bug Fixes + +- fix nil pointer error when Config.IDGen is not set ([#220](https://github.com/kubernetes-csi/csi-test/pull/220), [@pohly](https://github.com/pohly)) +- fix cloning of first volume in mock CSI driver([#227](https://github.com/kubernetes-csi/csi-test/pull/227), [@avalluri](https://github.com/avalluri)) + + +## Other Notable Changes + +- add Go module support and thus can be built also outside of GOPATH ([#224](https://github.com/kubernetes-csi/csi-test/pull/224), [@pohly](https://github.com/pohly)) +- disable unreliable ListVolume pagination test ([#226](https://github.com/kubernetes-csi/csi-test/pull/226), [@avalluri](https://github.com/avalluri)) diff --git a/vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md b/vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md new file mode 100644 index 0000000000..41b73b76e0 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md @@ -0,0 +1,22 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). 
Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + +## Contact Information + +- [Slack channel](https://kubernetes.slack.com/messages/sig-storage) +- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-storage) diff --git a/vendor/github.com/kubernetes-csi/csi-test/Makefile b/vendor/github.com/kubernetes-csi/csi-test/Makefile new file mode 100644 index 0000000000..cdfe793689 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/Makefile @@ -0,0 +1,34 @@ +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This repository builds two commands, mock-driver and csi-sanity, +# but csi-sanity has its own build rules and only mock-driver gets +# published as a container image. +CMDS=mock-driver +all: build build-sanity + +include release-tools/build.make + +# We have to exclude generic testing of the csi-sanity command because +# the test binary only works in combination with a CSI driver. +# Instead we test with the special ./hack/e2e.sh. 
+TEST_GO_FILTER_CMD+=| grep -v /cmd/csi-sanity +.PHONY: test-sanity +test: test-sanity +test-sanity: + @ echo; echo "### $@:" + ./hack/e2e.sh + +build-sanity: + $(MAKE) -C cmd/csi-sanity all diff --git a/vendor/github.com/kubernetes-csi/csi-test/OWNERS b/vendor/github.com/kubernetes-csi/csi-test/OWNERS new file mode 100644 index 0000000000..d615fe4817 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/OWNERS @@ -0,0 +1,6 @@ +approvers: +- saad-ali +- lpabon +- pohly +- msau42 +- jsafrane diff --git a/vendor/github.com/kubernetes-csi/csi-test/README.md b/vendor/github.com/kubernetes-csi/csi-test/README.md new file mode 100644 index 0000000000..cddf799848 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/README.md @@ -0,0 +1,47 @@ +[![Build Status](https://travis-ci.org/kubernetes-csi/csi-test.svg?branch=master)](https://travis-ci.org/kubernetes-csi/csi-test) +[![Docker Repository on Quay](https://quay.io/repository/k8scsi/mock-driver/status "Docker Repository on +Quay")](https://quay.io/repository/k8scsi/mock-driver) + +# csi-test + +csi-test houses packages and libraries to help test CSI client and plugins. + +## For Container Orchestration Tests + +CO developers can use this framework to create drivers based on the +[Golang mock](https://github.com/golang/mock) framework. Please see +[co_test.go](test/co_test.go) for an example. + +### Mock driver for testing + +We also provide a container called `quay.io/k8scsi/mock-driver:canary` which can be used as an in-memory mock driver. +It follows the same release cycle as other containers, so the latest release is `quay.io/k8scsi/mock-driver:v0.3.0`. + +You will need to setup the environment variable `CSI_ENDPOINT` for the mock driver to know where to create the unix +domain socket. + +## For CSI Driver Tests + +To test drivers please take a look at [pkg/sanity](https://github.com/kubernetes-csi/csi-test/tree/master/pkg/sanity). +This package and [csi-sanity](https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity) are meant to test +the CSI API capability of a driver. They are meant to be an additional test to the unit, functional, and e2e tests of a +CSI driver. + +### Note + +* Master is for CSI v0.4.0. Please see the branches for other CSI releases. +* Only Golang 1.9+ supported. See [gRPC issue](https://github.com/grpc/grpc-go/issues/711#issuecomment-326626790) + +## Community, discussion, contribution, and support + +Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). + +You can reach the maintainers of this project at: + +* [Slack channel](https://kubernetes.slack.com/messages/sig-storage) + +* [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-storage) + +### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). diff --git a/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS b/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS new file mode 100644 index 0000000000..00e28e4ebc --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS @@ -0,0 +1,14 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. 
+# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +saad-ali +lpabon diff --git a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/Makefile b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/Makefile new file mode 100644 index 0000000000..520c2153c1 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/Makefile @@ -0,0 +1,61 @@ +APP_NAME := csi-sanity +VER :=$(shell git describe) +RELEASEVER := $(shell git describe --abbrev=0) +BRANCH := $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) +SHA := $(shell git rev-parse --short HEAD) +ARCH := $(shell go env GOARCH) +GOOS := $(shell go env GOOS) +DIR=. + +ifdef APP_SUFFIX + VERSION = $(VER)-$(subst /,-,$(APP_SUFFIX)) +else +ifeq (master,$(BRANCH)) + VERSION = $(VER) +else + VERSION = $(VER)-$(BRANCH) +endif +endif + +LDFLAGS :=-ldflags "-w -X github.com/kubernetes-csi/csi-test/cmd/csi-sanity.VERSION=$(VERSION) -extldflags '-z relro -z now'" +PACKAGE :=$(DIR)/dist/$(APP_NAME)-$(RELEASEVER).$(GOOS).$(ARCH).tar.gz + +all: $(APP_NAME) + +$(APP_NAME): Makefile sanity_test.go + go test $(LDFLAGS) -c -o $(APP_NAME) + +install: $(APP_NAME) + cp $(APP_NAME) $(GOPATH)/bin + +clean: + rm -f csi-sanity + +dist-clean: + rm -rf $(DIR)/dist + +dist: clean $(PACKAGE) + +$(PACKAGE): $(APP_NAME) + @echo Packaging Binaries... + @mkdir -p tmp/$(APP_NAME) + @cp $(APP_NAME) tmp/$(APP_NAME)/ + @mkdir -p $(DIR)/dist/ + tar -czf $@ -C tmp $(APP_NAME); + @rm -rf tmp + @echo + @echo Package $@ saved in dist directory + +linux_amd64_dist: + GOOS=linux GOARCH=amd64 $(MAKE) dist + +linux_arm64_dist: + GOOS=linux GOARCH=arm64 $(MAKE) dist + +darwin_amd64_dist: + GOOS=darwin GOARCH=amd64 $(MAKE) dist + +release: dist-clean darwin_amd64_dist linux_amd64_dist linux_arm64_dist + +.PHONY: release darwin_amd64_dist linux_arm64_dist linux_amd64_dist \ + linux_arm_dist linux_amd64_dist clean dist-clean diff --git a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/README.md b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/README.md new file mode 100644 index 0000000000..0c1566434c --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/README.md @@ -0,0 +1,60 @@ +# Sanity Test Command Line Program +This is the command line program that tests a CSI driver using the [`sanity`](https://github.com/kubernetes-csi/csi-test/tree/master/pkg/sanity) package test suite. 
+ +Example: + +``` +$ csi-sanity --csi.endpoint= +``` + +If you want to specify a mount point: + +``` +$ csi-sanity --csi.endpoint= --csi.mountpoint=/mnt +``` + +For verbose type: + +``` +$ csi-sanity --ginkgo.v --csi.endpoint= +``` + +For csi-credentials, create a secrets file with all the secrets in it: +```yaml +CreateVolumeSecret: + secretKey: secretval1 +DeleteVolumeSecret: + secretKey: secretval2 +ControllerPublishVolumeSecret: + secretKey: secretval3 +ControllerUnpublishVolumeSecret: + secretKey: secretval4 +NodeStageVolumeSecret: + secretKey: secretval5 +NodePublishVolumeSecret: + secretKey: secretval6 +ControllerValidateVolumeCapabilitiesSecret: + secretKey: secretval7 +``` + +Pass the file path to csi-sanity as: +``` +$ csi-sanity --csi.endpoint= --csi.secrets= +``` + +Replace the keys and values of the credentials appropriately. Since the whole +secret is passed in the request, multiple key-val pairs can be used. + +### Help +The full Ginkgo and golang unit test parameters are available. Type + +``` +$ csi-sanity -h +``` + +to get more information + +### Download + +Please see the [Releases](https://github.com/kubernetes-csi/csi-test/releases) page +to download the latest version of `csi-sanity` diff --git a/vendor/github.com/kubernetes-csi/csi-test/cmd/mock-driver/Dockerfile b/vendor/github.com/kubernetes-csi/csi-test/cmd/mock-driver/Dockerfile new file mode 100644 index 0000000000..a3698dac93 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/cmd/mock-driver/Dockerfile @@ -0,0 +1,9 @@ +FROM gcr.io/distroless/static:latest +LABEL maintainers="Kubernetes Authors" +LABEL description="CSI Mock Driver" + +# For historic reasons the binary is called "mock" inside the container. +# It's kept that way because some .yaml file might use that name instead +# of relying on the entry point. +COPY ./bin/mock-driver mock +ENTRYPOINT ["/mock"] diff --git a/vendor/github.com/kubernetes-csi/csi-test/cmd/mock-driver/main.go b/vendor/github.com/kubernetes-csi/csi-test/cmd/mock-driver/main.go new file mode 100644 index 0000000000..0b8a477e29 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/cmd/mock-driver/main.go @@ -0,0 +1,184 @@ +/* +Copyright 2018 Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package main + +import ( + "flag" + "fmt" + "net" + "os" + "os/signal" + "strings" + "syscall" + + "github.com/kubernetes-csi/csi-test/v3/driver" + "github.com/kubernetes-csi/csi-test/v3/mock/service" +) + +func main() { + var config service.Config + flag.BoolVar(&config.DisableAttach, "disable-attach", false, "Disables RPC_PUBLISH_UNPUBLISH_VOLUME capability.") + flag.StringVar(&config.DriverName, "name", service.Name, "CSI driver name.") + flag.Int64Var(&config.AttachLimit, "attach-limit", 2, "number of attachable volumes on a node") + flag.BoolVar(&config.NodeExpansionRequired, "node-expand-required", false, "Enables NodeServiceCapability_RPC_EXPAND_VOLUME capacity.") + flag.BoolVar(&config.DisableControllerExpansion, "disable-controller-expansion", false, "Disables ControllerServiceCapability_RPC_EXPAND_VOLUME capability.") + flag.BoolVar(&config.DisableOnlineExpansion, "disable-online-expansion", false, "Disables online volume expansion capability.") + flag.BoolVar(&config.PermissiveTargetPath, "permissive-target-path", false, "Allows the CO to create PublishVolumeRequest.TargetPath, which violates the CSI spec.") + flag.Parse() + + endpoint := os.Getenv("CSI_ENDPOINT") + if len(endpoint) == 0 { + fmt.Println("CSI_ENDPOINT must be defined and must be a path") + os.Exit(1) + } + if strings.Contains(endpoint, ":") { + fmt.Println("CSI_ENDPOINT must be a unix path") + os.Exit(1) + } + + controllerEndpoint := os.Getenv("CSI_CONTROLLER_ENDPOINT") + if len(controllerEndpoint) == 0 { + // If empty, set to the common endpoint. + controllerEndpoint = endpoint + } + if strings.Contains(controllerEndpoint, ":") { + fmt.Println("CSI_CONTROLLER_ENDPOINT must be a unix path") + os.Exit(1) + } + + // Create mock driver + s := service.New(config) + + if endpoint == controllerEndpoint { + servers := &driver.CSIDriverServers{ + Controller: s, + Identity: s, + Node: s, + } + d := driver.NewCSIDriver(servers) + + // If creds is enabled, set the default creds. + setCreds := os.Getenv("CSI_ENABLE_CREDS") + if len(setCreds) > 0 && setCreds == "true" { + d.SetDefaultCreds() + } + + // Listen + os.Remove(endpoint) + os.Remove(controllerEndpoint) + l, err := net.Listen("unix", endpoint) + if err != nil { + fmt.Printf("Error: Unable to listen on %s socket: %v\n", + endpoint, + err) + os.Exit(1) + } + defer os.Remove(endpoint) + + // Start server + if err := d.Start(l); err != nil { + fmt.Printf("Error: Unable to start mock CSI server: %v\n", + err) + os.Exit(1) + } + fmt.Println("mock driver started") + + // Wait for signal + sigc := make(chan os.Signal, 1) + sigs := []os.Signal{ + syscall.SIGTERM, + syscall.SIGHUP, + syscall.SIGINT, + syscall.SIGQUIT, + } + signal.Notify(sigc, sigs...) + + <-sigc + d.Stop() + fmt.Println("mock driver stopped") + } else { + controllerServer := &driver.CSIDriverControllerServer{ + Controller: s, + Identity: s, + } + dc := driver.NewCSIDriverController(controllerServer) + + nodeServer := &driver.CSIDriverNodeServer{ + Node: s, + Identity: s, + } + dn := driver.NewCSIDriverNode(nodeServer) + + setCreds := os.Getenv("CSI_ENABLE_CREDS") + if len(setCreds) > 0 && setCreds == "true" { + dc.SetDefaultCreds() + dn.SetDefaultCreds() + } + + // Listen controller. + os.Remove(controllerEndpoint) + l, err := net.Listen("unix", controllerEndpoint) + if err != nil { + fmt.Printf("Error: Unable to listen on %s socket: %v\n", + controllerEndpoint, + err) + os.Exit(1) + } + defer os.Remove(controllerEndpoint) + + // Start controller server. 
+ if err = dc.Start(l); err != nil { + fmt.Printf("Error: Unable to start mock CSI controller server: %v\n", + err) + os.Exit(1) + } + fmt.Println("mock controller driver started") + + // Listen node. + os.Remove(endpoint) + l, err = net.Listen("unix", endpoint) + if err != nil { + fmt.Printf("Error: Unable to listen on %s socket: %v\n", + endpoint, + err) + os.Exit(1) + } + defer os.Remove(endpoint) + + // Start node server. + if err = dn.Start(l); err != nil { + fmt.Printf("Error: Unable to start mock CSI node server: %v\n", + err) + os.Exit(1) + } + fmt.Println("mock node driver started") + + // Wait for signal + sigc := make(chan os.Signal, 1) + sigs := []os.Signal{ + syscall.SIGTERM, + syscall.SIGHUP, + syscall.SIGINT, + syscall.SIGQUIT, + } + signal.Notify(sigc, sigs...) + + <-sigc + dc.Stop() + dn.Stop() + fmt.Println("mock drivers stopped") + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/code-of-conduct.md b/vendor/github.com/kubernetes-csi/csi-test/code-of-conduct.md new file mode 100644 index 0000000000..0d15c00cf3 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/driver-controller.go b/vendor/github.com/kubernetes-csi/csi-test/driver/driver-controller.go new file mode 100644 index 0000000000..1d8d2bd771 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/driver-controller.go @@ -0,0 +1,110 @@ +/* +Copyright 2019 Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver + +import ( + "context" + "net" + "sync" + + "google.golang.org/grpc/reflection" + + csi "github.com/container-storage-interface/spec/lib/go/csi" + "google.golang.org/grpc" +) + +// CSIDriverControllerServer is the Controller service component of the driver. +type CSIDriverControllerServer struct { + Controller csi.ControllerServer + Identity csi.IdentityServer +} + +// CSIDriverController is the CSI Driver Controller backend. +type CSIDriverController struct { + listener net.Listener + server *grpc.Server + controllerServer *CSIDriverControllerServer + wg sync.WaitGroup + running bool + lock sync.Mutex + creds *CSICreds +} + +func NewCSIDriverController(controllerServer *CSIDriverControllerServer) *CSIDriverController { + return &CSIDriverController{ + controllerServer: controllerServer, + } +} + +func (c *CSIDriverController) goServe(started chan<- bool) { + goServe(c.server, &c.wg, c.listener, started) +} + +func (c *CSIDriverController) Address() string { + return c.listener.Addr().String() +} + +func (c *CSIDriverController) Start(l net.Listener) error { + c.lock.Lock() + defer c.lock.Unlock() + + // Set listener. + c.listener = l + + // Create a new grpc server. 
+ c.server = grpc.NewServer( + grpc.UnaryInterceptor(c.callInterceptor), + ) + + if c.controllerServer.Controller != nil { + csi.RegisterControllerServer(c.server, c.controllerServer.Controller) + } + if c.controllerServer.Identity != nil { + csi.RegisterIdentityServer(c.server, c.controllerServer.Identity) + } + + reflection.Register(c.server) + + waitForServer := make(chan bool) + c.goServe(waitForServer) + <-waitForServer + c.running = true + return nil +} + +func (c *CSIDriverController) Stop() { + stop(&c.lock, &c.wg, c.server, c.running) +} + +func (c *CSIDriverController) Close() { + c.server.Stop() +} + +func (c *CSIDriverController) IsRunning() bool { + c.lock.Lock() + defer c.lock.Unlock() + + return c.running +} + +func (c *CSIDriverController) SetDefaultCreds() { + setDefaultCreds(c.creds) +} + +func (c *CSIDriverController) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return callInterceptor(ctx, c.creds, req, info, handler) +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/driver-node.go b/vendor/github.com/kubernetes-csi/csi-test/driver/driver-node.go new file mode 100644 index 0000000000..7720bfc493 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/driver-node.go @@ -0,0 +1,109 @@ +/* +Copyright 2019 Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver + +import ( + context "context" + "net" + "sync" + + csi "github.com/container-storage-interface/spec/lib/go/csi" + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" +) + +// CSIDriverNodeServer is the Node service component of the driver. +type CSIDriverNodeServer struct { + Node csi.NodeServer + Identity csi.IdentityServer +} + +// CSIDriverNode is the CSI Driver Node backend. +type CSIDriverNode struct { + listener net.Listener + server *grpc.Server + nodeServer *CSIDriverNodeServer + wg sync.WaitGroup + running bool + lock sync.Mutex + creds *CSICreds +} + +func NewCSIDriverNode(nodeServer *CSIDriverNodeServer) *CSIDriverNode { + return &CSIDriverNode{ + nodeServer: nodeServer, + } +} + +func (c *CSIDriverNode) goServe(started chan<- bool) { + goServe(c.server, &c.wg, c.listener, started) +} + +func (c *CSIDriverNode) Address() string { + return c.listener.Addr().String() +} + +func (c *CSIDriverNode) Start(l net.Listener) error { + c.lock.Lock() + defer c.lock.Unlock() + + // Set listener. + c.listener = l + + // Create a new grpc server. 
+ c.server = grpc.NewServer( + grpc.UnaryInterceptor(c.callInterceptor), + ) + + if c.nodeServer.Node != nil { + csi.RegisterNodeServer(c.server, c.nodeServer.Node) + } + if c.nodeServer.Identity != nil { + csi.RegisterIdentityServer(c.server, c.nodeServer.Identity) + } + + reflection.Register(c.server) + + waitForServer := make(chan bool) + c.goServe(waitForServer) + <-waitForServer + c.running = true + return nil +} + +func (c *CSIDriverNode) Stop() { + stop(&c.lock, &c.wg, c.server, c.running) +} + +func (c *CSIDriverNode) Close() { + c.server.Stop() +} + +func (c *CSIDriverNode) IsRunning() bool { + c.lock.Lock() + defer c.lock.Unlock() + + return c.running +} + +func (c *CSIDriverNode) SetDefaultCreds() { + setDefaultCreds(c.creds) +} + +func (c *CSIDriverNode) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return callInterceptor(ctx, c.creds, req, info, handler) +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go new file mode 100644 index 0000000000..fecc900f8e --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go @@ -0,0 +1,305 @@ +/* +Copyright 2017 Luis Pabón luis@portworx.com + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//go:generate mockgen -package=driver -destination=driver.mock.go github.com/container-storage-interface/spec/lib/go/csi IdentityServer,ControllerServer,NodeServer + +package driver + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "sync" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/container-storage-interface/spec/lib/go/csi" + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" +) + +var ( + // ErrNoCredentials is the error when a secret is enabled but not passed in the request. + ErrNoCredentials = errors.New("secret must be provided") + // ErrAuthFailed is the error when the secret is incorrect. + ErrAuthFailed = errors.New("authentication failed") +) + +// CSIDriverServers is a unified driver component with both Controller and Node +// services. +type CSIDriverServers struct { + Controller csi.ControllerServer + Identity csi.IdentityServer + Node csi.NodeServer +} + +// This is the key name in all the CSI secret objects. +const secretField = "secretKey" + +// CSICreds is a driver specific secret type. Drivers can have a key-val pair of +// secrets. This mock driver has a single string secret with secretField as the +// key. 
+type CSICreds struct { + CreateVolumeSecret string + DeleteVolumeSecret string + ControllerPublishVolumeSecret string + ControllerUnpublishVolumeSecret string + NodeStageVolumeSecret string + NodePublishVolumeSecret string + CreateSnapshotSecret string + DeleteSnapshotSecret string + ControllerValidateVolumeCapabilitiesSecret string +} + +type CSIDriver struct { + listener net.Listener + server *grpc.Server + servers *CSIDriverServers + wg sync.WaitGroup + running bool + lock sync.Mutex + creds *CSICreds +} + +func NewCSIDriver(servers *CSIDriverServers) *CSIDriver { + return &CSIDriver{ + servers: servers, + } +} + +func (c *CSIDriver) goServe(started chan<- bool) { + goServe(c.server, &c.wg, c.listener, started) +} + +func (c *CSIDriver) Address() string { + return c.listener.Addr().String() +} +func (c *CSIDriver) Start(l net.Listener) error { + c.lock.Lock() + defer c.lock.Unlock() + + // Set listener + c.listener = l + + // Create a new grpc server + c.server = grpc.NewServer( + grpc.UnaryInterceptor(c.callInterceptor), + ) + + // Register Mock servers + if c.servers.Controller != nil { + csi.RegisterControllerServer(c.server, c.servers.Controller) + } + if c.servers.Identity != nil { + csi.RegisterIdentityServer(c.server, c.servers.Identity) + } + if c.servers.Node != nil { + csi.RegisterNodeServer(c.server, c.servers.Node) + } + reflection.Register(c.server) + + // Start listening for requests + waitForServer := make(chan bool) + c.goServe(waitForServer) + <-waitForServer + c.running = true + return nil +} + +func (c *CSIDriver) Stop() { + stop(&c.lock, &c.wg, c.server, c.running) +} + +func (c *CSIDriver) Close() { + c.server.Stop() +} + +func (c *CSIDriver) IsRunning() bool { + c.lock.Lock() + defer c.lock.Unlock() + + return c.running +} + +// SetDefaultCreds sets the default secrets for CSI creds. +func (c *CSIDriver) SetDefaultCreds() { + setDefaultCreds(c.creds) +} + +func (c *CSIDriver) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return callInterceptor(ctx, c.creds, req, info, handler) +} + +// goServe starts a grpc server. +func goServe(server *grpc.Server, wg *sync.WaitGroup, listener net.Listener, started chan<- bool) { + wg.Add(1) + go func() { + defer wg.Done() + started <- true + err := server.Serve(listener) + if err != nil { + panic(err.Error()) + } + }() +} + +// stop stops a grpc server. +func stop(lock *sync.Mutex, wg *sync.WaitGroup, server *grpc.Server, running bool) { + lock.Lock() + defer lock.Unlock() + + if !running { + return + } + + server.Stop() + wg.Wait() +} + +// setDefaultCreds sets the default credentials, given a CSICreds instance. 
+func setDefaultCreds(creds *CSICreds) { + creds = &CSICreds{ + CreateVolumeSecret: "secretval1", + DeleteVolumeSecret: "secretval2", + ControllerPublishVolumeSecret: "secretval3", + ControllerUnpublishVolumeSecret: "secretval4", + NodeStageVolumeSecret: "secretval5", + NodePublishVolumeSecret: "secretval6", + CreateSnapshotSecret: "secretval7", + DeleteSnapshotSecret: "secretval8", + ControllerValidateVolumeCapabilitiesSecret: "secretval9", + } +} + +func callInterceptor(ctx context.Context, creds *CSICreds, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + err := authInterceptor(creds, req) + if err != nil { + logGRPC(info.FullMethod, req, nil, err) + return nil, err + } + rsp, err := handler(ctx, req) + logGRPC(info.FullMethod, req, rsp, err) + return rsp, err +} + +func authInterceptor(creds *CSICreds, req interface{}) error { + if creds != nil { + authenticated, authErr := isAuthenticated(req, creds) + if !authenticated { + if authErr == ErrNoCredentials { + return status.Error(codes.InvalidArgument, authErr.Error()) + } + if authErr == ErrAuthFailed { + return status.Error(codes.Unauthenticated, authErr.Error()) + } + } + } + return nil +} + +func logGRPC(method string, request, reply interface{}, err error) { + // Log JSON with the request and response for easier parsing + logMessage := struct { + Method string + Request interface{} + Response interface{} + Error string + }{ + Method: method, + Request: request, + Response: reply, + } + if err != nil { + logMessage.Error = err.Error() + } + msg, _ := json.Marshal(logMessage) + fmt.Printf("gRPCCall: %s\n", msg) +} + +func isAuthenticated(req interface{}, creds *CSICreds) (bool, error) { + switch r := req.(type) { + case *csi.CreateVolumeRequest: + return authenticateCreateVolume(r, creds) + case *csi.DeleteVolumeRequest: + return authenticateDeleteVolume(r, creds) + case *csi.ControllerPublishVolumeRequest: + return authenticateControllerPublishVolume(r, creds) + case *csi.ControllerUnpublishVolumeRequest: + return authenticateControllerUnpublishVolume(r, creds) + case *csi.NodeStageVolumeRequest: + return authenticateNodeStageVolume(r, creds) + case *csi.NodePublishVolumeRequest: + return authenticateNodePublishVolume(r, creds) + case *csi.CreateSnapshotRequest: + return authenticateCreateSnapshot(r, creds) + case *csi.DeleteSnapshotRequest: + return authenticateDeleteSnapshot(r, creds) + case *csi.ValidateVolumeCapabilitiesRequest: + return authenticateControllerValidateVolumeCapabilities(r, creds) + default: + return true, nil + } +} + +func authenticateCreateVolume(req *csi.CreateVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.CreateVolumeSecret) +} + +func authenticateDeleteVolume(req *csi.DeleteVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.DeleteVolumeSecret) +} + +func authenticateControllerPublishVolume(req *csi.ControllerPublishVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.ControllerPublishVolumeSecret) +} + +func authenticateControllerUnpublishVolume(req *csi.ControllerUnpublishVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.ControllerUnpublishVolumeSecret) +} + +func authenticateNodeStageVolume(req *csi.NodeStageVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.NodeStageVolumeSecret) +} + +func authenticateNodePublishVolume(req *csi.NodePublishVolumeRequest, 
creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.NodePublishVolumeSecret) +} + +func authenticateCreateSnapshot(req *csi.CreateSnapshotRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.CreateSnapshotSecret) +} + +func authenticateDeleteSnapshot(req *csi.DeleteSnapshotRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.DeleteSnapshotSecret) +} + +func authenticateControllerValidateVolumeCapabilities(req *csi.ValidateVolumeCapabilitiesRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.ControllerValidateVolumeCapabilitiesSecret) +} + +func credsCheck(secrets map[string]string, secretVal string) (bool, error) { + if len(secrets) == 0 { + return false, ErrNoCredentials + } + + if secrets[secretField] != secretVal { + return false, ErrAuthFailed + } + return true, nil +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go new file mode 100644 index 0000000000..d52c0447e6 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go @@ -0,0 +1,380 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/container-storage-interface/spec/lib/go/csi (interfaces: IdentityServer,ControllerServer,NodeServer) + +// Package driver is a generated GoMock package. +package driver + +import ( + context "context" + csi "github.com/container-storage-interface/spec/lib/go/csi" + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockIdentityServer is a mock of IdentityServer interface +type MockIdentityServer struct { + ctrl *gomock.Controller + recorder *MockIdentityServerMockRecorder +} + +// MockIdentityServerMockRecorder is the mock recorder for MockIdentityServer +type MockIdentityServerMockRecorder struct { + mock *MockIdentityServer +} + +// NewMockIdentityServer creates a new mock instance +func NewMockIdentityServer(ctrl *gomock.Controller) *MockIdentityServer { + mock := &MockIdentityServer{ctrl: ctrl} + mock.recorder = &MockIdentityServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockIdentityServer) EXPECT() *MockIdentityServerMockRecorder { + return m.recorder +} + +// GetPluginCapabilities mocks base method +func (m *MockIdentityServer) GetPluginCapabilities(arg0 context.Context, arg1 *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "GetPluginCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.GetPluginCapabilitiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPluginCapabilities indicates an expected call of GetPluginCapabilities +func (mr *MockIdentityServerMockRecorder) GetPluginCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPluginCapabilities", reflect.TypeOf((*MockIdentityServer)(nil).GetPluginCapabilities), arg0, arg1) +} + +// GetPluginInfo mocks base method +func (m *MockIdentityServer) GetPluginInfo(arg0 context.Context, arg1 *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { + ret := m.ctrl.Call(m, "GetPluginInfo", arg0, arg1) + ret0, _ := ret[0].(*csi.GetPluginInfoResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPluginInfo indicates an expected call of GetPluginInfo +func (mr *MockIdentityServerMockRecorder) GetPluginInfo(arg0, arg1 interface{}) *gomock.Call { 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPluginInfo", reflect.TypeOf((*MockIdentityServer)(nil).GetPluginInfo), arg0, arg1) +} + +// Probe mocks base method +func (m *MockIdentityServer) Probe(arg0 context.Context, arg1 *csi.ProbeRequest) (*csi.ProbeResponse, error) { + ret := m.ctrl.Call(m, "Probe", arg0, arg1) + ret0, _ := ret[0].(*csi.ProbeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Probe indicates an expected call of Probe +func (mr *MockIdentityServerMockRecorder) Probe(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Probe", reflect.TypeOf((*MockIdentityServer)(nil).Probe), arg0, arg1) +} + +// MockControllerServer is a mock of ControllerServer interface +type MockControllerServer struct { + ctrl *gomock.Controller + recorder *MockControllerServerMockRecorder +} + +// MockControllerServerMockRecorder is the mock recorder for MockControllerServer +type MockControllerServerMockRecorder struct { + mock *MockControllerServer +} + +// NewMockControllerServer creates a new mock instance +func NewMockControllerServer(ctrl *gomock.Controller) *MockControllerServer { + mock := &MockControllerServer{ctrl: ctrl} + mock.recorder = &MockControllerServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockControllerServer) EXPECT() *MockControllerServerMockRecorder { + return m.recorder +} + +// ControllerExpandVolume mocks base method +func (m *MockControllerServer) ControllerExpandVolume(arg0 context.Context, arg1 *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) { + ret := m.ctrl.Call(m, "ControllerExpandVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.ControllerExpandVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ControllerExpandVolume indicates an expected call of ControllerExpandVolume +func (mr *MockControllerServerMockRecorder) ControllerExpandVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerExpandVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerExpandVolume), arg0, arg1) +} + +// ControllerGetCapabilities mocks base method +func (m *MockControllerServer) ControllerGetCapabilities(arg0 context.Context, arg1 *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "ControllerGetCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.ControllerGetCapabilitiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ControllerGetCapabilities indicates an expected call of ControllerGetCapabilities +func (mr *MockControllerServerMockRecorder) ControllerGetCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerGetCapabilities", reflect.TypeOf((*MockControllerServer)(nil).ControllerGetCapabilities), arg0, arg1) +} + +// ControllerPublishVolume mocks base method +func (m *MockControllerServer) ControllerPublishVolume(arg0 context.Context, arg1 *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "ControllerPublishVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.ControllerPublishVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ControllerPublishVolume indicates an expected call of ControllerPublishVolume +func (mr *MockControllerServerMockRecorder) ControllerPublishVolume(arg0, arg1 interface{}) 
*gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerPublishVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerPublishVolume), arg0, arg1) +} + +// ControllerUnpublishVolume mocks base method +func (m *MockControllerServer) ControllerUnpublishVolume(arg0 context.Context, arg1 *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "ControllerUnpublishVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.ControllerUnpublishVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ControllerUnpublishVolume indicates an expected call of ControllerUnpublishVolume +func (mr *MockControllerServerMockRecorder) ControllerUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerUnpublishVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerUnpublishVolume), arg0, arg1) +} + +// CreateSnapshot mocks base method +func (m *MockControllerServer) CreateSnapshot(arg0 context.Context, arg1 *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { + ret := m.ctrl.Call(m, "CreateSnapshot", arg0, arg1) + ret0, _ := ret[0].(*csi.CreateSnapshotResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateSnapshot indicates an expected call of CreateSnapshot +func (mr *MockControllerServerMockRecorder) CreateSnapshot(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSnapshot", reflect.TypeOf((*MockControllerServer)(nil).CreateSnapshot), arg0, arg1) +} + +// CreateVolume mocks base method +func (m *MockControllerServer) CreateVolume(arg0 context.Context, arg1 *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { + ret := m.ctrl.Call(m, "CreateVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.CreateVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateVolume indicates an expected call of CreateVolume +func (mr *MockControllerServerMockRecorder) CreateVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVolume", reflect.TypeOf((*MockControllerServer)(nil).CreateVolume), arg0, arg1) +} + +// DeleteSnapshot mocks base method +func (m *MockControllerServer) DeleteSnapshot(arg0 context.Context, arg1 *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { + ret := m.ctrl.Call(m, "DeleteSnapshot", arg0, arg1) + ret0, _ := ret[0].(*csi.DeleteSnapshotResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteSnapshot indicates an expected call of DeleteSnapshot +func (mr *MockControllerServerMockRecorder) DeleteSnapshot(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSnapshot", reflect.TypeOf((*MockControllerServer)(nil).DeleteSnapshot), arg0, arg1) +} + +// DeleteVolume mocks base method +func (m *MockControllerServer) DeleteVolume(arg0 context.Context, arg1 *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { + ret := m.ctrl.Call(m, "DeleteVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.DeleteVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteVolume indicates an expected call of DeleteVolume +func (mr *MockControllerServerMockRecorder) DeleteVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVolume", reflect.TypeOf((*MockControllerServer)(nil).DeleteVolume), arg0, arg1) +} + +// GetCapacity mocks base method +func (m 
*MockControllerServer) GetCapacity(arg0 context.Context, arg1 *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) { + ret := m.ctrl.Call(m, "GetCapacity", arg0, arg1) + ret0, _ := ret[0].(*csi.GetCapacityResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCapacity indicates an expected call of GetCapacity +func (mr *MockControllerServerMockRecorder) GetCapacity(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCapacity", reflect.TypeOf((*MockControllerServer)(nil).GetCapacity), arg0, arg1) +} + +// ListSnapshots mocks base method +func (m *MockControllerServer) ListSnapshots(arg0 context.Context, arg1 *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + ret := m.ctrl.Call(m, "ListSnapshots", arg0, arg1) + ret0, _ := ret[0].(*csi.ListSnapshotsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSnapshots indicates an expected call of ListSnapshots +func (mr *MockControllerServerMockRecorder) ListSnapshots(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSnapshots", reflect.TypeOf((*MockControllerServer)(nil).ListSnapshots), arg0, arg1) +} + +// ListVolumes mocks base method +func (m *MockControllerServer) ListVolumes(arg0 context.Context, arg1 *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) { + ret := m.ctrl.Call(m, "ListVolumes", arg0, arg1) + ret0, _ := ret[0].(*csi.ListVolumesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListVolumes indicates an expected call of ListVolumes +func (mr *MockControllerServerMockRecorder) ListVolumes(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVolumes", reflect.TypeOf((*MockControllerServer)(nil).ListVolumes), arg0, arg1) +} + +// ValidateVolumeCapabilities mocks base method +func (m *MockControllerServer) ValidateVolumeCapabilities(arg0 context.Context, arg1 *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "ValidateVolumeCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.ValidateVolumeCapabilitiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateVolumeCapabilities indicates an expected call of ValidateVolumeCapabilities +func (mr *MockControllerServerMockRecorder) ValidateVolumeCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateVolumeCapabilities", reflect.TypeOf((*MockControllerServer)(nil).ValidateVolumeCapabilities), arg0, arg1) +} + +// MockNodeServer is a mock of NodeServer interface +type MockNodeServer struct { + ctrl *gomock.Controller + recorder *MockNodeServerMockRecorder +} + +// MockNodeServerMockRecorder is the mock recorder for MockNodeServer +type MockNodeServerMockRecorder struct { + mock *MockNodeServer +} + +// NewMockNodeServer creates a new mock instance +func NewMockNodeServer(ctrl *gomock.Controller) *MockNodeServer { + mock := &MockNodeServer{ctrl: ctrl} + mock.recorder = &MockNodeServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockNodeServer) EXPECT() *MockNodeServerMockRecorder { + return m.recorder +} + +// NodeExpandVolume mocks base method +func (m *MockNodeServer) NodeExpandVolume(arg0 context.Context, arg1 *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeExpandVolume", arg0, arg1) + ret0, _ := 
ret[0].(*csi.NodeExpandVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeExpandVolume indicates an expected call of NodeExpandVolume +func (mr *MockNodeServerMockRecorder) NodeExpandVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeExpandVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeExpandVolume), arg0, arg1) +} + +// NodeGetCapabilities mocks base method +func (m *MockNodeServer) NodeGetCapabilities(arg0 context.Context, arg1 *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "NodeGetCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeGetCapabilitiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeGetCapabilities indicates an expected call of NodeGetCapabilities +func (mr *MockNodeServerMockRecorder) NodeGetCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetCapabilities", reflect.TypeOf((*MockNodeServer)(nil).NodeGetCapabilities), arg0, arg1) +} + +// NodeGetInfo mocks base method +func (m *MockNodeServer) NodeGetInfo(arg0 context.Context, arg1 *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { + ret := m.ctrl.Call(m, "NodeGetInfo", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeGetInfoResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeGetInfo indicates an expected call of NodeGetInfo +func (mr *MockNodeServerMockRecorder) NodeGetInfo(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetInfo", reflect.TypeOf((*MockNodeServer)(nil).NodeGetInfo), arg0, arg1) +} + +// NodeGetVolumeStats mocks base method +func (m *MockNodeServer) NodeGetVolumeStats(arg0 context.Context, arg1 *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { + ret := m.ctrl.Call(m, "NodeGetVolumeStats", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeGetVolumeStatsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeGetVolumeStats indicates an expected call of NodeGetVolumeStats +func (mr *MockNodeServerMockRecorder) NodeGetVolumeStats(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetVolumeStats", reflect.TypeOf((*MockNodeServer)(nil).NodeGetVolumeStats), arg0, arg1) +} + +// NodePublishVolume mocks base method +func (m *MockNodeServer) NodePublishVolume(arg0 context.Context, arg1 *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodePublishVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodePublishVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodePublishVolume indicates an expected call of NodePublishVolume +func (mr *MockNodeServerMockRecorder) NodePublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodePublishVolume", reflect.TypeOf((*MockNodeServer)(nil).NodePublishVolume), arg0, arg1) +} + +// NodeStageVolume mocks base method +func (m *MockNodeServer) NodeStageVolume(arg0 context.Context, arg1 *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeStageVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeStageVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeStageVolume indicates an expected call of NodeStageVolume +func (mr *MockNodeServerMockRecorder) NodeStageVolume(arg0, arg1 interface{}) *gomock.Call { + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeStageVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeStageVolume), arg0, arg1) +} + +// NodeUnpublishVolume mocks base method +func (m *MockNodeServer) NodeUnpublishVolume(arg0 context.Context, arg1 *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeUnpublishVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeUnpublishVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeUnpublishVolume indicates an expected call of NodeUnpublishVolume +func (mr *MockNodeServerMockRecorder) NodeUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUnpublishVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeUnpublishVolume), arg0, arg1) +} + +// NodeUnstageVolume mocks base method +func (m *MockNodeServer) NodeUnstageVolume(arg0 context.Context, arg1 *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeUnstageVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeUnstageVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeUnstageVolume indicates an expected call of NodeUnstageVolume +func (mr *MockNodeServerMockRecorder) NodeUnstageVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUnstageVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeUnstageVolume), arg0, arg1) +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/mock.go b/vendor/github.com/kubernetes-csi/csi-test/driver/mock.go new file mode 100644 index 0000000000..399d01708f --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/mock.go @@ -0,0 +1,89 @@ +/* +Copyright 2017 Luis Pabón luis@portworx.com + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver + +import ( + "net" + + "github.com/kubernetes-csi/csi-test/v3/utils" + "google.golang.org/grpc" +) + +type MockCSIDriverServers struct { + Controller *MockControllerServer + Identity *MockIdentityServer + Node *MockNodeServer +} + +type MockCSIDriver struct { + CSIDriver + conn *grpc.ClientConn +} + +func NewMockCSIDriver(servers *MockCSIDriverServers) *MockCSIDriver { + return &MockCSIDriver{ + CSIDriver: CSIDriver{ + servers: &CSIDriverServers{ + Controller: servers.Controller, + Node: servers.Node, + Identity: servers.Identity, + }, + }, + } +} + +// StartOnAddress starts a new gRPC server listening on given address. +func (m *MockCSIDriver) StartOnAddress(network, address string) error { + l, err := net.Listen(network, address) + if err != nil { + return err + } + + if err := m.CSIDriver.Start(l); err != nil { + l.Close() + return err + } + + return nil +} + +// Start starts a new gRPC server listening on a random TCP loopback port. 
+func (m *MockCSIDriver) Start() error { + // Listen on a port assigned by the net package + return m.StartOnAddress("tcp", "127.0.0.1:0") +} + +func (m *MockCSIDriver) Nexus() (*grpc.ClientConn, error) { + // Start server + err := m.Start() + if err != nil { + return nil, err + } + + // Create a client connection + m.conn, err = utils.Connect(m.Address()) + if err != nil { + return nil, err + } + + return m.conn, nil +} + +func (m *MockCSIDriver) Close() { + m.conn.Close() + m.server.Stop() +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/go.mod b/vendor/github.com/kubernetes-csi/csi-test/go.mod new file mode 100644 index 0000000000..0a68cf51d3 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/go.mod @@ -0,0 +1,20 @@ +module github.com/kubernetes-csi/csi-test/v3 + +go 1.12 + +require ( + github.com/container-storage-interface/spec v1.1.0 + github.com/golang/mock v1.2.0 + github.com/golang/protobuf v1.3.1 + github.com/google/uuid v1.1.1 + github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect + github.com/onsi/ginkgo v1.8.0 + github.com/onsi/gomega v1.5.0 + github.com/sirupsen/logrus v1.4.0 + golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a // indirect + golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53 + golang.org/x/sys v0.0.0-20190318195719-6c81ef8f67ca // indirect + google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 // indirect + google.golang.org/grpc v1.19.0 + gopkg.in/yaml.v2 v2.2.2 +) diff --git a/vendor/github.com/kubernetes-csi/csi-test/go.sum b/vendor/github.com/kubernetes-csi/csi-test/go.sum new file mode 100644 index 0000000000..1574d7b1ed --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/go.sum @@ -0,0 +1,80 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/container-storage-interface/spec v1.1.0 h1:qPsTqtR1VUPvMPeK0UnCZMtXaKGyyLPG8gj/wG6VqMs= +github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail 
v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.4.0 h1:yKenngtzGh+cUSSh6GWbxW2abRqhYUSR/t/6+2QqNvE= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a h1:YX8ljsm6wXlHZO+aRz9Exqr0evNhKRNe5K/gi+zKh4U= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53 h1:kcXqo9vE6fsZY5X5Rd7R1l7fTgnWaDCVmln65REefiE= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190318195719-6c81ef8f67ca h1:o2TLx1bGN3W+Ei0EMU5fShLupLmTOU95KvJJmfYhAzM= +golang.org/x/sys v0.0.0-20190318195719-6c81ef8f67ca/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh b/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh new file mode 100755 index 0000000000..e3a8c6cf2d --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh @@ -0,0 +1,116 @@ +#!/bin/bash + +TESTARGS=$@ +UDS="/tmp/e2e-csi-sanity.sock" +UDS_NODE="/tmp/e2e-csi-sanity-node.sock" +UDS_CONTROLLER="/tmp/e2e-csi-sanity-ctrl.sock" +CSI_ENDPOINTS="$CSI_ENDPOINTS ${UDS}" +CSI_MOCK_VERSION="master" + +# cleanup mock_driver_pid files... +cleanup () { + local pid="$1" + shift + kill -9 "$pid" + # We don't care about the 'hack/e2e.sh: line 15: 117018 Killed CSI_ENDPOINT=$1 ./bin/mock-driver' + wait "$pid" 2>/dev/null + rm -f "$@" +} + +# +# $1 - endpoint for mock. +# $2 - endpoint for csi-sanity in Grpc format. +# See https://github.com/grpc/grpc/blob/master/doc/naming.md +runTest() +( + CSI_ENDPOINT=$1 ./bin/mock-driver & + local pid=$! + trap 'cleanup $pid $1' EXIT + + ./cmd/csi-sanity/csi-sanity $TESTARGS --csi.endpoint=$2 --csi.testnodevolumeattachlimit +) + +runTestWithDifferentAddresses() +( + CSI_ENDPOINT=$1 CSI_CONTROLLER_ENDPOINT=$2 ./bin/mock-driver & + local pid=$! 
+ trap 'cleanup $pid $1' EXIT + + ./cmd/csi-sanity/csi-sanity $TESTARGS --csi.endpoint=$1 --csi.controllerendpoint=$2 +) + +runTestWithCreds() +( + CSI_ENDPOINT=$1 CSI_ENABLE_CREDS=true ./bin/mock-driver & + local pid=$! + trap 'cleanup $pid $1' EXIT + + ./cmd/csi-sanity/csi-sanity $TESTARGS --csi.endpoint=$2 --csi.secrets=mock/mocksecret.yaml --csi.testnodevolumeattachlimit +) + +runTestAPI() +( + CSI_ENDPOINT=$1 ./bin/mock-driver & + local pid=$! + trap 'cleanup $pid $1' EXIT + + go test -count=1 -v ./hack/_apitest/api_test.go && \ + go test -count=1 -v ./hack/_embedded/embedded_test.go +) + +runTestAPIWithCustomTargetPaths() +( + CSI_ENDPOINT=$1 ./bin/mock-driver & + local pid=$! + trap 'cleanup $pid $1' EXIT + + # Running a specific test to verify that the custom target paths are called + # a deterministic number of times. + go test -count=1 -v ./hack/_apitest2/api_test.go -ginkgo.focus="NodePublishVolume" +) + +runTestWithCustomTargetPaths() +( + + # Create a script for custom target path creation. + echo '#!/bin/bash +targetpath="/tmp/csi/$@" +mkdir -p $targetpath +echo $targetpath +' > custompathcreation.bash + + # Create a script for custom target path removal. + echo '#!/bin/bash +rm -rf $@ +' > custompathremoval.bash + + chmod +x custompathcreation.bash custompathremoval.bash + local creationscriptpath="$PWD/custompathcreation.bash" + local removalscriptpath="$PWD/custompathremoval.bash" + + CSI_ENDPOINT=$1 ./bin/mock-driver & + local pid=$! + trap 'cleanup $pid $1; rm $creationscriptpath $removalscriptpath' EXIT + + ./cmd/csi-sanity/csi-sanity $TESTARGS \ + --csi.endpoint=$2 \ + --csi.mountdir="foo/target/mount" \ + --csi.stagingdir="foo/staging/mount" \ + --csi.createmountpathcmd=$creationscriptpath \ + --csi.createstagingpathcmd=$creationscriptpath \ + --csi.removemountpathcmd=$removalscriptpath \ + --csi.removestagingpathcmd=$removalscriptpath +) + +make + +cd cmd/csi-sanity + make clean install || exit 1 +cd ../.. + +runTest "${UDS}" "${UDS}" && +runTestWithCreds "${UDS}" "${UDS}" && +runTestAPI "${UDS}" && +runTestWithDifferentAddresses "${UDS_NODE}" "${UDS_CONTROLLER}" && +runTestAPIWithCustomTargetPaths "${UDS}" && +runTestWithCustomTargetPaths "${UDS}" "${UDS}" diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/README.md b/vendor/github.com/kubernetes-csi/csi-test/mock/README.md new file mode 100644 index 0000000000..8274aa2c6b --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/README.md @@ -0,0 +1,22 @@ +# Mock CSI Driver +Extremely simple mock driver used to test `csi-sanity` based on `rexray/gocsi/mock`. +It can be used for testing of Container Orchestrators that implement client side +of CSI interface. + +``` +Usage of mock: + -disable-attach + Disables RPC_PUBLISH_UNPUBLISH_VOLUME capability. + -name string + CSI driver name. (default "io.kubernetes.storage.mock") +``` + +It prints all received CSI messages to stdout encoded as json, so a test can check that +CO sent the right CSI message. 
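Because every call is emitted as a single JSON-encoded line prefixed with `gRPCCall:`, a test can decode that stream directly. Below is a minimal, hypothetical sketch (not part of the upstream README or of the csi-test API) of a checker that reads such output from stdin; the struct field types are assumptions based on the example output shown further below.

```go
// Hypothetical helper (not from csi-test): decode the mock driver's
// "gRPCCall:" log lines so a test can assert on the CSI traffic it saw.
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
	"strings"
)

// grpcCall mirrors the JSON printed by the mock driver; field types are assumed.
type grpcCall struct {
	Method   string          `json:"Method"`
	Request  json.RawMessage `json:"Request"`
	Response json.RawMessage `json:"Response"`
	Error    string          `json:"Error"`
}

func main() {
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.HasPrefix(line, "gRPCCall: ") {
			continue // ignore any other log output from the driver
		}
		var c grpcCall
		if err := json.Unmarshal([]byte(strings.TrimPrefix(line, "gRPCCall: ")), &c); err != nil {
			fmt.Fprintln(os.Stderr, "bad gRPCCall line:", err)
			continue
		}
		// A real test would compare Method/Request/Error against expectations.
		fmt.Printf("%s error=%q\n", c.Method, c.Error)
	}
	if err := scanner.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```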
+ +Example of such output: + +``` +gRPCCall: {"Method":"/csi.v0.Controller/ControllerGetCapabilities","Request":{},"Response":{"capabilities":[{"Type":{"Rpc":{"type":1}}},{"Type":{"Rpc":{"type":3}}},{"Type":{"Rpc":{"type":4}}},{"Type":{"Rpc":{"type":6}}},{"Type":{"Rpc":{"type":5}}},{"Type":{"Rpc":{"type":2}}}]},"Error":""} +gRPCCall: {"Method":"/csi.v0.Controller/ControllerPublishVolume","Request":{"volume_id":"12","node_id":"some-fake-node-id","volume_capability":{"AccessType":{"Mount":{}},"access_mode":{"mode":1}}},"Response":null,"Error":"rpc error: code = NotFound desc = Not matching Node ID some-fake-node-id to Mock Node ID io.kubernetes.storage.mock"} +``` diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go b/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go new file mode 100644 index 0000000000..89835e11f2 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go @@ -0,0 +1,89 @@ +package cache + +import ( + "strings" + "sync" + + "github.com/container-storage-interface/spec/lib/go/csi" +) + +type SnapshotCache interface { + Add(snapshot Snapshot) + + Delete(i int) + + List(ready bool) []csi.Snapshot + + FindSnapshot(k, v string) (int, Snapshot) +} + +type Snapshot struct { + Name string + Parameters map[string]string + SnapshotCSI csi.Snapshot +} + +type snapshotCache struct { + snapshotsRWL sync.RWMutex + snapshots []Snapshot +} + +func NewSnapshotCache() SnapshotCache { + return &snapshotCache{ + snapshots: make([]Snapshot, 0), + } +} + +func (snap *snapshotCache) Add(snapshot Snapshot) { + snap.snapshotsRWL.Lock() + defer snap.snapshotsRWL.Unlock() + + snap.snapshots = append(snap.snapshots, snapshot) +} + +func (snap *snapshotCache) Delete(i int) { + snap.snapshotsRWL.Lock() + defer snap.snapshotsRWL.Unlock() + + copy(snap.snapshots[i:], snap.snapshots[i+1:]) + snap.snapshots = snap.snapshots[:len(snap.snapshots)-1] +} + +func (snap *snapshotCache) List(ready bool) []csi.Snapshot { + snap.snapshotsRWL.RLock() + defer snap.snapshotsRWL.RUnlock() + + snapshots := make([]csi.Snapshot, 0) + for _, v := range snap.snapshots { + if v.SnapshotCSI.GetReadyToUse() { + snapshots = append(snapshots, v.SnapshotCSI) + } + } + + return snapshots +} + +func (snap *snapshotCache) FindSnapshot(k, v string) (int, Snapshot) { + snap.snapshotsRWL.RLock() + defer snap.snapshotsRWL.RUnlock() + + snapshotIdx := -1 + for i, vi := range snap.snapshots { + switch k { + case "id": + if strings.EqualFold(v, vi.SnapshotCSI.GetSnapshotId()) { + return i, vi + } + case "sourceVolumeId": + if strings.EqualFold(v, vi.SnapshotCSI.SourceVolumeId) { + return i, vi + } + case "name": + if vi.Name == v { + return i, vi + } + } + } + + return snapshotIdx, Snapshot{} +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml b/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml new file mode 100644 index 0000000000..5b5f076a31 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml @@ -0,0 +1,18 @@ +CreateVolumeSecret: + secretKey: secretval1 +DeleteVolumeSecret: + secretKey: secretval2 +ControllerPublishVolumeSecret: + secretKey: secretval3 +ControllerUnpublishVolumeSecret: + secretKey: secretval4 +NodeStageVolumeSecret: + secretKey: secretval5 +NodePublishVolumeSecret: + secretKey: secretval6 +CreateSnapshotSecret: + secretKey: secretval7 +DeleteSnapshotSecret: + secretKey: secretval8 +ControllerValidateVolumeCapabilitiesSecret: + secretKey: secretval9 diff --git 
a/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go new file mode 100644 index 0000000000..4674981b14 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go @@ -0,0 +1,678 @@ +package service + +import ( + "fmt" + "math" + "path" + "reflect" + "strconv" + + log "github.com/sirupsen/logrus" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/container-storage-interface/spec/lib/go/csi" +) + +const ( + MaxStorageCapacity = tib + ReadOnlyKey = "readonly" +) + +func (s *service) CreateVolume( + ctx context.Context, + req *csi.CreateVolumeRequest) ( + *csi.CreateVolumeResponse, error) { + + if len(req.Name) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume Name cannot be empty") + } + if req.VolumeCapabilities == nil { + return nil, status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty") + } + + // Check to see if the volume already exists. + if i, v := s.findVolByName(ctx, req.Name); i >= 0 { + // Requested volume name already exists, need to check if the existing volume's + // capacity is more or equal to new request's capacity. + if v.GetCapacityBytes() < req.GetCapacityRange().GetRequiredBytes() { + return nil, status.Error(codes.AlreadyExists, + fmt.Sprintf("Volume with name %s already exists", req.GetName())) + } + return &csi.CreateVolumeResponse{Volume: &v}, nil + } + + // If no capacity is specified then use 100GiB + capacity := gib100 + if cr := req.CapacityRange; cr != nil { + if rb := cr.RequiredBytes; rb > 0 { + capacity = rb + } + if lb := cr.LimitBytes; lb > 0 { + capacity = lb + } + } + // Check for maximum available capacity + if capacity >= MaxStorageCapacity { + return nil, status.Errorf(codes.OutOfRange, "Requested capacity %d exceeds maximum allowed %d", capacity, MaxStorageCapacity) + } + + var v csi.Volume + // Create volume from content source if provided. + if req.GetVolumeContentSource() != nil { + switch req.GetVolumeContentSource().GetType().(type) { + case *csi.VolumeContentSource_Snapshot: + sid := req.GetVolumeContentSource().GetSnapshot().GetSnapshotId() + // Check if the source snapshot exists. + if snapID, _ := s.snapshots.FindSnapshot("id", sid); snapID >= 0 { + v = s.newVolumeFromSnapshot(req.Name, capacity, snapID) + } else { + return nil, status.Errorf(codes.NotFound, "Requested source snapshot %s not found", sid) + } + case *csi.VolumeContentSource_Volume: + vid := req.GetVolumeContentSource().GetVolume().GetVolumeId() + // Check if the source volume exists. + if volID, _ := s.findVolNoLock("id", vid); volID >= 0 { + v = s.newVolumeFromVolume(req.Name, capacity, volID) + } else { + return nil, status.Errorf(codes.NotFound, "Requested source volume %s not found", vid) + } + } + } else { + v = s.newVolume(req.Name, capacity) + } + + // Add the created volume to the service's in-mem volume slice. 
+ s.volsRWL.Lock() + defer s.volsRWL.Unlock() + s.vols = append(s.vols, v) + MockVolumes[v.GetVolumeId()] = Volume{ + VolumeCSI: v, + NodeID: "", + ISStaged: false, + ISPublished: false, + StageTargetPath: "", + TargetPath: "", + } + + return &csi.CreateVolumeResponse{Volume: &v}, nil +} + +func (s *service) DeleteVolume( + ctx context.Context, + req *csi.DeleteVolumeRequest) ( + *csi.DeleteVolumeResponse, error) { + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + // If the volume is not specified, return error + if len(req.VolumeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + + // If the volume does not exist then return an idempotent response. + i, _ := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return &csi.DeleteVolumeResponse{}, nil + } + + // This delete logic preserves order and prevents potential memory + // leaks. The slice's elements may not be pointers, but the structs + // themselves have fields that are. + copy(s.vols[i:], s.vols[i+1:]) + s.vols[len(s.vols)-1] = csi.Volume{} + s.vols = s.vols[:len(s.vols)-1] + log.WithField("volumeID", req.VolumeId).Debug("mock delete volume") + return &csi.DeleteVolumeResponse{}, nil +} + +func (s *service) ControllerPublishVolume( + ctx context.Context, + req *csi.ControllerPublishVolumeRequest) ( + *csi.ControllerPublishVolumeResponse, error) { + + if s.config.DisableAttach { + return nil, status.Error(codes.Unimplemented, "ControllerPublish is not supported") + } + + if len(req.VolumeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + if len(req.NodeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Node ID cannot be empty") + } + if req.VolumeCapability == nil { + return nil, status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty") + } + + if req.NodeId != s.nodeID { + return nil, status.Errorf(codes.NotFound, "Not matching Node ID %s to Mock Node ID %s", req.NodeId, s.nodeID) + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // devPathKey is the key in the volume's attributes that is set to a + // mock device path if the volume has been published by the controller + // to the specified node. + devPathKey := path.Join(req.NodeId, "dev") + + // Check to see if the volume is already published. + if device := v.VolumeContext[devPathKey]; device != "" { + var volRo bool + var roVal string + if ro, ok := v.VolumeContext[ReadOnlyKey]; ok { + roVal = ro + } + + if roVal == "true" { + volRo = true + } else { + volRo = false + } + + // Check if readonly flag is compatible with the publish request. + if req.GetReadonly() != volRo { + return nil, status.Error(codes.AlreadyExists, "Volume published but has incompatible readonly flag") + } + + return &csi.ControllerPublishVolumeResponse{ + PublishContext: map[string]string{ + "device": device, + "readonly": roVal, + }, + }, nil + } + + // Check attach limit before publishing only if attach limit is set. + if s.config.AttachLimit > 0 && s.getAttachCount(devPathKey) >= s.config.AttachLimit { + return nil, status.Errorf(codes.ResourceExhausted, "Cannot attach any more volumes to this node") + } + + var roVal string + if req.GetReadonly() { + roVal = "true" + } else { + roVal = "false" + } + + // Publish the volume. 
+ device := "/dev/mock" + v.VolumeContext[devPathKey] = device + v.VolumeContext[ReadOnlyKey] = roVal + s.vols[i] = v + + return &csi.ControllerPublishVolumeResponse{ + PublishContext: map[string]string{ + "device": device, + "readonly": roVal, + }, + }, nil +} + +func (s *service) ControllerUnpublishVolume( + ctx context.Context, + req *csi.ControllerUnpublishVolumeRequest) ( + *csi.ControllerUnpublishVolumeResponse, error) { + + if s.config.DisableAttach { + return nil, status.Error(codes.Unimplemented, "ControllerPublish is not supported") + } + + if len(req.VolumeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + nodeID := req.NodeId + if len(nodeID) == 0 { + // If node id is empty, no failure as per Spec + nodeID = s.nodeID + } + + if req.NodeId != s.nodeID { + return nil, status.Errorf(codes.NotFound, "Node ID %s does not match to expected Node ID %s", req.NodeId, s.nodeID) + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // devPathKey is the key in the volume's attributes that is set to a + // mock device path if the volume has been published by the controller + // to the specified node. + devPathKey := path.Join(nodeID, "dev") + + // Check to see if the volume is already unpublished. + if v.VolumeContext[devPathKey] == "" { + return &csi.ControllerUnpublishVolumeResponse{}, nil + } + + // Unpublish the volume. + delete(v.VolumeContext, devPathKey) + delete(v.VolumeContext, ReadOnlyKey) + s.vols[i] = v + + return &csi.ControllerUnpublishVolumeResponse{}, nil +} + +func (s *service) ValidateVolumeCapabilities( + ctx context.Context, + req *csi.ValidateVolumeCapabilitiesRequest) ( + *csi.ValidateVolumeCapabilitiesResponse, error) { + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + if len(req.VolumeCapabilities) == 0 { + return nil, status.Error(codes.InvalidArgument, req.VolumeId) + } + i, _ := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + return &csi.ValidateVolumeCapabilitiesResponse{ + Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{ + VolumeContext: req.GetVolumeContext(), + VolumeCapabilities: req.GetVolumeCapabilities(), + Parameters: req.GetParameters(), + }, + }, nil +} + +func (s *service) ListVolumes( + ctx context.Context, + req *csi.ListVolumesRequest) ( + *csi.ListVolumesResponse, error) { + + // Copy the mock volumes into a new slice in order to avoid + // locking the service's volume slice for the duration of the + // ListVolumes RPC. + var vols []csi.Volume + func() { + s.volsRWL.RLock() + defer s.volsRWL.RUnlock() + vols = make([]csi.Volume, len(s.vols)) + copy(vols, s.vols) + }() + + var ( + ulenVols = int32(len(vols)) + maxEntries = req.MaxEntries + startingToken int32 + ) + + if v := req.StartingToken; v != "" { + i, err := strconv.ParseUint(v, 10, 32) + if err != nil { + return nil, status.Errorf( + codes.Aborted, + "startingToken=%d !< int32=%d", + startingToken, math.MaxUint32) + } + startingToken = int32(i) + } + + if startingToken > ulenVols { + return nil, status.Errorf( + codes.Aborted, + "startingToken=%d > len(vols)=%d", + startingToken, ulenVols) + } + + // Discern the number of remaining entries. 
+ rem := ulenVols - startingToken + + // If maxEntries is 0 or greater than the number of remaining entries then + // set maxEntries to the number of remaining entries. + if maxEntries == 0 || maxEntries > rem { + maxEntries = rem + } + + var ( + i int + j = startingToken + entries = make( + []*csi.ListVolumesResponse_Entry, + maxEntries) + ) + + for i = 0; i < len(entries); i++ { + entries[i] = &csi.ListVolumesResponse_Entry{ + Volume: &vols[j], + } + j++ + } + + var nextToken string + if n := startingToken + int32(i); n < ulenVols { + nextToken = fmt.Sprintf("%d", n) + } + + return &csi.ListVolumesResponse{ + Entries: entries, + NextToken: nextToken, + }, nil +} + +func (s *service) GetCapacity( + ctx context.Context, + req *csi.GetCapacityRequest) ( + *csi.GetCapacityResponse, error) { + + return &csi.GetCapacityResponse{ + AvailableCapacity: MaxStorageCapacity, + }, nil +} + +func (s *service) ControllerGetCapabilities( + ctx context.Context, + req *csi.ControllerGetCapabilitiesRequest) ( + *csi.ControllerGetCapabilitiesResponse, error) { + + caps := []*csi.ControllerServiceCapability{ + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_LIST_VOLUMES, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_GET_CAPACITY, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_PUBLISH_READONLY, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CLONE_VOLUME, + }, + }, + }, + } + + if !s.config.DisableAttach { + caps = append(caps, &csi.ControllerServiceCapability{ + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, + }, + }, + }) + } + + if !s.config.DisableControllerExpansion { + caps = append(caps, &csi.ControllerServiceCapability{ + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, + }, + }, + }) + } + + return &csi.ControllerGetCapabilitiesResponse{ + Capabilities: caps, + }, nil +} + +func (s *service) CreateSnapshot(ctx context.Context, + req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { + // Check arguments + if len(req.GetName()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot Name cannot be empty") + } + if len(req.GetSourceVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot SourceVolumeId cannot be empty") + } + + // Check to see if the snapshot already exists. 
+ if i, v := s.snapshots.FindSnapshot("name", req.GetName()); i >= 0 { + // Requested snapshot name already exists + if v.SnapshotCSI.GetSourceVolumeId() != req.GetSourceVolumeId() || !reflect.DeepEqual(v.Parameters, req.GetParameters()) { + return nil, status.Error(codes.AlreadyExists, + fmt.Sprintf("Snapshot with name %s already exists", req.GetName())) + } + return &csi.CreateSnapshotResponse{Snapshot: &v.SnapshotCSI}, nil + } + + // Create the snapshot and add it to the service's in-mem snapshot slice. + snapshot := s.newSnapshot(req.GetName(), req.GetSourceVolumeId(), req.GetParameters()) + s.snapshots.Add(snapshot) + + return &csi.CreateSnapshotResponse{Snapshot: &snapshot.SnapshotCSI}, nil +} + +func (s *service) DeleteSnapshot(ctx context.Context, + req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { + + // If the snapshot is not specified, return error + if len(req.SnapshotId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot ID cannot be empty") + } + + // If the snapshot does not exist then return an idempotent response. + i, _ := s.snapshots.FindSnapshot("id", req.SnapshotId) + if i < 0 { + return &csi.DeleteSnapshotResponse{}, nil + } + + // This delete logic preserves order and prevents potential memory + // leaks. The slice's elements may not be pointers, but the structs + // themselves have fields that are. + s.snapshots.Delete(i) + log.WithField("SnapshotId", req.SnapshotId).Debug("mock delete snapshot") + return &csi.DeleteSnapshotResponse{}, nil +} + +func (s *service) ListSnapshots(ctx context.Context, + req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + + // case 1: SnapshotId is not empty, return snapshots that match the snapshot id. + if len(req.GetSnapshotId()) != 0 { + return getSnapshotById(s, req) + } + + // case 2: SourceVolumeId is not empty, return snapshots that match the source volume id. + if len(req.GetSourceVolumeId()) != 0 { + return getSnapshotByVolumeId(s, req) + } + + // case 3: no parameter is set, so we return all the snapshots. + return getAllSnapshots(s, req) +} + +func (s *service) ControllerExpandVolume( + ctx context.Context, + req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) { + if len(req.VolumeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + + if req.CapacityRange == nil { + return nil, status.Error(codes.InvalidArgument, "Request capacity cannot be empty") + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + if s.config.DisableOnlineExpansion && MockVolumes[v.GetVolumeId()].ISPublished { + return nil, status.Error(codes.Aborted, "volume is published and online volume expansion is not supported") + } + + requestBytes := req.CapacityRange.RequiredBytes + + if v.CapacityBytes > requestBytes { + return nil, status.Error(codes.InvalidArgument, "cannot change volume capacity to a smaller size") + } + + resp := &csi.ControllerExpandVolumeResponse{ + CapacityBytes: requestBytes, + NodeExpansionRequired: s.config.NodeExpansionRequired, + } + + // Check to see if the volume already satisfied request size. + if v.CapacityBytes == requestBytes { + log.WithField("volumeID", v.VolumeId).Infof("Volume capacity is already %d, no need to expand", requestBytes) + return resp, nil + } + + // Update volume's capacity to the requested size. 
+ v.CapacityBytes = requestBytes + s.vols[i] = v + + return resp, nil +} + +func getSnapshotById(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + if len(req.GetSnapshotId()) != 0 { + i, snapshot := s.snapshots.FindSnapshot("id", req.GetSnapshotId()) + if i < 0 { + return &csi.ListSnapshotsResponse{}, nil + } + + if len(req.GetSourceVolumeId()) != 0 { + if snapshot.SnapshotCSI.GetSourceVolumeId() != req.GetSourceVolumeId() { + return &csi.ListSnapshotsResponse{}, nil + } + } + + return &csi.ListSnapshotsResponse{ + Entries: []*csi.ListSnapshotsResponse_Entry{ + { + Snapshot: &snapshot.SnapshotCSI, + }, + }, + }, nil + } + return nil, nil +} + +func getSnapshotByVolumeId(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + if len(req.GetSourceVolumeId()) != 0 { + i, snapshot := s.snapshots.FindSnapshot("sourceVolumeId", req.SourceVolumeId) + if i < 0 { + return &csi.ListSnapshotsResponse{}, nil + } + return &csi.ListSnapshotsResponse{ + Entries: []*csi.ListSnapshotsResponse_Entry{ + { + Snapshot: &snapshot.SnapshotCSI, + }, + }, + }, nil + } + return nil, nil +} + +func getAllSnapshots(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + // Copy the mock snapshots into a new slice in order to avoid + // locking the service's snapshot slice for the duration of the + // ListSnapshots RPC. + readyToUse := true + snapshots := s.snapshots.List(readyToUse) + + var ( + ulenSnapshots = int32(len(snapshots)) + maxEntries = req.MaxEntries + startingToken int32 + ) + + if v := req.StartingToken; v != "" { + i, err := strconv.ParseUint(v, 10, 32) + if err != nil { + return nil, status.Errorf( + codes.Aborted, + "startingToken=%d !< int32=%d", + startingToken, math.MaxUint32) + } + startingToken = int32(i) + } + + if startingToken > ulenSnapshots { + return nil, status.Errorf( + codes.Aborted, + "startingToken=%d > len(snapshots)=%d", + startingToken, ulenSnapshots) + } + + // Discern the number of remaining entries. + rem := ulenSnapshots - startingToken + + // If maxEntries is 0 or greater than the number of remaining entries then + // set maxEntries to the number of remaining entries. 
+ if maxEntries == 0 || maxEntries > rem { + maxEntries = rem + } + + var ( + i int + j = startingToken + entries = make( + []*csi.ListSnapshotsResponse_Entry, + maxEntries) + ) + + for i = 0; i < len(entries); i++ { + entries[i] = &csi.ListSnapshotsResponse_Entry{ + Snapshot: &snapshots[j], + } + j++ + } + + var nextToken string + if n := startingToken + int32(i); n < ulenSnapshots { + nextToken = fmt.Sprintf("%d", n) + } + + return &csi.ListSnapshotsResponse{ + Entries: entries, + NextToken: nextToken, + }, nil +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go new file mode 100644 index 0000000000..41d08aaa8b --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go @@ -0,0 +1,61 @@ +package service + +import ( + "golang.org/x/net/context" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/protobuf/ptypes/wrappers" +) + +func (s *service) GetPluginInfo( + ctx context.Context, + req *csi.GetPluginInfoRequest) ( + *csi.GetPluginInfoResponse, error) { + + return &csi.GetPluginInfoResponse{ + Name: s.config.DriverName, + VendorVersion: VendorVersion, + Manifest: Manifest, + }, nil +} + +func (s *service) Probe( + ctx context.Context, + req *csi.ProbeRequest) ( + *csi.ProbeResponse, error) { + + return &csi.ProbeResponse{ + Ready: &wrappers.BoolValue{Value: true}, + }, nil +} + +func (s *service) GetPluginCapabilities( + ctx context.Context, + req *csi.GetPluginCapabilitiesRequest) ( + *csi.GetPluginCapabilitiesResponse, error) { + + volExpType := csi.PluginCapability_VolumeExpansion_ONLINE + + if s.config.DisableOnlineExpansion { + volExpType = csi.PluginCapability_VolumeExpansion_OFFLINE + } + + return &csi.GetPluginCapabilitiesResponse{ + Capabilities: []*csi.PluginCapability{ + { + Type: &csi.PluginCapability_Service_{ + Service: &csi.PluginCapability_Service{ + Type: csi.PluginCapability_Service_CONTROLLER_SERVICE, + }, + }, + }, + { + Type: &csi.PluginCapability_VolumeExpansion_{ + VolumeExpansion: &csi.PluginCapability_VolumeExpansion{ + Type: volExpType, + }, + }, + }, + }, + }, nil +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go new file mode 100644 index 0000000000..b147aeceac --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go @@ -0,0 +1,392 @@ +package service + +import ( + "os" + "path" + "strconv" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "golang.org/x/net/context" + + "github.com/container-storage-interface/spec/lib/go/csi" +) + +func (s *service) NodeStageVolume( + ctx context.Context, + req *csi.NodeStageVolumeRequest) ( + *csi.NodeStageVolumeResponse, error) { + + device, ok := req.PublishContext["device"] + if !ok { + if s.config.DisableAttach { + device = "mock device" + } else { + return nil, status.Error( + codes.InvalidArgument, + "stage volume info 'device' key required") + } + } + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + + if len(req.GetStagingTargetPath()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Staging Target Path cannot be empty") + } + + if req.GetVolumeCapability() == nil { + return nil, status.Error(codes.InvalidArgument, "Volume Capability cannot be empty") + } + + exists, err := checkTargetExists(req.StagingTargetPath) + if err != nil { + return nil, 
status.Error(codes.Internal, err.Error()) + } + if !exists { + status.Errorf(codes.Internal, "staging target path %s does not exist", req.StagingTargetPath) + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // nodeStgPathKey is the key in the volume's attributes that is set to a + // mock stage path if the volume has been published by the node + nodeStgPathKey := path.Join(s.nodeID, req.StagingTargetPath) + + // Check to see if the volume has already been staged. + if v.VolumeContext[nodeStgPathKey] != "" { + // TODO: Check for the capabilities to be equal. Return "ALREADY_EXISTS" + // if the capabilities don't match. + return &csi.NodeStageVolumeResponse{}, nil + } + + // Stage the volume. + v.VolumeContext[nodeStgPathKey] = device + s.vols[i] = v + + return &csi.NodeStageVolumeResponse{}, nil +} + +func (s *service) NodeUnstageVolume( + ctx context.Context, + req *csi.NodeUnstageVolumeRequest) ( + *csi.NodeUnstageVolumeResponse, error) { + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + + if len(req.GetStagingTargetPath()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Staging Target Path cannot be empty") + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // nodeStgPathKey is the key in the volume's attributes that is set to a + // mock stage path if the volume has been published by the node + nodeStgPathKey := path.Join(s.nodeID, req.StagingTargetPath) + + // Check to see if the volume has already been unstaged. + if v.VolumeContext[nodeStgPathKey] == "" { + return &csi.NodeUnstageVolumeResponse{}, nil + } + + // Unpublish the volume. + delete(v.VolumeContext, nodeStgPathKey) + s.vols[i] = v + + return &csi.NodeUnstageVolumeResponse{}, nil +} + +func (s *service) NodePublishVolume( + ctx context.Context, + req *csi.NodePublishVolumeRequest) ( + *csi.NodePublishVolumeResponse, error) { + + ephemeralVolume := req.GetVolumeContext()["csi.storage.k8s.io/ephemeral"] == "true" + device, ok := req.PublishContext["device"] + if !ok { + if ephemeralVolume || s.config.DisableAttach { + device = "mock device" + } else { + return nil, status.Error( + codes.InvalidArgument, + "stage volume info 'device' key required") + } + } + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + + if len(req.GetTargetPath()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Target Path cannot be empty") + } + + if req.GetVolumeCapability() == nil { + return nil, status.Error(codes.InvalidArgument, "Volume Capability cannot be empty") + } + + // May happen with old (or, at this time, even the current) Kubernetes + // although it shouldn't (https://github.com/kubernetes/kubernetes/issues/75535). 
+ exists, err := checkTargetExists(req.TargetPath) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + if !s.config.PermissiveTargetPath && exists { + status.Errorf(codes.Internal, "target path %s does exist", req.TargetPath) + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 && !ephemeralVolume { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + if i >= 0 && ephemeralVolume { + return nil, status.Error(codes.AlreadyExists, req.VolumeId) + } + + // nodeMntPathKey is the key in the volume's attributes that is set to a + // mock mount path if the volume has been published by the node + nodeMntPathKey := path.Join(s.nodeID, req.TargetPath) + + // Check to see if the volume has already been published. + if v.VolumeContext[nodeMntPathKey] != "" { + + // Requests marked Readonly fail due to volumes published by + // the Mock driver supporting only RW mode. + if req.Readonly { + return nil, status.Error(codes.AlreadyExists, req.VolumeId) + } + + return &csi.NodePublishVolumeResponse{}, nil + } + + // Publish the volume. + if ephemeralVolume { + MockVolumes[req.VolumeId] = Volume{ + ISEphemeral: true, + } + } else { + if req.GetStagingTargetPath() != "" { + exists, err := checkTargetExists(req.GetStagingTargetPath()) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + if !exists { + status.Errorf(codes.Internal, "staging target path %s does not exist", req.GetStagingTargetPath()) + } + v.VolumeContext[nodeMntPathKey] = req.GetStagingTargetPath() + } else { + v.VolumeContext[nodeMntPathKey] = device + } + s.vols[i] = v + } + + return &csi.NodePublishVolumeResponse{}, nil +} + +func (s *service) NodeUnpublishVolume( + ctx context.Context, + req *csi.NodeUnpublishVolumeRequest) ( + *csi.NodeUnpublishVolumeResponse, error) { + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + if len(req.GetTargetPath()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Target Path cannot be empty") + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + ephemeralVolume := MockVolumes[req.VolumeId].ISEphemeral + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 && !ephemeralVolume { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + if ephemeralVolume { + delete(MockVolumes, req.VolumeId) + } else { + // nodeMntPathKey is the key in the volume's attributes that is set to a + // mock mount path if the volume has been published by the node + nodeMntPathKey := path.Join(s.nodeID, req.TargetPath) + + // Check to see if the volume has already been unpublished. + if v.VolumeContext[nodeMntPathKey] == "" { + return &csi.NodeUnpublishVolumeResponse{}, nil + } + + // Unpublish the volume. 
+ delete(v.VolumeContext, nodeMntPathKey) + s.vols[i] = v + } + + return &csi.NodeUnpublishVolumeResponse{}, nil +} + +func (s *service) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) { + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + if len(req.GetVolumePath()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume Path cannot be empty") + } + if req.GetCapacityRange() == nil { + return nil, status.Error(codes.InvalidArgument, "Request capacity cannot be empty") + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // TODO: NodeExpandVolume MUST be called after successful NodeStageVolume as we has STAGE_UNSTAGE_VOLUME node capacity. + + requestCapacity := req.GetCapacityRange().RequiredBytes + resp := &csi.NodeExpandVolumeResponse{CapacityBytes: requestCapacity} + + // fsCapacityKey is the key in the volume's attributes that is set to the file system's size. + fsCapacityKey := path.Join(s.nodeID, req.GetVolumePath(), "size") + oldCapacityStr, exist := v.VolumeContext[fsCapacityKey] + if exist { + oldCapacity, err := strconv.ParseInt(oldCapacityStr, 10, 64) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + if oldCapacity > requestCapacity { + return nil, status.Error(codes.InvalidArgument, "cannot change file system size to a smaller size") + } + if oldCapacity == requestCapacity { + // File system capacity is equal to requested size, no need to expand. + return resp, nil + } + } + + // Update volume's fs capacity to requested size. + v.VolumeContext[fsCapacityKey] = strconv.FormatInt(requestCapacity, 10) + s.vols[i] = v + + return resp, nil +} + +func (s *service) NodeGetCapabilities( + ctx context.Context, + req *csi.NodeGetCapabilitiesRequest) ( + *csi.NodeGetCapabilitiesResponse, error) { + + capabilities := []*csi.NodeServiceCapability{ + { + Type: &csi.NodeServiceCapability_Rpc{ + Rpc: &csi.NodeServiceCapability_RPC{ + Type: csi.NodeServiceCapability_RPC_UNKNOWN, + }, + }, + }, + { + Type: &csi.NodeServiceCapability_Rpc{ + Rpc: &csi.NodeServiceCapability_RPC{ + Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME, + }, + }, + }, + { + Type: &csi.NodeServiceCapability_Rpc{ + Rpc: &csi.NodeServiceCapability_RPC{ + Type: csi.NodeServiceCapability_RPC_GET_VOLUME_STATS, + }, + }, + }, + } + if s.config.NodeExpansionRequired { + capabilities = append(capabilities, &csi.NodeServiceCapability{ + Type: &csi.NodeServiceCapability_Rpc{ + Rpc: &csi.NodeServiceCapability_RPC{ + Type: csi.NodeServiceCapability_RPC_EXPAND_VOLUME, + }, + }, + }) + } + + return &csi.NodeGetCapabilitiesResponse{ + Capabilities: capabilities, + }, nil +} + +func (s *service) NodeGetInfo(ctx context.Context, + req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { + csiNodeResponse := &csi.NodeGetInfoResponse{ + NodeId: s.nodeID, + } + if s.config.AttachLimit > 0 { + csiNodeResponse.MaxVolumesPerNode = s.config.AttachLimit + } + return csiNodeResponse, nil +} + +func (s *service) NodeGetVolumeStats(ctx context.Context, + req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + + if len(req.GetVolumePath()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume 
Path cannot be empty")
+    }
+
+    i, v := s.findVolNoLock("id", req.VolumeId)
+    if i < 0 {
+        return nil, status.Error(codes.NotFound, req.VolumeId)
+    }
+
+    nodeMntPathKey := path.Join(s.nodeID, req.VolumePath)
+
+    _, exists := v.VolumeContext[nodeMntPathKey]
+    if !exists {
+        return nil, status.Errorf(codes.NotFound, "volume %q does not exist on the specified path %q", req.VolumeId, req.VolumePath)
+    }
+
+    return &csi.NodeGetVolumeStatsResponse{
+        Usage: []*csi.VolumeUsage{
+            {
+                Total: v.GetCapacityBytes(),
+                Unit:  csi.VolumeUsage_BYTES,
+            },
+        },
+    }, nil
+}
+
+// checkTargetExists checks if a given path exists.
+func checkTargetExists(targetPath string) (bool, error) {
+    _, err := os.Stat(targetPath)
+    switch {
+    case err == nil:
+        return true, nil
+    case os.IsNotExist(err):
+        return false, nil
+    default:
+        return false, err
+    }
+}
diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go
new file mode 100644
index 0000000000..3ef85a87af
--- /dev/null
+++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go
@@ -0,0 +1,187 @@
+package service
+
+import (
+    "fmt"
+    "strings"
+    "sync"
+    "sync/atomic"
+
+    "github.com/container-storage-interface/spec/lib/go/csi"
+    "github.com/kubernetes-csi/csi-test/v3/mock/cache"
+    "golang.org/x/net/context"
+
+    "github.com/golang/protobuf/ptypes"
+)
+
+const (
+    // Name is the name of the CSI plug-in.
+    Name = "io.kubernetes.storage.mock"
+
+    // VendorVersion is the version returned by GetPluginInfo.
+    VendorVersion = "0.3.0"
+)
+
+// Manifest is the SP's manifest.
+var Manifest = map[string]string{
+    "url": "https://github.com/kubernetes-csi/csi-test/mock",
+}
+
+type Config struct {
+    DisableAttach              bool
+    DriverName                 string
+    AttachLimit                int64
+    NodeExpansionRequired      bool
+    DisableControllerExpansion bool
+    DisableOnlineExpansion     bool
+    PermissiveTargetPath       bool
+}
+
+// Service is the CSI Mock service provider.
+type Service interface {
+    csi.ControllerServer
+    csi.IdentityServer
+    csi.NodeServer
+}
+
+type service struct {
+    sync.Mutex
+    nodeID       string
+    vols         []csi.Volume
+    volsRWL      sync.RWMutex
+    volsNID      uint64
+    snapshots    cache.SnapshotCache
+    snapshotsNID uint64
+    config       Config
+}
+
+type Volume struct {
+    sync.Mutex
+    VolumeCSI       csi.Volume
+    NodeID          string
+    ISStaged        bool
+    ISPublished     bool
+    ISEphemeral     bool
+    StageTargetPath string
+    TargetPath      string
+}
+
+var MockVolumes map[string]Volume
+
+// New returns a new Service.
+func New(config Config) Service { + s := &service{ + nodeID: config.DriverName, + config: config, + } + s.snapshots = cache.NewSnapshotCache() + s.vols = []csi.Volume{ + s.newVolume("Mock Volume 1", gib100), + s.newVolume("Mock Volume 2", gib100), + s.newVolume("Mock Volume 3", gib100), + } + MockVolumes = map[string]Volume{} + + s.snapshots.Add(s.newSnapshot("Mock Snapshot 1", "1", map[string]string{"Description": "snapshot 1"})) + s.snapshots.Add(s.newSnapshot("Mock Snapshot 2", "2", map[string]string{"Description": "snapshot 2"})) + s.snapshots.Add(s.newSnapshot("Mock Snapshot 3", "3", map[string]string{"Description": "snapshot 3"})) + + return s +} + +const ( + kib int64 = 1024 + mib int64 = kib * 1024 + gib int64 = mib * 1024 + gib100 int64 = gib * 100 + tib int64 = gib * 1024 + tib100 int64 = tib * 100 +) + +func (s *service) newVolume(name string, capcity int64) csi.Volume { + return csi.Volume{ + VolumeId: fmt.Sprintf("%d", atomic.AddUint64(&s.volsNID, 1)), + VolumeContext: map[string]string{"name": name}, + CapacityBytes: capcity, + } +} + +func (s *service) newVolumeFromSnapshot(name string, capacity int64, snapshotID int) csi.Volume { + vol := s.newVolume(name, capacity) + vol.ContentSource = &csi.VolumeContentSource{ + Type: &csi.VolumeContentSource_Snapshot{ + Snapshot: &csi.VolumeContentSource_SnapshotSource{ + SnapshotId: string(snapshotID), + }, + }, + } + return vol +} + +func (s *service) newVolumeFromVolume(name string, capacity int64, volumeID int) csi.Volume { + vol := s.newVolume(name, capacity) + vol.ContentSource = &csi.VolumeContentSource{ + Type: &csi.VolumeContentSource_Volume{ + Volume: &csi.VolumeContentSource_VolumeSource{ + VolumeId: string(volumeID), + }, + }, + } + return vol +} + +func (s *service) findVol(k, v string) (volIdx int, volInfo csi.Volume) { + s.volsRWL.RLock() + defer s.volsRWL.RUnlock() + return s.findVolNoLock(k, v) +} + +func (s *service) findVolNoLock(k, v string) (volIdx int, volInfo csi.Volume) { + volIdx = -1 + + for i, vi := range s.vols { + switch k { + case "id": + if strings.EqualFold(v, vi.GetVolumeId()) { + return i, vi + } + case "name": + if n, ok := vi.VolumeContext["name"]; ok && strings.EqualFold(v, n) { + return i, vi + } + } + } + + return +} + +func (s *service) findVolByName( + ctx context.Context, name string) (int, csi.Volume) { + + return s.findVol("name", name) +} + +func (s *service) newSnapshot(name, sourceVolumeId string, parameters map[string]string) cache.Snapshot { + + ptime := ptypes.TimestampNow() + return cache.Snapshot{ + Name: name, + Parameters: parameters, + SnapshotCSI: csi.Snapshot{ + SnapshotId: fmt.Sprintf("%d", atomic.AddUint64(&s.snapshotsNID, 1)), + CreationTime: ptime, + SourceVolumeId: sourceVolumeId, + ReadyToUse: true, + }, + } +} + +// getAttachCount returns the number of attached volumes on the node. 
+func (s *service) getAttachCount(devPathKey string) int64 { + var count int64 + for _, v := range s.vols { + if device := v.VolumeContext[devPathKey]; device != "" { + count++ + } + } + return count +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go index a40879e100..0f7d23e962 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go @@ -259,11 +259,18 @@ var _ = DescribeSanity("Controller Service [Controller Server]", func(sc *Sanity Expect(len(vols.GetEntries())).To(Equal(totalVols)) }) - It("pagination should detect volumes added between pages and accept tokens when the last volume from a page is deleted", func() { + // Disabling this below case as it is fragile and results are inconsistent + // when no of volumes are different. The test might fail on a driver + // which implements the pagination based on index just by altering + // minVolCount := 4 and maxEntries := 3 + // Related discussion links: + // https://github.com/intel/pmem-csi/pull/424#issuecomment-540499938 + // https://github.com/kubernetes-csi/csi-test/issues/223 + XIt("pagination should detect volumes added between pages and accept tokens when the last volume from a page is deleted", func() { // minVolCount is the minimum number of volumes expected to exist, // based on which paginated volume listing is performed. minVolCount := 3 - // maxEntried is the maximum entries in list volume request. + // maxEntries is the maximum entries in list volume request. maxEntries := 2 // existing_vols to keep a record of the volumes that should exist existing_vols := map[string]bool{} @@ -358,6 +365,8 @@ var _ = DescribeSanity("Controller Service [Controller Server]", func(sc *Sanity Expect(err).NotTo(HaveOccurred()) Expect(vol).NotTo(BeNil()) Expect(vol.Volume).NotTo(BeNil()) + // Register the volume so it's automatically cleaned + cl.RegisterVolume(vol.Volume.VolumeId, VolumeInfo{VolumeID: vol.Volume.VolumeId}) existing_vols[vol.Volume.VolumeId] = true vols, err = c.ListVolumes( @@ -2097,6 +2106,7 @@ var _ = DescribeSanity("ExpandVolume [Controller Server]", func(sc *SanityContex CapacityRange: &csi.CapacityRange{ RequiredBytes: TestVolumeExpandSize(sc), }, + Secrets: sc.Secrets.ControllerExpandVolumeSecret, } rsp, err := c.ControllerExpandVolume(context.Background(), expReq) Expect(err).To(HaveOccurred()) @@ -2110,6 +2120,7 @@ var _ = DescribeSanity("ExpandVolume [Controller Server]", func(sc *SanityContex It("should fail if no capacity range is given", func() { expReq := &csi.ControllerExpandVolumeRequest{ VolumeId: "", + Secrets: sc.Secrets.ControllerExpandVolumeSecret, } rsp, err := c.ControllerExpandVolume(context.Background(), expReq) Expect(err).To(HaveOccurred()) @@ -2156,6 +2167,7 @@ var _ = DescribeSanity("ExpandVolume [Controller Server]", func(sc *SanityContex CapacityRange: &csi.CapacityRange{ RequiredBytes: TestVolumeExpandSize(sc), }, + Secrets: sc.Secrets.ControllerExpandVolumeSecret, } rsp, err := c.ControllerExpandVolume(context.Background(), expReq) Expect(err).NotTo(HaveOccurred()) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go index c42fb5f8b6..27c983ff0f 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go @@ -27,7 +27,7 @@ import ( 
"testing" "time" - "github.com/kubernetes-csi/csi-test/utils" + "github.com/kubernetes-csi/csi-test/v3/utils" yaml "gopkg.in/yaml.v2" "google.golang.org/grpc" @@ -48,6 +48,7 @@ type CSISecrets struct { NodePublishVolumeSecret map[string]string `yaml:"NodePublishVolumeSecret"` CreateSnapshotSecret map[string]string `yaml:"CreateSnapshotSecret"` DeleteSnapshotSecret map[string]string `yaml:"DeleteSnapshotSecret"` + ControllerExpandVolumeSecret map[string]string `yaml:"ControllerExpandVolumeSecret"` } // Config provides the configuration for the sanity tests. It @@ -121,9 +122,10 @@ type Config struct { // Timeout for the executed commands for path removal. RemovePathCmdTimeout int - // IDGen is an optional interface for callers to provide a generator for - // valid Volume and Node IDs. Defaults to DefaultIDGenerator which generates - // generic string IDs + // IDGen is an optional interface for callers to provide a + // generator for valid Volume and Node IDs. If unset, + // it will be set to a DefaultIDGenerator instance when + // passing the config to Test or GinkgoTest. IDGen IDGenerator } @@ -143,6 +145,21 @@ type SanityContext struct { StagingPath string } +// newContext sets up sanity testing with a config supplied by the +// user of the sanity package. Ownership of that config is shared +// between the sanity package and the caller. +func newContext(reqConfig *Config) *SanityContext { + // To avoid runtime if checks when using IDGen, a default + // is set here. + if reqConfig.IDGen == nil { + reqConfig.IDGen = &DefaultIDGenerator{} + } + + return &SanityContext{ + Config: reqConfig, + } +} + // Test will test the CSI driver at the specified address by // setting up a Ginkgo suite and running it. func Test(t *testing.T, reqConfig *Config) { @@ -158,14 +175,7 @@ func Test(t *testing.T, reqConfig *Config) { } } - if reqConfig.IDGen == nil { - reqConfig.IDGen = &DefaultIDGenerator{} - } - - sc := &SanityContext{ - Config: reqConfig, - } - + sc := newContext(reqConfig) registerTestsInGinkgo(sc) RegisterFailHandler(Fail) @@ -180,11 +190,11 @@ func Test(t *testing.T, reqConfig *Config) { } } +// GinkoTest is another entry point for sanity testing: instead of directly +// running tests like Test does, it merely registers the tests. This can +// be used to embed sanity testing in a custom Ginkgo test suite. func GinkgoTest(reqConfig *Config) { - sc := &SanityContext{ - Config: reqConfig, - } - + sc := newContext(reqConfig) registerTestsInGinkgo(sc) } diff --git a/vendor/github.com/kubernetes-csi/csi-test/release-tools/.prow.sh b/vendor/github.com/kubernetes-csi/csi-test/release-tools/.prow.sh new file mode 100755 index 0000000000..b18c535813 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/release-tools/.prow.sh @@ -0,0 +1,7 @@ +#! /bin/bash -e +# +# This is for testing csi-release-tools itself in Prow. All other +# repos use prow.sh for that, but as csi-release-tools isn't a normal +# repo with some Go code in it, it has a custom Prow test script. + +./verify-shellcheck.sh "$(pwd)" diff --git a/vendor/github.com/kubernetes-csi/csi-test/release-tools/CONTRIBUTING.md b/vendor/github.com/kubernetes-csi/csi-test/release-tools/CONTRIBUTING.md new file mode 100644 index 0000000000..de47115137 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/release-tools/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! 
The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + + + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + + diff --git a/vendor/github.com/kubernetes-csi/csi-test/release-tools/OWNERS b/vendor/github.com/kubernetes-csi/csi-test/release-tools/OWNERS new file mode 100644 index 0000000000..6d2f474e13 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/release-tools/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md + +approvers: +- saad-ali +- msau42 +- pohly + +reviewers: +- saad-ali +- msau42 +- pohly diff --git a/vendor/github.com/kubernetes-csi/csi-test/release-tools/README.md b/vendor/github.com/kubernetes-csi/csi-test/release-tools/README.md new file mode 100644 index 0000000000..cc40f161cd --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/release-tools/README.md @@ -0,0 +1,162 @@ +# [csi-release-tools](https://github.com/kubernetes-csi/csi-release-tools) + +These build and test rules can be shared between different Go projects +without modifications. Customization for the different projects happen +in the top-level Makefile. + +The rules include support for building and pushing Docker images, with +the following features: + - one or more command and image per project + - push canary and/or tagged release images + - automatically derive the image tag(s) from repo tags + - the source code revision is stored in a "revision" image label + - never overwrites an existing release image + +Usage +----- + +The expected repository layout is: + - `cmd/*/*.go` - source code for each command + - `cmd/*/Dockerfile` - docker file for each command or + Dockerfile in the root when only building a single command + - `Makefile` - includes `release-tools/build.make` and sets + configuration variables + - `.travis.yml` - a symlink to `release-tools/.travis.yml` + +To create a release, tag a certain revision with a name that +starts with `v`, for example `v1.0.0`, then `make push` +while that commit is checked out. + +It does not matter on which branch that revision exists, i.e. it is +possible to create releases directly from master. A release branch can +still be created for maintenance releases later if needed. + +Release branches are expected to be named `release-x.y` for releases +`x.y.z`. Building from such a branch creates `x.y-canary` +images. 
Building from master creates the main `canary` image.
+
+Sharing and updating
+--------------------
+
+[`git subtree`](https://github.com/git/git/blob/master/contrib/subtree/git-subtree.txt)
+is the recommended way of maintaining a copy of the rules inside the
+`release-tools` directory of a project. This way, it is possible to also make
+changes locally, test them, and then push them back to the shared
+repository at a later time.
+
+Cheat sheet:
+
+- `git subtree add --prefix=release-tools https://github.com/kubernetes-csi/csi-release-tools.git master` - add release tools to a repo which does not have them yet (only once)
+- `git subtree pull --prefix=release-tools https://github.com/kubernetes-csi/csi-release-tools.git master` - update local copy to latest upstream (whenever upstream changes)
+- edit, `git commit`, `git subtree push --prefix=release-tools git@github.com:/csi-release-tools.git ` - push to a new branch before submitting a PR
+
+verify-shellcheck.sh
+--------------------
+
+The [verify-shellcheck.sh](./verify-shellcheck.sh) script in this repo
+is a stripped down copy of the [corresponding
+script](https://github.com/kubernetes/kubernetes/blob/release-1.14/hack/verify-shellcheck.sh)
+in the Kubernetes repository. It can be used to check for certain
+errors in shell scripts, like missing quotation marks. The default
+`test-shellcheck` target in [build.make](./build.make) only checks the
+scripts in this directory. Components can add more directories to
+`TEST_SHELLCHECK_DIRS` to also check other scripts.
+
+End-to-end testing
+------------------
+
+A repo that wants to opt into testing via Prow must set up a top-level
+`.prow.sh`. Typically that will source `prow.sh` and then transfer
+control to it:
+
+``` bash
+#! /bin/bash -e
+
+. release-tools/prow.sh
+main
+```
+
+All Kubernetes-CSI repos are expected to switch to Prow. For details
+on what is enabled in Prow, see
+https://github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-csi
+
+Test results for periodic jobs are visible in
+https://testgrid.k8s.io/sig-storage-csi
+
+It is possible to reproduce the Prow testing locally on a suitable machine:
+- Linux host
+- Docker installed
+- code to be tested checked out in `$GOPATH/src/`
+- `cd $GOPATH/src/ && ./.prow.sh`
+
+Beware that the script intentionally doesn't clean up after itself and
+modifies the content of `$GOPATH`, in particular the `kubernetes` and
+`kind` repositories there. It is best to run it in an empty, disposable
+`$GOPATH`.
+
+When it terminates, the following command can be used to get access to
+the Kubernetes cluster that was brought up for testing (assuming that
+this step succeeded):
+
+    export KUBECONFIG="$(kind get kubeconfig-path --name="csi-prow")"
+
+It is possible to control the execution via environment variables. See
+`prow.sh` for details. Particularly useful is testing against different
+Kubernetes releases:
+
+    CSI_PROW_KUBERNETES_VERSION=1.13.3 ./.prow.sh
+    CSI_PROW_KUBERNETES_VERSION=latest ./.prow.sh
+
+Dependencies and vendoring
+--------------------------
+
+Most projects will (eventually) use `go mod` to manage
+dependencies. `dep` is also still supported by `csi-release-tools`,
+but not documented here because it's not recommended anymore.
+
+The usual instructions for using [go
+modules](https://github.com/golang/go/wiki/Modules) apply.
+Here's a cheat sheet for some of the relevant commands:
+- list available updates: `GO111MODULE=on go list -u -m all`
+- update or add a single dependency: `GO111MODULE=on go get `
+- update all dependencies to their next minor or patch release:
+  `GO111MODULE=on go get -u ./...` (add `-u=patch` to limit to patch
+  releases)
+- lock onto a specific version: `GO111MODULE=on go get @`
+- clean up `go.mod`: `GO111MODULE=on go mod tidy`
+- update vendor directory: `GO111MODULE=on go mod vendor`
+
+`GO111MODULE=on` can be left out when using Go >= 1.13 or when the
+source is checked out outside of `$GOPATH`.
+
+`go mod tidy` must be used to ensure that the listed dependencies are
+really still needed. Changing import statements or a tentative `go
+get` can result in stale dependencies.
+
+The `test-vendor` target verifies that `go mod tidy` was used when run
+locally or in a pre-merge CI job. If a `vendor` directory is present,
+it will also verify that its content is up-to-date.
+
+The `vendor` directory is optional. It is still present in projects
+because it avoids downloading sources during CI builds. If this is no
+longer deemed necessary, then a project can also remove the directory.
+
+When using packages that are part of the Kubernetes source code, the
+commands above are not enough because the [lack of semantic
+versioning](https://github.com/kubernetes/kubernetes/issues/72638)
+prevents `go mod` from finding newer releases. Importing directly from
+`kubernetes/kubernetes` also needs `replace` statements to override
+the fake `v0.0.0` versions
+(https://github.com/kubernetes/kubernetes/issues/79384). The
+`go-get-kubernetes.sh` script can be used to update all packages in
+lockstep to a different Kubernetes version. It takes a single version
+number like "1.16.0".
+
+Conversion of a repository that uses `dep` to `go mod` can be done with:
+
+    GO111MODULE=on go mod init
+    release-tools/go-get-kubernetes.sh
+    GO111MODULE=on go mod tidy
+    GO111MODULE=on go mod vendor
+    git rm -f Gopkg.toml Gopkg.lock
+    git add go.mod go.sum vendor
diff --git a/vendor/github.com/kubernetes-csi/csi-test/release-tools/RELEASE.md b/vendor/github.com/kubernetes-csi/csi-test/release-tools/RELEASE.md
new file mode 100644
index 0000000000..a0fd815b05
--- /dev/null
+++ b/vendor/github.com/kubernetes-csi/csi-test/release-tools/RELEASE.md
@@ -0,0 +1,5 @@
+# Release Process
+
+No tagged releases are planned at this point. The intention is to keep
+the master branch in a state such that it can be used for all
+supported branches in downstream repos which use these files.
diff --git a/vendor/github.com/kubernetes-csi/csi-test/release-tools/SECURITY_CONTACTS b/vendor/github.com/kubernetes-csi/csi-test/release-tools/SECURITY_CONTACTS
new file mode 100644
index 0000000000..2af1414e05
--- /dev/null
+++ b/vendor/github.com/kubernetes-csi/csi-test/release-tools/SECURITY_CONTACTS
@@ -0,0 +1,14 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Team to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +saad-ali +msau42 diff --git a/vendor/github.com/kubernetes-csi/csi-test/release-tools/build.make b/vendor/github.com/kubernetes-csi/csi-test/release-tools/build.make new file mode 100644 index 0000000000..cbf6d4553f --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/release-tools/build.make @@ -0,0 +1,218 @@ +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.PHONY: build-% build container-% container push-% push clean test + +# A space-separated list of all commands in the repository, must be +# set in main Makefile of a repository. +# CMDS= + +# This is the default. It can be overridden in the main Makefile after +# including build.make. +REGISTRY_NAME=quay.io/k8scsi + +# Revision that gets built into each binary via the main.version +# string. Uses the `git describe` output based on the most recent +# version tag with a short revision suffix or, if nothing has been +# tagged yet, just the revision. +# +# Beware that tags may also be missing in shallow clones as done by +# some CI systems (like TravisCI, which pulls only 50 commits). +REV=$(shell git describe --long --tags --match='v*' --dirty 2>/dev/null || git rev-list -n1 HEAD) + +# A space-separated list of image tags under which the current build is to be pushed. +# Determined dynamically. +IMAGE_TAGS= + +# A "canary" image gets built if the current commit is the head of the remote "master" branch. +# That branch does not exist when building some other branch in TravisCI. +IMAGE_TAGS+=$(shell if [ "$$(git rev-list -n1 HEAD)" = "$$(git rev-list -n1 origin/master 2>/dev/null)" ]; then echo "canary"; fi) + +# A "X.Y.Z-canary" image gets built if the current commit is the head of a "origin/release-X.Y.Z" branch. +# The actual suffix does not matter, only the "release-" prefix is checked. +IMAGE_TAGS+=$(shell git branch -r --points-at=HEAD | grep 'origin/release-' | grep -v -e ' -> ' | sed -e 's;.*/release-\(.*\);\1-canary;') + +# A release image "vX.Y.Z" gets built if there is a tag of that format for the current commit. +# --abbrev=0 suppresses long format, only showing the closest tag. +IMAGE_TAGS+=$(shell tagged="$$(git describe --tags --match='v*' --abbrev=0)"; if [ "$$tagged" ] && [ "$$(git rev-list -n1 HEAD)" = "$$(git rev-list -n1 $$tagged)" ]; then echo $$tagged; fi) + +# Images are named after the command contained in them. +IMAGE_NAME=$(REGISTRY_NAME)/$* + +ifdef V +# Adding "-alsologtostderr" assumes that all test binaries contain glog. This is not guaranteed. +TESTARGS = -v -args -alsologtostderr -v 5 +else +TESTARGS = +endif + +ARCH := $(if $(GOARCH),$(GOARCH),$(shell go env GOARCH)) + +# Specific packages can be excluded from each of the tests below by setting the *_FILTER_CMD variables +# to something like "| grep -v 'github.com/kubernetes-csi/project/pkg/foobar'". See usage below. 
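+#
+# As an illustration (hypothetical, not part of this file), a component's
+# top-level Makefile that consumes these rules could look like:
+#
+#   CMDS=mock-driver
+#   all: build
+#   include release-tools/build.make
+#
+# With that in place, "make build" compiles ./cmd/mock-driver into ./bin,
+# "make container" builds its image, and "make push" publishes it under
+# $(REGISTRY_NAME) with the tags collected in IMAGE_TAGS above.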
+ +build-%: + mkdir -p bin + CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$* ./cmd/$* + if [ "$$ARCH" = "amd64" ]; then \ + CGO_ENABLED=0 GOOS=windows go build -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$*.exe ./cmd/$* ; \ + fi + +container-%: build-% + docker build -t $*:latest -f $(shell if [ -e ./cmd/$*/Dockerfile ]; then echo ./cmd/$*/Dockerfile; else echo Dockerfile; fi) --label revision=$(REV) . + +push-%: container-% + set -ex; \ + push_image () { \ + docker tag $*:latest $(IMAGE_NAME):$$tag; \ + docker push $(IMAGE_NAME):$$tag; \ + }; \ + for tag in $(IMAGE_TAGS); do \ + if [ "$$tag" = "canary" ] || echo "$$tag" | grep -q -e '-canary$$'; then \ + : "creating or overwriting canary image"; \ + push_image; \ + elif docker pull $(IMAGE_NAME):$$tag 2>&1 | tee /dev/stderr | grep -q "manifest for $(IMAGE_NAME):$$tag not found"; then \ + : "creating release image"; \ + push_image; \ + else \ + : "release image $(IMAGE_NAME):$$tag already exists, skipping push"; \ + fi; \ + done + +build: $(CMDS:%=build-%) +container: $(CMDS:%=container-%) +push: $(CMDS:%=push-%) + +clean: + -rm -rf bin + +test: + +.PHONY: test-go +test: test-go +test-go: + @ echo; echo "### $@:" + go test `go list ./... | grep -v -e 'vendor' -e '/test/e2e$$' $(TEST_GO_FILTER_CMD)` $(TESTARGS) + +.PHONY: test-vet +test: test-vet +test-vet: + @ echo; echo "### $@:" + go vet `go list ./... | grep -v vendor $(TEST_VET_FILTER_CMD)` + +.PHONY: test-fmt +test: test-fmt +test-fmt: + @ echo; echo "### $@:" + files=$$(find . -name '*.go' | grep -v './vendor' $(TEST_FMT_FILTER_CMD)); \ + if [ $$(gofmt -d $$files | wc -l) -ne 0 ]; then \ + echo "formatting errors:"; \ + gofmt -d $$files; \ + false; \ + fi + +# This test only runs when dep >= 0.5 is installed, which is the case for the CI setup. +# When using 'go mod', we allow the test to be skipped in the Prow CI under some special +# circumstances, because it depends on accessing all remote repos and thus +# running it all the time would defeat the purpose of vendoring: +# - not handling a PR or +# - the fabricated merge commit leaves go.mod, go.sum and vendor dir unchanged +# - release-tools also didn't change (changing rules or Go version might lead to +# a different result and thus must be tested) +# - import statements not changed (because if they change, go.mod might have to be updated) +# +# "git diff" is intelligent enough to annotate changes inside the "import" block in +# the start of the diff hunk: +# +# diff --git a/rpc/common.go b/rpc/common.go +# index bb4a5c4..5fa4271 100644 +# --- a/rpc/common.go +# +++ b/rpc/common.go +# @@ -21,7 +21,6 @@ import ( +# "fmt" +# "time" +# +# - "google.golang.org/grpc" +# "google.golang.org/grpc/codes" +# "google.golang.org/grpc/status" +# +# We rely on that to find such changes. +# +# Vendoring is optional when using go.mod. 
+.PHONY: test-vendor +test: test-vendor +test-vendor: + @ echo; echo "### $@:" + @ if [ -f Gopkg.toml ]; then \ + echo "Repo uses 'dep' for vendoring."; \ + case "$$(dep version 2>/dev/null | grep 'version *:')" in \ + *v0.[56789]*) dep check && echo "vendor up-to-date" || false;; \ + *) echo "skipping check, dep >= 0.5 required";; \ + esac; \ + elif [ -f go.mod ]; then \ + echo "Repo uses 'go mod'."; \ + if [ "$${JOB_NAME}" ] && \ + ( [ "$${JOB_TYPE}" != "presubmit" ] || \ + [ $$( (git diff "${PULL_BASE_SHA}..HEAD" -- go.mod go.sum vendor release-tools; \ + git diff "${PULL_BASE_SHA}..HEAD" | grep -e '^@@.*@@ import (' -e '^[+-]import') | \ + wc -l) -eq 0 ] ); then \ + echo "Skipping vendor check because the Prow pre-submit job does not affect dependencies."; \ + elif ! GO111MODULE=on go mod tidy; then \ + echo "ERROR: vendor check failed."; \ + false; \ + elif [ $$(git status --porcelain -- go.mod go.sum | wc -l) -gt 0 ]; then \ + echo "ERROR: go module files *not* up-to-date, they did get modified by 'GO111MODULE=on go mod tidy':"; \ + git diff -- go.mod go.sum; \ + false; \ + elif [ -d vendor ]; then \ + if ! GO111MODULE=on go mod vendor; then \ + echo "ERROR: vendor check failed."; \ + false; \ + elif [ $$(git status --porcelain -- vendor | wc -l) -gt 0 ]; then \ + echo "ERROR: vendor directory *not* up-to-date, it did get modified by 'GO111MODULE=on go mod vendor':"; \ + git status -- vendor; \ + git diff -- vendor; \ + false; \ + else \ + echo "Go dependencies and vendor directory up-to-date."; \ + fi; \ + else \ + echo "Go dependencies up-to-date."; \ + fi; \ + fi + +.PHONY: test-subtree +test: test-subtree +test-subtree: + @ echo; echo "### $@:" + ./release-tools/verify-subtree.sh release-tools + +# Components can extend the set of directories which must pass shellcheck. +# The default is to check only the release-tools directory itself. +TEST_SHELLCHECK_DIRS=release-tools +.PHONY: test-shellcheck +test: test-shellcheck +test-shellcheck: + @ echo; echo "### $@:" + @ ret=0; \ + if ! command -v docker; then \ + echo "skipped, no Docker"; \ + exit 0; \ + fi; \ + for dir in $(abspath $(TEST_SHELLCHECK_DIRS)); do \ + echo; \ + echo "$$dir:"; \ + ./release-tools/verify-shellcheck.sh "$$dir" || ret=1; \ + done; \ + exit $$ret diff --git a/vendor/github.com/kubernetes-csi/csi-test/release-tools/code-of-conduct.md b/vendor/github.com/kubernetes-csi/csi-test/release-tools/code-of-conduct.md new file mode 100644 index 0000000000..0d15c00cf3 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/release-tools/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/github.com/kubernetes-csi/csi-test/release-tools/filter-junit.go b/vendor/github.com/kubernetes-csi/csi-test/release-tools/filter-junit.go new file mode 100644 index 0000000000..2f51be00e8 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/release-tools/filter-junit.go @@ -0,0 +1,133 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* + * This command filters a JUnit file such that only tests with a name + * matching a regular expression are passed through. By concatenating + * multiple input files it is possible to merge them into a single file. + */ +package main + +import ( + "encoding/xml" + "flag" + "io/ioutil" + "os" + "regexp" +) + +var ( + output = flag.String("o", "-", "junit file to write, - for stdout") + tests = flag.String("t", "", "regular expression matching the test names that are to be included in the output") +) + +/* + * TestSuite represents a JUnit file. Due to how encoding/xml works, we have + * represent all fields that we want to be passed through. It's therefore + * not a complete solution, but good enough for Ginkgo + Spyglass. + */ +type TestSuite struct { + XMLName string `xml:"testsuite"` + TestCases []TestCase `xml:"testcase"` +} + +type TestCase struct { + Name string `xml:"name,attr"` + Time string `xml:"time,attr"` + SystemOut string `xml:"system-out,omitempty"` + Failure string `xml:"failure,omitempty"` + Skipped SkipReason `xml:"skipped,omitempty"` +} + +// SkipReason deals with the special : +// if present, we must re-encode it, even if empty. +type SkipReason string + +func (s *SkipReason) UnmarshalText(text []byte) error { + *s = SkipReason(text) + if *s == "" { + *s = " " + } + return nil +} + +func (s SkipReason) MarshalText() ([]byte, error) { + if s == " " { + return []byte{}, nil + } + return []byte(s), nil +} + +func main() { + var junit TestSuite + var data []byte + + flag.Parse() + + re := regexp.MustCompile(*tests) + + // Read all input files. + for _, input := range flag.Args() { + if input == "-" { + if _, err := os.Stdin.Read(data); err != nil { + panic(err) + } + } else { + var err error + data, err = ioutil.ReadFile(input) + if err != nil { + panic(err) + } + } + if err := xml.Unmarshal(data, &junit); err != nil { + panic(err) + } + } + + // Keep only matching testcases. Testcases skipped in all test runs are only stored once. + filtered := map[string]TestCase{} + for _, testcase := range junit.TestCases { + if !re.MatchString(testcase.Name) { + continue + } + entry, ok := filtered[testcase.Name] + if !ok || // not present yet + entry.Skipped != "" && testcase.Skipped == "" { // replaced skipped test with real test run + filtered[testcase.Name] = testcase + } + } + junit.TestCases = nil + for _, testcase := range filtered { + junit.TestCases = append(junit.TestCases, testcase) + } + + // Re-encode. + data, err := xml.MarshalIndent(junit, "", " ") + if err != nil { + panic(err) + } + + // Write to output. + if *output == "-" { + if _, err := os.Stdout.Write(data); err != nil { + panic(err) + } + } else { + if err := ioutil.WriteFile(*output, data, 0644); err != nil { + panic(err) + } + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/release-tools/go-get-kubernetes.sh b/vendor/github.com/kubernetes-csi/csi-test/release-tools/go-get-kubernetes.sh new file mode 100755 index 0000000000..8c4e302445 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/release-tools/go-get-kubernetes.sh @@ -0,0 +1,104 @@ +#!/usr/bin/env bash + +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This script can be used while converting a repo from "dep" to "go mod" +# by calling it after "go mod init" or to update the Kubernetes packages +# in a repo that has already been converted. Only packages that are +# part of kubernetes/kubernetes and thus part of a Kubernetes release +# are modified. Other k8.io packages (like k8s.io/klog, k8s.io/utils) +# need to be updated separately. + +set -o pipefail + +cmd=$0 + +function help () { + echo "$cmd - update all components from kubernetes/kubernetes to that version" +} + +if [ $# -ne 1 ]; then + help + exit 1 +fi +case "$1" in -h|--help|help) help; exit 0;; esac + +die () { + echo >&2 "$@" + exit 1 +} + +k8s="$1" + +# If the repo imports k8s.io/kubernetes (directly or indirectly), then +# "go mod" will try to find "v0.0.0" versions because +# k8s.io/kubernetes has those in it's go.mod file +# (https://github.com/kubernetes/kubernetes/blob/2bd9643cee5b3b3a5ecbd3af49d09018f0773c77/go.mod#L146-L157). +# (https://github.com/kubernetes/kubernetes/issues/79384). +# +# We need to replicate the replace statements to override those fake +# versions also in our go.mod file (idea and some code from +# https://github.com/kubernetes/kubernetes/issues/79384#issuecomment-521493597). +mods=$( (set -x; curl --silent --show-error --fail "https://raw.githubusercontent.com/kubernetes/kubernetes/v${k8s}/go.mod") | + sed -n 's|.*k8s.io/\(.*\) => ./staging/src/k8s.io/.*|k8s.io/\1|p' + ) || die "failed to determine Kubernetes staging modules" +for mod in $mods; do + # The presence of a potentially incomplete go.mod file affects this command, + # so move elsewhere. + modinfo=$(set -x; cd /; env GO111MODULE=on go mod download -json "$mod@kubernetes-${k8s}") || + die "failed to determine version of $mod: $modinfo" + v=$(echo "$modinfo" | sed -n 's|.*"Version": "\(.*\)".*|\1|p') + (set -x; env GO111MODULE=on go mod edit "-replace=$mod=$mod@$v") || die "'go mod edit' failed" +done + +packages= + +# Beware that we have to work with packages, not modules (i.e. no -m +# flag), because some modules trigger a "no Go code except tests" +# error. Getting their packages works. +if ! packages=$( (set -x; env GO111MODULE=on go list all) | grep ^k8s.io/ | sed -e 's; *;;'); then + cat >&2 <&2 <" go.mod; then + deps="$deps $(echo "$package" | sed -e "s;\$;@kubernetes-$k8s;" -e 's;^k8s.io/kubernetes\(/.*\)@kubernetes-;k8s.io/kubernetes\1@v;')" + fi +done + +# shellcheck disable=SC2086 +(set -x; env GO111MODULE=on go get $deps 2>&1) || die "go get failed" +echo "SUCCESS" diff --git a/vendor/github.com/kubernetes-csi/csi-test/release-tools/prow.sh b/vendor/github.com/kubernetes-csi/csi-test/release-tools/prow.sh new file mode 100755 index 0000000000..d93b6fa7d6 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/release-tools/prow.sh @@ -0,0 +1,1071 @@ +#! /bin/bash +# +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This script runs inside a Prow job. It can run unit tests ("make test") +# and E2E testing. This E2E testing covers different scenarios (see +# https://github.com/kubernetes/enhancements/pull/807): +# - running the stable hostpath example against a Kubernetes release +# - running the canary hostpath example against a Kubernetes release +# - building the component in the current repo and running the +# stable hostpath example with that one component replaced against +# a Kubernetes release +# +# The intended usage of this script is that individual repos import +# csi-release-tools, then link their top-level prow.sh to this or +# include it in that file. When including it, several of the variables +# can be overridden in the top-level prow.sh to customize the script +# for the repo. +# +# The expected environment is: +# - $GOPATH/src/ for the repository that is to be tested, +# with PR branch merged (when testing a PR) +# - running on linux-amd64 +# - bazel installed (when testing against Kubernetes master), must be recent +# enough for Kubernetes master +# - kind (https://github.com/kubernetes-sigs/kind) installed +# - optional: Go already installed + +RELEASE_TOOLS_ROOT="$(realpath "$(dirname "${BASH_SOURCE[0]}")")" +REPO_DIR="$(pwd)" + +# Sets the default value for a variable if not set already and logs the value. +# Any variable set this way is usually something that a repo's .prow.sh +# or the job can set. +configvar () { + # Ignore: Word is of the form "A"B"C" (B indicated). Did you mean "ABC" or "A\"B\"C"? + # shellcheck disable=SC2140 + eval : \$\{"$1":="\$2"\} + eval echo "\$3:" "$1=\${$1}" +} + +# Takes the minor version of $CSI_PROW_KUBERNETES_VERSION and overrides it to +# $1 if they are equal minor versions. Ignores versions that begin with +# "release-". +override_k8s_version () { + local current_minor_version + local override_minor_version + + # Ignore: See if you can use ${variable//search/replace} instead. + # shellcheck disable=SC2001 + current_minor_version="$(echo "${CSI_PROW_KUBERNETES_VERSION}" | sed -e 's/\([0-9]*\)\.\([0-9]*\).*/\1\.\2/')" + + # Ignore: See if you can use ${variable//search/replace} instead. + # shellcheck disable=SC2001 + override_minor_version="$(echo "${1}" | sed -e 's/\([0-9]*\)\.\([0-9]*\).*/\1\.\2/')" + if [ "${current_minor_version}" == "${override_minor_version}" ]; then + CSI_PROW_KUBERNETES_VERSION="$1" + echo "Overriding CSI_PROW_KUBERNETES_VERSION with $1: $CSI_PROW_KUBERNETES_VERSION" + fi +} + +# Prints the value of a variable + version suffix, falling back to variable + "LATEST". +get_versioned_variable () { + local var="$1" + local version="$2" + local value + + eval value="\${${var}_${version}}" + if ! [ "$value" ]; then + eval value="\${${var}_LATEST}" + fi + echo "$value" +} + +# Go versions can be specified seperately for different tasks +# If the pre-installed Go is missing or a different +# version, the required version here will get installed +# from https://golang.org/dl/. 
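+#
+# For example (hypothetical .prow.sh of a component), a repo can pin one of
+# these versions before handing control to this script; configvar below only
+# assigns the default when the variable is still unset or empty:
+#
+#   #! /bin/bash -e
+#   CSI_PROW_GO_VERSION_BUILD=1.13.4
+#   . release-tools/prow.sh
+#   main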
+go_from_travis_yml () { + grep "^ *- go:" "${RELEASE_TOOLS_ROOT}/travis.yml" | sed -e 's/.*go: *//' +} +configvar CSI_PROW_GO_VERSION_BUILD "$(go_from_travis_yml)" "Go version for building the component" # depends on component's source code +configvar CSI_PROW_GO_VERSION_E2E "" "override Go version for building the Kubernetes E2E test suite" # normally doesn't need to be set, see install_e2e +configvar CSI_PROW_GO_VERSION_SANITY "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building the csi-sanity test suite" # depends on CSI_PROW_SANITY settings below +configvar CSI_PROW_GO_VERSION_KIND "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building 'kind'" # depends on CSI_PROW_KIND_VERSION below +configvar CSI_PROW_GO_VERSION_GINKGO "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building ginkgo" # depends on CSI_PROW_GINKGO_VERSION below + +# kind version to use. If the pre-installed version is different, +# the desired version is downloaded from https://github.com/kubernetes-sigs/kind/releases/download/ +# (if available), otherwise it is built from source. +configvar CSI_PROW_KIND_VERSION "v0.5.0" "kind" + +# ginkgo test runner version to use. If the pre-installed version is +# different, the desired version is built from source. +configvar CSI_PROW_GINKGO_VERSION v1.7.0 "Ginkgo" + +# Ginkgo runs the E2E test in parallel. The default is based on the number +# of CPUs, but typically this can be set to something higher in the job. +configvar CSI_PROW_GINKO_PARALLEL "-p" "Ginko parallelism parameter(s)" + +# Enables building the code in the repository. On by default, can be +# disabled in jobs which only use pre-built components. +configvar CSI_PROW_BUILD_JOB true "building code in repo enabled" + +# Kubernetes version to test against. This must be a version number +# (like 1.13.3) for which there is a pre-built kind image (see +# https://hub.docker.com/r/kindest/node/tags), "latest" (builds +# Kubernetes from the master branch) or "release-x.yy" (builds +# Kubernetes from a release branch). +# +# This can also be a version that was not released yet at the time +# that the settings below were chose. The script will then +# use the same settings as for "latest" Kubernetes. This works +# as long as there are no breaking changes in Kubernetes, like +# deprecating or changing the implementation of an alpha feature. +configvar CSI_PROW_KUBERNETES_VERSION 1.15.3 "Kubernetes" + +# This is a hack to workaround the issue that each version +# of kind currently only supports specific patch versions of +# Kubernetes. We need to override CSI_PROW_KUBERNETES_VERSION +# passed in by our CI/pull jobs to the versions that +# kind v0.5.0 supports. +# +# If the version is prefixed with "release-", then nothing +# is overridden. +override_k8s_version "1.13.10" +override_k8s_version "1.14.6" +override_k8s_version "1.15.3" + +# CSI_PROW_KUBERNETES_VERSION reduced to first two version numbers and +# with underscore (1_13 instead of 1.13.3) and in uppercase (LATEST +# instead of latest). +# +# This is used to derive the right defaults for the variables below +# when a Prow job just defines the Kubernetes version. +csi_prow_kubernetes_version_suffix="$(echo "${CSI_PROW_KUBERNETES_VERSION}" | tr . _ | tr '[:lower:]' '[:upper:]' | sed -e 's/^RELEASE-//' -e 's/\([0-9]*\)_\([0-9]*\).*/\1_\2/')" + +# Work directory. It has to allow running executables, therefore /tmp +# is avoided. Cleaning up after the script is intentionally left to +# the caller. 
+configvar CSI_PROW_WORK "$(mkdir -p "$GOPATH/pkg" && mktemp -d "$GOPATH/pkg/csiprow.XXXXXXXXXX")" "work directory" + +# The hostpath deployment script is searched for in several places. +# +# - The "deploy" directory in the current repository: this is useful +# for the situation that a component becomes incompatible with the +# shared deployment, because then it can (temporarily!) provide its +# own example until the shared one can be updated; it's also how +# csi-driver-host-path itself provides the example. +# +# - CSI_PROW_HOSTPATH_VERSION of the CSI_PROW_HOSTPATH_REPO is checked +# out: this allows other repos to reference a version of the example +# that is known to be compatible. +# +# - The csi-driver-host-path/deploy directory has multiple sub-directories, +# each with different deployments (stable set of images for Kubernetes 1.13, +# stable set of images for Kubernetes 1.14, canary for latest Kubernetes, etc.). +# This is necessary because there may be incompatible changes in the +# "API" of a component (for example, its command line options or RBAC rules) +# or in its support for different Kubernetes versions (CSIDriverInfo as +# CRD in Kubernetes 1.13 vs builtin API in Kubernetes 1.14). +# +# When testing an update for a component in a PR job, the +# CSI_PROW_DEPLOYMENT variable can be set in the +# .prow.sh of each component when there are breaking changes +# that require using a non-default deployment. The default +# is a deployment named "kubernetes-x.yy" (if available), +# otherwise "kubernetes-latest". +# "none" disables the deployment of the hostpath driver. +# +# When no deploy script is found (nothing in `deploy` directory, +# CSI_PROW_HOSTPATH_REPO=none), nothing gets deployed. +configvar CSI_PROW_HOSTPATH_VERSION "v1.2.0-rc8" "hostpath driver" +configvar CSI_PROW_HOSTPATH_REPO https://github.com/kubernetes-csi/csi-driver-host-path "hostpath repo" +configvar CSI_PROW_DEPLOYMENT "" "deployment" +configvar CSI_PROW_HOSTPATH_DRIVER_NAME "hostpath.csi.k8s.io" "the hostpath driver name" + +# If CSI_PROW_HOSTPATH_CANARY is set (typically to "canary", but also +# "1.0-canary"), then all image versions are replaced with that +# version tag. +configvar CSI_PROW_HOSTPATH_CANARY "" "hostpath image" + +# The E2E testing can come from an arbitrary repo. The expectation is that +# the repo supports "go test ./test/e2e -args --storage.testdriver" (https://github.com/kubernetes/kubernetes/pull/72836) +# after setting KUBECONFIG. As a special case, if the repository is Kubernetes, +# then `make WHAT=test/e2e/e2e.test` is called first to ensure that +# all generated files are present. +# +# CSI_PROW_E2E_REPO=none disables E2E testing. 
+configvar CSI_PROW_E2E_VERSION_1_13 v1.14.0 "E2E version for Kubernetes 1.13.x" # we can't use the one from 1.13.x because it didn't have --storage.testdriver +configvar CSI_PROW_E2E_VERSION_1_14 v1.14.0 "E2E version for Kubernetes 1.14.x" +configvar CSI_PROW_E2E_VERSION_1_15 v1.15.0 "E2E version for Kubernetes 1.15.x" +# TODO: add new CSI_PROW_E2E_VERSION entry for future Kubernetes releases +configvar CSI_PROW_E2E_VERSION_LATEST master "E2E version for Kubernetes master" # testing against Kubernetes master is already tracking a moving target, so we might as well use a moving E2E version +configvar CSI_PROW_E2E_REPO_LATEST https://github.com/kubernetes/kubernetes "E2E repo for Kubernetes >= 1.13.x" # currently the same for all versions +configvar CSI_PROW_E2E_IMPORT_PATH_LATEST k8s.io/kubernetes "E2E package for Kubernetes >= 1.13.x" # currently the same for all versions +configvar CSI_PROW_E2E_VERSION "$(get_versioned_variable CSI_PROW_E2E_VERSION "${csi_prow_kubernetes_version_suffix}")" "E2E version" +configvar CSI_PROW_E2E_REPO "$(get_versioned_variable CSI_PROW_E2E_REPO "${csi_prow_kubernetes_version_suffix}")" "E2E repo" +configvar CSI_PROW_E2E_IMPORT_PATH "$(get_versioned_variable CSI_PROW_E2E_IMPORT_PATH "${csi_prow_kubernetes_version_suffix}")" "E2E package" + +# csi-sanity testing from the csi-test repo can be run against the installed +# CSI driver. For this to work, deploying the driver must expose the Unix domain +# csi.sock as a TCP service for use by the csi-sanity command, which runs outside +# of the cluster. The alternative would have been to (cross-)compile csi-sanity +# and install it inside the cluster, which is not necessarily easier. +configvar CSI_PROW_SANITY_REPO https://github.com/kubernetes-csi/csi-test "csi-test repo" +configvar CSI_PROW_SANITY_VERSION 5421d9f3c37be3b95b241b44a094a3db11bee789 "csi-test version" # latest master +configvar CSI_PROW_SANITY_IMPORT_PATH github.com/kubernetes-csi/csi-test "csi-test package" +configvar CSI_PROW_SANITY_SERVICE "hostpath-service" "Kubernetes TCP service name that exposes csi.sock" +configvar CSI_PROW_SANITY_POD "csi-hostpathplugin-0" "Kubernetes pod with CSI driver" +configvar CSI_PROW_SANITY_CONTAINER "hostpath" "Kubernetes container with CSI driver" + +# The version of dep to use for 'make test-vendor'. Ignored if the project doesn't +# use dep. Only binary releases of dep are supported (https://github.com/golang/dep/releases). +configvar CSI_PROW_DEP_VERSION v0.5.1 "golang dep version to be used for vendor checking" + +# Each job can run one or more of the following tests, identified by +# a single word: +# - unit testing +# - parallel excluding alpha features +# - serial excluding alpha features +# - parallel, only alpha feature +# - serial, only alpha features +# - sanity +# +# Unknown or unsupported entries are ignored. +# +# Sanity testing with csi-sanity only covers the CSI driver itself and +# thus only makes sense in repos which provide their own CSI +# driver. Repos can enable sanity testing by setting +# CSI_PROW_TESTS_SANITY=sanity. +configvar CSI_PROW_TESTS "unit parallel serial parallel-alpha serial-alpha sanity" "tests to run" +tests_enabled () { + local t1 t2 + # We want word-splitting here, so ignore: Quote to prevent word splitting, or split robustly with mapfile or read -a. 
+ # shellcheck disable=SC2206 + local tests=(${CSI_PROW_TESTS}) + for t1 in "$@"; do + for t2 in "${tests[@]}"; do + if [ "$t1" = "$t2" ]; then + return + fi + done + done + return 1 +} +sanity_enabled () { + [ "${CSI_PROW_TESTS_SANITY}" = "sanity" ] && tests_enabled "sanity" +} +tests_need_kind () { + tests_enabled "parallel" "serial" "serial-alpha" "parallel-alpha" || + sanity_enabled +} +tests_need_non_alpha_cluster () { + tests_enabled "parallel" "serial" || + sanity_enabled +} +tests_need_alpha_cluster () { + tests_enabled "parallel-alpha" "serial-alpha" +} + + +# Serial vs. parallel is always determined by these regular expressions. +# Individual regular expressions are seperated by spaces for readability +# and expected to not contain spaces. Use dots instead. The complete +# regex for Ginkgo will be created by joining the individual terms. +configvar CSI_PROW_E2E_SERIAL '\[Serial\] \[Disruptive\]' "tags for serial E2E tests" +regex_join () { + echo "$@" | sed -e 's/ */|/g' -e 's/^|*//' -e 's/|*$//' -e 's/^$/this-matches-nothing/g' +} + +# Which tests are alpha depends on the Kubernetes version. We could +# use the same E2E test for all Kubernetes version. This would have +# the advantage that new tests can be applied to older versions +# without having to backport tests. +# +# But the feature tag gets removed from E2E tests when the corresponding +# feature becomes beta, so we would have to track which tests were +# alpha in previous Kubernetes releases. This was considered too +# error prone. Therefore we use E2E tests that match the Kubernetes +# version that is getting tested. +# +# However, for 1.13.x testing we have to use the E2E tests from 1.14 +# because 1.13 didn't have --storage.testdriver yet, so for that (and only +# that version) we have to define alpha tests differently. +configvar CSI_PROW_E2E_ALPHA_1_13 '\[Feature: \[Testpattern:.Dynamic.PV..block.volmode.\] should.create.and.delete.block.persistent.volumes' "alpha tests for Kubernetes 1.13" # Raw block was an alpha feature in 1.13. +configvar CSI_PROW_E2E_ALPHA_LATEST '\[Feature:' "alpha tests for Kubernetes >= 1.14" # there's no need to update this, adding a new case for CSI_PROW_E2E for a new Kubernetes is enough +configvar CSI_PROW_E2E_ALPHA "$(get_versioned_variable CSI_PROW_E2E_ALPHA "${csi_prow_kubernetes_version_suffix}")" "alpha tests" + +# After the parallel E2E test without alpha features, a test cluster +# with alpha features is brought up and tests that were previously +# disabled are run. The alpha gates in each release have to be listed +# explicitly. If none are set (= variable empty), alpha testing +# is skipped. +# +# Testing against "latest" Kubernetes is problematic because some alpha +# feature which used to work might stop working or change their behavior +# such that the current tests no longer pass. If that happens, +# kubernetes-csi components must be updated, either by disabling +# the failing test for "latest" or by updating the test and not running +# it anymore for older releases. 
+configvar CSI_PROW_E2E_ALPHA_GATES_1_13 'VolumeSnapshotDataSource=true,BlockVolume=true,CSIBlockVolume=true' "alpha feature gates for Kubernetes 1.13" +configvar CSI_PROW_E2E_ALPHA_GATES_1_14 'VolumeSnapshotDataSource=true,ExpandCSIVolumes=true' "alpha feature gates for Kubernetes 1.14" +configvar CSI_PROW_E2E_ALPHA_GATES_1_15 'VolumeSnapshotDataSource=true,ExpandCSIVolumes=true' "alpha feature gates for Kubernetes 1.15" +# TODO: add new CSI_PROW_ALPHA_GATES_xxx entry for future Kubernetes releases and +# add new gates to CSI_PROW_E2E_ALPHA_GATES_LATEST. +configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'VolumeSnapshotDataSource=true,ExpandCSIVolumes=true' "alpha feature gates for latest Kubernetes" +configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates" + +# Some tests are known to be unusable in a KinD cluster. For example, +# stopping kubelet with "ssh systemctl stop kubelet" simply +# doesn't work. Such tests should be written in a way that they verify +# whether they can run with the current cluster provider, but until +# they are, we filter them out by name. Like the other test selection +# variables, this is again a space separated list of regular expressions. +configvar CSI_PROW_E2E_SKIP 'Disruptive' "tests that need to be skipped" + +# This is the directory for additional result files. Usually set by Prow, but +# if not (for example, when invoking manually) it defaults to the work directory. +configvar ARTIFACTS "${CSI_PROW_WORK}/artifacts" "artifacts" +mkdir -p "${ARTIFACTS}" + +run () { + echo "$(date) $(go version | sed -e 's/.*version \(go[^ ]*\).*/\1/') $(if [ "$(pwd)" != "${REPO_DIR}" ]; then pwd; fi)\$" "$@" >&2 + "$@" +} + +info () { + echo >&2 INFO: "$@" +} + +warn () { + echo >&2 WARNING: "$@" +} + +die () { + echo >&2 ERROR: "$@" + exit 1 +} + +# For additional tools. +CSI_PROW_BIN="${CSI_PROW_WORK}/bin" +mkdir -p "${CSI_PROW_BIN}" +PATH="${CSI_PROW_BIN}:$PATH" + +# Ensure that PATH has the desired version of the Go tools, then run command given as argument. +# Empty parameter uses the already installed Go. In Prow, that version is kept up-to-date by +# bumping the container image regularly. +run_with_go () { + local version + version="$1" + shift + + if ! [ "$version" ] || go version 2>/dev/null | grep -q "go$version"; then + run "$@" + else + if ! [ -d "${CSI_PROW_WORK}/go-$version" ]; then + run curl --fail --location "https://dl.google.com/go/go$version.linux-amd64.tar.gz" | tar -C "${CSI_PROW_WORK}" -zxf - || die "installation of Go $version failed" + mv "${CSI_PROW_WORK}/go" "${CSI_PROW_WORK}/go-$version" + fi + PATH="${CSI_PROW_WORK}/go-$version/bin:$PATH" run "$@" + fi +} + +# Ensure that we have the desired version of kind. +install_kind () { + if kind --version 2>/dev/null | grep -q " ${CSI_PROW_KIND_VERSION}$"; then + return + fi + if run curl --fail --location -o "${CSI_PROW_WORK}/bin/kind" "https://github.com/kubernetes-sigs/kind/releases/download/${CSI_PROW_KIND_VERSION}/kind-linux-amd64"; then + chmod u+x "${CSI_PROW_WORK}/bin/kind" + else + git_checkout https://github.com/kubernetes-sigs/kind "${GOPATH}/src/sigs.k8s.io/kind" "${CSI_PROW_KIND_VERSION}" --depth=1 && + (cd "${GOPATH}/src/sigs.k8s.io/kind" && make install INSTALL_DIR="${CSI_PROW_WORK}/bin") + fi +} + +# Ensure that we have the desired version of the ginkgo test runner. +install_ginkgo () { + # CSI_PROW_GINKGO_VERSION contains the tag with v prefix, the command line output does not. 
+ if [ "v$(ginkgo version 2>/dev/null | sed -e 's/.* //')" = "${CSI_PROW_GINKGO_VERSION}" ]; then + return + fi + git_checkout https://github.com/onsi/ginkgo "$GOPATH/src/github.com/onsi/ginkgo" "${CSI_PROW_GINKGO_VERSION}" --depth=1 && + # We have to get dependencies and hence can't call just "go build". + run_with_go "${CSI_PROW_GO_VERSION_GINKGO}" go get github.com/onsi/ginkgo/ginkgo || die "building ginkgo failed" && + mv "$GOPATH/bin/ginkgo" "${CSI_PROW_BIN}" +} + +# Ensure that we have the desired version of dep. +install_dep () { + if dep version 2>/dev/null | grep -q "version:.*${CSI_PROW_DEP_VERSION}$"; then + return + fi + run curl --fail --location -o "${CSI_PROW_WORK}/bin/dep" "https://github.com/golang/dep/releases/download/v0.5.4/dep-linux-amd64" && + chmod u+x "${CSI_PROW_WORK}/bin/dep" +} + +# This checks out a repo ("https://github.com/kubernetes/kubernetes") +# in a certain location ("$GOPATH/src/k8s.io/kubernetes") at +# a certain revision (a hex commit hash, v1.13.1, master). It's okay +# for that directory to exist already. +git_checkout () { + local repo path revision + repo="$1" + shift + path="$1" + shift + revision="$1" + shift + + mkdir -p "$path" + if ! [ -d "$path/.git" ]; then + run git init "$path" + fi + if (cd "$path" && run git fetch "$@" "$repo" "$revision"); then + (cd "$path" && run git checkout FETCH_HEAD) || die "checking out $repo $revision failed" + else + # Might have been because fetching by revision is not + # supported by GitHub (https://github.com/isaacs/github/issues/436). + # Fall back to fetching everything. + (cd "$path" && run git fetch "$repo" '+refs/heads/*:refs/remotes/csiprow/heads/*' '+refs/tags/*:refs/tags/*') || die "fetching $repo failed" + (cd "$path" && run git checkout "$revision") || die "checking out $repo $revision failed" + fi + # This is useful for local testing or when switching between different revisions in the same + # repo. + (cd "$path" && run git clean -fdx) || die "failed to clean $path" +} + +# This clones a repo ("https://github.com/kubernetes/kubernetes") +# in a certain location ("$GOPATH/src/k8s.io/kubernetes") at +# a the head of a specific branch (i.e., release-1.13, master). +# The directory cannot exist. +git_clone_branch () { + local repo path branch parent + repo="$1" + shift + path="$1" + shift + branch="$1" + shift + + parent="$(dirname "$path")" + mkdir -p "$parent" + (cd "$parent" && run git clone --single-branch --branch "$branch" "$repo" "$path") || die "cloning $repo" failed + # This is useful for local testing or when switching between different revisions in the same + # repo. + (cd "$path" && run git clean -fdx) || die "failed to clean $path" +} + +list_gates () ( + set -f; IFS=',' + # Ignore: Double quote to prevent globbing and word splitting. + # shellcheck disable=SC2086 + set -- $1 + while [ "$1" ]; do + # Ignore: See if you can use ${variable//search/replace} instead. + # shellcheck disable=SC2001 + echo "$1" | sed -e 's/ *\([^ =]*\) *= *\([^ ]*\) */ \1: \2/' + shift + done +) + +go_version_for_kubernetes () ( + local path="$1" + local version="$2" + local go_version + + # We use the minimal Go version specified for each K8S release (= minimum_go_version in hack/lib/golang.sh). + # More recent versions might also work, but we don't want to count on that. + go_version="$(grep minimum_go_version= "$path/hack/lib/golang.sh" | sed -e 's/.*=go//')" + if ! [ "$go_version" ]; then + die "Unable to determine Go version for Kubernetes $version from hack/lib/golang.sh." 
+ fi + echo "$go_version" +) + +csi_prow_kind_have_kubernetes=false +# Brings up a Kubernetes cluster and sets KUBECONFIG. +# Accepts additional feature gates in the form gate1=true|false,gate2=... +start_cluster () { + local image gates + gates="$1" + + if kind get clusters | grep -q csi-prow; then + run kind delete cluster --name=csi-prow || die "kind delete failed" + fi + + # Build from source? + if [[ "${CSI_PROW_KUBERNETES_VERSION}" =~ ^release-|^latest$ ]]; then + if ! ${csi_prow_kind_have_kubernetes}; then + local version="${CSI_PROW_KUBERNETES_VERSION}" + if [ "$version" = "latest" ]; then + version=master + fi + git_clone_branch https://github.com/kubernetes/kubernetes "${CSI_PROW_WORK}/src/kubernetes" "$version" || die "checking out Kubernetes $version failed" + + go_version="$(go_version_for_kubernetes "${CSI_PROW_WORK}/src/kubernetes" "$version")" || die "cannot proceed without knowing Go version for Kubernetes" + run_with_go "$go_version" kind build node-image --type bazel --image csiprow/node:latest --kube-root "${CSI_PROW_WORK}/src/kubernetes" || die "'kind build node-image' failed" + csi_prow_kind_have_kubernetes=true + fi + image="csiprow/node:latest" + else + image="kindest/node:v${CSI_PROW_KUBERNETES_VERSION}" + fi + cat >"${CSI_PROW_WORK}/kind-config.yaml" <>"${CSI_PROW_WORK}/kind-config.yaml" </dev/null; wait) + info "For container output see job artifacts." + die "deploying the hostpath driver with ${deploy_hostpath} failed" + fi +} + +# collect logs and cluster status (like the version of all components, Kubernetes version, test version) +collect_cluster_info () { + cat <>"${ARTIFACTS}/$pod/$container.log" & + echo "$!" + done + done +} + +# Makes the E2E test suite binary available as "${CSI_PROW_WORK}/e2e.test". +install_e2e () { + if [ -e "${CSI_PROW_WORK}/e2e.test" ]; then + return + fi + + git_checkout "${CSI_PROW_E2E_REPO}" "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}" --depth=1 && + if [ "${CSI_PROW_E2E_IMPORT_PATH}" = "k8s.io/kubernetes" ]; then + go_version="${CSI_PROW_GO_VERSION_E2E:-$(go_version_for_kubernetes "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}")}" && + run_with_go "$go_version" make WHAT=test/e2e/e2e.test "-C${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" && + ln -s "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}/_output/bin/e2e.test" "${CSI_PROW_WORK}" + else + run_with_go "${CSI_PROW_GO_VERSION_E2E}" go test -c -o "${CSI_PROW_WORK}/e2e.test" "${CSI_PROW_E2E_IMPORT_PATH}/test/e2e" + fi +} + +# Makes the csi-sanity test suite binary available as +# "${CSI_PROW_WORK}/csi-sanity". +install_sanity () ( + if [ -e "${CSI_PROW_WORK}/csi-sanity" ]; then + return + fi + + git_checkout "${CSI_PROW_SANITY_REPO}" "${GOPATH}/src/${CSI_PROW_SANITY_IMPORT_PATH}" "${CSI_PROW_SANITY_VERSION}" --depth=1 || die "checking out csi-sanity failed" + run_with_go "${CSI_PROW_GO_VERSION_SANITY}" go test -c -o "${CSI_PROW_WORK}/csi-sanity" "${CSI_PROW_SANITY_IMPORT_PATH}/cmd/csi-sanity" || die "building csi-sanity failed" +) + +# Whether the hostpath driver supports raw block devices depends on which version +# we are testing. It would be much nicer if we could determine that by querying the +# installed driver's capabilities instead of having to do a version check. 
+hostpath_supports_block () { + local result + result="$(docker exec csi-prow-control-plane docker image ls --format='{{.Repository}} {{.Tag}} {{.ID}}' | grep hostpath | while read -r repo tag id; do + if [ "$tag" == "v1.0.1" ]; then + # Old version because the revision label is missing: didn't have support yet. + echo "false" + return + fi + done)" + # If not set, then it must be a newer driver with support. + echo "${result:-true}" +} + +# The default implementation of this function generates a external +# driver test configuration for the hostpath driver. +# +# The content depends on both what the E2E suite expects and what the +# installed hostpath driver supports. Generating it here seems prone +# to breakage, but it is uncertain where a better place might be. +generate_test_driver () { + cat <"${CSI_PROW_WORK}/test-driver.yaml" || die "generating test-driver.yaml failed" + + # Rename, merge and filter JUnit files. Necessary in case that we run the E2E suite again + # and to avoid the large number of "skipped" tests that we get from using + # the full Kubernetes E2E testsuite while only running a few tests. + move_junit () { + if ls "${ARTIFACTS}"/junit_[0-9]*.xml 2>/dev/null >/dev/null; then + run_filter_junit -t="External Storage" -o "${ARTIFACTS}/junit_${name}.xml" "${ARTIFACTS}"/junit_[0-9]*.xml && rm -f "${ARTIFACTS}"/junit_[0-9]*.xml + fi + } + trap move_junit EXIT + + cd "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" && + run_with_loggers ginkgo -v "$@" "${CSI_PROW_WORK}/e2e.test" -- -report-dir "${ARTIFACTS}" -storage.testdriver="${CSI_PROW_WORK}/test-driver.yaml" +) + +# Run csi-sanity against installed CSI driver. +run_sanity () ( + install_sanity || die "installing csi-sanity failed" + + cat >"${CSI_PROW_WORK}/mkdir_in_pod.sh" <"${CSI_PROW_WORK}/rmdir_in_pod.sh" </\>/g' -e 's/\x1B...//g' +} + +# The "make test" output starts each test with "### :" +# and then ends when the next test starts or with "make: *** +# [] Error 1" when there was a failure. Here we read each +# line of that output, split it up into individual tests and generate +# a make-test.xml file in JUnit format. +make_test_to_junit () { + local ret out testname testoutput + ret=0 + # Plain make-test.xml was not delivered as text/xml by the web + # server and ignored by spyglass. It seems that the name has to + # match junit*.xml. + out="${ARTIFACTS}/junit_make_test.xml" + testname= + echo "" >>"$out" + + while IFS= read -r line; do + echo "$line" # pass through + if echo "$line" | grep -q "^### [^ ]*:$"; then + if [ "$testname" ]; then + # previous test succesful + echo " " >>"$out" + echo " " >>"$out" + fi + # Ignore: See if you can use ${variable//search/replace} instead. + # shellcheck disable=SC2001 + # + # start new test + testname="$(echo "$line" | sed -e 's/^### \([^ ]*\):$/\1/')" + testoutput= + echo " " >>"$out" + echo " " >>"$out" + elif echo "$line" | grep -q '^make: .*Error [0-9]*$'; then + if [ "$testname" ]; then + # Ignore: Consider using { cmd1; cmd2; } >> file instead of individual redirects. + # shellcheck disable=SC2129 + # + # end test with failure + echo " " >>"$out" + # Include the same text as in also in , + # because then it is easier to view in spyglass (shown directly + # instead of having to click through to stdout). + echo " " >>"$out" + echo -n "$testoutput" | ascii_to_xml >>"$out" + echo " " >>"$out" + echo " " >>"$out" + fi + # remember failure for exit code + ret=1 + # not currently inside a test + testname= + else + if [ "$testname" ]; then + # Test output. 
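# Stream the escaped line into the JUnit file right away and also remember the raw
# text, so that it can be repeated inside the failure element if this test later fails.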
+ echo "$line" | ascii_to_xml >>"$out" + testoutput="$testoutput$line +" + fi + fi + done + # if still in a test, close it now + if [ "$testname" ]; then + echo " " >>"$out" + echo " " >>"$out" + fi + echo "" >>"$out" + + # this makes the error more visible in spyglass + if [ "$ret" -ne 0 ]; then + echo "ERROR: 'make test' failed" + return 1 + fi +} + +main () { + local images ret + ret=0 + + images= + if ${CSI_PROW_BUILD_JOB}; then + # A successful build is required for testing. + run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make all || die "'make all' failed" + # We don't want test failures to prevent E2E testing below, because the failure + # might have been minor or unavoidable, for example when experimenting with + # changes in "release-tools" in a PR (that fails the "is release-tools unmodified" + # test). + if tests_enabled "unit"; then + if [ -f Gopkg.toml ] && ! install_dep; then + warn "installing 'dep' failed, cannot test vendoring" + ret=1 + fi + if ! run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make -k test 2>&1 | make_test_to_junit; then + warn "'make test' failed, proceeding anyway" + ret=1 + fi + fi + # Required for E2E testing. + run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make container || die "'make container' failed" + fi + + if tests_need_kind; then + install_kind || die "installing kind failed" + + if ${CSI_PROW_BUILD_JOB}; then + cmds="$(grep '^\s*CMDS\s*=' Makefile | sed -e 's/\s*CMDS\s*=//')" + # Get the image that was just built (if any) from the + # top-level Makefile CMDS variable and set the + # deploy-hostpath.sh env variables for it. We also need to + # side-load those images into the cluster. + for i in $cmds; do + e=$(echo "$i" | tr '[:lower:]' '[:upper:]' | tr - _) + images="$images ${e}_REGISTRY=none ${e}_TAG=csiprow" + + # We must avoid the tag "latest" because that implies + # always pulling the image + # (https://github.com/kubernetes-sigs/kind/issues/328). + docker tag "$i:latest" "$i:csiprow" || die "tagging the locally built container image for $i failed" + done + + if [ -e deploy/kubernetes/rbac.yaml ]; then + # This is one of those components which has its own RBAC rules (like external-provisioner). + # We are testing a locally built image and also want to test with the the current, + # potentially modified RBAC rules. + if [ "$(echo "$cmds" | wc -w)" != 1 ]; then + die "ambiguous deploy/kubernetes/rbac.yaml: need exactly one command, got: $cmds" + fi + e=$(echo "$cmds" | tr '[:lower:]' '[:upper:]' | tr - _) + images="$images ${e}_RBAC=$(pwd)/deploy/kubernetes/rbac.yaml" + fi + fi + + if tests_need_non_alpha_cluster; then + start_cluster || die "starting the non-alpha cluster failed" + + # Installing the driver might be disabled. + if install_hostpath "$images"; then + collect_cluster_info + + if sanity_enabled; then + if ! run_sanity; then + ret=1 + fi + fi + + if tests_enabled "parallel"; then + # Ignore: Double quote to prevent globbing and word splitting. + # shellcheck disable=SC2086 + if ! run_e2e parallel ${CSI_PROW_GINKO_PARALLEL} \ + -focus="External.Storage" \ + -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}" "${CSI_PROW_E2E_ALPHA}" "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E parallel failed" + ret=1 + fi + fi + + if tests_enabled "serial"; then + if ! 
run_e2e serial \ + -focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_SERIAL}"))" \ + -skip="$(regex_join "${CSI_PROW_E2E_ALPHA}" "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E serial failed" + ret=1 + fi + fi + fi + fi + + if tests_need_alpha_cluster && [ "${CSI_PROW_E2E_ALPHA_GATES}" ]; then + # Need to (re)create the cluster. + start_cluster "${CSI_PROW_E2E_ALPHA_GATES}" || die "starting alpha cluster failed" + + # Installing the driver might be disabled. + if install_hostpath "$images"; then + collect_cluster_info + + if tests_enabled "parallel-alpha"; then + # Ignore: Double quote to prevent globbing and word splitting. + # shellcheck disable=SC2086 + if ! run_e2e parallel-alpha ${CSI_PROW_GINKO_PARALLEL} \ + -focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_ALPHA}"))" \ + -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}" "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E parallel alpha failed" + ret=1 + fi + fi + + if tests_enabled "serial-alpha"; then + if ! run_e2e serial-alpha \ + -focus="External.Storage.*(($(regex_join "${CSI_PROW_E2E_SERIAL}")).*($(regex_join "${CSI_PROW_E2E_ALPHA}"))|($(regex_join "${CSI_PROW_E2E_ALPHA}")).*($(regex_join "${CSI_PROW_E2E_SERIAL}")))" \ + -skip="$(regex_join "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E serial alpha failed" + ret=1 + fi + fi + fi + fi + fi + + # Merge all junit files into one. This gets rid of duplicated "skipped" tests. + if ls "${ARTIFACTS}"/junit_*.xml 2>/dev/null >&2; then + run_filter_junit -o "${CSI_PROW_WORK}/junit_final.xml" "${ARTIFACTS}"/junit_*.xml && rm "${ARTIFACTS}"/junit_*.xml && mv "${CSI_PROW_WORK}/junit_final.xml" "${ARTIFACTS}" + fi + + return "$ret" +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/release-tools/travis.yml b/vendor/github.com/kubernetes-csi/csi-test/release-tools/travis.yml new file mode 100644 index 0000000000..5b0425226f --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/release-tools/travis.yml @@ -0,0 +1,19 @@ +language: go +sudo: required +services: + - docker +matrix: + include: + - go: 1.12.4 +before_script: +- mkdir -p bin +- wget https://github.com/golang/dep/releases/download/v0.5.1/dep-linux-amd64 -O bin/dep +- chmod u+x bin/dep +- export PATH=$PWD/bin:$PATH +script: +- make -k all test +after_success: + - if [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then + docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" quay.io; + make push; + fi diff --git a/vendor/github.com/kubernetes-csi/csi-test/release-tools/util.sh b/vendor/github.com/kubernetes-csi/csi-test/release-tools/util.sh new file mode 100755 index 0000000000..abeb1b2e17 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/release-tools/util.sh @@ -0,0 +1,148 @@ +#!/usr/bin/env bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +function kube::util::sourced_variable { + # Call this function to tell shellcheck that a variable is supposed to + # be used from other calling context. 
This helps quiet an "unused + # variable" warning from shellcheck and also document your code. + true +} + +kube::util::sortable_date() { + date "+%Y%m%d-%H%M%S" +} + +# arguments: target, item1, item2, item3, ... +# returns 0 if target is in the given items, 1 otherwise. +kube::util::array_contains() { + local search="$1" + local element + shift + for element; do + if [[ "${element}" == "${search}" ]]; then + return 0 + fi + done + return 1 +} + +# Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG +# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal +kube::util::trap_add() { + local trap_add_cmd + trap_add_cmd=$1 + shift + + for trap_add_name in "$@"; do + local existing_cmd + local new_cmd + + # Grab the currently defined trap commands for this trap + existing_cmd=$(trap -p "${trap_add_name}" | awk -F"'" '{print $2}') + + if [[ -z "${existing_cmd}" ]]; then + new_cmd="${trap_add_cmd}" + else + new_cmd="${trap_add_cmd};${existing_cmd}" + fi + + # Assign the test. Disable the shellcheck warning telling that trap + # commands should be single quoted to avoid evaluating them at this + # point instead evaluating them at run time. The logic of adding new + # commands to a single trap requires them to be evaluated right away. + # shellcheck disable=SC2064 + trap "${new_cmd}" "${trap_add_name}" + done +} + +kube::util::download_file() { + local -r url=$1 + local -r destination_file=$2 + + rm "${destination_file}" 2&> /dev/null || true + + for i in $(seq 5) + do + if ! curl -fsSL --retry 3 --keepalive-time 2 "${url}" -o "${destination_file}"; then + echo "Downloading ${url} failed. $((5-i)) retries left." + sleep 1 + else + echo "Downloading ${url} succeed" + return 0 + fi + done + return 1 +} + +# Wait for background jobs to finish. Return with +# an error status if any of the jobs failed. +kube::util::wait-for-jobs() { + local fail=0 + local job + for job in $(jobs -p); do + wait "${job}" || fail=$((fail + 1)) + done + return ${fail} +} + +# kube::util::join +# Concatenates the list elements with the delimiter passed as first parameter +# +# Ex: kube::util::join , a b c +# -> a,b,c +function kube::util::join { + local IFS="$1" + shift + echo "$*" +} + +# kube::util::check-file-in-alphabetical-order +# Check that the file is in alphabetical order +# +function kube::util::check-file-in-alphabetical-order { + local failure_file="$1" + if ! diff -u "${failure_file}" <(LC_ALL=C sort "${failure_file}"); then + { + echo + echo "${failure_file} is not in alphabetical order. Please sort it:" + echo + echo " LC_ALL=C sort -o ${failure_file} ${failure_file}" + echo + } >&2 + false + fi +} + +# Some useful colors. 
+if [[ -z "${color_start-}" ]]; then + declare -r color_start="\033[" + declare -r color_red="${color_start}0;31m" + declare -r color_yellow="${color_start}0;33m" + declare -r color_green="${color_start}0;32m" + declare -r color_blue="${color_start}1;34m" + declare -r color_cyan="${color_start}1;36m" + declare -r color_norm="${color_start}0m" + + kube::util::sourced_variable "${color_start}" + kube::util::sourced_variable "${color_red}" + kube::util::sourced_variable "${color_yellow}" + kube::util::sourced_variable "${color_green}" + kube::util::sourced_variable "${color_blue}" + kube::util::sourced_variable "${color_cyan}" + kube::util::sourced_variable "${color_norm}" +fi + +# ex: ts=2 sw=2 et filetype=sh diff --git a/vendor/github.com/kubernetes-csi/csi-test/release-tools/verify-shellcheck.sh b/vendor/github.com/kubernetes-csi/csi-test/release-tools/verify-shellcheck.sh new file mode 100755 index 0000000000..fd28021ace --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/release-tools/verify-shellcheck.sh @@ -0,0 +1,146 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +# The csi-release-tools directory. +TOOLS="$(dirname "${BASH_SOURCE[0]}")" +. "${TOOLS}/util.sh" + +# Directory to check. Default is the parent of the tools themselves. +ROOT="${1:-${TOOLS}/..}" + +# required version for this script, if not installed on the host we will +# use the official docker image instead. keep this in sync with SHELLCHECK_IMAGE +SHELLCHECK_VERSION="0.6.0" +# upstream shellcheck latest stable image as of January 10th, 2019 +SHELLCHECK_IMAGE="koalaman/shellcheck-alpine:v0.6.0@sha256:7d4d712a2686da99d37580b4e2f45eb658b74e4b01caf67c1099adc294b96b52" + +# fixed name for the shellcheck docker container so we can reliably clean it up +SHELLCHECK_CONTAINER="k8s-shellcheck" + +# disabled lints +disabled=( + # this lint disallows non-constant source, which we use extensively without + # any known bugs + 1090 + # this lint prefers command -v to which, they are not the same + 2230 +) +# comma separate for passing to shellcheck +join_by() { + local IFS="$1"; + shift; + echo "$*"; +} +SHELLCHECK_DISABLED="$(join_by , "${disabled[@]}")" +readonly SHELLCHECK_DISABLED + +# creates the shellcheck container for later use +create_container () { + # TODO(bentheelder): this is a performance hack, we create the container with + # a sleep MAX_INT32 so that it is effectively paused. + # We then repeatedly exec to it to run each shellcheck, and later rm it when + # we're done. + # This is incredibly much faster than creating a container for each shellcheck + # call ... 
+ docker run --name "${SHELLCHECK_CONTAINER}" -d --rm -v "${ROOT}:${ROOT}" -w "${ROOT}" --entrypoint="sleep" "${SHELLCHECK_IMAGE}" 2147483647 +} +# removes the shellcheck container +remove_container () { + docker rm -f "${SHELLCHECK_CONTAINER}" &> /dev/null || true +} + +# ensure we're linting the source tree +cd "${ROOT}" + +# find all shell scripts excluding ./_*, ./.git/*, ./vendor*, +# and anything git-ignored +all_shell_scripts=() +while IFS=$'\n' read -r script; + do git check-ignore -q "$script" || all_shell_scripts+=("$script"); +done < <(find . -name "*.sh" \ + -not \( \ + -path ./_\* -o \ + -path ./.git\* -o \ + -path ./vendor\* \ + \)) + +# detect if the host machine has the required shellcheck version installed +# if so, we will use that instead. +HAVE_SHELLCHECK=false +if which shellcheck &>/dev/null; then + detected_version="$(shellcheck --version | grep 'version: .*')" + if [[ "${detected_version}" = "version: ${SHELLCHECK_VERSION}" ]]; then + HAVE_SHELLCHECK=true + fi +fi + +# tell the user which we've selected and possibly set up the container +if ${HAVE_SHELLCHECK}; then + echo "Using host shellcheck ${SHELLCHECK_VERSION} binary." +else + echo "Using shellcheck ${SHELLCHECK_VERSION} docker image." + # remove any previous container, ensure we will attempt to cleanup on exit, + # and create the container + remove_container + kube::util::trap_add 'remove_container' EXIT + if ! output="$(create_container 2>&1)"; then + { + echo "Failed to create shellcheck container with output: " + echo "" + echo "${output}" + } >&2 + exit 1 + fi +fi + +# lint each script, tracking failures +errors=() +for f in "${all_shell_scripts[@]}"; do + set +o errexit + if ${HAVE_SHELLCHECK}; then + failedLint=$(shellcheck --exclude="${SHELLCHECK_DISABLED}" "${f}") + else + failedLint=$(docker exec -t ${SHELLCHECK_CONTAINER} \ + shellcheck --exclude="${SHELLCHECK_DISABLED}" "${f}") + fi + set -o errexit + if [[ -n "${failedLint}" ]]; then + errors+=( "${failedLint}" ) + fi +done + +# Check to be sure all the packages that should pass lint are. +if [ ${#errors[@]} -eq 0 ]; then + echo 'Congratulations! All shell files are passing lint.' +else + { + echo "Errors from shellcheck:" + for err in "${errors[@]}"; do + echo "$err" + done + echo + echo 'Please review the above warnings. You can test via "./hack/verify-shellcheck"' + echo 'If the above warnings do not make sense, you can exempt them from shellcheck' + echo 'checking by adding the "shellcheck disable" directive' + echo '(https://github.com/koalaman/shellcheck/wiki/Directive#disable).' + echo + } >&2 + false +fi diff --git a/vendor/github.com/kubernetes-csi/csi-test/release-tools/verify-subtree.sh b/vendor/github.com/kubernetes-csi/csi-test/release-tools/verify-subtree.sh new file mode 100755 index 0000000000..f04a9fa26c --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/release-tools/verify-subtree.sh @@ -0,0 +1,41 @@ +#! /bin/sh -e +# +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
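# Typical invocation from a repository that vendors these tools via "git subtree"
# (path is illustrative): ./release-tools/verify-subtree.sh release-tools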
+ +# This script verifies that the content of a directory managed +# by "git subtree" has not been modified locally. It does that +# by looking for commits that modify the files with the +# subtree prefix (aka directory) while ignoring merge +# commits. Merge commits are where "git subtree" pulls the +# upstream files into the directory. +# +# Theoretically a developer can subvert this check by modifying files +# in a merge commit, but in practice that shouldn't happen. + +DIR="$1" +if [ ! "$DIR" ]; then + echo "usage: $0 " >&2 + exit 1 +fi + +REV=$(git log -n1 --remove-empty --format=format:%H --no-merges -- "$DIR") +if [ "$REV" ]; then + echo "Directory '$DIR' contains non-upstream changes:" + echo + git log --no-merges -- "$DIR" + exit 1 +else + echo "$DIR is a clean copy of upstream." +fi diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/driver/driver-controller.go b/vendor/github.com/kubernetes-csi/csi-test/v3/driver/driver-controller.go new file mode 120000 index 0000000000..9d51eebc2e --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/driver/driver-controller.go @@ -0,0 +1 @@ +../../driver/driver-controller.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/driver/driver-node.go b/vendor/github.com/kubernetes-csi/csi-test/v3/driver/driver-node.go new file mode 120000 index 0000000000..fed1880779 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/driver/driver-node.go @@ -0,0 +1 @@ +../../driver/driver-node.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/driver/driver.go b/vendor/github.com/kubernetes-csi/csi-test/v3/driver/driver.go new file mode 120000 index 0000000000..6a987fdf48 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/driver/driver.go @@ -0,0 +1 @@ +../../driver/driver.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/driver/driver.mock.go b/vendor/github.com/kubernetes-csi/csi-test/v3/driver/driver.mock.go new file mode 120000 index 0000000000..15e6e3ed30 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/driver/driver.mock.go @@ -0,0 +1 @@ +../../driver/driver.mock.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/driver/mock.go b/vendor/github.com/kubernetes-csi/csi-test/v3/driver/mock.go new file mode 120000 index 0000000000..3aaf6c436f --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/driver/mock.go @@ -0,0 +1 @@ +../../driver/mock.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/mock/cache/SnapshotCache.go b/vendor/github.com/kubernetes-csi/csi-test/v3/mock/cache/SnapshotCache.go new file mode 120000 index 0000000000..f4e14d24aa --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/mock/cache/SnapshotCache.go @@ -0,0 +1 @@ +../../../mock/cache/SnapshotCache.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/mock/service/controller.go b/vendor/github.com/kubernetes-csi/csi-test/v3/mock/service/controller.go new file mode 120000 index 0000000000..b2043f888a --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/mock/service/controller.go @@ -0,0 +1 @@ +../../../mock/service/controller.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/mock/service/identity.go b/vendor/github.com/kubernetes-csi/csi-test/v3/mock/service/identity.go new file mode 120000 index 0000000000..b77a5303a6 --- /dev/null +++ 
b/vendor/github.com/kubernetes-csi/csi-test/v3/mock/service/identity.go @@ -0,0 +1 @@ +../../../mock/service/identity.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/mock/service/node.go b/vendor/github.com/kubernetes-csi/csi-test/v3/mock/service/node.go new file mode 120000 index 0000000000..5eaad7e1bf --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/mock/service/node.go @@ -0,0 +1 @@ +../../../mock/service/node.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/mock/service/service.go b/vendor/github.com/kubernetes-csi/csi-test/v3/mock/service/service.go new file mode 120000 index 0000000000..3dff34862f --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/mock/service/service.go @@ -0,0 +1 @@ +../../../mock/service/service.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/cleanup.go b/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/cleanup.go new file mode 120000 index 0000000000..6e6c03751c --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/cleanup.go @@ -0,0 +1 @@ +../../../pkg/sanity/cleanup.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/controller.go b/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/controller.go new file mode 120000 index 0000000000..01753eb921 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/controller.go @@ -0,0 +1 @@ +../../../pkg/sanity/controller.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/identity.go b/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/identity.go new file mode 120000 index 0000000000..2c696c5167 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/identity.go @@ -0,0 +1 @@ +../../../pkg/sanity/identity.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/node.go b/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/node.go new file mode 120000 index 0000000000..b3cbff94a3 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/node.go @@ -0,0 +1 @@ +../../../pkg/sanity/node.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/sanity.go b/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/sanity.go new file mode 120000 index 0000000000..9656c4bb63 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/sanity.go @@ -0,0 +1 @@ +../../../pkg/sanity/sanity.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/tests.go b/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/tests.go new file mode 120000 index 0000000000..c2ff5c226f --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/tests.go @@ -0,0 +1 @@ +../../../pkg/sanity/tests.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/util.go b/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/util.go new file mode 120000 index 0000000000..723383c01c --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/util.go @@ -0,0 +1 @@ +../../../pkg/sanity/util.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/utils/grpcutil.go b/vendor/github.com/kubernetes-csi/csi-test/v3/utils/grpcutil.go new file mode 120000 index 
0000000000..41483ec227 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/utils/grpcutil.go @@ -0,0 +1 @@ +../../utils/grpcutil.go \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/csi-test/v3/utils/safegoroutinetester.go b/vendor/github.com/kubernetes-csi/csi-test/v3/utils/safegoroutinetester.go new file mode 120000 index 0000000000..241ea3f986 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/v3/utils/safegoroutinetester.go @@ -0,0 +1 @@ +../../utils/safegoroutinetester.go \ No newline at end of file
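# The v3/ files above are plain relative symlinks (git mode 120000) back to the
# top-level copies; for example (run from the repository root):
#   readlink vendor/github.com/kubernetes-csi/csi-test/v3/pkg/sanity/sanity.go
#   # -> ../../../pkg/sanity/sanity.go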