diff --git a/.github/.codecov.yml b/.github/.codecov.yml new file mode 100644 index 00000000..ef9ddeb5 --- /dev/null +++ b/.github/.codecov.yml @@ -0,0 +1,29 @@ +# validate +# cat .codecov.yml | curl --data-binary @- https://codecov.io/validate + +codecov: + require_ci_to_pass: yes + +coverage: + precision: 2 + round: down + range: "70...100" + status: + project: + default: + if_ci_failed: error #success, failure, error, ignore + informational: true + only_pulls: true + +comment: + layout: "reach,diff,flags,files,footer" + behavior: default + require_changes: no + +ignore: + - "config/**/*" + - "pkg/apis/**/*" + - "mocks/**/*" + - "integration/shared/scenarios/**/*" + - "pkg/common/logger.go" + - "test/*" diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..9beafcac --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,11 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. 
+# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "gomod" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "monthly" diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index aa238822..65185648 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,6 +1,8 @@ name: build on: + push: + branches: [ main ] pull_request: branches: [ main ] @@ -9,15 +11,24 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - name: Set up Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: - go-version: 1.17 + go-version: 1.19 + + - name: checkout + uses: actions/checkout@v4 - - name: Run unit tests + - name: Unit tests run: make test - - name: Run acceptance tests - run: make e2e-test + - name: Upload code coverage + uses: codecov/codecov-action@v4 + with: + files: cover.out + + - name: golangci-lint + uses: golangci/golangci-lint-action@v5 + with: + version: v1.50.1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 00000000..1fd747ee --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,71 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. 
+# +name: "CodeQL" + +on: + push: + branches: [ main ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ main ] + schedule: + - cron: '35 16 * * 5' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'go' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] + # Learn more: + # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v3 + + # â„šī¸ Command-line programs to run using the OS shell. 
+ # 📚 https://git.io/JvXDl + + # âœī¸ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 254a796e..fc5588d3 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -14,18 +14,18 @@ jobs: contents: read packages: write steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: - go-version: 1.17 + go-version: 1.19 - name: Build image run: make docker-build - name: Login to GitHub Container Registry - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.actor }} diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml new file mode 100644 index 00000000..4214f557 --- /dev/null +++ b/.github/workflows/integration-test.yml @@ -0,0 +1,38 @@ +name: integration +on: + push: + branches: + - main +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true +jobs: + integration-test: + name: Run Integration Test + runs-on: ubuntu-latest + environment: Integration Test + permissions: + id-token: write + steps: + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-region: us-west-2 + role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME }} + role-session-name: IntegrationTestSession + - name: Checkout code + uses: actions/checkout@v4 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: 1.19 + - name: Set up env + run: source ~/.bashrc + - name: Start clean + run: make kind-integration-cleanup + - name: Set up cluster + run: make kind-integration-setup + - name: Run 
tests + run: make kind-integration-run + - name: Clean up clusters + run: make kind-integration-cleanup diff --git a/.github/workflows/mkdocs.yml b/.github/workflows/mkdocs.yml new file mode 100644 index 00000000..314e33ba --- /dev/null +++ b/.github/workflows/mkdocs.yml @@ -0,0 +1,22 @@ +name: mkdocs +on: + push: + branches: + - main +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - name: Checkout main + uses: actions/checkout@v4 + + - name: Setup python + uses: actions/setup-python@v5 + with: + python-version: 3.x + + - name: Install mkdocs + run: pip install mkdocs-material + + - name: Publish mkdocs + run: mkdocs gh-deploy --force diff --git a/.gitignore b/.gitignore index e828964d..3748628d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ vendor/ bin/ testbin/ +testlog/ cover.out # Files generated by JetBrains IDEs, e.g. IntelliJ IDEA @@ -10,8 +11,5 @@ cover.out # OSX trash .DS_Store -# mocks generated by mockgen +#mocks generated by mockgen mocks/ - -# e2e test artifacts -_artifacts/ diff --git a/.golangci.yaml b/.golangci.yaml new file mode 100644 index 00000000..aae8ba43 --- /dev/null +++ b/.golangci.yaml @@ -0,0 +1,35 @@ +linters-settings: + errcheck: + check-type-assertions: true + goconst: + min-len: 2 + min-occurrences: 3 + govet: + check-shadowing: true + nolintlint: + require-explanation: true + require-specific: true + +linters: + enable: + - dupl + - goconst + - gocritic + - gofmt + - goimports + - misspell + - whitespace + +issues: + exclude-rules: + - path: _test\.go # disable some linters on test files + linters: + - dupl + +run: + issues-exit-code: 1 + concurrency: 4 + skip: + - .*_mock.go + - mocks/ + - pkg/apis/ diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index a0cf709b..00000000 --- a/CHANGELOG.md +++ /dev/null @@ -1 +0,0 @@ -# CHANGELOG diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6d97c743..3fdd9f64 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -6,17 +6,61 @@ documentation, we greatly value 
feedback and contributions from our community. Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution. + +* [Contributing Guidelines](#contributing-guidelines) + * [Architecture Overview](#architecture-overview) + * [Getting Started](#getting-started) + * [Build and Unit Tests](#build-and-unit-tests) + * [Local Setup](#local-setup) + * [Prerequisites](#prerequisites) + * [Kind Cluster Setup](#kind-cluster-setup) + * [Run the controller from outside the cluster](#run-the-controller-from-outside-the-cluster) + * [Build and deploy controller into the cluster](#build-and-deploy-controller-into-the-cluster) + * [Local integration testing](#local-integration-testing) + * [Build and push docker image](#build-and-push-docker-image) + * [Reporting Bugs/Feature Requests](#reporting-bugsfeature-requests) + * [Contributing via Pull Requests](#contributing-via-pull-requests) + * [Finding contributions to work on](#finding-contributions-to-work-on) + * [Code of Conduct](#code-of-conduct) + * [Security issue notifications](#security-issue-notifications) + * [Licensing](#licensing) + + +## Architecture Overview + +![Architecture diagram](docs/architecture-overview.png?raw=true) + +* `pkg/controllers/serviceexport_controller` is watching changes on K8s `ServiceExport` resources (and corresponding services/endpoints). As soon as any change in configuration is detected, it registers all exported service endpoints to corresponding (same namespace/service names) AWS Cloud Map structures (namespace, service, instances). +* `pkg/controllers/cloudmap_controller` is periodically polling for changes in corresponding AWS Cloud Map namespaces (based on namespace "sameness" - a K8s namespace with the same name as a Cloud Map namespace). When new service or endpoints are discovered they are automatically created locally as a `ServiceImport`. 
+ ## Getting Started -### Build and Run locally +### Build and Unit Tests + +Use command below to run the unit test: +```sh +make test +``` + +Use command below to build: +```sh +make build +``` + +Use the command below to perform cleanup: +```sh +make clean +``` + +### Local Setup #### Prerequisites In order to build and run locally: -* Make sure to have `kubectl` [installed](https://kubernetes.io/docs/tasks/tools/install-kubectl/), at least version `1.15` or above. +* Make sure to have `kubectl` [installed](https://kubernetes.io/docs/tasks/tools/install-kubectl/), at least version `1.19` or above. * Make sure to have `kind` [installed](https://kind.sigs.k8s.io/docs/user/quick-start/#installation). -* Make sure, you have created a [HttpNamespace](https://docs.aws.amazon.com/cloud-map/latest/api/API_CreateHttpNamespace.html) in AWS Cloud Map. The examples below assumes the namespace name to be `demo` +* Make sure, you have access to AWS Cloud Map. As per exercise below, AWS Cloud Map namespace `example` of the type [HttpNamespace](https://docs.aws.amazon.com/cloud-map/latest/api/API_CreateHttpNamespace.html) will be automatically created. Note that this walk-through assumes throughout to operate in the `us-west-2` region. @@ -24,107 +68,140 @@ Note that this walk-through assumes throughout to operate in the `us-west-2` reg export AWS_REGION=us-west-2 ``` -#### Cluster provisioning +#### Kind Cluster Setup + +Spin up a local Kubernetes cluster using `kind`: -Spin up a local Kubernetes cluster using `kind` ```sh kind create cluster --name my-cluster # Creating cluster "my-cluster" ... # ... 
``` -When completed, set the kubectl context +When completed, set the kubectl context: ```sh kind export kubeconfig --name my-cluster # Set kubectl context to "kind-my-cluster" ``` -To register the custom CRDs (`ServiceImport`, `ServiceExport`) in the cluster and create installers +Create `example` namespace in the cluster: +```sh +kubectl create namespace example +# namespace/example created +``` + +#### Run the controller from outside the cluster + +To register the custom CRDs (`ClusterProperties`, `ServiceImport`, `ServiceExport`) in the cluster and create installers: ```sh make install # ... +# customresourcedefinition.apiextensions.k8s.io/clusterproperties.about.k8s.io created # customresourcedefinition.apiextensions.k8s.io/serviceexports.multicluster.x-k8s.io created # customresourcedefinition.apiextensions.k8s.io/serviceimports.multicluster.x-k8s.io created ``` -To run the controller, run the following command. The controller runs in an infinite loop so open another terminal to create CRDs. -```sh -make run +Register a unique `cluster.clusterset.k8s.io` and `clusterset.k8s.io` in your cluster: +```bash +kubectl apply -f samples/example-clusterproperty.yaml +# clusterproperty.about.k8s.io/cluster.clusterset.k8s.io created +# clusterproperty.about.k8s.io/clusterset.k8s.io created ``` +> ⚠ **Note:** If you are creating multiple clusters, ensure you create unique `cluster.clusterset.k8s.io` identifiers for each cluster. -Create `demo` namespace + +To run the controller, run the following command. The controller runs in an infinite loop so open another terminal to create CRDs. 
(Ctrl+C to exit) ```sh -kubectl create namespace demo -# namespace/demo created +make run ``` -Apply deployment, service and export configs +Apply deployment, service and serviceexport configs: ```sh -kubectl apply -f samples/demo-deployment.yaml +kubectl apply -f samples/example-deployment.yaml # deployment.apps/nginx-deployment created -kubectl apply -f samples/demo-service.yaml -# service/demo-service created -kubectl apply -f samples/demo-export.yaml -# serviceexport.multicluster.x-k8s.io/demo-service created +kubectl apply -f samples/example-service.yaml +# service/my-service created +kubectl apply -f samples/example-serviceexport.yaml +# serviceexport.multicluster.x-k8s.io/my-service created ``` -Check running controller if it correctly detects newly created resources +Check running controller if it correctly detects newly created resources: ``` -controllers.ServiceExport updating Cloud Map service {"serviceexport": "demo/demo-service", "namespace": "demo", "name": "demo-service"} -cloudmap fetching a service {"namespaceName": "demo", "serviceName": "demo-service"} -cloudmap creating a new service {"namespace": "demo", "name": "demo-service"} +controllers.ServiceExport updating Cloud Map service {"serviceexport": "example/my-service", "namespace": "example", "name": "my-service"} +cloudmap fetching a service {"namespaceName": "example", "serviceName": "my-service"} +cloudmap creating a new service {"namespace": "example", "name": "my-service"} ``` -#### Run unit tests - -Use command below to run the unit test +Use the command below to remove the CRDs from the cluster: ```sh -make test +make uninstall ``` -#### Cleanup +#### Build and deploy controller into the cluster -Use the command below to clean all the generated files +Build local `controller` docker image: ```sh -make clean +make docker-build IMG=controller:local +# ... +# docker build --no-cache -t controller:local . +# ... 
+# +``` -Use the command below to delete the cluster `my-cluster` +Load the controller docker image into the kind cluster `my-cluster`: ```sh -kind delete cluster --name my-cluster +kind load docker-image controller:local --name my-cluster +# Image: "controller:local" with ID "sha256:xxx" not yet present on node "my-cluster-control-plane", loading... ``` -### Deploying to a cluster +> ⚠ **The controller still needs credentials to interact with the AWS SDK.** We are not supporting this configuration out of the box. There are multiple ways to achieve this within the cluster. + +Finally, create the controller resources in the cluster: +```sh +make deploy IMG=controller:local AWS_REGION=us-west-2 +# customresourcedefinition.apiextensions.k8s.io/clusterproperties.about.k8s.io created +# customresourcedefinition.apiextensions.k8s.io/serviceexports.multicluster.x-k8s.io created +# customresourcedefinition.apiextensions.k8s.io/serviceimports.multicluster.x-k8s.io created +# ... +# deployment.apps/cloud-map-mcs-controller-manager created +``` -You must first push a Docker image containing the changes to a Docker repository like ECR. +Stream the controller logs: +```shell +kubectl logs -f -l control-plane=controller-manager -c manager -n cloud-map-mcs-system +``` -### Build and push docker image to ECR +To remove the controller from your cluster, run: +```sh +make undeploy +``` +Use the command below to delete the cluster `my-cluster`: ```sh -make docker-build docker-push IMG=.dkr.ecr..amazonaws.com/ +kind delete cluster --name my-cluster ``` -#### Deployment +### Local integration testing -You must specify AWS access credentials for the operator. You can do so via environment variables, or by creating them. +The end-to-end integration test suite can be run locally to validate controller core functionality. This will provision a local Kind cluster and build and run the AWS Cloud Map MCS Controller for K8s. The test will verify service endpoints sync with AWS Cloud Map. 
If successful, the suite will then de-provision the local test cluster and delete AWS Cloud Map namespace `aws-cloud-map-mcs-e2e` along with test service and service instance resources: +```sh +make kind-integration-suite +``` -Any one of below three options will work: +If integration test suite fails for some reason, you can perform a cleanup: ```sh -# With an IAM user. -make deploy +make kind-integration-cleanup +``` -# Use an existing access key -OPERATOR_AWS_ACCESS_KEY_ID=xxx OPERATOR_AWS_SECRET_KEY=yyy make deploy +## Build and push docker image -# Use an AWS profile -OPERATOR_AWS_PROFILE=default make deploy -``` +You must first push a Docker image containing the changes to a Docker repository like ECR, Github packages, or DockerHub. The repo is configured to use Github Actions to automatically publish the docker image upon push to `main` branch. The image URI will be `ghcr.io/[Your forked repo name here]` You can enable this for forked repos by enabling Github actions on your forked repo in the "Actions" tab of forked repo. -#### Uninstallation +If you are deploying to cluster using kustomize templates from the `config` directory, you will need to override the image URI away from `ghcr.io/aws/aws-cloud-map-mcs-controller-for-k8s` in order to use your own docker images. 
-To remove the operator from your cluster, run +To push the docker image into personal repo: ```sh -make undeploy +make docker-build docker-push IMG=[Your personal repo] ``` ## Reporting Bugs/Feature Requests diff --git a/Dockerfile b/Dockerfile index 130b3a4e..1f742f3e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM golang:1.17 as builder +FROM golang:1.19 as builder WORKDIR /workspace diff --git a/Makefile b/Makefile index dc4a79bf..f6a2fb24 100644 --- a/Makefile +++ b/Makefile @@ -4,8 +4,8 @@ PKG:=github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/version # Image URL to use all building/pushing image targets IMG ?= controller:latest -# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) -CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false" +# AWS Region +AWS_REGION ?= us-east-1 # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) @@ -15,7 +15,6 @@ GOBIN=$(shell go env GOBIN) endif # Setting SHELL to bash allows bash commands to be executed by recipes. -# This is a requirement for 'setup-envtest.sh' in the test target. # Options are set to exit when a recipe line exits non-zero or a piped command fails. SHELL = /usr/bin/env bash -o pipefail .SHELLFLAGS = -ec @@ -41,7 +40,7 @@ help: ## Display this help. ##@ Development manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. - $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases + $(CONTROLLER_GEN) crd rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." @@ -52,40 +51,82 @@ fmt: ## Run go fmt against code. 
vet: ## Run go vet against code. go vet ./... -ENVTEST_ASSETS_DIR=$(shell pwd)/testbin -test: manifests generate generate-mocks fmt vet ## Run tests. - mkdir -p ${ENVTEST_ASSETS_DIR} - test -f ${ENVTEST_ASSETS_DIR}/setup-envtest.sh || curl -sSLo ${ENVTEST_ASSETS_DIR}/setup-envtest.sh https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/v0.7.2/hack/setup-envtest.sh - source ${ENVTEST_ASSETS_DIR}/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test ./... -coverprofile cover.out - -KUBECTL=$(ENVTEST_ASSETS_DIR)/bin/kubectl -TEST_CONFIG=$(shell pwd)/testconfig -E2E_CLUSTER=aws-cloudmap-mcs-e2e -e2e-test: manifests kustomize kubetest2 fmt vet - $(KUBETEST2-KIND) --cluster-name $(E2E_CLUSTER) --up - $(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f - - $(KUBECTL) create namespace aws-cloudmap-mcs-e2e - $(KUBECTL) apply -f $(TEST_CONFIG)/e2e-deployment.yaml - $(KUBECTL) apply -f $(TEST_CONFIG)/e2e-service-one.yaml - $(KUBECTL) apply -f $(TEST_CONFIG)/e2e-export.yaml - $(KUBETEST2-KIND) --cluster-name $(E2E_CLUSTER) --down +mod: + go mod download + +tidy: + go mod tidy + +.PHONY: lint +lint: golangci-lint ## Run linter + $(GOLANGCI_LINT) run + +.PHONY: goimports +goimports: goimports-bin ## run goimports updating files in place + $(GOIMPORTS) -w . + +.PHONY: test +test: manifests generate generate-mocks fmt vet goimports lint ## Run tests + @echo Testing... + go test ./... 
-coverprofile=cover.out -covermode=atomic + +kind-integration-suite: ## Provision and run integration tests with cleanup + export ADDRESS_TYPE="IPv4" && \ + make kind-integration-setup && \ + make kind-integration-run && \ + make kind-integration-cleanup + + export ADDRESS_TYPE="IPv6" && \ + make kind-integration-setup && \ + make kind-integration-run && \ + make kind-integration-cleanup + +kind-integration-setup: build kind ## Setup the integration test using kind clusters + @./integration/kind-test/scripts/setup-kind.sh + +kind-integration-run: ## Run the integration test controller + @./integration/kind-test/scripts/run-helper.sh + +kind-integration-cleanup: kind ## Cleanup integration test resources in Cloud Map and local kind cluster + @./integration/kind-test/scripts/cleanup-kind.sh + +eks-integration-suite: ## Provision and run EKS integration tests with cleanup + make eks-integration-setup && \ + make eks-integration-run && \ + make eks-integration-cleanup + +eks-integration-setup: build ## Setup the integration test using EKS clusters + @./integration/eks-test/scripts/eks-setup.sh + +eks-integration-run: ## Run the integration test controller + @./integration/eks-test/scripts/eks-run-tests.sh + +eks-integration-cleanup: ## Cleanup integration test resources in Cloud Map and EKS cluster + @./integration/eks-test/scripts/eks-cleanup.sh + +eks-test: + @./integration/eks-test/scripts/eks-test.sh ##@ Build -build: manifests generate generate-mocks fmt vet ## Build manager binary. +.DEFAULT: build +build: test ## Build manager binary. go build -ldflags="-s -w -X ${PKG}.GitVersion=${GIT_TAG} -X ${PKG}.GitCommit=${GIT_COMMIT}" -o bin/manager main.go -run: manifests generate generate-mocks fmt vet ## Run a controller from your host. - go run ./main.go +run: test ## Run a controller from your host. 
+ go run -ldflags="-s -w -X ${PKG}.GitVersion=${GIT_TAG} -X ${PKG}.GitCommit=${GIT_COMMIT}" ./main.go --zap-devel=true --zap-time-encoding=rfc3339 $(ARGS) docker-build: test ## Build docker image with the manager. - docker build -t ${IMG} . + docker build --no-cache -t ${IMG} . docker-push: ## Push docker image with the manager. docker push ${IMG} +.PHONY: clean clean: - rm -rf $(MOCKS_DESTINATION) bin/ testbin/ cover.out + @echo Cleaning... + go clean + rm -rf $(MOCKS_DESTINATION)/ bin/ cover.out ##@ Deployment @@ -97,30 +138,51 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} - $(KUSTOMIZE) build config/default | kubectl apply -f - + AWS_REGION=${AWS_REGION} $(KUSTOMIZE) build config/default | kubectl apply -f - undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. $(KUSTOMIZE) build config/default | kubectl delete -f - MOCKS_DESTINATION=mocks generate-mocks: mockgen - $(MOCKGEN) --source pkg/cloudmap/client.go --destination $(MOCKS_DESTINATION)/pkg/cloudmap/client_mock.go --package cloudmap +ifneq ($(shell test -d $(MOCKS_DESTINATION); echo $$?), 0) + @echo Generating mocks... 
+ $(MOCKGEN) --source pkg/cloudmap/client.go --destination $(MOCKS_DESTINATION)/pkg/cloudmap/client_mock.go --package cloudmap_mock + $(MOCKGEN) --source pkg/cloudmap/cache.go --destination $(MOCKS_DESTINATION)/pkg/cloudmap/cache_mock.go --package cloudmap_mock + $(MOCKGEN) --source pkg/cloudmap/operation_poller.go --destination $(MOCKS_DESTINATION)/pkg/cloudmap/operation_poller_mock.go --package cloudmap_mock + $(MOCKGEN) --source pkg/cloudmap/api.go --destination $(MOCKS_DESTINATION)/pkg/cloudmap/api_mock.go --package cloudmap_mock + $(MOCKGEN) --source pkg/cloudmap/aws_facade.go --destination $(MOCKS_DESTINATION)/pkg/cloudmap/aws_facade_mock.go --package cloudmap_mock + $(MOCKGEN) --source integration/janitor/api.go --destination $(MOCKS_DESTINATION)/integration/janitor/api_mock.go --package janitor_mock + $(MOCKGEN) --source integration/janitor/aws_facade.go --destination $(MOCKS_DESTINATION)/integration/janitor/aws_facade_mock.go --package janitor_mock +endif CONTROLLER_GEN = $(shell pwd)/bin/controller-gen controller-gen: ## Download controller-gen locally if necessary. - $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.1) + $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.9.2) KUSTOMIZE = $(shell pwd)/bin/kustomize kustomize: ## Download kustomize locally if necessary. 
- $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7) + $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v4@v4.5.5) MOCKGEN = $(shell pwd)/bin/mockgen mockgen: ## Download mockgen $(call go-get-tool,$(MOCKGEN),github.com/golang/mock/mockgen@v1.6.0) -KUBETEST2-KIND = $(shell pwd)/bin/kubetest2-kind -kubetest2: ## Download kubetest2 - $(call go-get-tool,$(KUBETEST2-KIND),sigs.k8s.io/kubetest2/kubetest2-kind@latest) +GOLANGCI_LINT=$(shell pwd)/bin/golangci-lint +golangci-lint: ## Download golangci-lint +ifneq ($(shell test -f $(GOLANGCI_LINT); echo $$?), 0) + @echo Getting golangci-lint... + @mkdir -p $(shell pwd)/bin + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell pwd)/bin v1.50.1 +endif + +GOIMPORTS = $(shell pwd)/bin/goimports +goimports-bin: ## Download goimports + $(call go-get-tool,$(GOIMPORTS),golang.org/x/tools/cmd/goimports@v0.1.12) + +KIND = $(shell pwd)/bin/kind +kind: ## Download kind + $(call go-get-tool,$(KIND),sigs.k8s.io/kind@v0.14.0) # go-get-tool will 'go get' any package $2 and install it to $1. 
PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) @@ -131,7 +193,7 @@ TMP_DIR=$$(mktemp -d) ;\ cd $$TMP_DIR ;\ go mod init tmp ;\ echo "Downloading $(2)" ;\ -GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\ +GOBIN=$(PROJECT_DIR)/bin go install $(2) ;\ rm -rf $$TMP_DIR ;\ } endef diff --git a/PROJECT b/PROJECT index 60ca5e0f..83e31c52 100644 --- a/PROJECT +++ b/PROJECT @@ -3,6 +3,7 @@ layout: - go.kubebuilder.io/v3 projectName: aws-cloud-map-mcs-controller-for-k8s repo: github.com/aws/aws-cloud-map-mcs-controller-for-k8s +multigroup: true resources: - api: crdVersion: v1 @@ -11,7 +12,7 @@ resources: domain: x-k8s.io group: multicluster kind: ServiceExport - path: github.com/aws/aws-cloud-map-mcs-controller-for-k8s/api/v1alpha1 + path: github.com/aws/aws-cloud-map-mcs-controller-for-k8s/apis/multicluster/v1alpha1 version: v1alpha1 - api: crdVersion: v1 @@ -20,6 +21,6 @@ resources: domain: x-k8s.io group: multicluster kind: ServiceImport - path: github.com/aws/aws-cloud-map-mcs-controller-for-k8s/api/v1alpha1 + path: github.com/aws/aws-cloud-map-mcs-controller-for-k8s/apis/multicluster/v1alpha1 version: v1alpha1 version: "3" diff --git a/README.md b/README.md index 829847e9..7458d960 100644 --- a/README.md +++ b/README.md @@ -1,21 +1,186 @@ # AWS Cloud Map MCS Controller for K8s -[![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/issues) -[![License](https://img.shields.io/badge/license-Apache--2.0-blue.svg?color=success)](http://www.apache.org/licenses/LICENSE-2.0) -![GitHub issues](https://img.shields.io/github/issues-raw/aws/aws-cloud-map-mcs-controller-for-k8s?style=flat) +[![Documentation](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/aws/aws-cloud-map-mcs-controller-for-k8s) 
+[![CodeQL](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/actions/workflows/codeql-analysis.yml/badge.svg?branch=main)](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/actions/workflows/codeql-analysis.yml) +[![Build status](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/actions/workflows/build.yml/badge.svg?branch=main)](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/actions/workflows/build.yml) +[![Deploy status](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/actions/workflows/deploy.yml/badge.svg?branch=main)](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/actions/workflows/deploy.yml) +[![Integration status](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/actions/workflows/integration-test.yml/badge.svg?branch=main)](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/actions/workflows/integration-test.yml) +[![codecov](https://codecov.io/gh/aws/aws-cloud-map-mcs-controller-for-k8s/branch/main/graph/badge.svg)](https://codecov.io/gh/aws/aws-cloud-map-mcs-controller-for-k8s) -[![Deploy status](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/actions/workflows/deploy.yml/badge.svg)](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/actions/workflows/deploy.yml) +[![License](https://img.shields.io/badge/license-Apache--2.0-blue.svg?color=success)](http://www.apache.org/licenses/LICENSE-2.0) +[![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/issues) +[![GitHub issues](https://img.shields.io/github/issues-raw/aws/aws-cloud-map-mcs-controller-for-k8s?style=flat)](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/issues) [![Go Report Card](https://goreportcard.com/badge/github.com/aws/aws-cloud-map-mcs-controller-for-k8s)](https://goreportcard.com/report/github.com/aws/aws-cloud-map-mcs-controller-for-k8s) ## Introduction -AWS 
Cloud Map multi-cluster service discovery for Kubernetes (K8s) is a controller that implements existing multi-cluster services API that allows services to communicate across multiple clusters. The implementation relies on [AWS Cloud Map](https://aws.amazon.com/cloud-map/) for enabling cross-cluster service discovery. +The AWS Cloud Map Multi-cluster Service Discovery Controller for Kubernetes (K8s) implements the Kubernetes [KEP-1645: Multi-Cluster Services API](https://github.com/kubernetes/enhancements/tree/master/keps/sig-multicluster/1645-multi-cluster-services-api) and [KEP-2149: ClusterId for ClusterSet identification](https://github.com/kubernetes/enhancements/tree/master/keps/sig-multicluster/2149-clusterid), which allows services to communicate across multiple clusters. The implementation relies on [AWS Cloud Map](https://aws.amazon.com/cloud-map/) for enabling cross-cluster service discovery. We have detailed [step-by-step setup guide](https://aws.amazon.com/blogs/opensource/kubernetes-multi-cluster-service-discovery-using-open-source-aws-cloud-map-mcs-controller/)! + +**⚠ NOTE: The current version [![GitHub Release](https://img.shields.io/github/release/aws/aws-cloud-map-mcs-controller-for-k8s.svg?style=flat&label=)]() is in *Alpha* phase, and NOT intended for production use. The support will be limited to critical bug fixes.** + +*Checkout the [Graduation Criteria](#graduation-criteria) for moving the project to the next phase.* + +## Installation + +Perform the following installation steps on each participating cluster. + +- For multi-cluster service discovery and consumption, the controller should be installed on a minimum of 2 EKS clusters. +- Participating clusters should be provisioned into a single AWS account, within a single AWS region. 
+ +### Dependencies + +#### Network + +> **The AWS Cloud Map MCS Controller for K8s provides service discovery and communication across multiple clusters, therefore implementations depend on end-end network connectivity between workloads provisioned within each participating cluster.** + +- In deployment scenarios where participating clusters are provisioned into separate VPCs, connectivity will depend on correctly configured [VPC Peering](https://docs.aws.amazon.com/vpc/latest/peering/create-vpc-peering-connection.html), [inter-VPC routing](https://docs.aws.amazon.com/vpc/latest/peering/vpc-peering-routing.html), and Security Group configuration. The [VPC Reachability Analyzer](https://docs.aws.amazon.com/vpc/latest/reachability/getting-started.html) can be used to test and validate end-end connectivity between worker nodes within each cluster. +- Undefined behavior may occur if controllers are deployed without the required network connectivity between clusters. + +#### Configure CoreDNS + +Install the CoreDNS multicluster plugin into each participating cluster. The multicluster plugin enables CoreDNS to lifecycle manage DNS records for `ServiceImport` objects. + +To install the plugin, run the following commands. + +```bash +kubectl apply -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/samples/coredns-clusterrole.yaml" +kubectl apply -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/samples/coredns-configmap.yaml" +kubectl apply -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/samples/coredns-deployment.yaml" +``` + +### Install Controller + +To install the latest release of the controller, run the following commands. 
+ +> **_NOTE:_** AWS region environment variable can be _optionally_ set like `export AWS_REGION=us-west-2`. Otherwise the controller will infer region in the order `AWS_REGION` environment variable, ~/.aws/config file, then EC2 metadata (for EKS environment) + +```sh +kubectl apply -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/config/controller_install_release" +``` + +> 📌 See [Releases](#Releases) section for details on how to install other versions. + +The controller must have sufficient IAM permissions to perform required Cloud Map operations. Grant IAM access rights `AWSCloudMapFullAccess` to the controller Service Account to enable the controller to manage Cloud Map resources. + +## Usage + +### Configure `cluster.clusterset.k8s.io` and `clusterset.k8s.io` + +`cluster.clusterset.k8s.io` is a unique identifier for the cluster. + +`clusterset.k8s.io` is an identifier that relates to the `ClusterSet` in which the cluster belongs. + +```yaml +apiVersion: about.k8s.io/v1alpha1 +kind: ClusterProperty +metadata: + name: cluster.clusterset.k8s.io +spec: + value: [Your Cluster identifier] +--- +apiVersion: about.k8s.io/v1alpha1 +kind: ClusterProperty +metadata: + name: clusterset.k8s.io +spec: + value: [Your ClusterSet identifier] +``` + +**Example:** +```yaml +apiVersion: about.k8s.io/v1alpha1 +kind: ClusterProperty +metadata: + name: cluster.clusterset.k8s.io +spec: + value: my-first-cluster +--- +apiVersion: about.k8s.io/v1alpha1 +kind: ClusterProperty +metadata: + name: clusterset.k8s.io +spec: + value: my-clusterset +``` + +### Export services + +Then assuming you already have a Service installed, apply a `ServiceExport` yaml to the cluster in which you want to export a service. This can be done for each service you want to export. 
+ +```yaml +kind: ServiceExport +apiVersion: multicluster.x-k8s.io/v1alpha1 +metadata: + namespace: [Your service namespace here] + name: [Your service name] +``` + +**Example:** This will export a service with name *my-amazing-service* in namespace *hello* +```yaml +kind: ServiceExport +apiVersion: multicluster.x-k8s.io/v1alpha1 +metadata: + namespace: hello + name: my-amazing-service +``` + +*See the `samples` directory for a set of example yaml files to set up a service and export it. To apply the sample files run the following commands.* + +```sh +kubectl create namespace example +kubectl apply -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/samples/example-deployment.yaml" +kubectl apply -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/samples/example-service.yaml" +kubectl apply -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/samples/example-serviceexport.yaml" +``` + +### Import services + +In your other cluster, the controller will automatically sync services registered in AWS Cloud Map by applying the appropriate `ServiceImport`. To list them all, run the following command. +```sh +kubectl get ServiceImport -A +``` ## Releases AWS Cloud Map MCS Controller for K8s adheres to the [SemVer](https://semver.org/) specification. Each release updates the major version tag (eg. `vX`), a major/minor version tag (eg. `vX.Y`) and a major/minor/patch version tag (eg. `vX.Y.Z`). To see a full list of all releases, refer to our [Github releases page](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/releases). +> **_NOTE:_** AWS region environment variable can be _optionally_ set like `export AWS_REGION=us-west-2`. Otherwise controller will infer region in the order `AWS_REGION` environment variable, ~/.aws/config file, then EC2 metadata (for EKS environment) + +The following command format is used to install from a particular release. 
+```sh +kubectl apply -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/config/controller_install_release[?ref=*git version tag*]" +``` + +Run the following command to install the latest release. +```sh +kubectl apply -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/config/controller_install_release" +``` + +The following example will install release v0.1.0. +```sh +kubectl apply -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/config/controller_install_release?ref=v0.1.0" +``` + We also maintain a `latest` tag, which is updated to stay in line with the `main` branch. We **do not** recommend installing this on any production cluster, as any new major versions updated on the `main` branch will introduce breaking changes. +To install from `latest` tag run the following command. +```sh +kubectl apply -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/config/controller_install_latest" +``` + +## Graduation Criteria + +### Alpha -> Beta Graduation +* Implement the [resiliency milestone](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/milestone/3). + +### Beta -> GA Graduation +* Scalability/performance testing. +* Implement the [observability and deployment milestone](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/milestone/6). +* [KEP-1645: Multi-Cluster Services API](https://github.com/kubernetes/enhancements/tree/master/keps/sig-multicluster/1645-multi-cluster-services-api) and [KEP-2149: ClusterId for ClusterSet identification](https://github.com/kubernetes/enhancements/tree/master/keps/sig-multicluster/2149-clusterid) are Beta or GA. + +## Slack community +We have an open Slack community where users may get support with integration, discuss controller functionality and provide input on our feature roadmap. https://awsappmesh.slack.com/#k8s-mcs-controller +Join the channel with this [invite](https://join.slack.com/t/awsappmesh/shared_invite/zt-dwgbt85c-Sj_md92__quV8YADKfsQSA). 
+ ## Contributing `aws-cloud-map-mcs-controller-for-k8s` is an open source project. See [CONTRIBUTING](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/blob/main/CONTRIBUTING.md) for details. diff --git a/config/controller_install_release/kustomization.yaml b/config/controller_install_release/kustomization.yaml new file mode 100644 index 00000000..e6dd7732 --- /dev/null +++ b/config/controller_install_release/kustomization.yaml @@ -0,0 +1,7 @@ +bases: +- ../default + +images: +- name: controller + newName: ghcr.io/aws/aws-cloud-map-mcs-controller-for-k8s + newTag: v0.3.1 diff --git a/config/crd/bases/about.k8s.io_clusterproperties.yaml b/config/crd/bases/about.k8s.io_clusterproperties.yaml new file mode 100644 index 00000000..45de7c04 --- /dev/null +++ b/config/crd/bases/about.k8s.io_clusterproperties.yaml @@ -0,0 +1,59 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: clusterproperties.about.k8s.io +spec: + group: about.k8s.io + names: + kind: ClusterProperty + listKind: ClusterPropertyList + plural: clusterproperties + singular: clusterproperty + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.value + name: value + type: string + - jsonPath: .metadata.creationTimestamp + name: age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterProperty is the Schema for the clusterproperties API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterPropertySpec defines the desired state of ClusterProperty + properties: + value: + description: ClusterProperty value + minLength: 1 + type: string + required: + - value + type: object + status: + description: ClusterPropertyStatus defines the observed state of ClusterProperty + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/multicluster.x-k8s.io_serviceexports.yaml b/config/crd/bases/multicluster.x-k8s.io_serviceexports.yaml index b0303f67..1e41af78 100644 --- a/config/crd/bases/multicluster.x-k8s.io_serviceexports.yaml +++ b/config/crd/bases/multicluster.x-k8s.io_serviceexports.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.4.1 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: serviceexports.multicluster.x-k8s.io spec: @@ -45,13 +44,12 @@ spec: description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: - \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type - \ // +patchStrategy=merge // +listType=map // +listMapKey=type - \ Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` - \n // other fields }" + type FooStatus struct{ // Represents the observations of a foo's + current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: description: lastTransitionTime is the last time the condition @@ -116,9 +114,3 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/multicluster.x-k8s.io_serviceimports.yaml b/config/crd/bases/multicluster.x-k8s.io_serviceimports.yaml index ec3fafb9..3086e8d5 100644 --- a/config/crd/bases/multicluster.x-k8s.io_serviceimports.yaml +++ b/config/crd/bases/multicluster.x-k8s.io_serviceimports.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.4.1 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: serviceimports.multicluster.x-k8s.io spec: @@ -42,7 +41,6 @@ spec: is ClusterSetIP. items: type: string - maxItems: 1 type: array ports: items: @@ -137,9 +135,3 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 8dac89bc..6dc8a039 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -2,6 +2,7 @@ # since it depends on service name and namespace that are out of this kustomize package. 
# It should be run by config/default resources: +- bases/about.k8s.io_clusterproperties.yaml - bases/multicluster.x-k8s.io_serviceexports.yaml - bases/multicluster.x-k8s.io_serviceimports.yaml #+kubebuilder:scaffold:crdkustomizeresource @@ -9,16 +10,21 @@ resources: patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. # patches here are for enabling the conversion webhook for each CRD +#- patches/webhook_in_clusterproperties.yaml #- patches/webhook_in_serviceexports.yaml #- patches/webhook_in_serviceimports.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. # patches here are for enabling the CA injection for each CRD +#- patches/cainjection_in_clusterproperties.yaml #- patches/cainjection_in_serviceexports.yaml #- patches/cainjection_in_serviceimports.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch +# Patch adds an annotation to pass protected groups approval required to use domain "k8s.io" +- patches/annotation_for_clusterproperties.yaml + # the following config is for teaching kustomize how to do kustomization for CRDs. 
configurations: - kustomizeconfig.yaml diff --git a/config/crd/patches/annotation_for_clusterproperties.yaml b/config/crd/patches/annotation_for_clusterproperties.yaml new file mode 100644 index 00000000..8dc306ce --- /dev/null +++ b/config/crd/patches/annotation_for_clusterproperties.yaml @@ -0,0 +1,7 @@ +# The following patch adds an annotation to pass protected groups approval required to use domain "k8s.io" +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: "https://github.com/kubernetes/enhancements/pull/3084" + name: clusterproperties.about.k8s.io diff --git a/config/crd/patches/cainjection_in_clusterproperties.yaml b/config/crd/patches/cainjection_in_clusterproperties.yaml new file mode 100644 index 00000000..31e5f39b --- /dev/null +++ b/config/crd/patches/cainjection_in_clusterproperties.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: clusterproperties.about.k8s.io diff --git a/config/crd/patches/webhook_in_clusterproperties.yaml b/config/crd/patches/webhook_in_clusterproperties.yaml new file mode 100644 index 00000000..c1880095 --- /dev/null +++ b/config/crd/patches/webhook_in_clusterproperties.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterproperties.about.k8s.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 757b1dd2..08158e36 100644 --- a/config/default/kustomization.yaml +++ 
b/config/default/kustomization.yaml @@ -1,12 +1,12 @@ # Adds namespace to all resources. -namespace: migration-system +namespace: cloud-map-mcs-system # Value of this field is prepended to the # names of all resources, e.g. a deployment named # "wordpress" becomes "alices-wordpress". # Note that it should also match with the prefix (text before '-') of the namespace # field above. -namePrefix: migration- +namePrefix: cloud-map-mcs- # Labels to add to all resources and selectors. #commonLabels: diff --git a/config/manager/aws.properties b/config/manager/aws.properties new file mode 100644 index 00000000..2c72a849 --- /dev/null +++ b/config/manager/aws.properties @@ -0,0 +1 @@ +AWS_REGION diff --git a/config/manager/controller_manager_config.yaml b/config/manager/controller_manager_config.yaml index 8153cb17..55c709b8 100644 --- a/config/manager/controller_manager_config.yaml +++ b/config/manager/controller_manager_config.yaml @@ -8,4 +8,4 @@ webhook: port: 9443 leaderElection: leaderElect: true - resourceName: db692913.x-k8s.io + resourceName: aws-cloud-map-mcs-controller-for-k8s-lock diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 5e793dd1..b01ba653 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -8,8 +8,10 @@ configMapGenerator: - files: - controller_manager_config.yaml name: manager-config -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization +- envs: + - aws.properties + name: aws-config + images: - name: controller newName: controller diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 79adfe72..a75d3168 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -52,5 +52,11 @@ spec: requests: cpu: 100m memory: 20Mi + env: + - name: AWS_REGION + valueFrom: + configMapKeyRef: + name: aws-config + key: AWS_REGION serviceAccountName: controller-manager terminationGracePeriodSeconds: 10 diff --git a/config/rbac/role.yaml 
b/config/rbac/role.yaml index 565b958e..5754c2e7 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -1,4 +1,3 @@ - --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -6,6 +5,48 @@ metadata: creationTimestamp: null name: manager-role rules: +- apiGroups: + - "" + resources: + - namespaces + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - about.k8s.io + resources: + - clusterproperties + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - create + - delete + - deletecollection + - get + - list + - update + - watch - apiGroups: - multicluster.x-k8s.io resources: @@ -28,7 +69,10 @@ rules: resources: - serviceimports verbs: + - create + - delete - get - list - patch - update + - watch diff --git a/config/samples/multicluster_v1alpha1_serviceimport.yaml b/config/samples/multicluster_v1alpha1_serviceimport.yaml deleted file mode 100644 index 57515e17..00000000 --- a/config/samples/multicluster_v1alpha1_serviceimport.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: multicluster.x-k8s.io/v1alpha1 -kind: ServiceImport -metadata: - name: serviceimport-sample -spec: - # Add fields here - foo: bar diff --git a/docs/architecture-overview.png b/docs/architecture-overview.png new file mode 100644 index 00000000..3e02f364 Binary files /dev/null and b/docs/architecture-overview.png differ diff --git a/docs/images/cloudmap.svg b/docs/images/cloudmap.svg new file mode 100644 index 00000000..f4af2aaf --- /dev/null +++ b/docs/images/cloudmap.svg @@ -0,0 +1,18 @@ + + + + Icon-Architecture/64/Arch_AWS-CloudMap_64 + Created with Sketch. 
+ + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/images/service-consumption.png b/docs/images/service-consumption.png new file mode 100644 index 00000000..8c9840ac Binary files /dev/null and b/docs/images/service-consumption.png differ diff --git a/docs/images/service-provisioning.png b/docs/images/service-provisioning.png new file mode 100644 index 00000000..c690bdee Binary files /dev/null and b/docs/images/service-provisioning.png differ diff --git a/docs/images/solution-baseline.png b/docs/images/solution-baseline.png new file mode 100644 index 00000000..e7030c81 Binary files /dev/null and b/docs/images/solution-baseline.png differ diff --git a/docs/images/solution-overview.png b/docs/images/solution-overview.png new file mode 100644 index 00000000..239f5cd7 Binary files /dev/null and b/docs/images/solution-overview.png differ diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000..fd24c032 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,588 @@ +# + +## Introduction + +Kubernetes, with it's implementation of the cluster construct has simplified the ability to schedule workloads across a collection of VMs or nodes. Declarative configuration, immutability, auto-scaling, and self healing have vastly simplified the paradigm of workload management within the cluster - which has enabled teams to move at increasing velocities. + +As the rate of Kubernetes adoption continues to increase, there has been a corresponding increase in the number of use cases that require workloads to break through the perimeter of the single cluster construct. Requirements concerning workload location/proximity, isolation, and reliability have been the primary catalyst for the emergence of deployment scenarios where a single logical workload will span multiple Kubernetes clusters: + +- **Location** based concerns include network latency requirements (e.g. bringing the application as close to users as possible), data gravity requirements (e.g. 
bringing elements of the application as close to fixed data sources as possible), and jurisdiction based requirements (e.g. data residency limitations imposed via governing bodies); +- **Isolation** based concerns include performance (e.g. reduction in "noisy-neighbor" influence in mixed workload clusters), environmental (e.g. by staged or sandboxed workload constructs such as "dev", "test", and "prod" environments), security (e.g. separating untrusted code or sensitive data), organisational (e.g. teams fall under different business units or management domains), and cost based (e.g. teams are subject to separate budgetary constraints); +- **Reliability** based concerns include blast radius and infrastructure diversity (e.g. preventing an application based or underlying infrastructure issue in one cluster or provider zone from impacting the entire solution), and scale based (e.g. the workload may outgrow a single cluster) + +![alt text](images/solution-overview.png "Solution Overview") + + +Multi-cluster application architectures tend to be designed to either be **replicated** in nature - with this pattern each participating cluster runs a full copy of each given application; or alternatively they implement more of a **group-by-service** pattern where the services of a single application or system are split or divided amongst multiple clusters. + +When it comes to the configuration of Kubernetes (and the surrounding infrastructure) to support a given multi-cluster application architecture - the space has evolved over time to include a number of approaches. Implementations tend to draw upon a combination of components at various levels of the stack, and generally speaking they also vary in terms of the "weight" or complexity of the implementation, number and scope of features offered, as well as the associated management overhead. 
In simple terms these approaches can be loosely grouped into two main categories: + +* **Network-centric** approaches focus on network interconnection tooling to implement connectivity between clusters in order to facilitate cross-cluster application communication. The various network-centric approaches include those that are tightly coupled with the CNI (e.g. Cilium Mesh), as well as more CNI agnostic implementations such as Submariner and Skupper. Service mesh implementations also fall into the network-centric category, and these include Istio’s multi-cluster support, Linkerd service mirroring, Kuma from Kong, AWS App Mesh, and Consul’s mesh gateway. There are also various multi-cluster ingress approaches, as well as virtual-kubelet based approaches including Admiralty, Tensile-kube, and Liqo. +* **Kubernetes-centric** approaches focus on supporting and extending the core Kubernetes primitives in order to support multi-cluster use cases. These approaches fall under the stewardship of the Kubernetes [Multicluster Special Interest Group](https://github.com/kubernetes/community/tree/master/sig-multicluster) whose charter is focused on designing, implementing, and maintaining API’s, tools, and documentation related to multi-cluster administration and application management. Subprojects include: + * **[kubefed](https://github.com/kubernetes-sigs/kubefed)** (Kubernetes Cluster Federation) which implements a mechanism to coordinate the configuration of multiple Kubernetes clusters from a single set of APIs in a hosting cluster. kubefed is considered to be foundational for more complex multi-cluster use cases such as deploying multi-geo applications, and disaster recovery. + * **[work-api](https://github.com/kubernetes-sigs/work-api)** (Multi-Cluster Works API) aims to group a set of Kubernetes API resources to be applied to one or multiple clusters together as a concept of “work” or “workload” for the purpose of multi-cluster workload lifecycle management. 
+ * **[mcs-api](https://github.com/kubernetes-sigs/mcs-api)** (Multi-cluster Services APIs) implements an API specification to extend the single-cluster bounded Kubernetes service concept to function across multiple clusters. + +### About the Multi-cluster Services API + +Kubernetes' familiar [Service](https://cloud.google.com/kubernetes-engine/docs/concepts/service) object lets you discover and access services within the boundary of a single Kubernetes cluster. The mcs-api implements a Kubernetes-native extension to the Service API, extending the scope of the service resource concept beyond the cluster boundary - providing a mechanism to weave multiple clusters together using standard (and familiar) DNS based service discovery. + +> *[KEP-1645: Multi-Cluster Services API](https://github.com/kubernetes/enhancements/tree/master/keps/sig-multicluster/1645-multi-cluster-services-api#kep-1645-multi-cluster-services-api) provides the formal description of the Multi Cluster Service API. KEP-1645 doesn't define a complete implementation - it serves to define how an implementation should behave. At the time of writing the mcs-api version is: `multicluster.k8s.io/v1alpha1`* + +The primary deployment scenarios covered by the mcs-api include: + +- **Different services each deployed to separate clusters:** I have 2 clusters, each running different services managed by different teams, where services from one team depend on services from the other team. I want to ensure that a service from one team can discover a service from the other team (via DNS resolving to VIP), regardless of the cluster that they reside in. In addition, I want to make sure that if the dependent service is migrated to another cluster, the dependee is not impacted. +- **Single service deployed to multiple clusters:** I have deployed my stateless service to multiple clusters for redundancy or scale. 
Now I want to propagate topologically-aware service endpoints (local, regional, global) to all clusters, so that other services in my clusters can access instances of this service in priority order based on availability and locality. + +The mcs-api is able to support these use cases through the described properties of a `ClusterSet`, which is a group of clusters with a high degree of mutual trust and shared ownership that share services amongst themselves - along with two additional API objects: the `ServiceExport` and the `ServiceImport`. + +Services are not visible to other clusters in the `ClusterSet` by default, they must be explicitly marked for export by the user. Creating a `ServiceExport` object for a given service specifies that the service should be exposed across all clusters in the `ClusterSet`. The mcs-api implementation (typically a controller) will automatically generate a corresponding `ServiceImport` object (which serves as the in-cluster representation of a multi-cluster service) in each importing cluster - for consumer workloads to be able to locate and consume the exported service. + +DNS-based service discovery for `ServiceImport` objects is facilitated by the [Kubernetes DNS-Based Multicluster Service Discovery Specification](https://github.com/kubernetes/enhancements/pull/2577) which extends the standard Kubernetes DNS paradigms by implementing records named by service and namespace for `ServiceImport` objects, but as differentiated from regular in-cluster DNS service names by using the special zone `.clusterset.local`. I.e. When a `ServiceExport` is created, this will cause a FQDN for the multi-cluster service to become available from within the `ClusterSet`. The domain name will be of the format `<service>.<namespace>.svc.clusterset.local`. 
+ +#### AWS Cloud Map MCS Controller for Kubernetes + +The [AWS Cloud Map MCS Controller for Kubernetes](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s) (MCS-Controller) is an open source project that implements the multi-cluster services API specification. + +The MCS-Controller is a controller that syncs services across clusters and makes them available for multi-cluster service discovery and connectivity. The implementation model is decentralized, and utilises AWS Cloud Map as a registry for management and distribution of multi-cluster service data. + +> *At the time of writing, the MCS-Controller release version is [v0.3.0](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s/releases/tag/v0.3.0) which introduces new features including the [ClusterProperty CRD](https://github.com/kubernetes/enhancements/tree/master/keps/sig-multicluster/2149-clusterid#-crd), and support for headless services. Milestones are currently in place to bring the project up to v1.0 (GA), which will include full compliance with the mcs-api specification, support for multiple AWS accounts, and Cloud Map client-side traffic shaping.* + +#### AWS Cloud Map + +[AWS Cloud Map](https://aws.amazon.com/cloud-map) is a cloud resource discovery service that allows applications to discover web-based services via the AWS SDK, API calls, or DNS queries. Cloud Map is a fully managed service which eliminates the need to set up, update, and manage your own service discovery tools and software. + +## Tutorial + +### Overview + +Let's consider a deployment scenario where we provision a Service into a single EKS cluster, then make the service available from within a second EKS cluster using the AWS Cloud Map MCS Controller. 
+ +> *This tutorial will take you through the end-end implementation of the solution as outlined herein, including a functional implementation of the AWS Cloud Map MCS Controller across x2 EKS clusters situated in separate VPCs.* + +#### Solution Baseline + +![alt text](images/solution-baseline.png "Solution Baseline") + + + +In reference to the **Solution Baseline** diagram: + +- We have x2 EKS clusters (Cluster 1 & Cluster 2), each deployed into separate VPCs within a single AWS region. + - Cluster 1 VPC CIDR: 10.10.0.0/16, Kubernetes service IPv4 CIDR: 172.20.0.0/16 + - Cluster 2 VPC CIDR: 10.12.0.0/16, Kubernetes service IPv4 CIDR: 172.20.0.0/16 +- VPC peering is configured to permit network connectivity between workloads within each cluster. +- The CoreDNS multicluster plugin is deployed to each cluster. +- The AWS Cloud Map MCS Controller for Kubernetes is deployed to each cluster. +- Clusters 1 & 2 are each configured as members of the same mcs-api `ClusterSet`. + - Cluster 1 mcs-api `ClusterSet`: clusterset1, `Cluster` Id: cls1. + - Cluster 2 mcs-api `ClusterSet`: clusterset1, `Cluster` Id: cls2. +- Clusters 1 & 2 are both provisioned with the namespace `demo`. +- Cluster 1 has a `ClusterIP` Service `nginx-hello` deployed to the `demo` namespace which frontends a x3 replica Nginx deployment `nginx-demo`. + - Service | nginx-hello: 172.20.150.33:80 + - Endpoints | nginx-hello: 10.10.66.181:80,10.10.78.125:80,10.10.86.76:80 + +#### Service Provisioning + +With the required dependencies in place, the admin user is able to create a `ServiceExport` object in Cluster 1 for the `nginx-hello` Service, such that the MCS-Controller implementation will automatically provision a corresponding `ServiceImport` in Cluster 2 for consumer workloads to be able to locate and consume the exported service. + + +![alt text](images/service-provisioning.png "Service Provisioning") + +In reference to the **Service Provisioning** diagram: + +1. 
The administrator submits the request to the Cluster 1 Kube API server for a `ServiceExport` object to be created for ClusterIP Service `nginx-hello` in the `demo` Namespace. +2. The MCS-Controller in Cluster 1, watching for `ServiceExport` object creation provisions a corresponding `nginx-hello` service in the Cloud Map `demo` namespace. The Cloud Map service is provisioned with sufficient detail for the Service object and corresponding Endpoint Slice to be provisioned within additional clusters in the `ClusterSet`. +3. The MCS-Controller in Cluster 2 responds to the creation of the `nginx-hello` Cloud Map Service by provisioning the `ServiceImport` object and corresponding `EndpointSlice` objects via the Kube API Server. +4. The CoreDNS multicluster plugin, watching for `ServiceImport` and `EndpointSlice` creation provisions corresponding DNS records within the `.clusterset.local` zone. + +#### Service Consumption + +![alt text](images/service-consumption.png "Service Consumption") + +In reference to the **Service Consumption** diagram: + +1. The `client-hello` pod in Cluster 2 needs to consume the `nginx-hello` service, for which all Endpoints are deployed in Cluster 1. The `client-hello` pod requests the resource http://nginx-hello.demo.svc.clusterset.local:80. DNS based service discovery [1b] responds with the IP address of the local `nginx-hello` `ServiceExport` Service `ClusterSetIP`. +2. Requests to the local `ClusterSetIP` at `nginx-hello.demo.svc.clusterset.local` are proxied to the Endpoints located on Cluster 1. + +> *Note: In accordance with the mcs-api specification, a multi-cluster service will be imported by all clusters in which the service's namespace exists, meaning that each exporting cluster will also import the corresponding multi-cluster service. As such, the `nginx-hello` service will also be accessible via `ServiceExport` Service `ClusterSetIP` on Cluster 1. 
Identical to Cluster 2, the `ServiceExport` Service is resolvable by name at `nginx-hello.demo.svc.clusterset.local`.* + +### Implementation + +### Solution Baseline + +To prepare your environment to match the Solution Baseline deployment scenario, the following prerequisites should be addressed. + +#### Clone the `aws-cloud-map-mcs-controller-for-k8s` git repository + +Sample configuration files will be used through the course of the tutorial, which have been made available in the `aws-cloud-map-mcs-controller-for-k8s` repository. + +Clone the repository to the host from which you will be bootstrapping the clusters: + +```bash +git clone https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s.git +``` + +> *Note: All commands as provided should be run from the root directory of the cloned git repository.* + +> *Note: Certain values located within the provided configuration files have been configured for substitution with OS environment variables. Work instructions below will identify which environment variables should be set before issuing any commands which will depend on variable substitution.* + +#### Create EKS Clusters + +x2 EKS clusters should be provisioned, each deployed into separate VPCs within a single AWS region. + +- VPCs and clusters should be provisioned with non-overlapping CIDRs. +- For compatibility with the remainder of the tutorial, it is recommended that `eksctl` be used to provision the clusters and associated security configuration. *By default, the `eksctl create cluster` command will create a dedicated VPC.* + +Sample `eksctl` config file `samples/eksctl-cluster.yaml` has been provided: + +- Environment variables AWS_REGION, CLUSTER_NAME, NODEGROUP_NAME, and VPC_CIDR should be configured. Example values have been provided in the below command reference - substitute values to suit your preference. +- Example VPC CIDRs match the values provided in the Baseline Configuration description. 
+ +Run the following commands to create clusters using `eksctl`. + +Cluster 1: + +```bash +export AWS_REGION=ap-southeast-2 +export CLUSTER_NAME=cls1 +export NODEGROUP_NAME=cls1-nodegroup1 +export VPC_CIDR=10.10.0.0/16 +envsubst < samples/eksctl-cluster.yaml | eksctl create cluster -f - +``` + +Cluster 2: + +```bash +export AWS_REGION=ap-southeast-2 +export CLUSTER_NAME=cls2 +export NODEGROUP_NAME=cls2-nodegroup1 +export VPC_CIDR=10.12.0.0/16 +envsubst < samples/eksctl-cluster.yaml | eksctl create cluster -f - +``` + +#### Create VPC Peering Connection + +VPC peering is required to permit network connectivity between workloads provisioned within each cluster. + +- To create the VPC Peering connection, follow the instruction [Create a VPC peering connection with another VPC in your account](https://docs.aws.amazon.com/vpc/latest/peering/create-vpc-peering-connection.html) for guidance. +- VPC route tables in each VPC require updating, follow the instruction [Update your route tables for a VPC peering connection](https://docs.aws.amazon.com/vpc/latest/peering/vpc-peering-routing.html) for guidance. For simplicity, it's recommended to configure route destinations as the IPv4 CIDR block of the peer VPC. + +- Security Groups require updating to permit cross-cluster network communication. EKS cluster security groups in each cluster should be updated to permit inbound traffic originating from external clusters. For simplicity, it's recommended the Cluster 1 & Cluster 2 [EKS Cluster Security groups](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) be updated to allow inbound traffic from the IPv4 CIDR block of the peer VPC. 
+
+> *The [VPC Reachability Analyzer](https://docs.aws.amazon.com/vpc/latest/reachability/getting-started.html) can be used to test and diagnose end-to-end connectivity between worker nodes within each cluster.*
+
+#### Enable EKS OIDC Provider
+
+In order to map required Cloud Map AWS IAM permissions to the MCS-Controller Kubernetes service account, we need to enable the OpenID Connect (OIDC) identity provider in our EKS clusters using `eksctl`.
+
+- Environment variables AWS_REGION and CLUSTER_NAME should be configured.
+
+Run the following commands to enable OIDC providers using `eksctl`.
+
+Cluster 1:
+
+```bash
+export AWS_REGION=ap-southeast-2
+export CLUSTER_NAME=cls1
+eksctl utils associate-iam-oidc-provider \
+ --region $AWS_REGION \
+ --cluster $CLUSTER_NAME \
+ --approve
+```
+
+Cluster 2:
+
+```bash
+export AWS_REGION=ap-southeast-2
+export CLUSTER_NAME=cls2
+eksctl utils associate-iam-oidc-provider \
+ --region $AWS_REGION \
+ --cluster $CLUSTER_NAME \
+ --approve
+```
+
+#### Implement CoreDNS multicluster plugin
+
+The CoreDNS multicluster plugin implements the [Kubernetes DNS-Based Multicluster Service Discovery Specification](https://github.com/kubernetes/enhancements/pull/2577) which enables CoreDNS to lifecycle manage DNS records for `ServiceImport` objects. To enable the CoreDNS multicluster plugin within both EKS clusters, perform the following procedure. 
+ +##### Update CoreDNS RBAC + +Run the following command against both clusters to update the `system:coredns` clusterrole to include access to additional multi-cluster API resources: + +```bash +kubectl apply -f samples/coredns-clusterrole.yaml +``` + +##### Update the CoreDNS configmap + +Run the following command against both clusters to update the default CoreDNS configmap to include the multicluster plugin directive, and `clusterset.local` zone: + +```bash +kubectl apply -f samples/coredns-configmap.yaml +``` + +##### Update the CoreDNS deployment + +Run the following command against both clusters to update the default CoreDNS deployment to use the container image `ghcr.io/aws/aws-cloud-map-mcs-controller-for-k8s/coredns-multicluster/coredns:v1.8.6` - which includes the multicluster plugin: + +```bash +kubectl apply -f samples/coredns-deployment.yaml +``` + +#### Install the aws-cloud-map-mcs-controller-for-k8s + +##### Configure MCS-Controller RBAC + +Before the Cloud Map MCS-Controller is installed, we will first pre-provision the controller Service Account, granting IAM access rights `AWSCloudMapFullAccess` to ensure that the MCS Controller can lifecycle manage Cloud Map resources. + +- Environment variable CLUSTER_NAME should be configured. + +Run the following commands to create the MCS-Controller namespace and service accounts in each cluster. 
+ +> *Note: Be sure to change the `kubectl` context to the correct cluster before issuing commands.* + +Cluster 1: + +```bash +export CLUSTER_NAME=cls1 +kubectl create namespace cloud-map-mcs-system +eksctl create iamserviceaccount \ +--cluster $CLUSTER_NAME \ +--namespace cloud-map-mcs-system \ +--name cloud-map-mcs-controller-manager \ +--attach-policy-arn arn:aws:iam::aws:policy/AWSCloudMapFullAccess \ +--override-existing-serviceaccounts \ +--approve +``` + +Cluster 2: + +```bash +export CLUSTER_NAME=cls2 +kubectl create namespace cloud-map-mcs-system +eksctl create iamserviceaccount \ +--cluster $CLUSTER_NAME \ +--namespace cloud-map-mcs-system \ +--name cloud-map-mcs-controller-manager \ +--attach-policy-arn arn:aws:iam::aws:policy/AWSCloudMapFullAccess \ +--override-existing-serviceaccounts \ +--approve +``` + +##### Install the MCS-Controller +Now to install the MCS-Controller. + +- Environment variable AWS_REGION should be configured. + +Run the following command against both clusters to install the MCS-Controller latest release: + +```bash +export AWS_REGION=ap-southeast-2 +kubectl apply -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/config/controller_install_release" +``` + +##### Assign mcs-api `ClusterSet` membership and `Cluster` identifier +To ensure that `ServiceExport` and `ServiceImport` objects propagate correctly between clusters, each cluster should be configured as a member of a single mcs-api `ClusterSet` (clusterset1 in our example deployment scenario), and should be assigned a unique mcs-api `Cluster` Id within the `ClusterSet` (cls1 & cls2 in our example deployment scenario). + +- Environment variable CLUSTER_ID should be configured. +- Environment variable CLUSTERSET_ID should be configured. + +Run the following commands to configure Cluster Id and ClusterSet membership. 
+
+Cluster 1:
+
+```bash
+export CLUSTER_ID=cls1
+export CLUSTERSET_ID=clusterset1
+envsubst < samples/mcsapi-clusterproperty.yaml | kubectl apply -f -
+```
+
+Cluster 2:
+
+```bash
+export CLUSTER_ID=cls2
+export CLUSTERSET_ID=clusterset1
+envsubst < samples/mcsapi-clusterproperty.yaml | kubectl apply -f -
+```
+
+#### Create `nginx-hello` Service
+
+Now that the clusters, CoreDNS and the MCS-Controller have been configured, we can create the `demo` namespace in both clusters and implement the `nginx-hello` Service and associated Deployment into Cluster 1.
+
+Run the following commands to prepare the demo environment on both clusters.
+
+> *Note: be sure to change the `kubectl` context to the correct cluster before issuing commands.*
+
+Cluster 1:
+
+```bash
+kubectl create namespace demo
+kubectl apply -f samples/nginx-deployment.yaml
+kubectl apply -f samples/nginx-service.yaml
+```
+
+Cluster 2:
+
+```bash
+kubectl create namespace demo
+```
+
+### Service Provisioning
+
+With the Solution Baseline in place, let's continue by implementing the Service Provisioning scenario. We'll create a `ServiceExport` object in Cluster 1 for the `nginx-hello` Service. This will trigger the Cluster 1 MCS-Controller to complete service provisioning and propagation into Cloud Map, and subsequent import and provisioning by the MCS-Controller in Cluster 2.
+
+#### Create `nginx-hello` ServiceExport
+
+Run the following command against Cluster 1 to create the `ServiceExport` object for the `nginx-hello` Service:
+
+```bash
+kubectl apply -f samples/nginx-serviceexport.yaml
+```
+
+#### Verify `nginx-hello` ServiceExport
+
+Let's verify the `ServiceExport` creation has succeeded, and that corresponding objects have been created in Cluster 1, Cloud Map, and Cluster 2. 
+ +##### Cluster 1 + +Inspecting the MCS-Controller logs in Cluster 1, we see that the controller has detected the `ServiceExport` object, and created the corresponding `demo` Namespace and `nginx-hello` Service in Cloud Map: + +```bash +$ kubectl logs cloud-map-mcs-controller-manager-5b9f959fc9-hmz88 -c manager --namespace cloud-map-mcs-system +{"level":"info","ts":1665108812.7046816,"logger":"cloudmap","msg":"namespace created","nsId":"ns-nlnawwa2wa3ajoh3"} +{"level":"info","ts":1665108812.7626762,"logger":"cloudmap","msg":"service created","namespace":"demo","name":"nginx-hello","id":"srv-xqirlhajwua5vkvo"} +{"level":"info","ts":1665108812.7627065,"logger":"cloudmap","msg":"fetching a service","namespace":"demo","name":"nginx-hello"} +{"level":"info","ts":1665108812.8299918,"logger":"cloudmap","msg":"registering endpoints","namespaceName":"demo","serviceName":"nginx-hello","endpoints":[{"Id":"tcp-10_10_86_76-80","IP":"10.10.86.76","EndpointPort":{"Name":"","Port":80,"TargetPort":"","Protocol":"TCP"},"ServicePort":{"Name":"","Port":80,"TargetPort":"80","Protocol":"TCP"},"ClusterId":"cls1","ClusterSetId":"clusterset1","ServiceType":"ClusterSetIP","ServiceExportCreationTimestamp":1665108776000,"Ready":true,"Hostname":"","Nodename":"ip-10-10-77-143.ap-southeast-2.compute.internal","Attributes":{"K8S_CONTROLLER":"aws-cloud-map-mcs-controller-for-k8s d07e680 (d07e680)"}},{"Id":"tcp-10_10_66_181-80","IP":"10.10.66.181","EndpointPort":{"Name":"","Port":80,"TargetPort":"","Protocol":"TCP"},"ServicePort":{"Name":"","Port":80,"TargetPort":"80","Protocol":"TCP"},"ClusterId":"cls1","ClusterSetId":"clusterset1","ServiceType":"ClusterSetIP","ServiceExportCreationTimestamp":1665108776000,"Ready":true,"Hostname":"","Nodename":"ip-10-10-77-143.ap-southeast-2.compute.internal","Attributes":{"K8S_CONTROLLER":"aws-cloud-map-mcs-controller-for-k8s d07e680 
(d07e680)"}},{"Id":"tcp-10_10_78_125-80","IP":"10.10.78.125","EndpointPort":{"Name":"","Port":80,"TargetPort":"","Protocol":"TCP"},"ServicePort":{"Name":"","Port":80,"TargetPort":"80","Protocol":"TCP"},"ClusterId":"cls1","ClusterSetId":"clusterset1","ServiceType":"ClusterSetIP","ServiceExportCreationTimestamp":1665108776000,"Ready":true,"Hostname":"","Nodename":"ip-10-10-77-143.ap-southeast-2.compute.internal","Attributes":{"K8S_CONTROLLER":"aws-cloud-map-mcs-controller-for-k8s d07e680 (d07e680)"}}]} +``` + +Using the AWS CLI we can verify Namespace and Service resources provisioned to Cloud Map by the Cluster 1 MCS-Controller: + +```bash +$ aws servicediscovery list-namespaces +{ + "Namespaces": [ + { + "Id": "ns-nlnawwa2wa3ajoh3", + "Arn": "arn:aws:servicediscovery:ap-southeast-2:911483634971:namespace/ns-nlnawwa2wa3ajoh3", + "Name": "demo", + "Type": "HTTP", + "Properties": { + "DnsProperties": { + "SOA": {} + }, + "HttpProperties": { + "HttpName": "demo" + } + }, + "CreateDate": "2022-10-07T02:13:32.310000+00:00" + } + ] +} +$ aws servicediscovery list-services +{ + "Services": [ + { + "Id": "srv-xqirlhajwua5vkvo", + "Arn": "arn:aws:servicediscovery:ap-southeast-2:911483634971:service/srv-xqirlhajwua5vkvo", + "Name": "nginx-hello", + "Type": "HTTP", + "DnsConfig": {}, + "CreateDate": "2022-10-07T02:13:32.744000+00:00" + } + ] +} +$ aws servicediscovery discover-instances --namespace-name demo --service-name nginx-hello +{ + "Instances": [ + { + "InstanceId": "tcp-10_10_78_125-80", + "NamespaceName": "demo", + "ServiceName": "nginx-hello", + "HealthStatus": "UNKNOWN", + "Attributes": { + "AWS_INSTANCE_IPV4": "10.10.78.125", + "AWS_INSTANCE_PORT": "80", + "CLUSTERSET_ID": "clusterset1", + "CLUSTER_ID": "cls1", + "ENDPOINT_PORT_NAME": "", + "ENDPOINT_PROTOCOL": "TCP", + "HOSTNAME": "", + "K8S_CONTROLLER": "aws-cloud-map-mcs-controller-for-k8s d07e680 (d07e680)", + "NODENAME": "ip-10-10-77-143.ap-southeast-2.compute.internal", + "READY": "true", + 
"SERVICE_EXPORT_CREATION_TIMESTAMP": "1665108776000", + "SERVICE_PORT": "80", + "SERVICE_PORT_NAME": "", + "SERVICE_PROTOCOL": "TCP", + "SERVICE_TARGET_PORT": "80", + "SERVICE_TYPE": "ClusterSetIP" + } + }, + { + "InstanceId": "tcp-10_10_66_181-80", + "NamespaceName": "demo", + "ServiceName": "nginx-hello", + "HealthStatus": "UNKNOWN", + "Attributes": { + "AWS_INSTANCE_IPV4": "10.10.66.181", + "AWS_INSTANCE_PORT": "80", + "CLUSTERSET_ID": "clusterset1", + "CLUSTER_ID": "cls1", + "ENDPOINT_PORT_NAME": "", + "ENDPOINT_PROTOCOL": "TCP", + "HOSTNAME": "", + "K8S_CONTROLLER": "aws-cloud-map-mcs-controller-for-k8s d07e680 (d07e680)", + "NODENAME": "ip-10-10-77-143.ap-southeast-2.compute.internal", + "READY": "true", + "SERVICE_EXPORT_CREATION_TIMESTAMP": "1665108776000", + "SERVICE_PORT": "80", + "SERVICE_PORT_NAME": "", + "SERVICE_PROTOCOL": "TCP", + "SERVICE_TARGET_PORT": "80", + "SERVICE_TYPE": "ClusterSetIP" + } + }, + { + "InstanceId": "tcp-10_10_86_76-80", + "NamespaceName": "demo", + "ServiceName": "nginx-hello", + "HealthStatus": "UNKNOWN", + "Attributes": { + "AWS_INSTANCE_IPV4": "10.10.86.76", + "AWS_INSTANCE_PORT": "80", + "CLUSTERSET_ID": "clusterset1", + "CLUSTER_ID": "cls1", + "ENDPOINT_PORT_NAME": "", + "ENDPOINT_PROTOCOL": "TCP", + "HOSTNAME": "", + "K8S_CONTROLLER": "aws-cloud-map-mcs-controller-for-k8s d07e680 (d07e680)", + "NODENAME": "ip-10-10-77-143.ap-southeast-2.compute.internal", + "READY": "true", + "SERVICE_EXPORT_CREATION_TIMESTAMP": "1665108776000", + "SERVICE_PORT": "80", + "SERVICE_PORT_NAME": "", + "SERVICE_PROTOCOL": "TCP", + "SERVICE_TARGET_PORT": "80", + "SERVICE_TYPE": "ClusterSetIP" + } + } + ] +} +``` + +##### Cluster 2 + +Inspecting the MCS-Controller logs in Cluster 2, we see that the controller has detected the `nginx-hello` Cloud Map Service, and created the corresponding Kubernetes `ServiceImport`: + +```bash +$ kubectl logs cloud-map-mcs-controller-manager-5b9f959fc9-v72s4 -c manager --namespace cloud-map-mcs-system 
+{"level":"info","ts":1665108822.2781157,"logger":"controllers.Cloudmap","msg":"created ServiceImport","namespace":"demo","name":"nginx-hello"}
+{"level":"info","ts":1665108824.2420218,"logger":"controllers.Cloudmap","msg":"created derived Service","namespace":"demo","name":"imported-9cfu7k5mkr"}
+{"level":"info","ts":1665108824.2501283,"logger":"controllers.Cloudmap","msg":"ServiceImport IPs need update","ServiceImport IPs":[],"cluster IPs":["172.20.80.119"]}
+{"level":"info","ts":1665108824.2618752,"logger":"controllers.Cloudmap","msg":"updated ServiceImport","namespace":"demo","name":"nginx-hello","IP":["172.20.80.119"],"ports":[{"protocol":"TCP","port":80}]}
+```
+
+Inspecting the Cluster 2 Kubernetes `ServiceImport` object:
+
+```bash
+$ kubectl get serviceimports.multicluster.x-k8s.io nginx-hello -n demo -o yaml
+apiVersion: multicluster.x-k8s.io/v1alpha1
+kind: ServiceImport
+metadata:
+  annotations:
+    multicluster.k8s.aws/derived-service: '[{"cluster":"cls1","derived-service":"imported-9cfu7k5mkr"}]'
+  creationTimestamp: "2022-10-07T02:13:42Z"
+  generation: 2
+  name: nginx-hello
+  namespace: demo
+  resourceVersion: "12787"
+  uid: a53901af-57a8-49c7-aeb1-f67c4a44c2d2
+spec:
+  ips:
+  - 172.20.80.119
+  ports:
+  - port: 80
+    protocol: TCP
+  type: ClusterSetIP
+status:
+  clusters:
+  - cluster: cls1
+```
+
+And the corresponding Cluster 2 Kubernetes Endpoint Slice:
+
+```bash
+$ kubectl get endpointslices.discovery.k8s.io -n demo
+NAME                        ADDRESSTYPE   PORTS   ENDPOINTS                               AGE
+imported-9cfu7k5mkr-dc7q9   IPv4          80      10.10.78.125,10.10.86.76,10.10.66.181   14m
+```
+
+Important points to note:
+
+- the `ServiceImport` Service is assigned an IP address from the local Kubernetes service IPv4 CIDR: 172.20.0.0/16 (172.20.80.119) so as to permit service discovery and access to the remote service endpoints from within the local cluster.
+- the `EndpointSlice` IP addresses match those of the `nginx-demo` Endpoints in Cluster 1 (i.e. from the Cluster 1 VPC CIDR: 10.10.0.0/16). 
+
+### Service Consumption
+
+With the Solution Baseline and Service Provisioning in place, workloads in Cluster 2 are now able to consume the nginx-hello Service Endpoints located in Cluster 1 via the locally provisioned ServiceImport object. To complete the Service Consumption scenario we'll deploy the client-hello Pod into Cluster 2, and observe how it's able to perform cross-cluster service discovery, and access each of the nginx-hello Service Endpoints in Cluster 1.
+
+#### Create `client-hello` Pod
+
+Run the following command against Cluster 2 to create the `client-hello` Pod:
+
+```bash
+kubectl apply -f samples/client-hello.yaml
+```
+
+#### Verify multi-cluster service consumption
+
+Let's exec into the `client-hello` Pod and perform an `nslookup` to cluster-local CoreDNS for the `ServiceImport` Service `nginx-hello.demo.svc.clusterset.local`:
+
+```bash
+$ kubectl exec -it client-hello -n demo /bin/sh
+/ # nslookup nginx-hello.demo.svc.clusterset.local
+Server:    172.20.0.10
+Address:   172.20.0.10:53
+
+Name:      nginx-hello.demo.svc.clusterset.local
+Address:   172.20.80.119
+```
+
+Note that the Pod resolves the address of the `ServiceImport` object on Cluster 2. 
+
+Finally, we generate HTTP requests from the `client-hello` Pod to the local `nginx-hello` `ServiceImport` Service:
+
+```bash
+/ # apk --no-cache add curl
+/ # curl nginx-hello.demo.svc.clusterset.local
+Server address: 10.10.86.76:80
+Server name: nginx-demo-59c6cb8d7b-m4ktw
+Date: 07/Oct/2022:02:31:45 +0000
+URI: /
+Request ID: 17d43e6e8801a98d05059dfaf88d0abe
+/ #
+/ # curl nginx-hello.demo.svc.clusterset.local
+Server address: 10.10.78.125:80
+Server name: nginx-demo-59c6cb8d7b-8w6rp
+Date: 07/Oct/2022:02:32:26 +0000
+URI: /
+Request ID: 0ddc09ffe7fd45c52903ce34c955f555
+/ #
+/ # curl nginx-hello.demo.svc.clusterset.local
+Server address: 10.10.66.181:80
+Server name: nginx-demo-59c6cb8d7b-mtm8l
+Date: 07/Oct/2022:02:32:53 +0000
+URI: /
+Request ID: 2fde1c34008a5ec18b8ae23797489c3a
+```
+
+Note that the responding Server Names and Server addresses are those of the `nginx-demo` Pods on Cluster 1 - confirming that the requests to the local `ClusterSetIP` at `nginx-hello.demo.svc.clusterset.local` originating on Cluster 2 are proxied cross-cluster to the Endpoints located on Cluster 1!
+
+## Conclusion
+
+The proliferation of container adoption is presenting new challenges in supporting workloads that have broken through the perimeter of the single cluster construct.
+
+For teams that are looking to implement a Kubernetes-centric approach to managing multi-cluster workloads, the mcs-api describes an effective approach to extending the scope of the service resource concept beyond the cluster boundary - providing a mechanism to weave multiple clusters together using standard (and familiar) DNS based service discovery. 
+ +The [AWS Cloud Map MCS Controller for Kubernetes](https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s) is an open source project that integrates with AWS Cloud Map to offer a decentralised implementation of the multi-cluster services API specification that's particularly suited for teams looking for a lightweight and effective Kubenetes-centric mechanism to deploy multi-cluster workloads to the AWS cloud. diff --git a/go.mod b/go.mod index 91c55fce..f9400cf5 100644 --- a/go.mod +++ b/go.mod @@ -1,18 +1,96 @@ module github.com/aws/aws-cloud-map-mcs-controller-for-k8s -go 1.15 +go 1.19 require ( - github.com/aws/aws-sdk-go-v2 v1.8.1 - github.com/aws/aws-sdk-go-v2/config v1.6.1 - github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.7.3 - github.com/go-logr/logr v0.3.0 + github.com/aws/aws-sdk-go-v2 v1.22.0 + github.com/aws/aws-sdk-go-v2/config v1.20.0 + github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.21.0 + github.com/go-logr/logr v1.2.4 github.com/golang/mock v1.6.0 - github.com/onsi/ginkgo v1.14.1 - github.com/onsi/gomega v1.10.2 - gotest.tools v2.2.0+incompatible - k8s.io/api v0.19.2 - k8s.io/apimachinery v0.19.2 - k8s.io/client-go v0.19.2 - sigs.k8s.io/controller-runtime v0.7.2 + github.com/google/go-cmp v0.5.9 + github.com/pkg/errors v0.9.1 + github.com/stretchr/testify v1.8.4 + golang.org/x/time v0.3.0 + k8s.io/api v0.24.3 + k8s.io/apimachinery v0.24.3 + k8s.io/client-go v0.24.2 + sigs.k8s.io/controller-runtime v0.12.3 +) + +require ( + cloud.google.com/go v0.81.0 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest v0.11.18 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/logger v0.2.1 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/PuerkitoBio/purell v1.1.1 // indirect + github.com/PuerkitoBio/urlesc 
v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.14.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.4.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.16.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.18.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.24.0 // indirect + github.com/aws/smithy-go v1.16.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful v2.16.0+incompatible // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect + github.com/fsnotify/fsnotify v1.5.1 // indirect + github.com/go-logr/zapr v1.2.0 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.19.5 // indirect + github.com/go-openapi/swag v0.19.14 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/uuid v1.1.2 // indirect + github.com/imdario/mergo v0.3.12 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.6 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + 
github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/onsi/gomega v1.20.2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.12.1 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/spf13/pflag v1.0.5 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.19.1 // indirect + golang.org/x/crypto v0.17.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/term v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.24.2 // indirect + k8s.io/component-base v0.24.2 // indirect + k8s.io/klog/v2 v2.60.1 // indirect + k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect + k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect + sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index bcc8ffa2..6e00f6e8 100644 --- a/go.sum +++ b/go.sum @@ -5,332 +5,442 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= 
-cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= 
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.6 h1:5YWtOnckcudzIw8lPPBcWOnmIFWMtHci1ZWAZulMSx0= -github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date 
v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= 
+github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go-v2 v1.8.1 h1:GcFgQl7MsBygmeeqXyV1ivrTEmsVz/rdFJaTcltG9ag= -github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= -github.com/aws/aws-sdk-go-v2/config v1.6.1 h1:qrZINaORyr78syO1zfD4l7r4tZjy0Z1l0sy4jiysyOM= 
-github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= -github.com/aws/aws-sdk-go-v2/credentials v1.3.3 h1:A13QPatmUl41SqUfnuT3V0E3XiNGL6qNTOINbE8cZL4= -github.com/aws/aws-sdk-go-v2/credentials v1.3.3/go.mod h1:oVieKMT3m9BSfqhOfuQ+E0j/yN84ZAJ7Qv8Sfume/ak= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1 h1:rc+fRGvlKbeSd9IFhFS1KWBs0XjTkq0CfK5xqyLgIp0= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1/go.mod h1:+GTydg3uHmVlQdkRoetz6VHKbOMEYof70m19IpMLifc= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1 h1:IkqRRUZTKaS16P2vpX+FNc2jq3JWa3c478gykQp4ow4= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1/go.mod h1:Pv3WenDjI0v2Jl7UaMFIIbPOBbhn33RmmAmGgkXDoqY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3 h1:VxFCgxsqWe7OThOwJ5IpFX3xrObtuIH9Hg/NW7oot1Y= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3/go.mod h1:7gcsONBmFoCcKrAqrm95trrMd2+C/ReYKP7Vfu8yHHA= -github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.7.3 h1:jdoRhOcuqrCbvifZT//qCb+DhCzjVEy6f2NH+ppKP3I= -github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.7.3/go.mod h1:aukzhWNlyrzDQ2cjZeDj2vFgY2VYN5eMXrQUZwF58go= -github.com/aws/aws-sdk-go-v2/service/sso v1.3.3 h1:K2gCnGvAASpz+jqP9iyr+F/KNjmTYf8aWOtTQzhmZ5w= -github.com/aws/aws-sdk-go-v2/service/sso v1.3.3/go.mod h1:Jgw5O+SK7MZ2Yi9Yvzb4PggAPYaFSliiQuWR0hNjexk= -github.com/aws/aws-sdk-go-v2/service/sts v1.6.2 h1:l504GWCoQi1Pk68vSUFGLmDIEMzRfVGNgLakDK+Uj58= -github.com/aws/aws-sdk-go-v2/service/sts v1.6.2/go.mod h1:RBhoMJB8yFToaCnbe0jNq5Dcdy0jp6LhHqg55rjClkM= -github.com/aws/smithy-go v1.7.0 h1:+cLHMRrDZvQ4wk+KuQ9yH6eEg6KZEJ9RI2IkDqnygCg= -github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2 v1.22.0 h1:CpTS3XO3MWNel8ohoazkLZC6scvkYL2k+m0yzFJ17Hg= +github.com/aws/aws-sdk-go-v2 v1.22.0/go.mod 
h1:Kd0OJtkW3Q0M0lUWGszapWjEvrXDzRW+D21JNsroB+c= +github.com/aws/aws-sdk-go-v2/config v1.20.0 h1:q2+/mqFhY0J9m3Tb5RGFE3R4sdaUkIe4k2EuDfE3c08= +github.com/aws/aws-sdk-go-v2/config v1.20.0/go.mod h1:7+1riCZXyT+sAGvneR5j+Zl1GyfbBUNQurpQTE6FP6k= +github.com/aws/aws-sdk-go-v2/credentials v1.14.0 h1:LQquqPE7cL55RQmA/UBoBKehDlEtMnQKm3B0Q672ePE= +github.com/aws/aws-sdk-go-v2/credentials v1.14.0/go.mod h1:q/3oaTPlamrQWHPwJe56Mjq9g1TYDgddvgTgWJtHTmE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.0 h1:lF/cVllNAPKgjDwN2RsQUX9g/f6hXer9f10ubLFSoug= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.0/go.mod h1:c28nJNzMVVb9TQpZ5q4tzZvwEJwf/7So7Ie2s90l1Fw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31/go.mod h1:QT0BqUvX1Bh2ABdTGnjqEjvjzrCfIniM9Sc8zn9Yndo= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.0 h1:tN6dNNE4SzMuyMnVtQJXGVKX177/d5Zy4MuA1HA4KUc= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.0/go.mod h1:F6MXWETIeetAHwFHyoHEqrcB3NpijFv9nLP5h9CXtT0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25/go.mod h1:zBHOPwhBc3FlQjQJE/D3IfPWiWaQmT06Vq9aNukDo0k= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.0 h1:bfdsbTARDjaC/dSYGMO+E0psxFU4hTvCLnqYAfZ3D38= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.0/go.mod h1:Jg8XVv5M2V2wiAMvBFx+O59jg6Yr8vhP0bgNF/IuquM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.4.0 h1:21tlTXq3ev10yLMAjXZzpkZbrl49h3ElSjmxD57tD/E= +github.com/aws/aws-sdk-go-v2/internal/ini v1.4.0/go.mod h1:d9YrBHJhyzDCv5UsEVRizHlFV6Q0sLemFq6uxuqWfUw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.0 h1:dJnwy5Awv+uvfk73aRENVbv1cSQQ60ydCkPaun097KM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.0/go.mod h1:RsPWWy7u/hwmFX57sQ7MLvrvJeYyNkiMm5BaavpoU18= +github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.21.0 h1:8Cq/VTVv8EbgDZo3G/0Rk5iUkAzvf+ydvw6ExKscj/w= +github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.21.0/go.mod 
h1:T9ArVTDM6TUdMyfMGbULOLZMPwEnFhw1qjAoEj0VoHM= +github.com/aws/aws-sdk-go-v2/service/sso v1.16.0 h1:ZIlR6Wr/EgYwBdEz1NWBqdUsTh0mV7A68pId3YZl6H0= +github.com/aws/aws-sdk-go-v2/service/sso v1.16.0/go.mod h1:O7B5cpuhhJKefAKkM7onb0McmpHyKnsH4RrHJhOyq7M= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.18.0 h1:3BZyJei4k1SHdSAFhg9Qg15NnG3v5zosZyFWPm7df/A= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.18.0/go.mod h1:Td8EvzggonY02wLaqSpwybI3GbmA0PWoprKGil2uwJg= +github.com/aws/aws-sdk-go-v2/service/sts v1.24.0 h1:f/V5Y9OaHuNRrA9MntNQNAtMFXqhKj8HTEPnH81eXMI= +github.com/aws/aws-sdk-go-v2/service/sts v1.24.0/go.mod h1:HnCUMNz2XqwnEEk5X6oeDYB2HgOLFpJ/LyfilN8WErs= +github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.16.0 h1:gJZEH/Fqh+RsvlJ1Zt4tVAtV6bKkp3cC+R6FCZMNzik= +github.com/aws/smithy-go v1.16.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= 
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew 
v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.16.0+incompatible h1:rgqiKNjTnFQA6kkhFe16D8epTksy9HQ1MyrbDXSdYhM= +github.com/emicklei/go-restful v2.16.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod 
h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs= -github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/zapr v0.2.0 
h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= -github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= +github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= 
-github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod 
h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod 
h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod 
h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree 
v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= +github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= +github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio 
v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl 
v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc= -github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod 
h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty 
v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-runewidth v0.0.2/go.mod 
h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd 
h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= 
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= -github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= -github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= 
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/onsi/gomega v1.20.2 h1:8uQq0zMgLEfa0vRrrBgaJF2gyW9Da9BmfGV+OyUzfkY= +github.com/onsi/gomega v1.20.2/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod 
h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -339,96 +449,142 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= 
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= 
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stoewer/go-strcase v1.2.0/go.mod 
h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= 
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod 
h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q= +go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/metric 
v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.8.0/go.mod 
h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= -go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint 
v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -438,149 +594,305 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 
h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net 
v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text 
v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools 
v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k= -gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= 
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 
-google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -589,15 +901,66 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= 
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod 
h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -606,18 +969,25 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f 
h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -626,50 +996,63 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms= -k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= -k8s.io/apiextensions-apiserver v0.19.2 h1:oG84UwiDsVDu7dlsGQs5GySmQHCzMhknfhFExJMz9tA= -k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= -k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc= -k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= -k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc= 
-k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= -k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= -k8s.io/component-base v0.19.2 h1:jW5Y9RcZTb79liEhW3XDVTW7MuvEGP0tQZnfSX6/+gs= -k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= +k8s.io/api v0.24.3 h1:tt55QEmKd6L2k5DP6G/ZzdMQKvG5ro4H4teClqm0sTY= +k8s.io/api v0.24.3/go.mod h1:elGR/XSZrS7z7cSZPzVWaycpJuGIw57j9b95/1PdJNI= +k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k= +k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ= +k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/apimachinery v0.24.3 h1:hrFiNSA2cBZqllakVYyH/VyEh4B581bQRmqATJSeQTg= +k8s.io/apimachinery v0.24.3/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI= +k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA= +k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= +k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= +k8s.io/component-base v0.24.2 h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU= +k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo 
v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20200912215256-4140de9c8800 h1:9ZNvfPvVIEsp/T1ez4GQuzCcCTEQWhovSofhqR73A6g= -k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= +k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= +k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= -sigs.k8s.io/controller-runtime v0.7.2 h1:gD2JZp0bBLLuvSRYVNvox+bRCz1UUUxKDjPUCb56Ukk= -sigs.k8s.io/controller-runtime v0.7.2/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= 
-sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= +sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio= +sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= +sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= +sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/integration/eks-test/configs/client-hello.yaml b/integration/eks-test/configs/client-hello.yaml new file mode 100644 index 00000000..6c9c8d1d --- /dev/null +++ b/integration/eks-test/configs/client-hello.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + name: client-hello + namespace: aws-cloud-map-mcs-eks-e2e +spec: + containers: + - command: + - sleep + - "1d" + image: alpine + name: client-hello diff --git a/integration/eks-test/configs/coredns-deployment.yaml b/integration/eks-test/configs/coredns-deployment.yaml new 
file mode 100644 index 00000000..3061042c --- /dev/null +++ b/integration/eks-test/configs/coredns-deployment.yaml @@ -0,0 +1,137 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + eks.amazonaws.com/component: coredns + k8s-app: kube-dns + kubernetes.io/name: CoreDNS + name: coredns + namespace: kube-system +spec: + progressDeadlineSeconds: 600 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + eks.amazonaws.com/component: coredns + k8s-app: kube-dns + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + eks.amazonaws.com/compute-type: ec2 + creationTimestamp: null + labels: + eks.amazonaws.com/component: coredns + k8s-app: kube-dns + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: k8s-app + operator: In + values: + - kube-dns + topologyKey: kubernetes.io/hostname + weight: 100 + containers: + - args: + - -conf + - /etc/coredns/Corefile + image: ghcr.io/aws/aws-cloud-map-mcs-controller-for-k8s/coredns-multicluster/coredns:v1.8.6 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + name: coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /health + port: 8080 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + memory: 
170Mi + requests: + cpu: 100m + memory: 70Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/coredns + name: config-volume + readOnly: true + - mountPath: /tmp + name: tmp + dnsPolicy: Default + priorityClassName: system-cluster-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: coredns + serviceAccountName: coredns + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - key: CriticalAddonsOnly + operator: Exists + volumes: + - emptyDir: {} + name: tmp + - configMap: + defaultMode: 420 + items: + - key: Corefile + path: Corefile + name: coredns + name: config-volume \ No newline at end of file diff --git a/integration/eks-test/configs/e2e-clusterproperty-1.yaml b/integration/eks-test/configs/e2e-clusterproperty-1.yaml new file mode 100644 index 00000000..51b6f49a --- /dev/null +++ b/integration/eks-test/configs/e2e-clusterproperty-1.yaml @@ -0,0 +1,13 @@ +apiVersion: about.k8s.io/v1alpha1 +kind: ClusterProperty +metadata: + name: cluster.clusterset.k8s.io +spec: + value: eks-e2e-clusterid-1 +--- +apiVersion: about.k8s.io/v1alpha1 +kind: ClusterProperty +metadata: + name: clusterset.k8s.io +spec: + value: eks-e2e-clustersetid-1 diff --git a/integration/eks-test/configs/e2e-clusterproperty-2.yaml b/integration/eks-test/configs/e2e-clusterproperty-2.yaml new file mode 100644 index 00000000..dc4a45bf --- /dev/null +++ b/integration/eks-test/configs/e2e-clusterproperty-2.yaml @@ -0,0 +1,13 @@ +apiVersion: about.k8s.io/v1alpha1 +kind: ClusterProperty +metadata: + name: cluster.clusterset.k8s.io +spec: + value: eks-e2e-clusterid-2 +--- +apiVersion: about.k8s.io/v1alpha1 +kind: ClusterProperty +metadata: + name: clusterset.k8s.io +spec: + value: 
eks-e2e-clustersetid-1 diff --git a/integration/eks-test/configs/eksctl-cluster.yaml b/integration/eks-test/configs/eksctl-cluster.yaml new file mode 100644 index 00000000..f47d4217 --- /dev/null +++ b/integration/eks-test/configs/eksctl-cluster.yaml @@ -0,0 +1,18 @@ +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig +metadata: + name: $CLUSTER_NAME + region: $AWS_REGION + version: "1.22" +vpc: + cidr: $VPC_CIDR + autoAllocateIPv6: false + clusterEndpoints: + publicAccess: true + privateAccess: true +managedNodeGroups: +- name: $NODEGROUP_NAME + instanceType: t3.small + minSize: 1 + maxSize: 10 + desiredCapacity: 1 diff --git a/integration/eks-test/configs/nginx-deployment.yaml b/integration/eks-test/configs/nginx-deployment.yaml new file mode 100644 index 00000000..b60facdd --- /dev/null +++ b/integration/eks-test/configs/nginx-deployment.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: aws-cloud-map-mcs-eks-e2e + name: nginx-demo + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginxdemos/hello:plain-text + ports: + - containerPort: 80 diff --git a/integration/eks-test/configs/nginx-service.yaml b/integration/eks-test/configs/nginx-service.yaml new file mode 100644 index 00000000..ad15cd94 --- /dev/null +++ b/integration/eks-test/configs/nginx-service.yaml @@ -0,0 +1,10 @@ +kind: Service +apiVersion: v1 +metadata: + namespace: aws-cloud-map-mcs-eks-e2e + name: nginx-hello +spec: + selector: + app: nginx + ports: + - port: 80 diff --git a/config/samples/multicluster_v1alpha1_serviceexport.yaml b/integration/eks-test/configs/nginx-serviceexport.yaml similarity index 52% rename from config/samples/multicluster_v1alpha1_serviceexport.yaml rename to integration/eks-test/configs/nginx-serviceexport.yaml index e53b0abf..cbb57568 100644 --- a/config/samples/multicluster_v1alpha1_serviceexport.yaml +++ 
b/integration/eks-test/configs/nginx-serviceexport.yaml @@ -1,7 +1,5 @@ -apiVersion: multicluster.x-k8s.io/v1alpha1 kind: ServiceExport +apiVersion: multicluster.x-k8s.io/v1alpha1 metadata: - name: serviceexport-sample -spec: - # Add fields here - foo: bar + namespace: aws-cloud-map-mcs-eks-e2e + name: nginx-hello \ No newline at end of file diff --git a/integration/eks-test/scripts/eks-DNS-test.sh b/integration/eks-test/scripts/eks-DNS-test.sh new file mode 100755 index 00000000..3a838dc3 --- /dev/null +++ b/integration/eks-test/scripts/eks-DNS-test.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# Testing service consumption with client-hello pod + +echo "verifying cross-cluster service consumption..." + +# Install curl if not installed +$KUBECTL_BIN exec $CLIENT_POD -n $NAMESPACE /bin/sh -- curl --version &>/dev/null +exit_code=$? +if [ "$exit_code" -eq 126 ]; then + echo "curl not installed, installing..." + $KUBECTL_BIN exec $CLIENT_POD -n $NAMESPACE /bin/sh -- apk add curl +fi + +# Perform an nslookup to cluster-local CoreDNS +echo "performing nslookup..." +$KUBECTL_BIN exec -it $CLIENT_POD -n $NAMESPACE /bin/sh -- nslookup $SERVICE.$NAMESPACE.svc.clusterset.local +exit_code=$? + +if [ "$exit_code" -ne 0 ]; then + echo "ERROR: Unable to nslookup service $SERVICE.$NAMESPACE.svc.clusterset.local" + exit $exit_code +fi +sleep 5 + +# Call to DNS server, if unable to reach, importing cluster is not able to properly consume service +echo "performing curl..." +$KUBECTL_BIN exec -it $CLIENT_POD -n $NAMESPACE /bin/sh -- curl $SERVICE.$NAMESPACE.svc.clusterset.local +exit_code=$? 
+ +if [ "$exit_code" -ne 0 ]; then + echo "ERROR: Unable to reach service $SERVICE.$NAMESPACE.svc.clusterset.local" + exit $exit_code +fi + +echo "confirmed service consumption" +exit 0 diff --git a/integration/eks-test/scripts/eks-cleanup.sh b/integration/eks-test/scripts/eks-cleanup.sh new file mode 100755 index 00000000..7dc99aa9 --- /dev/null +++ b/integration/eks-test/scripts/eks-cleanup.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +# Cleanup EKS cluster used for integration test. + +source ./integration/eks-test/scripts/eks-common.sh + +# Delete service and namespace from export and import cluster +$KUBECTL_BIN config use-context $EXPORT_CLS +$KUBECTL_BIN delete svc $SERVICE -n $NAMESPACE + +# Verfication to check if there are hanging ServiceExport or ServiceImport CRDs and clears the finalizers to allow cleanup process to continue +for CRD in $($KUBECTL_BIN get crd -n $NAMESPACE | grep multicluster | cut -d " " -f 1 | xargs); do + $KUBECTL_BIN patch crd -n $NAMESPACE $CRD --type merge -p '{"metadata":{"finalizers": [null]}}'; + $KUBECTL_BIN delete crd $CRD -n $NAMESPACE # CRD needs to be explictly deleted in order to ensure zero resources are hanging for future tests +done + +$KUBECTL_BIN delete namespaces $NAMESPACE + +# IAM Service Account needs to be explictly deleted, as not doing so creates hanging service accounts that cause permissions issues in future tests +eksctl delete iamserviceaccount \ + --name cloud-map-mcs-controller-manager \ + --namespace $MCS_NAMESPACE \ + --cluster $EXPORT_CLS \ + --wait + +$KUBECTL_BIN config use-context $IMPORT_CLS +$KUBECTL_BIN delete pod $CLIENT_POD -n $NAMESPACE +$KUBECTL_BIN delete namespaces $NAMESPACE +eksctl delete iamserviceaccount \ + --name cloud-map-mcs-controller-manager \ + --namespace $MCS_NAMESPACE \ + --cluster $IMPORT_CLS \ + --wait + +$KUBECTL_BIN config use-context $EXPORT_CLS +$KUBECTL_BIN delete -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/config/controller_install_latest" +$KUBECTL_BIN 
config use-context $IMPORT_CLS +$KUBECTL_BIN delete -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/config/controller_install_latest" + +echo "EKS clusters cleaned!" + +# ./integration/shared/scripts/cleanup-cloudmap.sh +go run ./integration/janitor/runner/main.go "$NAMESPACE" "$CLUSTERID1" "$CLUSTERSETID1" +go run ./integration/janitor/runner/main.go "$NAMESPACE" "$CLUSTERID2" "$CLUSTERSETID1" diff --git a/integration/eks-test/scripts/eks-common.sh b/integration/eks-test/scripts/eks-common.sh new file mode 100755 index 00000000..95242547 --- /dev/null +++ b/integration/eks-test/scripts/eks-common.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +export KIND_BIN='./bin/kind' +export KUBECTL_BIN='kubectl' +export LOGS='./integration/eks-test/testlog' +export CONFIGS='./integration/eks-test/configs' +export SCENARIOS='./integration/shared/scenarios' +export NAMESPACE='aws-cloud-map-mcs-eks-e2e' +export MCS_NAMESPACE='cloud-map-mcs-system' +export SERVICE='nginx-hello' +export SERVICE_TYPE='ClusterSetIP' +export CLIENT_POD='client-hello' +export ENDPT_PORT=80 +export SERVICE_PORT=80 # from nginx-service.yaml +export EXPORT_CLS='cls1' +export IMPORT_CLS='cls2' +export CLUSTERID1='eks-e2e-clusterid-1' +export CLUSTERID2='eks-e2e-clusterid-2' +export CLUSTERSETID1='eks-e2e-clustersetid-1' +export EXPECTED_ENDPOINT_COUNT=3 +export UPDATED_ENDPOINT_COUNT=4 \ No newline at end of file diff --git a/integration/eks-test/scripts/eks-run-tests.sh b/integration/eks-test/scripts/eks-run-tests.sh new file mode 100755 index 00000000..160daec4 --- /dev/null +++ b/integration/eks-test/scripts/eks-run-tests.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash + +# Runs the AWS Cloud Map MCS Controller for K8s in EKS clusters and test services has been exported from one cluster and imported from the other + +source ./integration/eks-test/scripts/eks-common.sh + +# Checking expected endpoints number in exporting cluster +$KUBECTL_BIN config use-context $EXPORT_CLS +if ! 
endpts=$(./integration/shared/scripts/poll-endpoints.sh "$EXPECTED_ENDPOINT_COUNT"); then + exit $? +fi + +# Runner to verify expected endpoints are exported to Cloud Map +go run $SCENARIOS/runner/main.go $NAMESPACE $SERVICE $CLUSTERID1 $CLUSTERSETID1 $ENDPT_PORT $SERVICE_PORT $SERVICE_TYPE "$endpts" +exit_code=$? + +# Check imported endpoints in importing cluster +if [ "$exit_code" -eq 0 ] ; then + $KUBECTL_BIN config use-context $IMPORT_CLS + ./integration/shared/scripts/test-import.sh "$EXPECTED_ENDPOINT_COUNT" "$endpts" + exit_code=$? +fi + +# Verifying that importing cluster is properly consuming services +if [ "$exit_code" -eq 0 ] ; then + ./integration/eks-test/scripts/eks-DNS-test.sh + exit_code=$? +fi + +echo "sleeping..." +sleep 2 + +# Scaling and verifying deployment +if [ "$exit_code" -eq 0 ] ; then + $KUBECTL_BIN config use-context $EXPORT_CLS + deployment=$($KUBECTL_BIN get deployment --namespace "$NAMESPACE" -o json | jq -r '.items[0].metadata.name') + + echo "scaling the deployment $deployment to $UPDATED_ENDPOINT_COUNT" + $KUBECTL_BIN scale deployment/"$deployment" --replicas="$UPDATED_ENDPOINT_COUNT" --namespace "$NAMESPACE" + exit_code=$? +fi + +if [ "$exit_code" -eq 0 ] ; then + if ! updated_endpoints=$(./integration/shared/scripts/poll-endpoints.sh "$UPDATED_ENDPOINT_COUNT") ; then + exit $? + fi +fi + +if [ "$exit_code" -eq 0 ] ; then + go run $SCENARIOS/runner/main.go $NAMESPACE $SERVICE $CLUSTERID1 $CLUSTERSETID1 $ENDPT_PORT $SERVICE_PORT $SERVICE_TYPE "$updated_endpoints" + exit_code=$? +fi + +if [ "$exit_code" -eq 0 ] ; then + $KUBECTL_BIN config use-context $IMPORT_CLS + ./integration/shared/scripts/test-import.sh "$UPDATED_ENDPOINT_COUNT" "$updated_endpoints" + exit_code=$? +fi + +if [ "$exit_code" -eq 0 ] ; then + ./integration/eks-test/scripts/eks-DNS-test.sh + exit_code=$? 
+fi + + +# Dump logs +mkdir -p "$LOGS" +$KUBECTL_BIN config use-context $EXPORT_CLS +$KUBECTL_BIN logs -l control-plane=controller-manager -c manager --namespace $MCS_NAMESPACE &> "$LOGS/ctl-1.log" +$KUBECTL_BIN config use-context $IMPORT_CLS +$KUBECTL_BIN logs -l control-plane=controller-manager -c manager --namespace $MCS_NAMESPACE &> "$LOGS/ctl-2.log" +echo "dumped logs" + +exit $exit_code diff --git a/integration/eks-test/scripts/eks-setup-helper.sh b/integration/eks-test/scripts/eks-setup-helper.sh new file mode 100755 index 00000000..df4c53d6 --- /dev/null +++ b/integration/eks-test/scripts/eks-setup-helper.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# Adding IAM service accounts +$KUBECTL_BIN config use-context $1 +$KUBECTL_BIN create namespace $MCS_NAMESPACE +eksctl create iamserviceaccount \ +--cluster $1 \ +--namespace $MCS_NAMESPACE \ +--name cloud-map-mcs-controller-manager \ +--attach-policy-arn arn:aws:iam::aws:policy/AWSCloudMapFullAccess \ +--override-existing-serviceaccounts \ +--approve + +# Installing controller +$KUBECTL_BIN config use-context $1 +$KUBECTL_BIN apply -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/config/controller_install_latest" + diff --git a/integration/eks-test/scripts/eks-setup.sh b/integration/eks-test/scripts/eks-setup.sh new file mode 100755 index 00000000..c97e8674 --- /dev/null +++ b/integration/eks-test/scripts/eks-setup.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +source ./integration/eks-test/scripts/eks-common.sh + +# Call helper for service account and controller installation +./integration/eks-test/scripts/eks-setup-helper.sh $EXPORT_CLS +./integration/eks-test/scripts/eks-setup-helper.sh $IMPORT_CLS + +# Apply ClusterProperties +$KUBECTL_BIN config use-context $EXPORT_CLS +$KUBECTL_BIN apply -f "$CONFIGS/e2e-clusterproperty-1.yaml" + +$KUBECTL_BIN config use-context $IMPORT_CLS +$KUBECTL_BIN apply -f "$CONFIGS/e2e-clusterproperty-2.yaml" + +# Installing service +$KUBECTL_BIN config use-context $EXPORT_CLS 
+$KUBECTL_BIN create namespace $NAMESPACE +$KUBECTL_BIN apply -f "$CONFIGS/nginx-deployment.yaml" +$KUBECTL_BIN apply -f "$CONFIGS/nginx-service.yaml" + +$KUBECTL_BIN config use-context $IMPORT_CLS +$KUBECTL_BIN create namespace $NAMESPACE + +# Creating service export +$KUBECTL_BIN config use-context $EXPORT_CLS +$KUBECTL_BIN apply -f "$CONFIGS/nginx-serviceexport.yaml" + +# Create client-hello pod +$KUBECTL_BIN config use-context $IMPORT_CLS +$KUBECTL_BIN apply -f "$CONFIGS/client-hello.yaml" +sleep 15 diff --git a/integration/janitor/api.go b/integration/janitor/api.go new file mode 100644 index 00000000..6e00a400 --- /dev/null +++ b/integration/janitor/api.go @@ -0,0 +1,41 @@ +package janitor + +import ( + "context" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/cloudmap" + "github.com/aws/aws-sdk-go-v2/aws" + sd "github.com/aws/aws-sdk-go-v2/service/servicediscovery" +) + +type ServiceDiscoveryJanitorApi interface { + DeleteNamespace(ctx context.Context, namespaceId string) (operationId string, err error) + DeleteService(ctx context.Context, serviceId string) error + cloudmap.ServiceDiscoveryApi +} + +type serviceDiscoveryJanitorApi struct { + cloudmap.ServiceDiscoveryApi + janitorFacade SdkJanitorFacade +} + +func NewServiceDiscoveryJanitorApiFromConfig(cfg *aws.Config) ServiceDiscoveryJanitorApi { + return &serviceDiscoveryJanitorApi{ + ServiceDiscoveryApi: cloudmap.NewServiceDiscoveryApiFromConfig(cfg), + janitorFacade: NewSdkJanitorFacadeFromConfig(cfg), + } +} + +func (api *serviceDiscoveryJanitorApi) DeleteNamespace(ctx context.Context, nsId string) (opId string, err error) { + out, err := api.janitorFacade.DeleteNamespace(ctx, &sd.DeleteNamespaceInput{Id: &nsId}) + if err != nil { + return "", err + } + + return aws.ToString(out.OperationId), nil +} + +func (api *serviceDiscoveryJanitorApi) DeleteService(ctx context.Context, svcId string) error { + _, err := api.janitorFacade.DeleteService(ctx, &sd.DeleteServiceInput{Id: &svcId}) + return 
err +} diff --git a/integration/janitor/api_test.go b/integration/janitor/api_test.go new file mode 100644 index 00000000..55a3b5b8 --- /dev/null +++ b/integration/janitor/api_test.go @@ -0,0 +1,52 @@ +package janitor + +import ( + "context" + "testing" + + janitorMock "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/mocks/integration/janitor" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/test" + "github.com/aws/aws-sdk-go-v2/aws" + sd "github.com/aws/aws-sdk-go-v2/service/servicediscovery" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func TestNewServiceDiscoveryJanitorApiFromConfig(t *testing.T) { + assert.NotNil(t, NewServiceDiscoveryJanitorApiFromConfig(&aws.Config{})) +} + +func TestServiceDiscoveryJanitorApi_DeleteNamespace_HappyCase(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + mocksdk := janitorMock.NewMockSdkJanitorFacade(mockController) + jApi := getJanitorApi(mocksdk) + + mocksdk.EXPECT().DeleteNamespace(context.TODO(), &sd.DeleteNamespaceInput{Id: aws.String(test.HttpNsId)}). + Return(&sd.DeleteNamespaceOutput{OperationId: aws.String(test.OpId1)}, nil) + + opId, err := jApi.DeleteNamespace(context.TODO(), test.HttpNsId) + assert.Nil(t, err, "No error for happy case") + assert.Equal(t, test.OpId1, opId) +} + +func TestServiceDiscoveryJanitorApi_DeleteService_HappyCase(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + mocksdk := janitorMock.NewMockSdkJanitorFacade(mockController) + jApi := getJanitorApi(mocksdk) + + mocksdk.EXPECT().DeleteService(context.TODO(), &sd.DeleteServiceInput{Id: aws.String(test.SvcId)}). 
+ Return(&sd.DeleteServiceOutput{}, nil) + + err := jApi.DeleteService(context.TODO(), test.SvcId) + assert.Nil(t, err, "No error for happy case") +} + +func getJanitorApi(sdk *janitorMock.MockSdkJanitorFacade) ServiceDiscoveryJanitorApi { + return &serviceDiscoveryJanitorApi{ + janitorFacade: sdk, + } +} diff --git a/integration/janitor/aws_facade.go b/integration/janitor/aws_facade.go new file mode 100644 index 00000000..5a2d3bce --- /dev/null +++ b/integration/janitor/aws_facade.go @@ -0,0 +1,31 @@ +package janitor + +import ( + "context" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/cloudmap" + "github.com/aws/aws-sdk-go-v2/aws" + sd "github.com/aws/aws-sdk-go-v2/service/servicediscovery" +) + +// SdkJanitorFacade extends the minimal surface area of ServiceDiscovery API calls of the client +// for integration test janitor operations. +type SdkJanitorFacade interface { + // DeleteNamespace provides ServiceDiscovery DeleteNamespace wrapper interface. + DeleteNamespace(context.Context, *sd.DeleteNamespaceInput, ...func(*sd.Options)) (*sd.DeleteNamespaceOutput, error) + + // DeleteService provides ServiceDiscovery DeleteService wrapper interface. + DeleteService(context.Context, *sd.DeleteServiceInput, ...func(*sd.Options)) (*sd.DeleteServiceOutput, error) + + cloudmap.AwsFacade +} + +type sdkJanitorFacade struct { + *sd.Client +} + +// NewSdkJanitorFacadeFromConfig creates a new AWS facade from an AWS client config +// extended for integration test janitor operations. 
+func NewSdkJanitorFacadeFromConfig(cfg *aws.Config) SdkJanitorFacade { + return &sdkJanitorFacade{sd.NewFromConfig(*cfg)} +} diff --git a/integration/janitor/janitor.go b/integration/janitor/janitor.go new file mode 100644 index 00000000..260fc1e0 --- /dev/null +++ b/integration/janitor/janitor.go @@ -0,0 +1,112 @@ +package janitor + +import ( + "context" + "fmt" + "os" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/cloudmap" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" +) + +// CloudMapJanitor handles AWS Cloud Map resource cleanup during integration tests. +type CloudMapJanitor interface { + // Cleanup removes all instances, services and the namespace from AWS Cloud Map for a given namespace name. + Cleanup(ctx context.Context, nsName string) +} + +type cloudMapJanitor struct { + clusterId string + clusterSetId string + sdApi ServiceDiscoveryJanitorApi + fail func() +} + +// NewDefaultJanitor returns a new janitor object. 
+func NewDefaultJanitor(clusterId string, clusterSetId string) CloudMapJanitor { + awsCfg, err := config.LoadDefaultConfig(context.TODO()) + + if err != nil { + fmt.Printf("unable to configure AWS session: %s", err.Error()) + os.Exit(1) + } + + return &cloudMapJanitor{ + clusterId: clusterId, + clusterSetId: clusterSetId, + sdApi: NewServiceDiscoveryJanitorApiFromConfig(&awsCfg), + fail: func() { os.Exit(1) }, + } +} + +func (j *cloudMapJanitor) Cleanup(ctx context.Context, nsName string) { + fmt.Printf("Cleaning up all test resources in Cloud Map for namespace : %s\n", nsName) + + nsMap, err := j.sdApi.GetNamespaceMap(ctx) + j.checkOrFail(err, "", "could not find namespace to clean") + + ns, found := nsMap[nsName] + if !found { + fmt.Println("namespace does not exist in account, nothing to clean") + return + } + + fmt.Printf("found namespace to clean: %s\n", ns.Id) + + svcIdMap, err := j.sdApi.GetServiceIdMap(ctx, ns.Id) + j.checkOrFail(err, + fmt.Sprintf("namespace has %d services to clean", len(svcIdMap)), + "could not find services to clean") + + for svcName, svcId := range svcIdMap { + fmt.Printf("found service to clean: %s\n", svcId) + j.deregisterInstances(ctx, nsName, svcName, svcId) + + delSvcErr := j.sdApi.DeleteService(ctx, svcId) + j.checkOrFail(delSvcErr, "service deleted", "could not cleanup service") + } + + opId, err := j.sdApi.DeleteNamespace(ctx, ns.Id) + if err == nil { + fmt.Println("namespace delete in progress") + _, err = cloudmap.NewOperationPoller(j.sdApi).Poll(ctx, opId) + } + j.checkOrFail(err, "clean up successful", "could not cleanup namespace") +} + +func (j *cloudMapJanitor) deregisterInstances(ctx context.Context, nsName string, svcName string, svcId string) { + queryParameters := map[string]string{ + model.ClusterSetIdAttr: j.clusterSetId, + } + + insts, err := j.sdApi.DiscoverInstances(ctx, nsName, svcName, queryParameters) + j.checkOrFail(err, + fmt.Sprintf("service has %d instances to clean", len(insts)), + "could not list 
instances to cleanup") + + opPoller := cloudmap.NewOperationPoller(j.sdApi) + for _, inst := range insts { + instId := aws.ToString(inst.InstanceId) + fmt.Printf("found instance to clean: %s\n", instId) + opPoller.Submit(ctx, func() (opId string, err error) { + return j.sdApi.DeregisterInstance(ctx, svcId, instId) + }) + } + + err = opPoller.Await() + j.checkOrFail(err, "instances de-registered", "could not cleanup instances") +} + +func (j *cloudMapJanitor) checkOrFail(err error, successMsg string, failMsg string) { + if err != nil { + fmt.Printf("%s: %s\n", failMsg, err.Error()) + j.fail() + } + + if successMsg != "" { + fmt.Println(successMsg) + } +} diff --git a/integration/janitor/janitor_test.go b/integration/janitor/janitor_test.go new file mode 100644 index 00000000..95e25c71 --- /dev/null +++ b/integration/janitor/janitor_test.go @@ -0,0 +1,82 @@ +package janitor + +import ( + "context" + "testing" + + janitorMock "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/mocks/integration/janitor" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/test" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/servicediscovery/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +type testJanitor struct { + janitor *cloudMapJanitor + mockApi *janitorMock.MockServiceDiscoveryJanitorApi + failed *bool + close func() +} + +func TestNewDefaultJanitor(t *testing.T) { + assert.NotNil(t, NewDefaultJanitor(test.ClusterId1, test.ClusterSet)) +} + +func TestCleanupHappyCase(t *testing.T) { + tj := getTestJanitor(t) + defer tj.close() + + tj.mockApi.EXPECT().GetNamespaceMap(context.TODO()). + Return(map[string]*model.Namespace{test.HttpNsName: test.GetTestHttpNamespace()}, nil) + tj.mockApi.EXPECT().GetServiceIdMap(context.TODO(), test.HttpNsId). 
+ Return(map[string]string{test.SvcName: test.SvcId}, nil) + tj.mockApi.EXPECT().DiscoverInstances(context.TODO(), test.HttpNsName, test.SvcName, map[string]string{ + model.ClusterSetIdAttr: test.ClusterSet, + }). + Return([]types.HttpInstanceSummary{{InstanceId: aws.String(test.EndptId1)}}, nil) + + tj.mockApi.EXPECT().DeregisterInstance(context.TODO(), test.SvcId, test.EndptId1). + Return(test.OpId1, nil) + tj.mockApi.EXPECT().GetOperation(context.TODO(), test.OpId1). + Return(&types.Operation{Status: types.OperationStatusSuccess}, nil) + tj.mockApi.EXPECT().DeleteService(context.TODO(), test.SvcId). + Return(nil) + tj.mockApi.EXPECT().DeleteNamespace(context.TODO(), test.HttpNsId). + Return(test.OpId2, nil) + tj.mockApi.EXPECT().GetOperation(context.TODO(), test.OpId2). + Return(&types.Operation{Status: types.OperationStatusSuccess, + Targets: map[string]string{string(types.OperationTargetTypeNamespace): test.HttpNsId}}, nil) + + tj.janitor.Cleanup(context.TODO(), test.HttpNsName) + assert.False(t, *tj.failed) +} + +func TestCleanupNothingToClean(t *testing.T) { + tj := getTestJanitor(t) + defer tj.close() + + tj.mockApi.EXPECT().GetNamespaceMap(context.TODO()). 
+ Return(map[string]*model.Namespace{}, nil) + + tj.janitor.Cleanup(context.TODO(), test.HttpNsName) + assert.False(t, *tj.failed) +} + +func getTestJanitor(t *testing.T) *testJanitor { + mockController := gomock.NewController(t) + api := janitorMock.NewMockServiceDiscoveryJanitorApi(mockController) + failed := false + return &testJanitor{ + janitor: &cloudMapJanitor{ + clusterId: test.ClusterId1, + clusterSetId: test.ClusterSet, + sdApi: api, + fail: func() { failed = true }, + }, + mockApi: api, + failed: &failed, + close: func() { mockController.Finish() }, + } +} diff --git a/integration/janitor/runner/main.go b/integration/janitor/runner/main.go new file mode 100644 index 00000000..691c0d03 --- /dev/null +++ b/integration/janitor/runner/main.go @@ -0,0 +1,23 @@ +package main + +import ( + "context" + "fmt" + "os" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/integration/janitor" +) + +func main() { + if len(os.Args) != 4 { + fmt.Println("Expected namespace name, clusterId, clusterSetId arguments") + os.Exit(1) + } + + nsName := os.Args[1] + clusterId := os.Args[2] + clusterSetId := os.Args[3] + + j := janitor.NewDefaultJanitor(clusterId, clusterSetId) + j.Cleanup(context.TODO(), nsName) +} diff --git a/integration/kind-test/configs/coredns-deployment.yaml b/integration/kind-test/configs/coredns-deployment.yaml new file mode 100644 index 00000000..86b6e0f3 --- /dev/null +++ b/integration/kind-test/configs/coredns-deployment.yaml @@ -0,0 +1,137 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + # eks.amazonaws.com/component: coredns + k8s-app: kube-dns + kubernetes.io/name: CoreDNS + name: coredns + namespace: kube-system +spec: + progressDeadlineSeconds: 600 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + # eks.amazonaws.com/component: coredns + k8s-app: kube-dns + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + # annotations: + # 
eks.amazonaws.com/compute-type: ec2 + creationTimestamp: null + labels: + # eks.amazonaws.com/component: coredns + k8s-app: kube-dns + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: k8s-app + operator: In + values: + - kube-dns + topologyKey: kubernetes.io/hostname + weight: 100 + containers: + - args: + - -conf + - /etc/coredns/Corefile + image: ghcr.io/aws/aws-cloud-map-mcs-controller-for-k8s/coredns-multicluster/coredns:v1.8.6 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + name: coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /health + port: 8080 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/coredns + name: config-volume + readOnly: true + - mountPath: /tmp + name: tmp + dnsPolicy: Default + priorityClassName: system-cluster-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: coredns + serviceAccountName: coredns + 
terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - key: CriticalAddonsOnly + operator: Exists + volumes: + - emptyDir: {} + name: tmp + - configMap: + defaultMode: 420 + items: + - key: Corefile + path: Corefile + name: coredns + name: config-volume diff --git a/integration/kind-test/configs/dnsutils-pod.yaml b/integration/kind-test/configs/dnsutils-pod.yaml new file mode 100644 index 00000000..beb5e89b --- /dev/null +++ b/integration/kind-test/configs/dnsutils-pod.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: dnsutils + namespace: default +spec: + containers: + - command: + - sleep + - "3600" + image: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3 + name: dnsutils + imagePullPolicy: IfNotPresent + restartPolicy: Always diff --git a/integration/kind-test/configs/e2e-clusterproperty.yaml b/integration/kind-test/configs/e2e-clusterproperty.yaml new file mode 100644 index 00000000..64f700ae --- /dev/null +++ b/integration/kind-test/configs/e2e-clusterproperty.yaml @@ -0,0 +1,13 @@ +apiVersion: about.k8s.io/v1alpha1 +kind: ClusterProperty +metadata: + name: cluster.clusterset.k8s.io +spec: + value: kind-e2e-clusterid-1 +--- +apiVersion: about.k8s.io/v1alpha1 +kind: ClusterProperty +metadata: + name: clusterset.k8s.io +spec: + value: kind-e2e-clustersetid-1 diff --git a/integration/kind-test/configs/e2e-clusterset-ip-service.yaml b/integration/kind-test/configs/e2e-clusterset-ip-service.yaml new file mode 100644 index 00000000..23a1c804 --- /dev/null +++ b/integration/kind-test/configs/e2e-clusterset-ip-service.yaml @@ -0,0 +1,16 @@ +kind: Service +apiVersion: v1 +metadata: + namespace: aws-cloud-map-mcs-e2e + name: e2e-clusterset-ip-service +spec: + selector: + app: nginx-hello + ports: + - port: 80 +--- +kind: ServiceExport +apiVersion: multicluster.x-k8s.io/v1alpha1 +metadata: + namespace: aws-cloud-map-mcs-e2e + name: e2e-clusterset-ip-service diff --git 
a/integration/kind-test/configs/e2e-deployment.yaml b/integration/kind-test/configs/e2e-deployment.yaml new file mode 100644 index 00000000..0a04d4a5 --- /dev/null +++ b/integration/kind-test/configs/e2e-deployment.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: aws-cloud-map-mcs-e2e + name: nginx-hello-deployment + labels: + app: nginx-hello +spec: + replicas: 5 + selector: + matchLabels: + app: nginx-hello + template: + metadata: + labels: + app: nginx-hello + spec: + containers: + - name: nginx-hello + image: nginxdemos/hello:0.3-plain-text + ports: + - containerPort: 80 diff --git a/integration/kind-test/configs/e2e-headless-service.yaml b/integration/kind-test/configs/e2e-headless-service.yaml new file mode 100644 index 00000000..78f5b84f --- /dev/null +++ b/integration/kind-test/configs/e2e-headless-service.yaml @@ -0,0 +1,17 @@ +kind: Service +apiVersion: v1 +metadata: + namespace: aws-cloud-map-mcs-e2e + name: e2e-headless-service +spec: + clusterIP: None + selector: + app: nginx-hello + ports: + - port: 80 +--- +kind: ServiceExport +apiVersion: multicluster.x-k8s.io/v1alpha1 +metadata: + namespace: aws-cloud-map-mcs-e2e + name: e2e-headless-service diff --git a/integration/kind-test/configs/ipv6.yaml b/integration/kind-test/configs/ipv6.yaml new file mode 100644 index 00000000..a577feda --- /dev/null +++ b/integration/kind-test/configs/ipv6.yaml @@ -0,0 +1,5 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +networking: + ipFamily: ipv6 + apiServerAddress: 127.0.0.1 \ No newline at end of file diff --git a/integration/kind-test/scripts/cleanup-kind.sh b/integration/kind-test/scripts/cleanup-kind.sh new file mode 100755 index 00000000..f314c0c2 --- /dev/null +++ b/integration/kind-test/scripts/cleanup-kind.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +# Deletes Kind cluster used for integration test. 
+ +set -eo pipefail +source ./integration/kind-test/scripts/common.sh + +$KIND_BIN delete cluster --name "$KIND_SHORT" + +./integration/shared/scripts/cleanup-cloudmap.sh diff --git a/integration/kind-test/scripts/common.sh b/integration/kind-test/scripts/common.sh new file mode 100755 index 00000000..c038c3a6 --- /dev/null +++ b/integration/kind-test/scripts/common.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +export KIND_BIN='./bin/kind' +export KUBECTL_BIN='kubectl' +export LOGS='./integration/kind-test/testlog' +export KIND_CONFIGS='./integration/kind-test/configs' +export SHARED_CONFIGS='./integration/shared/configs' +export SCENARIOS='./integration/shared/scenarios' +export NAMESPACE='aws-cloud-map-mcs-e2e' +export ENDPT_PORT=80 +export SERVICE_PORT=80 +export CLUSTERIP_SERVICE='e2e-clusterset-ip-service' +export HEADLESS_SERVICE='e2e-headless-service' +export KIND_SHORT='cloud-map-e2e' +export CLUSTER='kind-cloud-map-e2e' +export CLUSTERID1='kind-e2e-clusterid-1' +export CLUSTERSETID1='kind-e2e-clustersetid-1' +export IMAGE='kindest/node:v1.21.12@sha256:f316b33dd88f8196379f38feb80545ef3ed44d9197dca1bfd48bcb1583210207' +export EXPECTED_ENDPOINT_COUNT=5 +export UPDATED_ENDPOINT_COUNT=6 diff --git a/integration/kind-test/scripts/curl-test.sh b/integration/kind-test/scripts/curl-test.sh new file mode 100755 index 00000000..a96ba56e --- /dev/null +++ b/integration/kind-test/scripts/curl-test.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# Testing service consumption with dnsutils pod + +deployment=$1 + +echo "performing curl to $SERVICE.$NAMESPACE.svc.clusterset.local" +http_code=$($KUBECTL_BIN exec deployment/$deployment --namespace "$NAMESPACE" -- curl -s -o /dev/null -w "%{http_code}" $SERVICE.$NAMESPACE.svc.clusterset.local) +exit_code=$? 
+ +if [ "$exit_code" -ne 0 ]; then + echo "ERROR: Unable to curl $SERVICE.$NAMESPACE.svc.clusterset.local" + exit $exit_code +fi + +if [ "$http_code" -ne "200" ]; then + echo "ERROR: curl $SERVICE.$NAMESPACE.svc.clusterset.local failed with $http_code" + exit 1 +fi + +exit 0 diff --git a/integration/kind-test/scripts/dns-test.sh b/integration/kind-test/scripts/dns-test.sh new file mode 100755 index 00000000..627c10a8 --- /dev/null +++ b/integration/kind-test/scripts/dns-test.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash + +# If the IP Type env var is not set, default it to IPv4 +if [[ -z "${ADDRESS_TYPE}" ]]; then + ADDRESS_TYPE="IPv4" +fi + +# Helper function to verify DNS results +checkDNS() { + dns_addresses_count=$(echo "$1" | wc -l | xargs) + + if [ "$SERVICE_TYPE" = "Headless" ]; then + if [ "$dns_addresses_count" -ne "$expected_endpoint_count" ]; then + echo "ERROR: Found $dns_addresses_count endpoints, expected $expected_endpoint_count endpoints" + exit 1 + fi + fi + + if [ "$SERVICE_TYPE" = "ClusterSetIP" ]; then + if [ "$dns_addresses_count" -ne 1 ]; then + echo "ERROR: Found $dns_addresses_count endpoints, expected 1 endpoint" + exit 1 + fi + fi +} + +# Testing service consumption with dnsutils pod + +echo "verifying dns resolution..." + +expected_endpoint_count=$1 + +# Install dnsutils pod +$KUBECTL_BIN apply -f "$KIND_CONFIGS/dnsutils-pod.yaml" +$KUBECTL_BIN wait --for=condition=ready pod/dnsutils # wait until pod is deployed + +# Perform a dig to cluster-local CoreDNS +# TODO: parse dig outputs for more precise verification - check specifics IPs? +if [[ $ADDRESS_TYPE == "IPv4" ]]; then + echo "performing dig for A records for IPv4..." + addresses=$($KUBECTL_BIN exec dnsutils -- dig +all +ans $SERVICE.$NAMESPACE.svc.clusterset.local +short) + exit_code=$? + echo "$addresses" +elif [[ $ADDRESS_TYPE == "IPv6" ]]; then + echo "performing dig for AAAA records for IPv6..." 
+ addresses=$($KUBECTL_BIN exec dnsutils -- dig AAAA +all +ans $SERVICE.$NAMESPACE.svc.clusterset.local +short) + exit_code=$? + echo "$addresses" +else + echo "ADDRESS_TYPE invalid" + exit 1 +fi + +if [ "$exit_code" -ne 0 ]; then + echo "ERROR: Unable to dig service $SERVICE.$NAMESPACE.svc.clusterset.local" + exit $exit_code +fi + +# verify DNS results +checkDNS "$addresses" + +echo "performing dig for SRV records..." +addresses=$($KUBECTL_BIN exec dnsutils -- dig +all +ans $SERVICE.$NAMESPACE.svc.clusterset.local. SRV +short) +exit_code=$? +echo "$addresses" + +if [ "$exit_code" -ne 0 ]; then + echo "ERROR: Unable to dig service $SERVICE.$NAMESPACE.svc.clusterset.local" + exit $exit_code +fi + +# verify DNS results +checkDNS "$addresses" + +echo "confirmed dns resolution" +exit 0 diff --git a/integration/kind-test/scripts/ensure-jq.sh b/integration/kind-test/scripts/ensure-jq.sh new file mode 100755 index 00000000..8d354068 --- /dev/null +++ b/integration/kind-test/scripts/ensure-jq.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Ensure jq is available to parse json output. Installs jq on debian/ubuntu + +if ! which -s jq ; then + echo "jq not found, attempting to install" + if ! 
sudo apt-get install -y jq ; then + echo "failed to install jq, ensure it is available before running tests" + exit 1 + fi +fi + +exit 0 diff --git a/integration/kind-test/scripts/run-helper.sh b/integration/kind-test/scripts/run-helper.sh new file mode 100755 index 00000000..fd002efa --- /dev/null +++ b/integration/kind-test/scripts/run-helper.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# Helper to run the tests, passing in different service names + +source ./integration/kind-test/scripts/common.sh + +# create test namespace +$KUBECTL_BIN create namespace "$NAMESPACE" + +# If the IP Type env var is not set, default it to IPv4 +if [[ -z "${ADDRESS_TYPE}" ]]; then + ADDRESS_TYPE="IPv4" +fi + +# ClusterIP service test +./integration/kind-test/scripts/run-tests.sh "$CLUSTERIP_SERVICE" "ClusterSetIP" $ADDRESS_TYPE +exit_code=$? +if [ "$exit_code" -ne 0 ] ; then + echo "ERROR: Testing $CLUSTERIP_SERVICE failed" + exit $exit_code +fi + +sleep 5 + +# Headless service test +./integration/kind-test/scripts/run-tests.sh "$HEADLESS_SERVICE" "Headless" $ADDRESS_TYPE +exit_code=$? +if [ "$exit_code" -ne 0 ] ; then + echo "ERROR: Testing $HEADLESS_SERVICE failed" + exit $exit_code +fi diff --git a/integration/kind-test/scripts/run-tests.sh b/integration/kind-test/scripts/run-tests.sh new file mode 100755 index 00000000..95eab7bc --- /dev/null +++ b/integration/kind-test/scripts/run-tests.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash + +# Runs the AWS Cloud Map MCS Controller for K8s as a background process and tests that services have been exported + +source ./integration/kind-test/scripts/common.sh +export SERVICE=$1 +export SERVICE_TYPE=$2 +export IP_TYPE=$3 + +# Deploy pods +$KUBECTL_BIN apply -f "$KIND_CONFIGS/e2e-deployment.yaml" +# Get deployment +deployment=$($KUBECTL_BIN get deployment --namespace "$NAMESPACE" -o json | jq -r '.items[0].metadata.name') + +printf "\n***Testing Service: $SERVICE***\n" + +$KUBECTL_BIN apply -f "$KIND_CONFIGS/$SERVICE.yaml" + +if ! 
endpts=$(./integration/shared/scripts/poll-endpoints.sh "$EXPECTED_ENDPOINT_COUNT") ; then + exit $? +fi + +mkdir -p "$LOGS" +./bin/manager --zap-devel=true --zap-time-encoding=rfc3339 &> "$LOGS/ctl.log" & +CTL_PID=$! +echo "controller PID:$CTL_PID" + +go run $SCENARIOS/runner/main.go $NAMESPACE $SERVICE $CLUSTERID1 $CLUSTERSETID1 $ENDPT_PORT $SERVICE_PORT $SERVICE_TYPE $IP_TYPE "$endpts" +exit_code=$? + +if [ "$exit_code" -eq 0 ] ; then + ./integration/shared/scripts/test-import.sh "$EXPECTED_ENDPOINT_COUNT" "$endpts" + exit_code=$? +fi + +if [ "$exit_code" -eq 0 ] ; then + ./integration/kind-test/scripts/dns-test.sh "$EXPECTED_ENDPOINT_COUNT" + exit_code=$? +fi + +if [ "$exit_code" -eq 0 ] ; then + ./integration/kind-test/scripts/curl-test.sh "$deployment" + exit_code=$? +fi + +echo "sleeping..." +sleep 2 + +if [ "$exit_code" -eq 0 ] ; then + echo "scaling the deployment $deployment to $UPDATED_ENDPOINT_COUNT" + $KUBECTL_BIN scale deployment/"$deployment" --replicas="$UPDATED_ENDPOINT_COUNT" --namespace "$NAMESPACE" + exit_code=$? +fi + +if [ "$exit_code" -eq 0 ] ; then + if ! updated_endpoints=$(./integration/shared/scripts/poll-endpoints.sh "$UPDATED_ENDPOINT_COUNT") ; then + exit $? + fi +fi + +if [ "$exit_code" -eq 0 ] ; then + go run $SCENARIOS/runner/main.go $NAMESPACE $SERVICE $CLUSTERID1 $CLUSTERSETID1 $ENDPT_PORT $SERVICE_PORT $SERVICE_TYPE $IP_TYPE "$updated_endpoints" + exit_code=$? +fi + +if [ "$exit_code" -eq 0 ] ; then + ./integration/shared/scripts/test-import.sh "$UPDATED_ENDPOINT_COUNT" "$updated_endpoints" + exit_code=$? +fi + +if [ "$exit_code" -eq 0 ] ; then + ./integration/kind-test/scripts/dns-test.sh "$UPDATED_ENDPOINT_COUNT" + exit_code=$? +fi + +echo "Test Successful. Cleaning up..." 
+ +# Remove the deployment and delete service (should also delete ServiceExport) +if [ "$exit_code" -eq 0 ] ; then + $KUBECTL_BIN delete -f "$KIND_CONFIGS/e2e-deployment.yaml" + $KUBECTL_BIN delete Service $SERVICE -n $NAMESPACE + # TODO: verify service export is not found + # TODO: verify cloudmap resources are cleaned up +fi + +echo "killing controller PID:$CTL_PID" +kill $CTL_PID +exit $exit_code diff --git a/integration/kind-test/scripts/setup-kind.sh b/integration/kind-test/scripts/setup-kind.sh new file mode 100755 index 00000000..2b37ff52 --- /dev/null +++ b/integration/kind-test/scripts/setup-kind.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# Builds the AWS Cloud Map MCS Controller for K8s, provisions a Kubernetes clusters with Kind, +# installs Cloud Map CRDs and controller into the cluster and applies export and deployment configs. + +set -e + +source ./integration/kind-test/scripts/common.sh + +./integration/kind-test/scripts/ensure-jq.sh + +# If the IP Type env var is not set, default it to IPv4 +if [[ -z "${ADDRESS_TYPE}" ]]; then + ADDRESS_TYPE="IPv4" +fi + +echo "ADDRESS_TYPE: $ADDRESS_TYPE" +if [[ $ADDRESS_TYPE == "IPv4" ]]; then + $KIND_BIN create cluster --name "$KIND_SHORT" --image "$IMAGE" +elif [[ $ADDRESS_TYPE == "IPv6" ]]; then + $KIND_BIN create cluster --name "$KIND_SHORT" --image "$IMAGE" --config=./integration/kind-test/configs/ipv6.yaml +else + echo "ADDRESS_TYPE invalid" +fi + +$KUBECTL_BIN config use-context "$CLUSTER" +make install + +# Install CoreDNS plugin +$KUBECTL_BIN apply -f "$SHARED_CONFIGS/coredns-clusterrole.yaml" +$KUBECTL_BIN apply -f "$SHARED_CONFIGS/coredns-configmap.yaml" +$KUBECTL_BIN apply -f "$KIND_CONFIGS/coredns-deployment.yaml" + +# Add ClusterId and ClusterSetId +$KUBECTL_BIN apply -f "$KIND_CONFIGS/e2e-clusterproperty.yaml" diff --git a/integration/shared/configs/coredns-clusterrole.yaml b/integration/shared/configs/coredns-clusterrole.yaml new file mode 100644 index 00000000..ac2e542a --- /dev/null +++ 
b/integration/shared/configs/coredns-clusterrole.yaml @@ -0,0 +1,69 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + eks.amazonaws.com/component: coredns + k8s-app: kube-dns + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +- apiGroups: + - about.k8s.io + resources: + - clusterproperties + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - multicluster.x-k8s.io + resources: + - serviceimports + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - multicluster.x-k8s.io + resources: + - serviceexports + verbs: + - create + - get + - list + - patch + - update + - watch \ No newline at end of file diff --git a/integration/shared/configs/coredns-configmap.yaml b/integration/shared/configs/coredns-configmap.yaml new file mode 100644 index 00000000..dfdf9c7c --- /dev/null +++ b/integration/shared/configs/coredns-configmap.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +data: + Corefile: | + .:53 { + errors + health + multicluster clusterset.local + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + forward . 
/etc/resolv.conf + cache 30 + loop + reload + loadbalance + } +kind: ConfigMap +metadata: + annotations: + labels: + eks.amazonaws.com/component: coredns + k8s-app: kube-dns + name: coredns + namespace: kube-system diff --git a/integration/shared/scenarios/export_service.go b/integration/shared/scenarios/export_service.go new file mode 100644 index 00000000..535faaee --- /dev/null +++ b/integration/shared/scenarios/export_service.go @@ -0,0 +1,139 @@ +package scenarios + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/cloudmap" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + "github.com/aws/aws-sdk-go-v2/aws" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" +) + +const ( + defaultScenarioPollInterval = 10 * time.Second + defaultScenarioPollTimeout = 2 * time.Minute +) + +// ExportServiceScenario defines an integration test against a service export to check creation of namespace, service, +// and endpoint export. +type ExportServiceScenario interface { + // Run executes the service export integration test scenario, returning any error. 
+ Run() error +} + +type exportServiceScenario struct { + sdClient cloudmap.ServiceDiscoveryClient + expectedSvc model.Service +} + +func NewExportServiceScenario(cfg *aws.Config, nsName string, svcName string, clusterId string, clusterSetId string, portStr string, servicePortStr string, serviceType string, addressTypeStr string, ips string) (ExportServiceScenario, error) { + endpts := make([]*model.Endpoint, 0) + + port, parseError := strconv.ParseUint(portStr, 10, 16) + if parseError != nil { + return nil, parseError + } + servicePort, parseError := strconv.ParseUint(servicePortStr, 10, 16) + if parseError != nil { + return nil, parseError + } + addressType, parseError := model.GetAddressTypeFromString(addressTypeStr) + if parseError != nil { + return nil, parseError + } + + for _, ip := range strings.Split(ips, ",") { + endpointPort := model.Port{ + Port: int32(port), + Protocol: string(v1.ProtocolTCP), + } + endpts = append(endpts, &model.Endpoint{ + Id: model.EndpointIdFromIPAddressAndPort(ip, endpointPort), + IP: ip, + AddressType: addressType, + ServicePort: model.Port{ + Port: int32(servicePort), + TargetPort: portStr, + Protocol: string(v1.ProtocolTCP), + }, + Ready: true, + EndpointPort: endpointPort, + ClusterId: clusterId, + ClusterSetId: clusterSetId, + ServiceType: model.ServiceType(serviceType), + Attributes: make(map[string]string), + }) + } + + return &exportServiceScenario{ + sdClient: cloudmap.NewServiceDiscoveryClientWithCustomCache(cfg, + &cloudmap.SdCacheConfig{ + NsTTL: time.Second, + SvcTTL: time.Second, + EndptTTL: time.Second, + }, model.NewClusterUtilsWithValues(clusterId, clusterSetId)), + expectedSvc: model.Service{ + Namespace: nsName, + Name: svcName, + Endpoints: endpts, + }, + }, nil +} + +func (e *exportServiceScenario) Run() error { + fmt.Printf("Seeking expected service: %v\n", e.expectedSvc) + + return wait.Poll(defaultScenarioPollInterval, defaultScenarioPollTimeout, func() (done bool, err error) { + fmt.Println("Polling 
service...") + cmSvc, err := e.sdClient.GetService(context.TODO(), e.expectedSvc.Namespace, e.expectedSvc.Name) + if common.IsUnknown(err) { + return true, err + } + + if common.IsNotFound(err) { + fmt.Println("Service not found.") + return false, nil + } + + fmt.Printf("Found service: %+v\n", cmSvc) + return e.compareEndpoints(cmSvc.Endpoints), nil + }) +} + +func (e *exportServiceScenario) compareEndpoints(cmEndpoints []*model.Endpoint) bool { + if len(e.expectedSvc.Endpoints) != len(cmEndpoints) { + fmt.Println("Endpoints do not match.") + return false + } + + for _, expected := range e.expectedSvc.Endpoints { + match := false + for _, actual := range cmEndpoints { + // Ignore K8S instance attribute for the purpose of this test. + delete(actual.Attributes, model.K8sVersionAttr) + // Ignore ServiceExportCreationTimestamp attribute for the purpose of this test by setting value to 0. + actual.ServiceExportCreationTimestamp = 0 + // Ignore Nodename and Hostname, as they can be platform dependent + actual.Nodename = "" + actual.Hostname = "" + if expected.Equals(actual) { + match = true + break + } + } + if !match { + fmt.Println("Endpoints do not match.") + return false + } + } + + fmt.Println("Endpoints match.") + return true +} diff --git a/integration/shared/scenarios/runner/main.go b/integration/shared/scenarios/runner/main.go new file mode 100644 index 00000000..0889445f --- /dev/null +++ b/integration/shared/scenarios/runner/main.go @@ -0,0 +1,56 @@ +package main + +import ( + "context" + "fmt" + "os" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/integration/shared/scenarios" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" +) + +func main() { + if len(os.Args) != 10 { + fmt.Println("Expected namespace, service, clusterId, clusterSetId, endpoint port, service port, serviceType, endpoint AddressType, and endpoint IP list as arguments") + os.Exit(1) + } + + nsName := os.Args[1] + svcName := os.Args[2] + clusterId := 
os.Args[3] + clusterSetId := os.Args[4] + port := os.Args[5] + servicePort := os.Args[6] + serviceType := os.Args[7] + addressType := os.Args[8] + ips := os.Args[9] + + testServiceExport(nsName, svcName, clusterId, clusterSetId, port, servicePort, serviceType, addressType, ips) +} + +func testServiceExport(nsName string, svcName string, clusterId string, clusterSetId string, port string, servicePort string, serviceType string, addressType string, ips string) { + fmt.Printf("Testing service export integration for namespace %s and service %s\n", nsName, svcName) + + export, err := scenarios.NewExportServiceScenario(getAwsConfig(), nsName, svcName, clusterId, clusterSetId, port, servicePort, serviceType, addressType, ips) + if err != nil { + fmt.Printf("Failed to setup service export integration test scenario: %s", err.Error()) + os.Exit(1) + } + + if err := export.Run(); err != nil { + fmt.Printf("Service export integration test scenario failed: %s", err.Error()) + os.Exit(1) + } +} + +func getAwsConfig() *aws.Config { + awsCfg, err := config.LoadDefaultConfig(context.TODO()) + + if err != nil { + fmt.Printf("unable to configure AWS session: %s", err.Error()) + os.Exit(1) + } + + return &awsCfg +} diff --git a/integration/shared/scripts/cleanup-cloudmap.sh b/integration/shared/scripts/cleanup-cloudmap.sh new file mode 100755 index 00000000..5ecf0d34 --- /dev/null +++ b/integration/shared/scripts/cleanup-cloudmap.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +# Deletes all AWS Cloud Map resources used for integration test. 
+ +set -eo pipefail + +go run ./integration/janitor/runner/main.go "$NAMESPACE" "$CLUSTERID1" "$CLUSTERSETID1" diff --git a/integration/shared/scripts/poll-endpoints.sh b/integration/shared/scripts/poll-endpoints.sh new file mode 100755 index 00000000..eb6f4f40 --- /dev/null +++ b/integration/shared/scripts/poll-endpoints.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# Poll for endpoints to become active + +set -e + +endpt_count=0 +poll_count=0 +while ((endpt_count < $1)) +do + if ((poll_count++ > 30)) ; then + echo "timed out polling for endpoints" >&2 + exit 1 + fi + + sleep 2 + if ! addresses=$($KUBECTL_BIN get endpointslices -o json --namespace "$NAMESPACE" | \ + jq --arg SERVICE "$SERVICE" '.items[] | select(.metadata.ownerReferences[].name==$SERVICE) | .endpoints[].addresses[0]' 2> /dev/null) + then + # no endpoints ready + continue + fi + + endpt_count=$(echo "$addresses" | wc -l | xargs) +done + +echo "$addresses" | tr -d '"' | paste -sd "," - +echo "matched number of endpoints to expected count" >&2 +exit 0 diff --git a/integration/shared/scripts/test-import.sh b/integration/shared/scripts/test-import.sh new file mode 100755 index 00000000..d496bcf8 --- /dev/null +++ b/integration/shared/scripts/test-import.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +# Test service imports were created during e2e test + +set -e + +expected_endpoint_count=$1 +endpoints=$2 +echo "checking service imports..." 
+ +import_count=0 +poll_count=0 +while ((import_count < expected_endpoint_count)) +do + sleep 1 + if ((poll_count++ > 30)) ; then + echo "timed out polling for import endpoints" + exit 1 + fi + + imports=$($KUBECTL_BIN get endpointslices -o json --namespace $NAMESPACE | \ + jq '.items[] | select(.metadata.ownerReferences[].name | startswith("imported")) | .endpoints[].addresses[0]') + echo "import endpoint list from kubectl:" + echo "$imports" + + import_count=$(echo "$imports" | wc -l | xargs) +done + +echo "$imports" | tr -d '"' | while read -r import; do + echo "checking import: $import" + if ! echo "$endpoints" | grep -q "$import" ; then + echo "exported endpoint not found: $import" + exit 1 + fi +done + +if [ $? -ne 0 ]; then + exit $? +fi + +echo "matched all imports to exported endpoints" +exit 0 diff --git a/main.go b/main.go index 741d5826..0a075630 100644 --- a/main.go +++ b/main.go @@ -1,28 +1,15 @@ -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - package main import ( "context" "flag" + "os" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/cloudmap" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/version" "github.com/aws/aws-sdk-go-v2/config" - "os" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. 
@@ -35,20 +22,23 @@ import ( "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" - multiclusterv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/api/v1alpha1" - "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/controllers" + aboutv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/about/v1alpha1" + multiclusterv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/multicluster/v1alpha1" + multiclustercontrollers "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/controllers/multicluster" // +kubebuilder:scaffold:imports ) var ( - scheme = runtime.NewScheme() - setupLog = ctrl.Log.WithName("setup") + scheme = runtime.NewScheme() + log = ctrl.Log.WithName("main") ) func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(multiclusterv1alpha1.AddToScheme(scheme)) + + utilruntime.Must(aboutv1alpha1.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } @@ -61,16 +51,18 @@ func main() { flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. "+ "Enabling this will ensure there is only one active controller manager.") - opts := zap.Options{ - Development: true, - } + + // Add the zap logger flag set to the CLI. The flag set must + // be added before calling flag.Parse(). 
+ opts := zap.Options{} opts.BindFlags(flag.CommandLine) + flag.Parse() ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) v := version.GetVersion() - setupLog.Info("starting AWS Cloud Map MCS Controller for K8s", "version", v) + log.Info("starting AWS Cloud Map MCS Controller for K8s", "version", v) mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, @@ -78,57 +70,63 @@ func main() { Port: 9443, HealthProbeBindAddress: probeAddr, LeaderElection: enableLeaderElection, - LeaderElectionID: "db692913.x-k8s.io", + LeaderElectionID: "aws-cloud-map-mcs-controller-for-k8s-lock", }) if err != nil { - setupLog.Error(err, "unable to start manager") + log.Error(err, "unable to start manager") os.Exit(1) } + log.Info("configuring AWS session") + // GO sdk will look for region in order 1) AWS_REGION env var, 2) ~/.aws/config file, 3) EC2 IMDS + awsCfg, err := config.LoadDefaultConfig(context.TODO(), config.WithEC2IMDSRegion()) - // TODO: configure session - awsCfg, err := config.LoadDefaultConfig(context.TODO(), - config.WithRegion(os.Getenv("AWS_REGION")), - ) - if err != nil { - setupLog.Error(err, "unable to configure AWS session") + if err != nil || awsCfg.Region == "" { + log.Error(err, "unable to configure AWS session", "AWS_REGION", awsCfg.Region) os.Exit(1) } - if err = (&controllers.ServiceExportReconciler{ - Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("ServiceExport"), - Scheme: mgr.GetScheme(), - Cloudmap: cloudmap.NewServiceDiscoveryClient(&awsCfg), + log.Info("Running with AWS region", "AWS_REGION", awsCfg.Region) + + clusterUtils := model.NewClusterUtils(mgr.GetClient()) + serviceDiscoveryClient := cloudmap.NewDefaultServiceDiscoveryClient(&awsCfg, clusterUtils) + + if err = (&multiclustercontrollers.ServiceExportReconciler{ + Client: mgr.GetClient(), + Log: common.NewLogger("controllers", "ServiceExportReconciler"), + Scheme: mgr.GetScheme(), + CloudMap: serviceDiscoveryClient, + ClusterUtils: 
clusterUtils, }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "ServiceExport") + log.Error(err, "unable to create controller", "controller", "ServiceExportReconciler") os.Exit(1) } - cloudMapReconciler := &controllers.CloudMapReconciler{ - Client: mgr.GetClient(), - Cloudmap: cloudmap.NewServiceDiscoveryClient(&awsCfg), - Logger: ctrl.Log.WithName("controllers").WithName("CloudMap"), + cloudMapReconciler := &multiclustercontrollers.CloudMapReconciler{ + Client: mgr.GetClient(), + Cloudmap: serviceDiscoveryClient, + Log: common.NewLogger("controllers", "CloudmapReconciler"), + ClusterUtils: clusterUtils, } if err = mgr.Add(cloudMapReconciler); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "CloudMap") + log.Error(err, "unable to create controller", "controller", "CloudmapReconciler") os.Exit(1) } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { - setupLog.Error(err, "unable to set up health check") + log.Error(err, "unable to set up health check") os.Exit(1) } if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { - setupLog.Error(err, "unable to set up ready check") + log.Error(err, "unable to set up ready check") os.Exit(1) } - setupLog.Info("starting manager") + log.Info("starting manager") if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { - setupLog.Error(err, "problem running manager") + log.Error(err, "problem running manager") os.Exit(1) } } diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 00000000..20aae806 --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,12 @@ +site_name: AWS Cloud Map MCS Controller +repo_name: aws/aws-cloud-map-mcs-controller-for-k8s +repo_url: https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s + +plugins: + - search +theme: + name: material + language: en + favicon: images/cloudmap.svg + logo: images/cloudmap.svg + diff --git 
a/pkg/apis/about/v1alpha1/clusterproperty_types.go b/pkg/apis/about/v1alpha1/clusterproperty_types.go
new file mode 100644
index 00000000..f3fd599e
--- /dev/null
+++ b/pkg/apis/about/v1alpha1/clusterproperty_types.go
@@ -0,0 +1,52 @@
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
+
+// ClusterPropertySpec defines the desired state of ClusterProperty
+type ClusterPropertySpec struct {
+	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
+	// Important: Run "make" to regenerate code after modifying this file
+
+	// ClusterProperty value
+	// +kubebuilder:validation:MaxLength=128000
+	// +kubebuilder:validation:MinLength=1
+	Value string `json:"value"`
+}
+
+// ClusterPropertyStatus defines the observed state of ClusterProperty
+type ClusterPropertyStatus struct {
+	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+	// Important: Run "make" to regenerate code after modifying this file
+}
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+//+kubebuilder:resource:scope=Cluster
+
+// ClusterProperty is the Schema for the clusterproperties API
+// +kubebuilder:printcolumn:name="value",type=string,JSONPath=`.spec.value`
+// +kubebuilder:printcolumn:name="age",type=date,JSONPath=`.metadata.creationTimestamp`
+type ClusterProperty struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   ClusterPropertySpec   `json:"spec,omitempty"`
+	Status ClusterPropertyStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// ClusterPropertyList contains a list of ClusterProperty
+type ClusterPropertyList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []ClusterProperty `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&ClusterProperty{}, &ClusterPropertyList{})
+}
+diff --git 
a/pkg/apis/about/v1alpha1/groupversion_info.go b/pkg/apis/about/v1alpha1/groupversion_info.go new file mode 100644 index 00000000..0b2f6df2 --- /dev/null +++ b/pkg/apis/about/v1alpha1/groupversion_info.go @@ -0,0 +1,20 @@ +// Package v1alpha1 contains API Schema definitions for the about v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=about.k8s.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "about.k8s.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/pkg/apis/about/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/about/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..476fb37f --- /dev/null +++ b/pkg/apis/about/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,115 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterProperty) DeepCopyInto(out *ClusterProperty) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterProperty. +func (in *ClusterProperty) DeepCopy() *ClusterProperty { + if in == nil { + return nil + } + out := new(ClusterProperty) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterProperty) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPropertyList) DeepCopyInto(out *ClusterPropertyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterProperty, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPropertyList. +func (in *ClusterPropertyList) DeepCopy() *ClusterPropertyList { + if in == nil { + return nil + } + out := new(ClusterPropertyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterPropertyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ClusterPropertySpec) DeepCopyInto(out *ClusterPropertySpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPropertySpec. +func (in *ClusterPropertySpec) DeepCopy() *ClusterPropertySpec { + if in == nil { + return nil + } + out := new(ClusterPropertySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPropertyStatus) DeepCopyInto(out *ClusterPropertyStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPropertyStatus. +func (in *ClusterPropertyStatus) DeepCopy() *ClusterPropertyStatus { + if in == nil { + return nil + } + out := new(ClusterPropertyStatus) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/api/v1alpha1/groupversion_info.go b/pkg/apis/multicluster/v1alpha1/groupversion_info.go similarity index 56% rename from pkg/api/v1alpha1/groupversion_info.go rename to pkg/apis/multicluster/v1alpha1/groupversion_info.go index dba40d29..44459d03 100644 --- a/pkg/api/v1alpha1/groupversion_info.go +++ b/pkg/apis/multicluster/v1alpha1/groupversion_info.go @@ -1,19 +1,3 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - // Package v1alpha1 contains API Schema definitions for the multicluster v1alpha1 API group // +kubebuilder:object:generate=true // +groupName=multicluster.x-k8s.io diff --git a/pkg/api/v1alpha1/serviceexport_types.go b/pkg/apis/multicluster/v1alpha1/serviceexport_types.go similarity index 79% rename from pkg/api/v1alpha1/serviceexport_types.go rename to pkg/apis/multicluster/v1alpha1/serviceexport_types.go index 7bc8beb6..125ea5c0 100644 --- a/pkg/api/v1alpha1/serviceexport_types.go +++ b/pkg/apis/multicluster/v1alpha1/serviceexport_types.go @@ -1,19 +1,3 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - package v1alpha1 import ( diff --git a/pkg/api/v1alpha1/serviceimport_types.go b/pkg/apis/multicluster/v1alpha1/serviceimport_types.go similarity index 87% rename from pkg/api/v1alpha1/serviceimport_types.go rename to pkg/apis/multicluster/v1alpha1/serviceimport_types.go index d13e7213..91c87478 100644 --- a/pkg/api/v1alpha1/serviceimport_types.go +++ b/pkg/apis/multicluster/v1alpha1/serviceimport_types.go @@ -1,19 +1,3 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - package v1alpha1 import ( @@ -53,7 +37,6 @@ type ServiceImportSpec struct { // +listType=atomic Ports []ServicePort `json:"ports"` // ip will be used as the VIP for this service when type is ClusterSetIP. - // +kubebuilder:validation:MaxItems:=1 // +optional IPs []string `json:"ips,omitempty"` // type defines the type of this service. diff --git a/pkg/api/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/multicluster/v1alpha1/zz_generated.deepcopy.go similarity index 99% rename from pkg/api/v1alpha1/zz_generated.deepcopy.go rename to pkg/apis/multicluster/v1alpha1/zz_generated.deepcopy.go index f74564df..1eca515f 100644 --- a/pkg/api/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/multicluster/v1alpha1/zz_generated.deepcopy.go @@ -23,7 +23,7 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/pkg/cloudmap/api.go b/pkg/cloudmap/api.go new file mode 100644 index 00000000..3ed865a7 --- /dev/null +++ b/pkg/cloudmap/api.go @@ -0,0 +1,250 @@ +package cloudmap + +import ( + "context" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + "github.com/aws/aws-sdk-go-v2/aws" + sd "github.com/aws/aws-sdk-go-v2/service/servicediscovery" + "github.com/aws/aws-sdk-go-v2/service/servicediscovery/types" +) + +const ( + defaultServiceTTLInSeconds int64 = 60 +) + +// ServiceDiscoveryApi handles the AWS Cloud Map API request and response processing logic, and 
converts results to +// internal data structures. It manages all interactions with the AWS SDK. +type ServiceDiscoveryApi interface { + // GetNamespaceMap returns a map of all namespaces in the Cloud Map account indexed by namespace name. + GetNamespaceMap(ctx context.Context) (namespaces map[string]*model.Namespace, err error) + + // GetServiceIdMap returns a map of all service IDs for a given namespace indexed by service name. + GetServiceIdMap(ctx context.Context, namespaceId string) (serviceIdMap map[string]string, err error) + + // DiscoverInstances returns a list of service instances registered to a given service. + DiscoverInstances(ctx context.Context, nsName string, svcName string, queryParameters map[string]string) (insts []types.HttpInstanceSummary, err error) + + // GetOperation returns an operation. + GetOperation(ctx context.Context, operationId string) (operation *types.Operation, err error) + + // CreateHttpNamespace creates a HTTP namespace in AWS Cloud Map for a given name. + CreateHttpNamespace(ctx context.Context, namespaceName string) (operationId string, err error) + + // CreateService creates a named service in AWS Cloud Map under the given namespace. + CreateService(ctx context.Context, namespace model.Namespace, serviceName string) (serviceId string, err error) + + // RegisterInstance registers a service instance in AWS Cloud Map. + RegisterInstance(ctx context.Context, serviceId string, instanceId string, instanceAttrs map[string]string) (operationId string, err error) + + // DeregisterInstance de-registers a service instance in Cloud Map. + DeregisterInstance(ctx context.Context, serviceId string, instanceId string) (operationId string, err error) +} + +type serviceDiscoveryApi struct { + log common.Logger + awsFacade AwsFacade + rateLimiter common.RateLimiter +} + +// NewServiceDiscoveryApiFromConfig creates a new AWS Cloud Map API connection manager from an AWS client config. 
+func NewServiceDiscoveryApiFromConfig(cfg *aws.Config) ServiceDiscoveryApi { + return &serviceDiscoveryApi{ + log: common.NewLogger("cloudmap", "api"), + awsFacade: NewAwsFacadeFromConfig(cfg), + rateLimiter: common.NewDefaultRateLimiter(), + } +} + +func (sdApi *serviceDiscoveryApi) GetNamespaceMap(ctx context.Context) (map[string]*model.Namespace, error) { + err := sdApi.rateLimiter.Wait(ctx, common.ListNamespaces) + if err != nil { + return nil, err + } + + namespaceMap := make(map[string]*model.Namespace) + + pages := sd.NewListNamespacesPaginator(sdApi.awsFacade, &sd.ListNamespacesInput{}) + for pages.HasMorePages() { + output, err := pages.NextPage(ctx) + if err != nil { + return nil, err + } + + for _, ns := range output.Namespaces { + namespaceType := model.ConvertNamespaceType(ns.Type) + if namespaceType.IsUnsupported() { + continue + } + namespaceMap[aws.ToString(ns.Name)] = &model.Namespace{ + Id: aws.ToString(ns.Id), + Name: aws.ToString(ns.Name), + Type: namespaceType, + } + } + } + + return namespaceMap, nil +} + +func (sdApi *serviceDiscoveryApi) GetServiceIdMap(ctx context.Context, nsId string) (map[string]string, error) { + err := sdApi.rateLimiter.Wait(ctx, common.ListServices) + if err != nil { + return nil, err + } + + serviceIdMap := make(map[string]string) + + filter := types.ServiceFilter{ + Name: types.ServiceFilterNameNamespaceId, + Values: []string{nsId}, + } + + pages := sd.NewListServicesPaginator(sdApi.awsFacade, &sd.ListServicesInput{Filters: []types.ServiceFilter{filter}}) + for pages.HasMorePages() { + output, err := pages.NextPage(ctx) + if err != nil { + return nil, err + } + + for _, svc := range output.Services { + serviceIdMap[aws.ToString(svc.Name)] = aws.ToString(svc.Id) + } + } + + return serviceIdMap, nil +} + +func (sdApi *serviceDiscoveryApi) DiscoverInstances(ctx context.Context, nsName string, svcName string, queryParameters map[string]string) (insts []types.HttpInstanceSummary, err error) { + err = 
sdApi.rateLimiter.Wait(ctx, common.DiscoverInstances) + if err != nil { + return nil, err + } + + input := &sd.DiscoverInstancesInput{ + NamespaceName: aws.String(nsName), + ServiceName: aws.String(svcName), + HealthStatus: types.HealthStatusFilterAll, + MaxResults: aws.Int32(1000), + } + if queryParameters != nil { + input.QueryParameters = queryParameters + } + out, err := sdApi.awsFacade.DiscoverInstances(ctx, input) + + if err != nil { + return insts, err + } + + return out.Instances, nil +} + +func (sdApi *serviceDiscoveryApi) GetOperation(ctx context.Context, opId string) (operation *types.Operation, err error) { + err = sdApi.rateLimiter.Wait(ctx, common.GetOperation) + if err != nil { + return nil, err + } + + opResp, err := sdApi.awsFacade.GetOperation(ctx, &sd.GetOperationInput{OperationId: &opId}) + + if err != nil { + return nil, err + } + + return opResp.Operation, nil +} + +func (sdApi *serviceDiscoveryApi) CreateHttpNamespace(ctx context.Context, nsName string) (opId string, err error) { + err = sdApi.rateLimiter.Wait(ctx, common.CreateHttpNamespace) + if err != nil { + return "", err + } + + output, err := sdApi.awsFacade.CreateHttpNamespace(ctx, &sd.CreateHttpNamespaceInput{ + Name: &nsName, + }) + + if err != nil { + return "", err + } + + return aws.ToString(output.OperationId), nil +} + +func (sdApi *serviceDiscoveryApi) CreateService(ctx context.Context, namespace model.Namespace, svcName string) (svcId string, err error) { + err = sdApi.rateLimiter.Wait(ctx, common.CreateService) + if err != nil { + return "", err + } + + var output *sd.CreateServiceOutput + if namespace.Type == model.DnsPrivateNamespaceType { + dnsConfig := sdApi.getDnsConfig() + output, err = sdApi.awsFacade.CreateService(ctx, &sd.CreateServiceInput{ + NamespaceId: &namespace.Id, + DnsConfig: &dnsConfig, + Name: &svcName}) + } else { + output, err = sdApi.awsFacade.CreateService(ctx, &sd.CreateServiceInput{ + NamespaceId: &namespace.Id, + Name: &svcName}) + } + + if err != 
nil { + return "", err + } + + svcId = aws.ToString(output.Service.Id) + sdApi.log.Info("service created", "namespace", namespace.Name, "name", svcName, "id", svcId) + return svcId, nil +} + +func (sdApi *serviceDiscoveryApi) getDnsConfig() types.DnsConfig { + dnsConfig := types.DnsConfig{ + DnsRecords: []types.DnsRecord{ + { + TTL: aws.Int64(defaultServiceTTLInSeconds), + Type: "SRV", + }, + }, + } + return dnsConfig +} + +func (sdApi *serviceDiscoveryApi) RegisterInstance(ctx context.Context, svcId string, instId string, instAttrs map[string]string) (opId string, err error) { + err = sdApi.rateLimiter.Wait(ctx, common.RegisterInstance) + if err != nil { + return "", err + } + + regResp, err := sdApi.awsFacade.RegisterInstance(ctx, &sd.RegisterInstanceInput{ + Attributes: instAttrs, + InstanceId: &instId, + ServiceId: &svcId, + }) + + if err != nil { + return "", err + } + + return aws.ToString(regResp.OperationId), nil +} + +func (sdApi *serviceDiscoveryApi) DeregisterInstance(ctx context.Context, svcId string, instId string) (opId string, err error) { + err = sdApi.rateLimiter.Wait(ctx, common.DeregisterInstance) + if err != nil { + return "", err + } + + deregResp, err := sdApi.awsFacade.DeregisterInstance(ctx, &sd.DeregisterInstanceInput{ + InstanceId: &instId, + ServiceId: &svcId, + }) + + if err != nil { + return "", err + } + + return aws.ToString(deregResp.OperationId), err +} diff --git a/pkg/cloudmap/api_test.go b/pkg/cloudmap/api_test.go new file mode 100644 index 00000000..8990d51b --- /dev/null +++ b/pkg/cloudmap/api_test.go @@ -0,0 +1,304 @@ +package cloudmap + +import ( + "context" + "errors" + "fmt" + "testing" + + aboutv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/about/v1alpha1" + + cloudmapMock "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/mocks/pkg/cloudmap" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + 
"github.com/aws/aws-cloud-map-mcs-controller-for-k8s/test" + "github.com/aws/aws-sdk-go-v2/aws" + sd "github.com/aws/aws-sdk-go-v2/service/servicediscovery" + "github.com/aws/aws-sdk-go-v2/service/servicediscovery/types" + "github.com/go-logr/logr/testr" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/runtime" +) + +func TestNewServiceDiscoveryApi(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + awsFacade := cloudmapMock.NewMockAwsFacade(mockController) + + sdApi := getServiceDiscoveryApi(t, awsFacade) + assert.NotNil(t, sdApi) +} + +func TestServiceDiscoveryApi_GetNamespaceMap_HappyCase(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + awsFacade := cloudmapMock.NewMockAwsFacade(mockController) + sdApi := getServiceDiscoveryApi(t, awsFacade) + + id, name := test.DnsNsId, test.DnsNsName + ns := types.NamespaceSummary{ + Name: &name, + Id: &id, + Type: types.NamespaceTypeDnsPrivate, + } + awsFacade.EXPECT().ListNamespaces(context.TODO(), &sd.ListNamespacesInput{}). + Return(&sd.ListNamespacesOutput{Namespaces: []types.NamespaceSummary{ns}}, nil) + + namespaces, err := sdApi.GetNamespaceMap(context.TODO()) + assert.Nil(t, err, "No error for happy case") + assert.True(t, len(namespaces) == 1) + assert.Equal(t, test.GetTestDnsNamespace(), namespaces[test.DnsNsName]) +} + +func TestServiceDiscoveryApi_GetNamespaceMap_SkipPublicDNSNotSupported(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + awsFacade := cloudmapMock.NewMockAwsFacade(mockController) + sdApi := getServiceDiscoveryApi(t, awsFacade) + + id, name := test.DnsNsId, test.DnsNsName + ns := types.NamespaceSummary{ + Name: &name, + Id: &id, + Type: types.NamespaceTypeDnsPublic, + } + awsFacade.EXPECT().ListNamespaces(context.TODO(), &sd.ListNamespacesInput{}). 
+ Return(&sd.ListNamespacesOutput{Namespaces: []types.NamespaceSummary{ns}}, nil) + + namespaces, err := sdApi.GetNamespaceMap(context.TODO()) + assert.Nil(t, err, "No error for happy case") + assert.Empty(t, namespaces, "Successfully skipped DNS_PUBLIC from the output") +} + +func TestServiceDiscoveryApi_GetServiceIdMap_HappyCase(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + awsFacade := cloudmapMock.NewMockAwsFacade(mockController) + sdApi := getServiceDiscoveryApi(t, awsFacade) + + filter := types.ServiceFilter{ + Name: types.ServiceFilterNameNamespaceId, + Values: []string{test.HttpNsId}, + } + + awsFacade.EXPECT().ListServices(context.TODO(), &sd.ListServicesInput{Filters: []types.ServiceFilter{filter}}). + Return(&sd.ListServicesOutput{Services: []types.ServiceSummary{ + {Id: aws.String(test.SvcId), Name: aws.String(test.SvcName)}, + }}, nil) + + svcs, err := sdApi.GetServiceIdMap(context.TODO(), test.HttpNsId) + assert.Nil(t, err, "No error for happy case") + assert.True(t, len(svcs) == 1) + assert.Equal(t, svcs[test.SvcName], test.SvcId) +} + +func TestServiceDiscoveryApi_DiscoverInstances_HappyCase(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + awsFacade := cloudmapMock.NewMockAwsFacade(mockController) + sdApi := getServiceDiscoveryApi(t, awsFacade) + + awsFacade.EXPECT().DiscoverInstances(context.TODO(), + &sd.DiscoverInstancesInput{ + NamespaceName: aws.String(test.HttpNsName), + ServiceName: aws.String(test.SvcName), + HealthStatus: types.HealthStatusFilterAll, + MaxResults: aws.Int32(1000), + QueryParameters: map[string]string{ + model.ClusterSetIdAttr: test.ClusterSet, + }, + }). 
+ Return(&sd.DiscoverInstancesOutput{ + Instances: []types.HttpInstanceSummary{ + {InstanceId: aws.String(test.EndptId1)}, + {InstanceId: aws.String(test.EndptId2)}, + }, + }, nil) + + insts, err := sdApi.DiscoverInstances(context.TODO(), test.HttpNsName, test.SvcName, map[string]string{model.ClusterSetIdAttr: test.ClusterSet}) + assert.Nil(t, err, "No error for happy case") + assert.True(t, len(insts) == 2) + assert.Equal(t, test.EndptId1, *insts[0].InstanceId) + assert.Equal(t, test.EndptId2, *insts[1].InstanceId) +} + +func TestServiceDiscoveryApi_GetOperation_HappyCase(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + awsFacade := cloudmapMock.NewMockAwsFacade(mockController) + sdApi := getServiceDiscoveryApi(t, awsFacade) + + expectedOp := &types.Operation{Id: aws.String(test.OpId1), Status: types.OperationStatusPending} + awsFacade.EXPECT().GetOperation(context.TODO(), &sd.GetOperationInput{OperationId: aws.String(test.OpId1)}). + Return(&sd.GetOperationOutput{Operation: expectedOp}, nil) + + op, err := sdApi.GetOperation(context.TODO(), test.OpId1) + assert.Nil(t, err, "No error for happy case") + assert.Equal(t, expectedOp, op) +} + +func TestServiceDiscoveryApi_CreateHttNamespace_HappyCase(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + awsFacade := cloudmapMock.NewMockAwsFacade(mockController) + sdApi := getServiceDiscoveryApi(t, awsFacade) + + awsFacade.EXPECT().CreateHttpNamespace(context.TODO(), &sd.CreateHttpNamespaceInput{Name: aws.String(test.HttpNsName)}). 
+ Return(&sd.CreateHttpNamespaceOutput{OperationId: aws.String(test.OpId1)}, nil) + + opId, err := sdApi.CreateHttpNamespace(context.TODO(), test.HttpNsName) + assert.Nil(t, err, "No error for happy case") + assert.Equal(t, test.OpId1, opId) +} + +func TestServiceDiscoveryApi_CreateService_CreateForHttpNamespace(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + awsFacade := cloudmapMock.NewMockAwsFacade(mockController) + sdApi := getServiceDiscoveryApi(t, awsFacade) + + nsId, svcId, svcName := test.HttpNsId, test.SvcId, test.SvcName + awsFacade.EXPECT().CreateService(context.TODO(), &sd.CreateServiceInput{ + Name: &svcName, + NamespaceId: &nsId, + }). + Return(&sd.CreateServiceOutput{ + Service: &types.Service{ + Id: &svcId, + }, + }, nil) + + retSvcId, err := sdApi.CreateService(context.TODO(), *test.GetTestHttpNamespace(), svcName) + assert.Nil(t, err) + assert.Equal(t, svcId, retSvcId, "Successfully created service") +} + +func TestServiceDiscoveryApi_CreateService_CreateForDnsNamespace(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + awsFacade := cloudmapMock.NewMockAwsFacade(mockController) + sdApi := getServiceDiscoveryApi(t, awsFacade) + + nsId, svcId, svcName := test.DnsNsId, test.SvcId, test.SvcName + awsFacade.EXPECT().CreateService(context.TODO(), &sd.CreateServiceInput{ + Name: &svcName, + NamespaceId: &nsId, + DnsConfig: &types.DnsConfig{ + DnsRecords: []types.DnsRecord{{ + TTL: aws.Int64(60), + Type: "SRV", + }}, + }, + }). 
+ Return(&sd.CreateServiceOutput{ + Service: &types.Service{ + Id: &svcId, + }, + }, nil) + + retSvcId, err := sdApi.CreateService(context.TODO(), *test.GetTestDnsNamespace(), svcName) + assert.Nil(t, err) + assert.Equal(t, svcId, retSvcId, "Successfully created service") +} + +func TestServiceDiscoveryApi_CreateService_ThrowError(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + awsFacade := cloudmapMock.NewMockAwsFacade(mockController) + sdApi := getServiceDiscoveryApi(t, awsFacade) + + nsId, svcName := test.HttpNsId, test.SvcName + awsFacade.EXPECT().CreateService(context.TODO(), &sd.CreateServiceInput{ + Name: &svcName, + NamespaceId: &nsId, + }). + Return(nil, fmt.Errorf("dummy error")) + + retSvcId, err := sdApi.CreateService(context.TODO(), *test.GetTestHttpNamespace(), svcName) + assert.Empty(t, retSvcId) + assert.Equal(t, "dummy error", err.Error(), "Got error") +} + +func TestServiceDiscoveryApi_RegisterInstance_HappyCase(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + attrs := map[string]string{"a": "b"} + + awsFacade := cloudmapMock.NewMockAwsFacade(mockController) + awsFacade.EXPECT().RegisterInstance(context.TODO(), + &sd.RegisterInstanceInput{ + ServiceId: aws.String(test.SvcId), + InstanceId: aws.String(test.EndptId1), + Attributes: attrs}). 
+ Return(&sd.RegisterInstanceOutput{OperationId: aws.String(test.OpId1)}, nil) + + sdApi := getServiceDiscoveryApi(t, awsFacade) + opId, err := sdApi.RegisterInstance(context.TODO(), test.SvcId, test.EndptId1, attrs) + assert.Nil(t, err) + assert.Equal(t, test.OpId1, opId) +} + +func TestServiceDiscoveryApi_RegisterInstance_Error(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + sdkErr := errors.New("fail") + awsFacade := cloudmapMock.NewMockAwsFacade(mockController) + awsFacade.EXPECT().RegisterInstance(context.TODO(), gomock.Any()).Return(nil, sdkErr) + + sdApi := getServiceDiscoveryApi(t, awsFacade) + _, err := sdApi.RegisterInstance(context.TODO(), test.SvcId, test.EndptId1, map[string]string{}) + assert.Equal(t, sdkErr, err) +} + +func TestServiceDiscoveryApi_DeregisterInstance_HappyCase(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + awsFacade := cloudmapMock.NewMockAwsFacade(mockController) + awsFacade.EXPECT().DeregisterInstance(context.TODO(), + &sd.DeregisterInstanceInput{ + ServiceId: aws.String(test.SvcId), + InstanceId: aws.String(test.EndptId1)}). 
+ Return(&sd.DeregisterInstanceOutput{OperationId: aws.String(test.OpId1)}, nil) + + sdApi := getServiceDiscoveryApi(t, awsFacade) + opId, err := sdApi.DeregisterInstance(context.TODO(), test.SvcId, test.EndptId1) + assert.Nil(t, err) + assert.Equal(t, test.OpId1, opId) +} + +func TestServiceDiscoveryApi_DeregisterInstance_Error(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + sdkErr := errors.New("fail") + awsFacade := cloudmapMock.NewMockAwsFacade(mockController) + awsFacade.EXPECT().DeregisterInstance(context.TODO(), gomock.Any()).Return(nil, sdkErr) + + sdApi := getServiceDiscoveryApi(t, awsFacade) + _, err := sdApi.DeregisterInstance(context.TODO(), test.SvcId, test.EndptId1) + assert.Equal(t, sdkErr, err) +} + +func getServiceDiscoveryApi(t *testing.T, awsFacade *cloudmapMock.MockAwsFacade) ServiceDiscoveryApi { + scheme := runtime.NewScheme() + scheme.AddKnownTypes(aboutv1alpha1.GroupVersion, &aboutv1alpha1.ClusterProperty{}) + return &serviceDiscoveryApi{ + log: common.NewLoggerWithLogr(testr.New(t)), + awsFacade: awsFacade, + rateLimiter: common.NewDefaultRateLimiter(), + } +} diff --git a/pkg/cloudmap/aws_facade.go b/pkg/cloudmap/aws_facade.go new file mode 100644 index 00000000..76750c41 --- /dev/null +++ b/pkg/cloudmap/aws_facade.go @@ -0,0 +1,55 @@ +package cloudmap + +import ( + "context" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/version" + "github.com/aws/aws-sdk-go-v2/aws/middleware" + + "github.com/aws/aws-sdk-go-v2/aws" + sd "github.com/aws/aws-sdk-go-v2/service/servicediscovery" +) + +// AwsFacade wraps the minimal surface area of ServiceDiscovery API calls for the AWS SDK +// required by the AWS Cloud Map client. This enables mock generation for unit testing. +type AwsFacade interface { + // ListNamespaces provides ServiceDiscovery ListNamespaces wrapper interface for paginator. 
+ ListNamespaces(context.Context, *sd.ListNamespacesInput, ...func(*sd.Options)) (*sd.ListNamespacesOutput, error) + + // ListServices provides ServiceDiscovery ListServices wrapper interface for paginator. + ListServices(context.Context, *sd.ListServicesInput, ...func(options *sd.Options)) (*sd.ListServicesOutput, error) + + // ListOperations provides ServiceDiscovery ListOperations wrapper interface for paginator. + ListOperations(context.Context, *sd.ListOperationsInput, ...func(*sd.Options)) (*sd.ListOperationsOutput, error) + + // GetOperation provides ServiceDiscovery GetOperation wrapper interface. + GetOperation(context.Context, *sd.GetOperationInput, ...func(*sd.Options)) (*sd.GetOperationOutput, error) + + // CreateHttpNamespace provides ServiceDiscovery CreateHttpNamespace wrapper interface. + CreateHttpNamespace(context.Context, *sd.CreateHttpNamespaceInput, ...func(*sd.Options)) (*sd.CreateHttpNamespaceOutput, error) + + // CreateService provides ServiceDiscovery CreateService wrapper interface. + CreateService(context.Context, *sd.CreateServiceInput, ...func(*sd.Options)) (*sd.CreateServiceOutput, error) + + // RegisterInstance provides ServiceDiscovery RegisterInstance wrapper interface. + RegisterInstance(context.Context, *sd.RegisterInstanceInput, ...func(*sd.Options)) (*sd.RegisterInstanceOutput, error) + + // DeregisterInstance provides ServiceDiscovery DeregisterInstance wrapper interface. + DeregisterInstance(context.Context, *sd.DeregisterInstanceInput, ...func(*sd.Options)) (*sd.DeregisterInstanceOutput, error) + + // DiscoverInstances provides ServiceDiscovery DiscoverInstances wrapper interface. + DiscoverInstances(context.Context, *sd.DiscoverInstancesInput, ...func(*sd.Options)) (*sd.DiscoverInstancesOutput, error) +} + +type awsFacade struct { + *sd.Client +} + +// NewAwsFacadeFromConfig creates a new AWS facade from an AWS client config. 
+func NewAwsFacadeFromConfig(cfg *aws.Config) AwsFacade { + sdClient := sd.NewFromConfig(*cfg, func(options *sd.Options) { + // Append User-Agent to all the request, the format is going to be aws-cloud-map-mcs-controller-for-k8s/0.0.0-abc + options.APIOptions = append(options.APIOptions, middleware.AddUserAgentKeyValue(version.GetUserAgentKey(), version.GetUserAgentValue())) + }) + return &awsFacade{sdClient} +} diff --git a/pkg/cloudmap/cache.go b/pkg/cloudmap/cache.go new file mode 100644 index 00000000..8c75b83b --- /dev/null +++ b/pkg/cloudmap/cache.go @@ -0,0 +1,152 @@ +package cloudmap + +import ( + "errors" + "fmt" + "time" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + "k8s.io/apimachinery/pkg/util/cache" +) + +const ( + nsKey = "ns-map" + svcKeyPrefix = "svc-map" + + defaultCacheSize = 2048 + defaultNsTTL = 10 * time.Second + defaultSvcTTL = 10 * time.Second + defaultEndptTTL = 5 * time.Second +) + +type ServiceDiscoveryClientCache interface { + GetNamespaceMap() (namespaces map[string]*model.Namespace, found bool) + CacheNamespaceMap(namespaces map[string]*model.Namespace) + EvictNamespaceMap() + GetServiceIdMap(namespaceName string) (serviceIdMap map[string]string, found bool) + CacheServiceIdMap(namespaceName string, serviceIdMap map[string]string) + EvictServiceIdMap(namespaceName string) + GetEndpoints(namespaceName string, serviceName string) (endpoints []*model.Endpoint, found bool) + CacheEndpoints(namespaceName string, serviceName string, endpoints []*model.Endpoint) + EvictEndpoints(namespaceName string, serviceName string) +} + +type sdCache struct { + log common.Logger + defaultCache *cache.LRUExpireCache + endpointsCache *cache.LRUExpireCache + config *SdCacheConfig +} + +type SdCacheConfig struct { + NsTTL time.Duration + SvcTTL time.Duration + EndptTTL time.Duration +} + +func NewServiceDiscoveryClientCache(cacheConfig *SdCacheConfig) 
ServiceDiscoveryClientCache { + return &sdCache{ + log: common.NewLogger("cloudmap"), + defaultCache: cache.NewLRUExpireCache(defaultCacheSize), + endpointsCache: cache.NewLRUExpireCache(defaultCacheSize), + config: cacheConfig, + } +} + +func NewDefaultServiceDiscoveryClientCache() ServiceDiscoveryClientCache { + return NewServiceDiscoveryClientCache( + &SdCacheConfig{ + NsTTL: defaultNsTTL, + SvcTTL: defaultSvcTTL, + EndptTTL: defaultEndptTTL, + }) +} + +func (sdCache *sdCache) GetNamespaceMap() (namespaceMap map[string]*model.Namespace, found bool) { + entry, exists := sdCache.defaultCache.Get(nsKey) + if !exists { + return nil, false + } + + namespaceMap, ok := entry.(map[string]*model.Namespace) + if !ok { + sdCache.log.Error(errors.New("failed to retrieve namespaceMap from cache"), "") + sdCache.defaultCache.Remove(nsKey) + return nil, false + } + + return namespaceMap, true +} + +func (sdCache *sdCache) CacheNamespaceMap(namespaces map[string]*model.Namespace) { + sdCache.defaultCache.Add(nsKey, namespaces, sdCache.config.NsTTL) +} + +func (sdCache *sdCache) EvictNamespaceMap() { + sdCache.defaultCache.Remove(nsKey) +} + +func (sdCache *sdCache) GetServiceIdMap(nsName string) (serviceIdMap map[string]string, found bool) { + key := sdCache.buildSvcKey(nsName) + entry, exists := sdCache.defaultCache.Get(key) + if !exists { + return nil, false + } + + serviceIdMap, ok := entry.(map[string]string) + if !ok { + err := fmt.Errorf("failed to retrieve service IDs from cache") + sdCache.log.Error(err, err.Error(), "namespace", nsName) + sdCache.defaultCache.Remove(key) + return nil, false + } + + return serviceIdMap, true +} + +func (sdCache *sdCache) CacheServiceIdMap(nsName string, serviceIdMap map[string]string) { + key := sdCache.buildSvcKey(nsName) + sdCache.defaultCache.Add(key, serviceIdMap, sdCache.config.SvcTTL) +} + +func (sdCache *sdCache) EvictServiceIdMap(nsName string) { + key := sdCache.buildSvcKey(nsName) + sdCache.defaultCache.Remove(key) +} + +func 
(sdCache *sdCache) GetEndpoints(nsName string, svcName string) (endpts []*model.Endpoint, found bool) { + key := sdCache.buildEndptsKey(nsName, svcName) + entry, exists := sdCache.endpointsCache.Get(key) + if !exists { + return nil, false + } + + endpts, ok := entry.([]*model.Endpoint) + if !ok { + err := fmt.Errorf("failed to retrieve endpoints from cache") + sdCache.log.Error(err, err.Error(), "namespace", nsName, "service", svcName) + sdCache.endpointsCache.Remove(key) + return nil, false + } + + return endpts, true +} + +func (sdCache *sdCache) CacheEndpoints(nsName string, svcName string, endpts []*model.Endpoint) { + key := sdCache.buildEndptsKey(nsName, svcName) + sdCache.endpointsCache.Add(key, endpts, sdCache.config.EndptTTL) +} + +func (sdCache *sdCache) EvictEndpoints(nsName string, svcName string) { + key := sdCache.buildEndptsKey(nsName, svcName) + sdCache.endpointsCache.Remove(key) +} + +func (sdCache *sdCache) buildSvcKey(nsName string) (cacheKey string) { + return fmt.Sprintf("%s:%s", svcKeyPrefix, nsName) +} + +func (sdCache *sdCache) buildEndptsKey(nsName string, svcName string) string { + return fmt.Sprintf("%s:%s", nsName, svcName) +} diff --git a/pkg/cloudmap/cache_test.go b/pkg/cloudmap/cache_test.go new file mode 100644 index 00000000..a9231bd7 --- /dev/null +++ b/pkg/cloudmap/cache_test.go @@ -0,0 +1,163 @@ +package cloudmap + +import ( + "testing" + "time" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/test" + "github.com/go-logr/logr/testr" + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/util/cache" +) + +func TestNewServiceDiscoveryClientCache(t *testing.T) { + sdc, ok := NewServiceDiscoveryClientCache(&SdCacheConfig{ + NsTTL: 3 * time.Second, + SvcTTL: 3 * time.Second, + EndptTTL: 3 * time.Second, + }).(*sdCache) + if !ok { + t.Fatalf("failed to create cache") + } + + 
assert.Equal(t, 3*time.Second, sdc.config.NsTTL) + assert.Equal(t, 3*time.Second, sdc.config.SvcTTL) + assert.Equal(t, 3*time.Second, sdc.config.EndptTTL) +} + +func TestNewDefaultServiceDiscoveryClientCache(t *testing.T) { + sdc, ok := NewDefaultServiceDiscoveryClientCache().(*sdCache) + if !ok { + t.Fatalf("failed to create cache") + } + + assert.Equal(t, defaultNsTTL, sdc.config.NsTTL) + assert.Equal(t, defaultSvcTTL, sdc.config.SvcTTL) + assert.Equal(t, defaultEndptTTL, sdc.config.EndptTTL) +} + +func TestServiceDiscoveryClientCacheGetNamespaceMap_Found(t *testing.T) { + sdc := NewDefaultServiceDiscoveryClientCache() + sdc.CacheNamespaceMap(map[string]*model.Namespace{ + test.HttpNsName: test.GetTestHttpNamespace(), + }) + + nsMap, found := sdc.GetNamespaceMap() + assert.True(t, found) + assert.Equal(t, test.GetTestHttpNamespace(), nsMap[test.HttpNsName]) +} + +func TestServiceDiscoveryClientCacheGetNamespaceMap_NotFound(t *testing.T) { + sdc := NewDefaultServiceDiscoveryClientCache() + + nsMap, found := sdc.GetNamespaceMap() + assert.False(t, found) + assert.Nil(t, nsMap) +} + +func TestServiceDiscoveryClientCacheGetNamespaceMap_Corrupt(t *testing.T) { + sdc := getCacheImpl(t) + sdc.defaultCache.Add(nsKey, &model.Plan{}, time.Minute) + + nsMap, found := sdc.GetNamespaceMap() + assert.False(t, found) + assert.Nil(t, nsMap) +} + +func TestServiceDiscoveryClientEvictNamespaceMap(t *testing.T) { + sdc := NewDefaultServiceDiscoveryClientCache() + sdc.CacheNamespaceMap(map[string]*model.Namespace{ + test.HttpNsName: test.GetTestHttpNamespace(), + }) + sdc.EvictNamespaceMap() + + nsMap, found := sdc.GetNamespaceMap() + assert.False(t, found) + assert.Nil(t, nsMap) +} + +func TestServiceDiscoveryClientCacheGetServiceIdMap_Found(t *testing.T) { + sdc := NewDefaultServiceDiscoveryClientCache() + sdc.CacheServiceIdMap(test.HttpNsName, map[string]string{ + test.SvcName: test.SvcId, + }) + + svcIdMap, found := sdc.GetServiceIdMap(test.HttpNsName) + assert.True(t, found) + 
assert.Equal(t, test.SvcId, svcIdMap[test.SvcName]) +} + +func TestServiceDiscoveryClientCacheGetServiceIdMap_NotFound(t *testing.T) { + sdc := NewDefaultServiceDiscoveryClientCache() + + svcIdMap, found := sdc.GetServiceIdMap(test.HttpNsName) + assert.False(t, found) + assert.Empty(t, svcIdMap) +} + +func TestServiceDiscoveryClientCacheGetServiceIdMap_Corrupt(t *testing.T) { + sdc := getCacheImpl(t) + sdc.defaultCache.Add(sdc.buildSvcKey(test.HttpNsName), &model.Plan{}, time.Minute) + + svcIdMap, found := sdc.GetServiceIdMap(test.HttpNsName) + assert.False(t, found) + assert.Empty(t, svcIdMap) +} + +func TestServiceDiscoveryClientEvictServiceIdMap(t *testing.T) { + sdc := NewDefaultServiceDiscoveryClientCache() + sdc.CacheServiceIdMap(test.HttpNsName, map[string]string{ + test.SvcName: test.SvcId, + }) + sdc.EvictServiceIdMap(test.HttpNsName) + + svcIdMap, found := sdc.GetServiceIdMap(test.HttpNsName) + assert.False(t, found) + assert.Empty(t, svcIdMap) +} + +func TestServiceDiscoveryClientCacheGetEndpoints_Found(t *testing.T) { + sdc := NewDefaultServiceDiscoveryClientCache() + sdc.CacheEndpoints(test.HttpNsName, test.SvcName, []*model.Endpoint{test.GetTestEndpoint1(), test.GetTestEndpoint2()}) + + endpts, found := sdc.GetEndpoints(test.HttpNsName, test.SvcName) + assert.True(t, found) + assert.Equal(t, []*model.Endpoint{test.GetTestEndpoint1(), test.GetTestEndpoint2()}, endpts) +} + +func TestServiceDiscoveryClientCacheGetEndpoints_NotFound(t *testing.T) { + sdc := NewDefaultServiceDiscoveryClientCache() + + endpts, found := sdc.GetEndpoints(test.HttpNsName, test.SvcName) + assert.False(t, found) + assert.Nil(t, endpts) +} + +func TestServiceDiscoveryClientCacheGetEndpoints_Corrupt(t *testing.T) { + sdc := getCacheImpl(t) + sdc.defaultCache.Add(sdc.buildEndptsKey(test.HttpNsName, test.SvcName), &model.Plan{}, time.Minute) + + endpts, found := sdc.GetEndpoints(test.HttpNsName, test.SvcName) + assert.False(t, found) + assert.Nil(t, endpts) +} + +func 
TestServiceDiscoveryClientEvictEndpoints(t *testing.T) { + sdc := NewDefaultServiceDiscoveryClientCache() + sdc.CacheEndpoints(test.HttpNsName, test.SvcName, []*model.Endpoint{test.GetTestEndpoint1(), test.GetTestEndpoint2()}) + sdc.EvictEndpoints(test.HttpNsName, test.SvcName) + + endpts, found := sdc.GetEndpoints(test.HttpNsName, test.SvcName) + assert.False(t, found) + assert.Nil(t, endpts) +} + +func getCacheImpl(t *testing.T) sdCache { + return sdCache{ + log: common.NewLoggerWithLogr(testr.New(t)), + defaultCache: cache.NewLRUExpireCache(defaultCacheSize), + endpointsCache: cache.NewLRUExpireCache(defaultCacheSize), + } +} diff --git a/pkg/cloudmap/client.go b/pkg/cloudmap/client.go index 81052d4e..6cd4ddca 100644 --- a/pkg/cloudmap/client.go +++ b/pkg/cloudmap/client.go @@ -4,77 +4,78 @@ import ( "context" "errors" "fmt" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" "github.com/aws/aws-sdk-go-v2/aws" - sd "github.com/aws/aws-sdk-go-v2/service/servicediscovery" "github.com/aws/aws-sdk-go-v2/service/servicediscovery/types" - "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/util/cache" - ctrl "sigs.k8s.io/controller-runtime" - "time" -) - -const ( - defaultNamespaceIdCacheTTL = 2 * time.Minute - defaultNamespaceIdCacheSize = 100 - defaultServiceIdCacheTTL = 2 * time.Minute - defaultServiceIdCacheSize = 1024 ) +// ServiceDiscoveryClient provides the service endpoint management functionality required by the AWS Cloud Map +// multi-cluster service discovery for Kubernetes controller. It maintains local caches for all AWS Cloud Map resources. type ServiceDiscoveryClient interface { - // ListServices returns all services and their endpoints for a given namespace + // ListServices returns all services and their endpoints for a given namespace. 
ListServices(ctx context.Context, namespaceName string) ([]*model.Service, error) - // CreateService creates a Cloud Map service resource and return created service struct - CreateService(ctx context.Context, service *model.Service) error + // CreateService creates a Cloud Map service resource, and namespace if necessary. + CreateService(ctx context.Context, namespaceName string, serviceName string) error - // GetService returns a service resource fetched from the Cloud Map API or nil if not found - GetService(ctx context.Context, namespace string, name string) (*model.Service, error) + // GetService returns a service resource fetched from AWS Cloud Map or nil if not found. + GetService(ctx context.Context, namespaceName string, serviceName string) (*model.Service, error) - // RegisterEndpoints registers all endpoints for given service - RegisterEndpoints(ctx context.Context, service *model.Service) error + // RegisterEndpoints registers all endpoints for given service. + RegisterEndpoints(ctx context.Context, namespaceName string, serviceName string, endpoints []*model.Endpoint) error - // DeleteEndpoints de-registers all endpoints for given service - DeleteEndpoints(ctx context.Context, service *model.Service) error + // DeleteEndpoints de-registers all endpoints for given service. + DeleteEndpoints(ctx context.Context, namespaceName string, serviceName string, endpoints []*model.Endpoint) error } type serviceDiscoveryClient struct { - log logr.Logger - sdApi *sd.Client - namespaceIdCache *cache.LRUExpireCache - serviceIdCache *cache.LRUExpireCache - EndpointManager + log common.Logger + sdApi ServiceDiscoveryApi + cache ServiceDiscoveryClientCache + clusterUtils model.ClusterUtils } -func NewServiceDiscoveryClient(cfg *aws.Config) ServiceDiscoveryClient { +// NewDefaultServiceDiscoveryClient creates a new service discovery client for AWS Cloud Map with default resource cache +// from a given AWS client config. 
+func NewDefaultServiceDiscoveryClient(cfg *aws.Config, clusterUtils model.ClusterUtils) ServiceDiscoveryClient { return &serviceDiscoveryClient{ - log: ctrl.Log.WithName("cloudmap"), - sdApi: sd.NewFromConfig(*cfg), - namespaceIdCache: cache.NewLRUExpireCache(defaultNamespaceIdCacheSize), - serviceIdCache: cache.NewLRUExpireCache(defaultServiceIdCacheSize), - EndpointManager: NewEndpointManager(cfg), + log: common.NewLogger("cloudmap", "client"), + sdApi: NewServiceDiscoveryApiFromConfig(cfg), + cache: NewDefaultServiceDiscoveryClientCache(), + clusterUtils: clusterUtils, } } -func (sdc *serviceDiscoveryClient) ListServices(ctx context.Context, namespaceName string) ([]*model.Service, error) { - svcs := make([]*model.Service, 0) - - svcSums, svcErr := sdc.listServicesFromCloudMap(ctx, namespaceName) - - if svcErr != nil { - return svcs, svcErr +func NewServiceDiscoveryClientWithCustomCache(cfg *aws.Config, cacheConfig *SdCacheConfig, clusterUtils model.ClusterUtils) ServiceDiscoveryClient { + return &serviceDiscoveryClient{ + log: common.NewLogger("cloudmap", "client"), + sdApi: NewServiceDiscoveryApiFromConfig(cfg), + cache: NewServiceDiscoveryClientCache(cacheConfig), + clusterUtils: clusterUtils, } +} - for _, svcSum := range svcSums { - endpts, endptsErr := sdc.EndpointManager.ListEndpoints(ctx, aws.ToString(svcSum.Id)) +func (sdc *serviceDiscoveryClient) ListServices(ctx context.Context, nsName string) (svcs []*model.Service, err error) { + svcIdMap, err := sdc.getServiceIds(ctx, nsName) + if err != nil { + // Ignore resource not found error, as it will indicate deleted resources in CloudMap + if common.IsNotFound(err) { + return svcs, nil + } + return svcs, err + } + for svcName := range svcIdMap { + endpts, endptsErr := sdc.getEndpoints(ctx, nsName, svcName) if endptsErr != nil { return svcs, endptsErr } svcs = append(svcs, &model.Service{ - Namespace: namespaceName, - Name: aws.ToString(svcSum.Name), + Namespace: nsName, + Name: svcName, Endpoints: endpts, 
}) } @@ -82,197 +83,242 @@ func (sdc *serviceDiscoveryClient) ListServices(ctx context.Context, namespaceNa return svcs, nil } -func (sdc *serviceDiscoveryClient) CreateService(ctx context.Context, service *model.Service) error { - sdc.log.Info("creating a new service", "namespace", service.Namespace, "name", service.Name) - - nsId, nsErr := sdc.getNamespaceId(ctx, service.Namespace) +func (sdc *serviceDiscoveryClient) CreateService(ctx context.Context, nsName string, svcName string) error { + sdc.log.Info("creating a new service", "namespace", nsName, "name", svcName) - if nsErr != nil { - return nsErr + namespace, err := sdc.getNamespace(ctx, nsName) + if common.IsUnknown(err) { + return err } - //TODO: Handle non-http namespaces - sdSrv, srvErr := sdc.sdApi.CreateService(ctx, &sd.CreateServiceInput{ - Name: &service.Name, - NamespaceId: &nsId}) + if common.IsNotFound(err) { + sdc.log.Info("namespace not found for service", "namespace", nsName, "service", svcName) + // Create HttpNamespace if the namespace is not present in CloudMap + namespace, err = sdc.createNamespace(ctx, nsName) + if err != nil { + return err + } + } - if srvErr != nil { - return srvErr + _, err = sdc.sdApi.CreateService(ctx, *namespace, svcName) + if err != nil { + return err } - sdc.serviceIdCache.Add( - sdc.buildServiceIdCacheKey(nsId, service.Name), - *sdSrv.Service.Id, defaultServiceIdCacheTTL) + sdc.cache.EvictServiceIdMap(nsName) - return sdc.RegisterEndpoints(ctx, service) + return nil } -func (sdc *serviceDiscoveryClient) GetService(ctx context.Context, namespaceName string, serviceName string) (*model.Service, error) { - sdc.log.Info("fetching a service", "namespaceName", namespaceName, "serviceName", serviceName) - - svcId, svcIdErr := sdc.getServiceId(ctx, namespaceName, serviceName) - - if svcIdErr != nil { - return nil, svcIdErr +func (sdc *serviceDiscoveryClient) GetService(ctx context.Context, nsName string, svcName string) (svc *model.Service, err error) { + 
sdc.log.Info("fetching a service", "namespace", nsName, "name", svcName) + if endpts, found := sdc.cache.GetEndpoints(nsName, svcName); found { + return &model.Service{ + Namespace: nsName, + Name: svcName, + Endpoints: endpts, + }, nil } - if svcId == "" { - return nil, nil + _, err = sdc.getServiceId(ctx, nsName, svcName) + if err != nil { + return nil, err } - endpts, endptsErr := sdc.EndpointManager.ListEndpoints(ctx, svcId) - - if endptsErr != nil { - return nil, endptsErr + endpts, err := sdc.getEndpoints(ctx, nsName, svcName) + if err != nil { + return nil, err } - svc := &model.Service{ - Namespace: namespaceName, - Name: serviceName, + return &model.Service{ + Namespace: nsName, + Name: svcName, Endpoints: endpts, - } - - return svc, nil + }, nil } -func (sdc *serviceDiscoveryClient) RegisterEndpoints(ctx context.Context, service *model.Service) error { - if len(service.Endpoints) == 0 { - sdc.log.Info("skipping endpoint registration for empty endpoint list", "serviceName", service.Name) +func (sdc *serviceDiscoveryClient) RegisterEndpoints(ctx context.Context, nsName string, svcName string, endpts []*model.Endpoint) (err error) { + if len(endpts) == 0 { + sdc.log.Info("skipping endpoint registration for empty endpoint list", "serviceName", svcName) return nil } - sdc.log.Info("registering endpoints", "namespaceName", service.Namespace, - "serviceName", service.Name, "endpoints", service.Endpoints) - svcId, svcErr := sdc.getServiceId(ctx, service.Namespace, service.Name) - if svcErr != nil { - return svcErr - } + sdc.log.Info("registering endpoints", "namespaceName", nsName, "serviceName", svcName, "endpoints", endpts) - return sdc.EndpointManager.RegisterEndpoints(ctx, service, svcId) -} + svcId, err := sdc.getServiceId(ctx, nsName, svcName) + if err != nil { + return err + } -func (sdc *serviceDiscoveryClient) DeleteEndpoints(ctx context.Context, service *model.Service) error { - if len(service.Endpoints) == 0 { - sdc.log.Info("skipping endpoint deletion 
for empty endpoint list", "serviceName", service.Name) - return nil + operationPoller := NewOperationPoller(sdc.sdApi) + for _, endpt := range endpts { + endptId := endpt.Id + endptAttrs := endpt.GetCloudMapAttributes() + operationPoller.Submit(ctx, func() (opId string, err error) { + return sdc.sdApi.RegisterInstance(ctx, svcId, endptId, endptAttrs) + }) } - sdc.log.Info("deleting endpoints", "namespaceName", service.Namespace, - "serviceName", service.Name, "endpoints", service.Endpoints) - svcId, svcErr := sdc.getServiceId(ctx, service.Namespace, service.Name) - if svcErr != nil { - return svcErr + // Evict cache entry so next list call reflects changes + sdc.cache.EvictEndpoints(nsName, svcName) + + err = operationPoller.Await() + if err != nil { + return common.Wrap(err, errors.New("failure while registering endpoints")) } - return sdc.EndpointManager.DeregisterEndpoints(ctx, service, svcId) + return nil } -func (sdc *serviceDiscoveryClient) getNamespaceId(ctx context.Context, nsName string) (string, error) { - // We are assuming a unique namespace name per account - if cachedValue, exists := sdc.namespaceIdCache.Get(nsName); exists { - return cachedValue.(string), nil +func (sdc *serviceDiscoveryClient) DeleteEndpoints(ctx context.Context, nsName string, svcName string, endpts []*model.Endpoint) (err error) { + if len(endpts) == 0 { + sdc.log.Info("skipping endpoint deletion for empty endpoint list", "serviceName", svcName) + return nil } - nsId, err := sdc.getNamespaceIdFromCloudMap(ctx, nsName) + sdc.log.Info("deleting endpoints", "namespaceName", nsName, "serviceName", svcName, "endpoints", endpts) + svcId, err := sdc.getServiceId(ctx, nsName, svcName) if err != nil { - return "", err + return err + } + + operationPoller := NewOperationPoller(sdc.sdApi) + for _, endpt := range endpts { + endptId := endpt.Id + operationPoller.Submit(ctx, func() (opId string, err error) { + return sdc.sdApi.DeregisterInstance(ctx, svcId, endptId) + }) } - 
sdc.namespaceIdCache.Add(nsName, nsId, defaultNamespaceIdCacheTTL) + // Evict cache entry so next list call reflects changes + sdc.cache.EvictEndpoints(nsName, svcName) + + err = operationPoller.Await() + if err != nil { + return common.Wrap(err, errors.New("failure while de-registering endpoints")) + } - return nsId, err + return err } -func (sdc *serviceDiscoveryClient) getNamespaceIdFromCloudMap(ctx context.Context, nsName string) (string, error) { +func (sdc *serviceDiscoveryClient) getEndpoints(ctx context.Context, nsName string, svcName string) (endpts []*model.Endpoint, err error) { + endpts, found := sdc.cache.GetEndpoints(nsName, svcName) + if found { + return endpts, nil + } - pages := sd.NewListNamespacesPaginator(sdc.sdApi, &sd.ListNamespacesInput{}) + clusterProperties, err := sdc.clusterUtils.GetClusterProperties(ctx) + if err != nil { + sdc.log.Error(err, "failed to retrieve clusterSetId") + return nil, err + } - for pages.HasMorePages() { - output, err := pages.NextPage(ctx) - if err != nil { - return "", err - } + queryParameters := map[string]string{ + model.ClusterSetIdAttr: clusterProperties.ClusterSetId(), + } + insts, err := sdc.sdApi.DiscoverInstances(ctx, nsName, svcName, queryParameters) + if err != nil { + return nil, err + } - for _, ns := range output.Namespaces { - if nsName == aws.ToString(ns.Name) { - return aws.ToString(ns.Id), nil - } + for _, inst := range insts { + endpt, endptErr := model.NewEndpointFromInstance(&inst) + if endptErr != nil { + sdc.log.Error(endptErr, "skipping instance to endpoint conversion", "instanceId", *inst.InstanceId) + continue } + endpts = append(endpts, endpt) } + sdc.cache.CacheEndpoints(nsName, svcName, endpts) - return "", errors.New(fmt.Sprintf("namespace %s not found", nsName)) + return endpts, nil } -func (sdc *serviceDiscoveryClient) getServiceId(ctx context.Context, nsName string, svcName string) (string, error) { - cacheKey := sdc.buildServiceIdCacheKey(nsName, svcName) +func (sdc 
*serviceDiscoveryClient) getNamespace(ctx context.Context, nsName string) (namespace *model.Namespace, err error) { + namespaces, err := sdc.getNamespaces(ctx) + if err != nil { + return nil, err + } - if cachedValue, exists := sdc.serviceIdCache.Get(cacheKey); exists { - return cachedValue.(string), nil + if namespace, ok := namespaces[nsName]; ok { + return namespace, nil } - svcId, svcErr := sdc.getServiceIdFromCloudMap(ctx, nsName, svcName) + return nil, common.NotFoundError(fmt.Sprintf("namespace: %s", nsName)) +} - if svcErr != nil { - return "", svcErr +func (sdc *serviceDiscoveryClient) getNamespaces(ctx context.Context) (namespaces map[string]*model.Namespace, err error) { + // We are assuming a unique namespace name per account + namespaces, found := sdc.cache.GetNamespaceMap() + if found { + return namespaces, nil } - if svcId != "" { - sdc.serviceIdCache.Add(cacheKey, svcId, defaultServiceIdCacheTTL) + namespaces, err = sdc.sdApi.GetNamespaceMap(ctx) + if err != nil { + return nil, err } + sdc.cache.CacheNamespaceMap(namespaces) - return svcId, nil + return namespaces, nil } -func (sdc *serviceDiscoveryClient) getServiceIdFromCloudMap(ctx context.Context, nsName string, svcName string) (string, error) { - svcs, err := sdc.listServicesFromCloudMap(ctx, nsName) - +func (sdc *serviceDiscoveryClient) getServiceId(ctx context.Context, nsName string, svcName string) (svcId string, err error) { + svcIdMap, err := sdc.getServiceIds(ctx, nsName) if err != nil { return "", err } - for _, svc := range svcs { - if svcName == aws.ToString(svc.Name) { - return aws.ToString(svc.Id), nil - } + if svcId, ok := svcIdMap[svcName]; ok { + return svcId, nil } - return "", nil + return "", common.NotFoundError(fmt.Sprintf("service: %s", svcName)) } -func (sdc *serviceDiscoveryClient) listServicesFromCloudMap(ctx context.Context, nsName string) ([]*types.ServiceSummary, error) { - svcs := make([]*types.ServiceSummary, 0) - - nsId, nsErr := sdc.getNamespaceId(ctx, nsName) - if 
nsErr != nil { - return svcs, nil +func (sdc *serviceDiscoveryClient) getServiceIds(ctx context.Context, nsName string) (map[string]string, error) { + serviceIdMap, found := sdc.cache.GetServiceIdMap(nsName) + if found { + return serviceIdMap, nil } - filter := types.ServiceFilter{ - Name: types.ServiceFilterNameNamespaceId, - Values: []string{nsId}, + namespace, err := sdc.getNamespace(ctx, nsName) + if err != nil { + return nil, err } - pages := sd.NewListServicesPaginator(sdc.sdApi, &sd.ListServicesInput{Filters: []types.ServiceFilter{filter}}) + serviceIdMap, err = sdc.sdApi.GetServiceIdMap(ctx, namespace.Id) + if err != nil { + return nil, err + } + sdc.cache.CacheServiceIdMap(nsName, serviceIdMap) - for pages.HasMorePages() { - output, err := pages.NextPage(ctx) - if err != nil { - return svcs, err - } + return serviceIdMap, nil +} - for _, svc := range output.Services { - svcs = append(svcs, &svc) +func (sdc *serviceDiscoveryClient) createNamespace(ctx context.Context, nsName string) (namespace *model.Namespace, err error) { + sdc.log.Info("creating a new namespace", "namespace", nsName) + opId, err := sdc.sdApi.CreateHttpNamespace(ctx, nsName) + if err != nil { + return nil, err + } - cacheKey := sdc.buildServiceIdCacheKey(nsName, aws.ToString(svc.Name)) - svcId := aws.ToString(svc.Id) - sdc.serviceIdCache.Add(cacheKey, svcId, defaultServiceIdCacheTTL) - } + op, err := NewOperationPoller(sdc.sdApi).Poll(ctx, opId) + if err != nil { + return nil, err } + nsId := op.Targets[string(types.OperationTargetTypeNamespace)] - return svcs, nil -} + sdc.log.Info("namespace created", "nsId", nsId, "namespace", nsName) + + // Default namespace type HTTP + namespace = &model.Namespace{ + Id: nsId, + Name: nsName, + Type: model.HttpNamespaceType, + } -func (sdc *serviceDiscoveryClient) buildServiceIdCacheKey(nsName string, svcName string) string { - return fmt.Sprintf("%s/%s", nsName, svcName) + sdc.cache.EvictNamespaceMap() + return namespace, nil } diff --git 
a/pkg/cloudmap/client_test.go b/pkg/cloudmap/client_test.go new file mode 100644 index 00000000..3de7f567 --- /dev/null +++ b/pkg/cloudmap/client_test.go @@ -0,0 +1,552 @@ +package cloudmap + +import ( + "context" + "errors" + "strconv" + "testing" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + cloudmapMock "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/mocks/pkg/cloudmap" + aboutv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/about/v1alpha1" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/test" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/servicediscovery/types" + "github.com/go-logr/logr/testr" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +type testSdClient struct { + client *serviceDiscoveryClient + mockApi cloudmapMock.MockServiceDiscoveryApi + mockCache cloudmapMock.MockServiceDiscoveryClientCache + close func() +} + +func TestNewServiceDiscoveryClient(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + assert.NotNil(t, tc) +} + +func TestServiceDiscoveryClient_ListServices_HappyCase(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetServiceIdMap(test.HttpNsName).Return(nil, false) + + tc.mockCache.EXPECT().GetNamespaceMap().Return(nil, false) + tc.mockApi.EXPECT().GetNamespaceMap(context.TODO()).Return(getNamespaceMapForTest(), nil) + tc.mockCache.EXPECT().CacheNamespaceMap(getNamespaceMapForTest()) + + tc.mockApi.EXPECT().GetServiceIdMap(context.TODO(), test.HttpNsId).Return(getServiceIdMapForTest(), nil) + tc.mockCache.EXPECT().CacheServiceIdMap(test.HttpNsName, getServiceIdMapForTest()) + + tc.mockCache.EXPECT().GetEndpoints(test.HttpNsName, test.SvcName).Return(nil, false) + tc.mockApi.EXPECT().DiscoverInstances(context.TODO(), 
test.HttpNsName, test.SvcName, map[string]string{ + model.ClusterSetIdAttr: test.ClusterSet, + }).Return(getHttpInstanceSummaryForTest(), nil) + + tc.mockCache.EXPECT().CacheEndpoints(test.HttpNsName, test.SvcName, + []*model.Endpoint{test.GetTestEndpoint1(), test.GetTestEndpoint2()}) + + svcs, err := tc.client.ListServices(context.TODO(), test.HttpNsName) + assert.Equal(t, []*model.Service{test.GetTestService()}, svcs) + assert.Nil(t, err, "No error for happy case") +} + +func TestServiceDiscoveryClient_ListServices_HappyCaseCachedResults(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + dnsService := test.GetTestService() + dnsService.Namespace = test.DnsNsName + + tc.mockCache.EXPECT().GetServiceIdMap(test.DnsNsName).Return(getServiceIdMapForTest(), true) + + tc.mockCache.EXPECT().GetEndpoints(test.DnsNsName, test.SvcName). + Return([]*model.Endpoint{test.GetTestEndpoint1(), test.GetTestEndpoint2()}, true) + + svcs, err := tc.client.ListServices(context.TODO(), test.DnsNsName) + assert.Equal(t, []*model.Service{dnsService}, svcs) + assert.Nil(t, err, "No error for happy case") +} + +func TestServiceDiscoveryClient_ListServices_NamespaceError(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetServiceIdMap(test.HttpNsName).Return(nil, false) + + nsErr := errors.New("error listing namespaces") + tc.mockCache.EXPECT().GetNamespaceMap().Return(nil, false) + tc.mockApi.EXPECT().GetNamespaceMap(context.TODO()).Return(nil, nsErr) + + svcs, err := tc.client.ListServices(context.TODO(), test.HttpNsName) + assert.Equal(t, nsErr, err) + assert.Empty(t, svcs) +} + +func TestServiceDiscoveryClient_ListServices_ServiceError(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetServiceIdMap(test.HttpNsName).Return(nil, false) + + tc.mockCache.EXPECT().GetNamespaceMap().Return(getNamespaceMapForTest(), true) + + svcErr := errors.New("error listing services") + 
tc.mockApi.EXPECT().GetServiceIdMap(context.TODO(), test.HttpNsId). + Return(nil, svcErr) + + svcs, err := tc.client.ListServices(context.TODO(), test.HttpNsName) + assert.Equal(t, svcErr, err) + assert.Empty(t, svcs) +} + +func TestServiceDiscoveryClient_ListServices_InstanceError(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetServiceIdMap(test.HttpNsName).Return(getServiceIdMapForTest(), true) + + endptErr := errors.New("error listing endpoints") + tc.mockCache.EXPECT().GetEndpoints(test.HttpNsName, test.SvcName).Return(nil, false) + tc.mockApi.EXPECT().DiscoverInstances(context.TODO(), test.HttpNsName, test.SvcName, map[string]string{ + model.ClusterSetIdAttr: test.ClusterSet, + }). + Return([]types.HttpInstanceSummary{}, endptErr) + + svcs, err := tc.client.ListServices(context.TODO(), test.HttpNsName) + assert.Equal(t, endptErr, err) + assert.Empty(t, svcs) +} + +func TestServiceDiscoveryClient_ListServices_NamespaceNotFound(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetServiceIdMap(test.HttpNsName).Return(nil, false) + tc.mockCache.EXPECT().GetNamespaceMap().Return(nil, true) + + svcs, err := tc.client.ListServices(context.TODO(), test.HttpNsName) + assert.Empty(t, svcs) + assert.Nil(t, err, "No error for namespace not found") +} + +func TestServiceDiscoveryClient_CreateService_HappyCase(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetNamespaceMap().Return(getNamespaceMapForTest(), true) + + tc.mockApi.EXPECT().CreateService(context.TODO(), *test.GetTestHttpNamespace(), test.SvcName). 
+ Return(test.SvcId, nil) + tc.mockCache.EXPECT().EvictServiceIdMap(test.HttpNsName) + + err := tc.client.CreateService(context.TODO(), test.HttpNsName, test.SvcName) + assert.Nil(t, err, "No error for happy case") +} + +func TestServiceDiscoveryClient_CreateService_HappyCaseForDNSNamespace(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetNamespaceMap().Return(getNamespaceMapForTest(), true) + + tc.mockApi.EXPECT().CreateService(context.TODO(), *test.GetTestDnsNamespace(), test.SvcName). + Return(test.SvcId, nil) + tc.mockCache.EXPECT().EvictServiceIdMap(test.DnsNsName) + + err := tc.client.CreateService(context.TODO(), test.DnsNsName, test.SvcName) + assert.Nil(t, err, "No error for happy case") +} + +func TestServiceDiscoveryClient_CreateService_NamespaceError(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + nsErr := errors.New("error listing namespaces") + + tc.mockCache.EXPECT().GetNamespaceMap().Return(nil, false) + tc.mockApi.EXPECT().GetNamespaceMap(context.TODO()).Return(nil, nsErr) + + err := tc.client.CreateService(context.TODO(), test.HttpNsName, test.SvcName) + assert.Equal(t, nsErr, err) +} + +func TestServiceDiscoveryClient_CreateService_NamespaceNotFound(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetNamespaceMap().Return(map[string]*model.Namespace{}, true) + tc.mockApi.EXPECT().CreateHttpNamespace(context.TODO(), test.HttpNsName).Return(test.OpId1, nil) + tc.mockApi.EXPECT().GetOperation(context.TODO(), test.OpId1). + Return(&types.Operation{Status: types.OperationStatusSuccess, + Targets: map[string]string{string(types.OperationTargetTypeNamespace): test.HttpNsId}}, nil) + tc.mockCache.EXPECT().EvictNamespaceMap() + + tc.mockApi.EXPECT().CreateService(context.TODO(), *test.GetTestHttpNamespace(), test.SvcName). 
+ Return(test.SvcId, nil) + tc.mockCache.EXPECT().EvictServiceIdMap(test.HttpNsName) + + err := tc.client.CreateService(context.TODO(), test.HttpNsName, test.SvcName) + assert.Nil(t, err) +} + +func TestServiceDiscoveryClient_CreateService_CreateServiceError(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetNamespaceMap().Return(getNamespaceMapForTest(), true) + + svcErr := errors.New("error creating service") + tc.mockApi.EXPECT().CreateService(context.TODO(), *test.GetTestDnsNamespace(), test.SvcName). + Return("", svcErr) + + err := tc.client.CreateService(context.TODO(), test.DnsNsName, test.SvcName) + assert.Equal(t, err, svcErr) +} + +func TestServiceDiscoveryClient_CreateService_CreatesNamespace_HappyCase(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetNamespaceMap().Return(map[string]*model.Namespace{ + test.DnsNsName: test.GetTestDnsNamespace(), + }, true) + + tc.mockApi.EXPECT().CreateHttpNamespace(context.TODO(), test.HttpNsName). + Return(test.OpId1, nil) + tc.mockApi.EXPECT().GetOperation(context.TODO(), test.OpId1). + Return(&types.Operation{Status: types.OperationStatusSuccess, + Targets: map[string]string{string(types.OperationTargetTypeNamespace): test.HttpNsId}}, nil) + tc.mockCache.EXPECT().EvictNamespaceMap() + + tc.mockApi.EXPECT().CreateService(context.TODO(), *test.GetTestHttpNamespace(), test.SvcName). + Return(test.SvcId, nil) + tc.mockCache.EXPECT().EvictServiceIdMap(test.HttpNsName) + + err := tc.client.CreateService(context.TODO(), test.HttpNsName, test.SvcName) + assert.Nil(t, err, "No error for happy case") +} + +func TestServiceDiscoveryClient_CreateService_CreatesNamespace_PollError(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetNamespaceMap().Return(nil, true) + + pollErr := errors.New("polling error") + tc.mockApi.EXPECT().CreateHttpNamespace(context.TODO(), test.HttpNsName). 
+ Return(test.OpId1, nil) + tc.mockApi.EXPECT().GetOperation(context.TODO(), test.OpId1). + Return(nil, pollErr) + + err := tc.client.CreateService(context.TODO(), test.HttpNsName, test.SvcName) + assert.Equal(t, pollErr, err) +} + +func TestServiceDiscoveryClient_CreateService_CreatesNamespace_CreateNsError(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetNamespaceMap().Return(nil, true) + + nsErr := errors.New("create namespace error") + tc.mockApi.EXPECT().CreateHttpNamespace(context.TODO(), test.HttpNsName). + Return("", nsErr) + + err := tc.client.CreateService(context.TODO(), test.HttpNsName, test.SvcName) + assert.Equal(t, nsErr, err) +} + +func TestServiceDiscoveryClient_GetService_HappyCase(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetEndpoints(test.HttpNsName, test.SvcName).Return(nil, false) + + tc.mockCache.EXPECT().GetServiceIdMap(test.HttpNsName).Return(nil, false) + + tc.mockCache.EXPECT().GetNamespaceMap().Return(nil, false) + tc.mockApi.EXPECT().GetNamespaceMap(context.TODO()). + Return(getNamespaceMapForTest(), nil) + tc.mockCache.EXPECT().CacheNamespaceMap(getNamespaceMapForTest()) + + tc.mockApi.EXPECT().GetServiceIdMap(context.TODO(), test.HttpNsId). 
+ Return(map[string]string{test.SvcName: test.SvcId}, nil) + tc.mockCache.EXPECT().CacheServiceIdMap(test.HttpNsName, getServiceIdMapForTest()) + + tc.mockCache.EXPECT().GetEndpoints(test.HttpNsName, test.SvcName).Return([]*model.Endpoint{}, false) + tc.mockApi.EXPECT().DiscoverInstances(context.TODO(), test.HttpNsName, test.SvcName, map[string]string{ + model.ClusterSetIdAttr: test.ClusterSet, + }).Return(getHttpInstanceSummaryForTest(), nil) + tc.mockCache.EXPECT().CacheEndpoints(test.HttpNsName, test.SvcName, + []*model.Endpoint{test.GetTestEndpoint1(), test.GetTestEndpoint2()}) + + svc, err := tc.client.GetService(context.TODO(), test.HttpNsName, test.SvcName) + assert.Nil(t, err) + assert.Equal(t, test.GetTestService(), svc) +} + +func TestServiceDiscoveryClient_GetService_CachedValues(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetEndpoints(test.HttpNsName, test.SvcName). + Return([]*model.Endpoint{test.GetTestEndpoint1(), test.GetTestEndpoint2()}, true) + + svc, err := tc.client.GetService(context.TODO(), test.HttpNsName, test.SvcName) + assert.Nil(t, err) + assert.Equal(t, test.GetTestService(), svc) +} + +func TestServiceDiscoveryClient_GetService_ServiceNotFound(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetEndpoints(test.HttpNsName, test.SvcName).Return(nil, false) + + tc.mockCache.EXPECT().GetServiceIdMap(test.HttpNsName).Return(nil, false) + + tc.mockCache.EXPECT().GetNamespaceMap().Return(nil, false) + tc.mockApi.EXPECT().GetNamespaceMap(context.TODO()). + Return(getNamespaceMapForTest(), nil) + tc.mockCache.EXPECT().CacheNamespaceMap(getNamespaceMapForTest()) + + // return empty list from CloudMap's api + tc.mockApi.EXPECT().GetServiceIdMap(context.TODO(), test.HttpNsId). 
+ Return(map[string]string{}, nil) + tc.mockCache.EXPECT().CacheServiceIdMap(test.HttpNsName, map[string]string{}) + + svc, err := tc.client.GetService(context.TODO(), test.HttpNsName, test.SvcName) + assert.NotNil(t, err) + assert.True(t, common.IsNotFound(err), svc) + assert.Contains(t, err.Error(), test.SvcName) +} + +func TestServiceDiscoveryClient_RegisterEndpoints(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetServiceIdMap(test.HttpNsName).Return(getServiceIdMapForTest(), true) + + tc.mockApi.EXPECT().RegisterInstance(context.TODO(), test.SvcId, test.EndptId1, getAttrs1()). + Return(test.OpId1, nil) + tc.mockApi.EXPECT().RegisterInstance(context.TODO(), test.SvcId, test.EndptId2, getAttrs2()). + Return(test.OpId2, nil) + tc.mockApi.EXPECT().GetOperation(context.TODO(), test.OpId1). + Return(&types.Operation{Status: types.OperationStatusSuccess}, nil) + tc.mockApi.EXPECT().GetOperation(context.TODO(), test.OpId2). + Return(&types.Operation{Status: types.OperationStatusSuccess}, nil) + + tc.mockCache.EXPECT().EvictEndpoints(test.HttpNsName, test.SvcName) + + err := tc.client.RegisterEndpoints(context.TODO(), test.HttpNsName, test.SvcName, + []*model.Endpoint{test.GetTestEndpoint1(), test.GetTestEndpoint2()}) + + assert.Nil(t, err) +} + +func TestServiceDiscoveryClient_RegisterEndpoints_PollFailure(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetServiceIdMap(test.HttpNsName).Return(getServiceIdMapForTest(), true) + + tc.mockApi.EXPECT().RegisterInstance(context.TODO(), test.SvcId, test.EndptId1, getAttrs1()). + Return(test.OpId1, nil) + tc.mockApi.EXPECT().RegisterInstance(context.TODO(), test.SvcId, test.EndptId2, getAttrs2()). + Return(test.OpId2, nil) + tc.mockApi.EXPECT().GetOperation(context.TODO(), test.OpId1). + Return(&types.Operation{Status: types.OperationStatusFail}, nil) + tc.mockApi.EXPECT().GetOperation(context.TODO(), test.OpId2). 
+ Return(&types.Operation{Status: types.OperationStatusSuccess}, nil) + + tc.mockCache.EXPECT().EvictEndpoints(test.HttpNsName, test.SvcName) + + err := tc.client.RegisterEndpoints(context.TODO(), test.HttpNsName, test.SvcName, + []*model.Endpoint{test.GetTestEndpoint1(), test.GetTestEndpoint2()}) + + assert.NotNil(t, err) + assert.Contains(t, err.Error(), test.OpId1) +} + +func TestServiceDiscoveryClient_DeleteEndpoints(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetServiceIdMap(test.HttpNsName).Return(getServiceIdMapForTest(), true) + + tc.mockApi.EXPECT().DeregisterInstance(context.TODO(), test.SvcId, test.EndptId1). + Return(test.OpId1, nil) + tc.mockApi.EXPECT().DeregisterInstance(context.TODO(), test.SvcId, test.EndptId2). + Return(test.OpId2, nil) + tc.mockApi.EXPECT().GetOperation(context.TODO(), test.OpId1). + Return(&types.Operation{Status: types.OperationStatusSuccess}, nil) + tc.mockApi.EXPECT().GetOperation(context.TODO(), test.OpId2). + Return(&types.Operation{Status: types.OperationStatusSuccess}, nil) + + tc.mockCache.EXPECT().EvictEndpoints(test.HttpNsName, test.SvcName) + + err := tc.client.DeleteEndpoints(context.TODO(), test.HttpNsName, test.SvcName, + []*model.Endpoint{ + {Id: test.EndptId1, ClusterId: test.ClusterId1, ClusterSetId: test.ClusterSet}, + {Id: test.EndptId2, ClusterId: test.ClusterId1, ClusterSetId: test.ClusterSet}, + }) + assert.Nil(t, err) +} + +func TestServiceDiscoveryClient_DeleteEndpoints_PollFailure(t *testing.T) { + tc := getTestSdClient(t) + defer tc.close() + + tc.mockCache.EXPECT().GetServiceIdMap(test.HttpNsName).Return(getServiceIdMapForTest(), true) + + tc.mockApi.EXPECT().DeregisterInstance(context.TODO(), test.SvcId, test.EndptId1). + Return(test.OpId1, nil) + tc.mockApi.EXPECT().DeregisterInstance(context.TODO(), test.SvcId, test.EndptId2). + Return(test.OpId2, nil) + tc.mockApi.EXPECT().GetOperation(context.TODO(), test.OpId1). 
+ Return(&types.Operation{Status: types.OperationStatusFail}, nil) + tc.mockApi.EXPECT().GetOperation(context.TODO(), test.OpId2). + Return(&types.Operation{Status: types.OperationStatusSuccess}, nil) + + tc.mockCache.EXPECT().EvictEndpoints(test.HttpNsName, test.SvcName) + + err := tc.client.DeleteEndpoints(context.TODO(), test.HttpNsName, test.SvcName, + []*model.Endpoint{ + {Id: test.EndptId1, ClusterId: test.ClusterId1, ClusterSetId: test.ClusterSet}, + {Id: test.EndptId2, ClusterId: test.ClusterId1, ClusterSetId: test.ClusterSet}, + }) + + assert.NotNil(t, err) + assert.Contains(t, err.Error(), test.OpId1) +} + +func getTestSdClient(t *testing.T) *testSdClient { + test.SetTestVersion() + mockController := gomock.NewController(t) + mockCache := cloudmapMock.NewMockServiceDiscoveryClientCache(mockController) + mockApi := cloudmapMock.NewMockServiceDiscoveryApi(mockController) + scheme := runtime.NewScheme() + scheme.AddKnownTypes(aboutv1alpha1.GroupVersion, &aboutv1alpha1.ClusterProperty{}, &aboutv1alpha1.ClusterPropertyList{}) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(test.ClusterIdForTest(), test.ClusterSetIdForTest()).Build() + return &testSdClient{ + client: &serviceDiscoveryClient{ + log: common.NewLoggerWithLogr(testr.New(t)), + sdApi: mockApi, + cache: mockCache, + clusterUtils: model.NewClusterUtils(fakeClient), + }, + mockApi: *mockApi, + mockCache: *mockCache, + close: func() { mockController.Finish() }, + } +} + +func getHttpInstanceSummaryForTest() []types.HttpInstanceSummary { + return []types.HttpInstanceSummary{ + { + InstanceId: aws.String(test.EndptId1), + Attributes: map[string]string{ + model.ClusterIdAttr: test.ClusterId1, + model.ClusterSetIdAttr: test.ClusterSet, + model.EndpointIpv4Attr: test.EndptIp1, + model.EndpointPortAttr: test.PortStr1, + model.EndpointPortNameAttr: test.PortName1, + model.EndpointProtocolAttr: test.Protocol1, + model.EndpointReadyAttr: test.EndptReadyTrue, + model.ServicePortNameAttr: 
test.PortName1, + model.ServicePortAttr: test.ServicePortStr1, + model.ServiceProtocolAttr: test.Protocol1, + model.ServiceTargetPortAttr: test.PortStr1, + model.ServiceTypeAttr: test.SvcType, + model.EndpointHostnameAttr: test.Hostname, + model.EndpointNodeNameAttr: test.Nodename, + model.ServiceExportCreationAttr: strconv.FormatInt(test.SvcExportCreationTimestamp, 10), + model.K8sVersionAttr: test.PackageVersion, + }, + }, + { + InstanceId: aws.String(test.EndptId2), + Attributes: map[string]string{ + model.ClusterIdAttr: test.ClusterId1, + model.ClusterSetIdAttr: test.ClusterSet, + model.EndpointIpv4Attr: test.EndptIp2, + model.EndpointPortAttr: test.PortStr2, + model.EndpointPortNameAttr: test.PortName2, + model.EndpointProtocolAttr: test.Protocol2, + model.EndpointReadyAttr: test.EndptReadyTrue, + model.ServicePortNameAttr: test.PortName2, + model.ServicePortAttr: test.ServicePortStr2, + model.ServiceProtocolAttr: test.Protocol2, + model.ServiceTargetPortAttr: test.PortStr2, + model.ServiceTypeAttr: test.SvcType, + model.EndpointHostnameAttr: test.Hostname, + model.EndpointNodeNameAttr: test.Nodename, + model.ServiceExportCreationAttr: strconv.FormatInt(test.SvcExportCreationTimestamp, 10), + model.K8sVersionAttr: test.PackageVersion, + }, + }, + } +} + +func getNamespaceMapForTest() map[string]*model.Namespace { + return map[string]*model.Namespace{ + test.HttpNsName: test.GetTestHttpNamespace(), + test.DnsNsName: test.GetTestDnsNamespace(), + } +} + +func getServiceIdMapForTest() map[string]string { + return map[string]string{test.SvcName: test.SvcId} +} + +func getAttrs2() map[string]string { + return map[string]string{ + model.ClusterIdAttr: test.ClusterId1, + model.ClusterSetIdAttr: test.ClusterSet, + model.EndpointIpv4Attr: test.EndptIp2, + model.EndpointPortAttr: test.PortStr2, + model.EndpointPortNameAttr: test.PortName2, + model.EndpointProtocolAttr: test.Protocol2, + model.EndpointReadyAttr: test.EndptReadyTrue, + model.ServicePortNameAttr: 
test.PortName2, + model.ServicePortAttr: test.ServicePortStr2, + model.ServiceProtocolAttr: test.Protocol2, + model.ServiceTargetPortAttr: test.PortStr2, + model.ServiceTypeAttr: test.SvcType, + model.EndpointHostnameAttr: test.Hostname, + model.EndpointNodeNameAttr: test.Nodename, + model.ServiceExportCreationAttr: strconv.FormatInt(test.SvcExportCreationTimestamp, 10), + model.K8sVersionAttr: test.PackageVersion, + } +} + +func getAttrs1() map[string]string { + return map[string]string{ + model.ClusterIdAttr: test.ClusterId1, + model.ClusterSetIdAttr: test.ClusterSet, + model.EndpointIpv4Attr: test.EndptIp1, + model.EndpointPortAttr: test.PortStr1, + model.EndpointPortNameAttr: test.PortName1, + model.EndpointProtocolAttr: test.Protocol1, + model.EndpointReadyAttr: test.EndptReadyTrue, + model.ServicePortNameAttr: test.PortName1, + model.ServicePortAttr: test.ServicePortStr1, + model.ServiceProtocolAttr: test.Protocol1, + model.ServiceTargetPortAttr: test.PortStr1, + model.ServiceTypeAttr: test.SvcType, + model.EndpointHostnameAttr: test.Hostname, + model.EndpointNodeNameAttr: test.Nodename, + model.ServiceExportCreationAttr: strconv.FormatInt(test.SvcExportCreationTimestamp, 10), + model.K8sVersionAttr: test.PackageVersion, + } +} diff --git a/pkg/cloudmap/endpoint_manager.go b/pkg/cloudmap/endpoint_manager.go deleted file mode 100644 index 27398403..00000000 --- a/pkg/cloudmap/endpoint_manager.go +++ /dev/null @@ -1,291 +0,0 @@ -package cloudmap - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" - "github.com/aws/aws-sdk-go-v2/aws" - sd "github.com/aws/aws-sdk-go-v2/service/servicediscovery" - "github.com/aws/aws-sdk-go-v2/service/servicediscovery/types" - "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/util/cache" - "k8s.io/apimachinery/pkg/util/wait" - ctrl "sigs.k8s.io/controller-runtime" - "strconv" - "time" -) - -const ( - defaultEndpointsCacheTTL = 5 * time.Second - defaultEndpointsCacheSize = 
1024 - - defaultOperationPollInterval = 3 * time.Second -) - -type EndpointManager interface { - ListEndpoints(ctx context.Context, serviceId string) ([]*model.Endpoint, error) - - RegisterEndpoints(ctx context.Context, service *model.Service, serviceId string) error - - DeregisterEndpoints(ctx context.Context, service *model.Service, serviceId string) error -} - -type defaultEndpointManager struct { - log logr.Logger - sdApi *sd.Client - endpointCache *cache.LRUExpireCache - endpointCacheTTL time.Duration - - // interval between each getOperation call - operationPollInterval time.Duration - // maximum retries per getOperation call - operationPollMaxRetries int -} - -func NewEndpointManager(cfg *aws.Config) EndpointManager { - return &defaultEndpointManager{ - log: ctrl.Log.WithName("cloudmap"), - sdApi: sd.NewFromConfig(*cfg), - endpointCache: cache.NewLRUExpireCache(defaultEndpointsCacheSize), - endpointCacheTTL: defaultEndpointsCacheTTL, - operationPollInterval: defaultOperationPollInterval, - } -} - -func (mgr *defaultEndpointManager) ListEndpoints(ctx context.Context, serviceId string) ([]*model.Endpoint, error) { - - if cachedValue, exists := mgr.endpointCache.Get(serviceId); exists { - return cachedValue.([]*model.Endpoint), nil - } - - endpts, endptsErr := mgr.listEndpointsFromCloudMap(ctx, serviceId) - - if endptsErr != nil { - return nil, endptsErr - } - - mgr.endpointCache.Add(serviceId, endpts, defaultEndpointsCacheTTL) - - return endpts, nil -} - -func (mgr *defaultEndpointManager) listEndpointsFromCloudMap(ctx context.Context, svcId string) ([]*model.Endpoint, error) { - endpts := make([]*model.Endpoint, 0) - - pages := sd.NewListInstancesPaginator(mgr.sdApi, &sd.ListInstancesInput{ServiceId: &svcId}) - - for pages.HasMorePages() { - output, err := pages.NextPage(ctx) - if err != nil { - return endpts, err - } - - for _, inst := range output.Instances { - endpt, endptErr := model.NewEndpointFromInstance(&inst) - - if endptErr != nil { - 
mgr.log.Info(fmt.Sprintf("skipping instance %s to endpoint conversion: %s", *inst.Id, endptErr.Error())) - continue - } - - endpts = append(endpts, endpt) - } - } - - return endpts, nil -} - -type opResult struct { - instId string - opId string - err error -} - -func (mgr *defaultEndpointManager) RegisterEndpoints(ctx context.Context, service *model.Service, serviceId string) error { - opChan := make(chan opResult) - startTime := mgr.now() - - for _, endpt := range service.Endpoints { - go mgr.registerInstanceInCloudMap(ctx, serviceId, endpt.Id, endpt.GetAttributes(), opChan) - } - - ops, regSuccess := mgr.getOpsList(len(service.Endpoints), opChan) - opsErr := mgr.pollOperations(ctx, types.OperationTypeRegisterInstance, serviceId, startTime, ops) - - // Evict cache entry so next list call reflects changes - mgr.endpointCache.Remove(serviceId) - - if opsErr != nil { - return opsErr - } - - if !regSuccess { - return errors.New("failure registering endpoints") - } - - return nil -} - -func (mgr *defaultEndpointManager) registerInstanceInCloudMap(ctx context.Context, svcId string, instId string, instAttrs map[string]string, opChan chan opResult) { - - regResp, err := mgr.sdApi.RegisterInstance(ctx, &sd.RegisterInstanceInput{ - Attributes: instAttrs, - InstanceId: &instId, - ServiceId: &svcId, - }) - - opChan <- opResult{instId, aws.ToString(regResp.OperationId), err} -} - -func (mgr *defaultEndpointManager) DeregisterEndpoints(ctx context.Context, service *model.Service, serviceId string) error { - opChan := make(chan opResult) - startTime := mgr.now() - - for _, endpt := range service.Endpoints { - go mgr.deregisterInstanceInCloudMap(ctx, serviceId, endpt.Id, opChan) - } - - ops, deregSuccess := mgr.getOpsList(len(service.Endpoints), opChan) - - opsErr := mgr.pollOperations(ctx, types.OperationTypeDeregisterInstance, serviceId, startTime, ops) - - // Evict cache entry so next list call reflects changes - mgr.endpointCache.Remove(serviceId) - - if opsErr != nil { - 
return opsErr - } - - if !deregSuccess { - return errors.New("failure de-registering endpoints") - } - - return nil -} - -func (mgr *defaultEndpointManager) deregisterInstanceInCloudMap(ctx context.Context, svcId string, instId string, opChan chan opResult) { - deregResp, err := mgr.sdApi.DeregisterInstance(ctx, &sd.DeregisterInstanceInput{ - InstanceId: &instId, - ServiceId: &svcId, - }) - - if err != nil { - opChan <- opResult{instId, "", err} - } - - opChan <- opResult{instId, *deregResp.OperationId, nil} -} - -func (mgr *defaultEndpointManager) getOpsList(opCount int, opChan chan opResult) ([]string, bool) { - success := true - - ops := make([]string, 0) - - for i := 0; i < opCount; i++ { - op := <-opChan - - if op.err != nil { - mgr.log.Info("could not create operation", "error", op.err) - success = false - continue - } - - ops = append(ops, op.opId) - } - - return ops, success -} - -func (mgr *defaultEndpointManager) pollOperations(ctx context.Context, opType types.OperationType, svcId string, startTime int64, ops []string) error { - - if len(ops) == 0 { - mgr.log.Info("no operations to poll") - } - - svcFilter := types.OperationFilter{ - Name: types.OperationFilterNameServiceId, - Values: []string{svcId}, - } - statusFilter := types.OperationFilter{ - Name: types.OperationFilterNameStatus, - Condition: types.FilterConditionIn, - - Values: []string{ - string(types.OperationStatusFail), - string(types.OperationStatusSuccess)}, - } - typeFilter := types.OperationFilter{ - Name: types.OperationFilterNameType, - Values: []string{string(opType)}, - } - - timeFilter := types.OperationFilter{ - Name: types.OperationFilterNameUpdateDate, - Condition: types.FilterConditionBetween, - Values: []string{ - strconv.Itoa(int(startTime)), - // Add one minute to end range in case op updates while list request is in flight - strconv.Itoa(int(mgr.now() + 60000)), - }, - } - - return wait.PollUntil(mgr.operationPollInterval, func() (bool, error) { - mgr.log.Info("polling 
operations", "operations", ops) - completed := 0 - failedOps := make([]string, 0) - - pages := sd.NewListOperationsPaginator(mgr.sdApi, &sd.ListOperationsInput{ - Filters: []types.OperationFilter{svcFilter, statusFilter, typeFilter, timeFilter}, - }) - - for pages.HasMorePages() { - output, err := pages.NextPage(ctx) - - if err != nil { - return true, err - } - - for _, pollOp := range ops { - for _, sdOp := range output.Operations { - if pollOp == aws.ToString(sdOp.Id) { - completed++ - if sdOp.Status == types.OperationStatusFail { - failedOps = append(failedOps, pollOp) - } - } - } - } - } - - if completed != len(ops) { - return false, nil - } - - if len(failedOps) != 0 { - for _, failedOp := range failedOps { - mgr.log.Info("Operation failed", "failedOp", failedOp, "reason", mgr.getFailedOpReason(ctx, failedOp)) - } - return true, fmt.Errorf("operation failure") - } - - mgr.log.Info("operations completed successfully") - return true, nil - }, ctx.Done()) -} - -// getFailedOpReason returns operation error message, which is not available in ListOperations response -func (mgr *defaultEndpointManager) getFailedOpReason(ctx context.Context, op string) string { - opResp, err := mgr.sdApi.GetOperation(ctx, &sd.GetOperationInput{OperationId: &op}) - - if err != nil { - return "failed to retrieve operation failure reason" - } - - return aws.ToString(opResp.Operation.ErrorMessage) -} - -// now returns current time with milliseconds, as used by operation UPDATE_DATE field -func (mgr *defaultEndpointManager) now() int64 { - return time.Now().UnixNano() / 1000000 -} diff --git a/pkg/cloudmap/operation_poller.go b/pkg/cloudmap/operation_poller.go new file mode 100644 index 00000000..d62278d5 --- /dev/null +++ b/pkg/cloudmap/operation_poller.go @@ -0,0 +1,129 @@ +package cloudmap + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" + "github.com/aws/aws-sdk-go-v2/aws" + 
"github.com/aws/aws-sdk-go-v2/service/servicediscovery/types" + "k8s.io/apimachinery/pkg/util/wait" +) + +const ( + // Interval between each getOperation call. + defaultOperationPollInterval = 2 * time.Second + + // Time until we stop polling the operation + defaultOperationPollTimeout = 1 * time.Minute + + operationPollTimoutErrorMessage = "timed out while polling operations" +) + +// OperationPoller polls a list operations for a terminal status +type OperationPoller interface { + // Submit operations to async poll + Submit(ctx context.Context, opProvider func() (opId string, err error)) + + // Poll operations for a terminal state + Poll(ctx context.Context, opId string) (*types.Operation, error) + + // Await waits for all operation results from async poll + Await() (err error) +} + +type operationPoller struct { + log common.Logger + sdApi ServiceDiscoveryApi + opChan chan opResult + waitGroup sync.WaitGroup + pollInterval time.Duration + pollTimeout time.Duration +} + +type opResult struct { + opId string + err error +} + +// NewOperationPoller creates a new operation poller +func NewOperationPoller(sdApi ServiceDiscoveryApi) OperationPoller { + return NewOperationPollerWithConfig(defaultOperationPollInterval, defaultOperationPollTimeout, sdApi) +} + +// NewOperationPollerWithConfig creates a new operation poller +func NewOperationPollerWithConfig(pollInterval, pollTimeout time.Duration, sdApi ServiceDiscoveryApi) OperationPoller { + return &operationPoller{ + log: common.NewLogger("cloudmap", "OperationPoller"), + sdApi: sdApi, + opChan: make(chan opResult), + pollInterval: pollInterval, + pollTimeout: pollTimeout, + } +} + +func (p *operationPoller) Submit(ctx context.Context, opProvider func() (opId string, err error)) { + p.waitGroup.Add(1) + + // Poll for the operation in a separate go routine + go func() { + // Indicate the polling done i.e. 
decrement the WaitGroup counter when the goroutine returns + defer p.waitGroup.Done() + + opId, err := opProvider() + // Poll for the operationId if the provider doesn't throw error + if err == nil { + _, err = p.Poll(ctx, opId) + } + + p.opChan <- opResult{opId: opId, err: err} + }() +} + +func (p *operationPoller) Poll(ctx context.Context, opId string) (op *types.Operation, err error) { + // poll tries a condition func until it returns true, an error, or the timeout is reached. + err = wait.Poll(p.pollInterval, p.pollTimeout, func() (done bool, err error) { + p.log.Info("polling operation", "opId", opId) + + op, err = p.sdApi.GetOperation(ctx, opId) + if err != nil { + return true, err + } + + switch op.Status { + case types.OperationStatusSuccess: + return true, nil + case types.OperationStatusFail: + return true, fmt.Errorf("operation failed, opId: %s, reason: %s", opId, aws.ToString(op.ErrorMessage)) + default: + return false, nil + } + }) + if err == wait.ErrWaitTimeout { + err = fmt.Errorf("%s, opId: %s", operationPollTimoutErrorMessage, opId) + } + + return op, err +} + +func (p *operationPoller) Await() (err error) { + // Run wait in separate go routine to unblock reading from the channel. + go func() { + // Block till the polling done i.e. 
WaitGroup counter is zero, and then close the channel + p.waitGroup.Wait() + close(p.opChan) + }() + + for res := range p.opChan { + if res.err != nil { + p.log.Error(res.err, "operation failed", "opId", res.opId) + err = common.Wrap(err, res.err) + } else { + p.log.Info("operations completed successfully", "opId", res.opId) + } + } + + return err +} diff --git a/pkg/cloudmap/operation_poller_test.go b/pkg/cloudmap/operation_poller_test.go new file mode 100644 index 00000000..87ec236d --- /dev/null +++ b/pkg/cloudmap/operation_poller_test.go @@ -0,0 +1,166 @@ +package cloudmap + +import ( + "context" + "fmt" + "testing" + "time" + + cloudmapMock "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/mocks/pkg/cloudmap" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/servicediscovery/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +const ( + op1 = "one" + op2 = "two" + op3 = "three" + interval = 100 * time.Millisecond + timeout = 500 * time.Millisecond +) + +func TestOperationPoller_HappyCase(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + sdApi := cloudmapMock.NewMockServiceDiscoveryApi(mockController) + + op1First := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opSubmitted(), nil) + op1Second := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opPending(), nil) + op1Third := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opSuccess(), nil) + gomock.InOrder(op1First, op1Second, op1Third) + + op2First := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opPending(), nil) + op2Second := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opSuccess(), nil) + gomock.InOrder(op2First, op2Second) + + sdApi.EXPECT().GetOperation(gomock.Any(), op3).Return(opSuccess(), nil) + + op := NewOperationPollerWithConfig(interval, timeout, sdApi) + op.Submit(context.TODO(), func() (opId string, err error) { return op1, nil }) + 
op.Submit(context.TODO(), func() (opId string, err error) { return op2, nil }) + op.Submit(context.TODO(), func() (opId string, err error) { return op3, nil }) + + result := op.Await() + assert.Nil(t, result) +} + +func TestOperationPoller_AllFail(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + sdApi := cloudmapMock.NewMockServiceDiscoveryApi(mockController) + + op1First := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opSubmitted(), nil) + op1Second := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opPending(), nil) + op1Third := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opFailed(), nil) + gomock.InOrder(op1First, op1Second, op1Third) + + op2First := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opSubmitted(), nil) + op2Second := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opFailed(), nil) + gomock.InOrder(op2First, op2Second) + + op := NewOperationPollerWithConfig(interval, timeout, sdApi) + op.Submit(context.TODO(), func() (opId string, err error) { return op1, nil }) + op.Submit(context.TODO(), func() (opId string, err error) { return op2, nil }) + unknown := "failed to reg error" + op.Submit(context.TODO(), func() (opId string, err error) { + return "", fmt.Errorf(unknown) + }) + + err := op.Await() + assert.NotNil(t, err) + assert.Contains(t, err.Error(), op1) + assert.Contains(t, err.Error(), op2) + assert.Contains(t, err.Error(), unknown) +} + +func TestOperationPoller_Mixed(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + sdApi := cloudmapMock.NewMockServiceDiscoveryApi(mockController) + + op1First := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opSubmitted(), nil) + op1Second := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opPending(), nil) + op1Third := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opFailed(), nil) + gomock.InOrder(op1First, op1Second, op1Third) + + op2First := 
sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opSubmitted(), nil) + op2Second := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opPending(), nil) + op2Third := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opSuccess(), nil) + gomock.InOrder(op2First, op2Second, op2Third) + + op := NewOperationPollerWithConfig(interval, timeout, sdApi) + op.Submit(context.TODO(), func() (opId string, err error) { return op1, nil }) + op.Submit(context.TODO(), func() (opId string, err error) { return op2, nil }) + + err := op.Await() + assert.NotNil(t, err) + assert.Contains(t, err.Error(), op1) + assert.NotContains(t, err.Error(), op2) +} + +func TestOperationPoller_Timeout(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + sdApi := cloudmapMock.NewMockServiceDiscoveryApi(mockController) + + sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opPending(), nil).AnyTimes() + + op2First := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opPending(), nil) + op2Second := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opSuccess(), nil) + gomock.InOrder(op2First, op2Second) + + op := NewOperationPollerWithConfig(interval, timeout, sdApi) + op.Submit(context.TODO(), func() (opId string, err error) { return op1, nil }) + op.Submit(context.TODO(), func() (opId string, err error) { return op2, nil }) + + err := op.Await() + assert.NotNil(t, err) + assert.Contains(t, err.Error(), op1) + assert.Contains(t, err.Error(), operationPollTimoutErrorMessage) + assert.NotContains(t, err.Error(), op2) +} + +func TestOperationPoller_Poll_HappyCase(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + + sdApi := cloudmapMock.NewMockServiceDiscoveryApi(mockController) + + sdApi.EXPECT().GetOperation(context.TODO(), op1).Return(opPending(), nil) + sdApi.EXPECT().GetOperation(context.TODO(), op1).Return(opSuccess(), nil) + + op := NewOperationPollerWithConfig(interval, timeout, sdApi) + 
_, err := op.Poll(context.TODO(), op1) + assert.Nil(t, err) +} + +func opPending() *types.Operation { + return &types.Operation{ + Status: types.OperationStatusPending, + } +} + +func opFailed() *types.Operation { + return &types.Operation{ + Status: types.OperationStatusFail, + ErrorMessage: aws.String("fail"), + } +} + +func opSubmitted() *types.Operation { + return &types.Operation{ + Status: types.OperationStatusSubmitted, + } +} + +func opSuccess() *types.Operation { + return &types.Operation{ + Status: types.OperationStatusSuccess, + } +} diff --git a/pkg/common/errors.go b/pkg/common/errors.go new file mode 100644 index 00000000..97f6537a --- /dev/null +++ b/pkg/common/errors.go @@ -0,0 +1,32 @@ +package common + +import ( + "github.com/pkg/errors" +) + +var notFound = errors.New("resource was not found") + +func IsNotFound(err error) bool { + return errors.Is(err, notFound) +} + +func IsUnknown(err error) bool { + return err != nil && !errors.Is(err, notFound) +} + +func NotFoundError(message string) error { + return errors.Wrap(notFound, message) +} + +func Wrap(err1 error, err2 error) error { + switch { + case err1 != nil && err2 != nil: + return errors.Wrap(err1, err2.Error()) + case err1 != nil: + return err1 + case err2 != nil: + return err2 + default: + return nil + } +} diff --git a/pkg/common/errors_test.go b/pkg/common/errors_test.go new file mode 100644 index 00000000..c84b35b4 --- /dev/null +++ b/pkg/common/errors_test.go @@ -0,0 +1,88 @@ +package common + +import ( + "errors" + "testing" +) + +func TestIsNotFound(t *testing.T) { + type args struct { + err error + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "trueCase", + args: struct{ err error }{err: NotFoundError("1")}, + want: true, + }, + { + name: "falseCase", + args: struct{ err error }{err: errors.New("test")}, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := IsNotFound(tt.args.err); got != tt.want { + 
t.Errorf("IsNotFound() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestIsUnknown(t *testing.T) { + type args struct { + err error + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "trueCase", + args: struct{ err error }{err: errors.New("test")}, + want: true, + }, + { + name: "falseCase", + args: struct{ err error }{err: NotFoundError("1")}, + want: false, + }, + { + name: "nilCase", + args: struct{ err error }{err: nil}, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := IsUnknown(tt.args.err); got != tt.want { + t.Errorf("IsUnknown() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestNotFoundError(t *testing.T) { + tests := []struct { + name string + arg string + }{ + { + name: "happyCase", + arg: "arg", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := NotFoundError(tt.arg); !IsNotFound(err) { + t.Errorf("NotFoundError() error = %v, containsErr = %v", err, notFound) + } + }) + } +} diff --git a/pkg/common/logger.go b/pkg/common/logger.go new file mode 100644 index 00000000..194f011f --- /dev/null +++ b/pkg/common/logger.go @@ -0,0 +1,44 @@ +package common + +import ( + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" +) + +type Logger interface { + Info(msg string, keysAndValues ...interface{}) + Debug(msg string, keysAndValues ...interface{}) + Error(err error, msg string, keysAndValues ...interface{}) +} + +type logger struct { + log logr.Logger +} + +func NewLogger(name string, names ...string) Logger { + l := ctrl.Log.WithName(name) + for _, n := range names { + l = l.WithName(n) + } + return logger{log: l} +} + +func NewLoggerWithLogr(l logr.Logger) Logger { + return logger{log: l} +} + +func (l logger) Info(msg string, keysAndValues ...interface{}) { + l.log.V(0).Info(msg, keysAndValues...) +} + +func (l logger) Debug(msg string, keysAndValues ...interface{}) { + l.log.V(1).Info(msg, keysAndValues...) 
+} + +func (l logger) Error(err error, msg string, keysAndValues ...interface{}) { + l.log.Error(err, msg, keysAndValues...) +} + +func (l logger) WithValues(keysAndValues ...interface{}) Logger { + return logger{log: l.log.WithValues(keysAndValues...)} +} diff --git a/pkg/common/ratelimiter.go b/pkg/common/ratelimiter.go new file mode 100644 index 00000000..6d60f7c6 --- /dev/null +++ b/pkg/common/ratelimiter.go @@ -0,0 +1,49 @@ +package common + +import ( + "context" + "fmt" + + "golang.org/x/time/rate" +) + +const ( + ListNamespaces Event = "ListNamespaces" + ListServices Event = "ListServices" + GetOperation Event = "GetOperation" + DiscoverInstances Event = "DiscoverInstances" + CreateHttpNamespace Event = "CreateHttpNamespace" + CreateService Event = "CreateService" + RegisterInstance Event = "RegisterInstance" + DeregisterInstance Event = "DeregisterInstance" +) + +type Event string + +type RateLimiter struct { + rateLimiters map[Event]*rate.Limiter +} + +// NewDefaultRateLimiter returns the rate limiters with the default limits for the AWS CloudMap's API calls +func NewDefaultRateLimiter() RateLimiter { + return RateLimiter{rateLimiters: map[Event]*rate.Limiter{ + // Below are the default limits for the AWS CloudMap's APIs + // TODO: make it customizable in the future + ListNamespaces: rate.NewLimiter(rate.Limit(0.5), 5), // 1 ListNamespaces API calls per second + ListServices: rate.NewLimiter(rate.Limit(2), 10), // 2 ListServices API calls per second + GetOperation: rate.NewLimiter(rate.Limit(100), 200), // 100 GetOperation API calls per second + DiscoverInstances: rate.NewLimiter(rate.Limit(500), 1000), // 500 DiscoverInstances API calls per second + CreateHttpNamespace: rate.NewLimiter(rate.Limit(0.5), 5), // 1 CreateHttpNamespace API calls per second + CreateService: rate.NewLimiter(rate.Limit(5), 50), // 5 CreateService API calls per second + RegisterInstance: rate.NewLimiter(rate.Limit(50), 100), // 50 RegisterInstance API calls per second + 
DeregisterInstance: rate.NewLimiter(rate.Limit(50), 100), // 50 DeregisterInstance API calls per second + }} +} + +// Wait blocks until limit permits an event to happen. It returns an error if the Context is canceled, or the expected wait time exceeds the Context's Deadline. +func (r RateLimiter) Wait(ctx context.Context, event Event) error { + if limiter, ok := r.rateLimiters[event]; ok { + return limiter.Wait(ctx) + } + return fmt.Errorf("event %s not found in the list of limiters", event) +} diff --git a/pkg/common/ratelimiter_test.go b/pkg/common/ratelimiter_test.go new file mode 100644 index 00000000..f1b6afb1 --- /dev/null +++ b/pkg/common/ratelimiter_test.go @@ -0,0 +1,64 @@ +package common + +import ( + "context" + "testing" +) + +func TestRateLimiter_Wait(t *testing.T) { + type fields struct { + RateLimiter RateLimiter + } + type args struct { + ctx context.Context + event Event + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "happy", + fields: fields{RateLimiter: NewDefaultRateLimiter()}, + args: args{ + ctx: context.TODO(), + event: ListServices, + }, + wantErr: false, + }, + { + name: "not_found", + fields: fields{RateLimiter: NewDefaultRateLimiter()}, + args: args{ + ctx: context.TODO(), + event: "test", + }, + wantErr: true, + }, + { + name: "error_ctx_canceled", + fields: fields{RateLimiter: NewDefaultRateLimiter()}, + args: args{ + ctx: ctxCanceled(context.TODO()), + event: ListNamespaces, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := tt.fields.RateLimiter + if err := r.Wait(tt.args.ctx, tt.args.event); (err != nil) != tt.wantErr { + t.Errorf("Wait() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func ctxCanceled(ctx context.Context) context.Context { + ret, cancel := context.WithCancel(ctx) + defer cancel() // cancel after function call + return ret +} diff --git a/pkg/controllers/cloudmap_controller.go 
b/pkg/controllers/cloudmap_controller.go deleted file mode 100644 index 1c53fc33..00000000 --- a/pkg/controllers/cloudmap_controller.go +++ /dev/null @@ -1,377 +0,0 @@ -package controllers - -import ( - "context" - "crypto/sha256" - "encoding/base32" - "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/api/v1alpha1" - "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/cloudmap" - "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" - "github.com/go-logr/logr" - v1 "k8s.io/api/core/v1" - "k8s.io/api/discovery/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "reflect" - "sigs.k8s.io/controller-runtime/pkg/client" - "strings" - "time" -) - -const ( - // TODO move to configuration - syncPeriod = 2 * time.Second - - DerivedServiceAnnotation = "multicluster.k8s.aws/derived-service" -) - -// CloudMapReconciler reconciles state of Cloud Map services with local ServiceImport objects -type CloudMapReconciler struct { - client.Client - Cloudmap cloudmap.ServiceDiscoveryClient - logr.Logger -} - -// +kubebuilder:rbac:groups=multicluster.x-k8s.io,resources=serviceimports,verbs=get;list;update;patch - -// Start implements manager.Runnable -func (r *CloudMapReconciler) Start(ctx context.Context) error { - ticker := time.NewTicker(syncPeriod) - defer ticker.Stop() - for { - if err := r.Reconcile(ctx); err != nil { - // just log the error and continue running - r.Logger.Error(err, "Cloud Map reconciliation error") - } - select { - case <-ticker.C: - case <-ctx.Done(): - r.Logger.Info("terminating CloudMapReconciler") - return nil - } - } -} - -// Reconcile triggers a single reconciliation round -func (r *CloudMapReconciler) Reconcile(ctx context.Context) error { - - namespaces := v1.NamespaceList{} - if err := r.Client.List(ctx, &namespaces); err != nil { - r.Logger.Error(err, "unable to list namespaces") - return err - } - - for _, ns 
:= range namespaces.Items { - if err := r.reconcileNamespace(ctx, ns.Name); err != nil { - return err - } - } - - return nil -} - -func (r *CloudMapReconciler) reconcileNamespace(ctx context.Context, namespaceName string) error { - r.Logger.Info("syncing namespace", "namespace", namespaceName) - - desiredServices, err := r.Cloudmap.ListServices(ctx, namespaceName) - if err != nil { - return err - } - - serviceImports := v1alpha1.ServiceImportList{} - if err := r.Client.List(ctx, &serviceImports, client.InNamespace(namespaceName)); err != nil { - return nil - } - - existingImportsMap := make(map[string]v1alpha1.ServiceImport) - for _, svc := range serviceImports.Items { - existingImportsMap[svc.Namespace+"/"+svc.Name] = svc - } - - for _, svc := range desiredServices { - if len(svc.Endpoints) == 0 { - // skip empty services - continue - } - - if err := r.reconcileService(ctx, svc); err != nil { - r.Logger.Error(err, "error when syncing service", "namespace", svc.Namespace, "name", svc.Name) - } - delete(existingImportsMap, svc.Namespace+"/"+svc.Name) - } - - // delete remaining imports that have not been matched - for _, i := range existingImportsMap { - r.Client.Delete(ctx, &i) - r.Logger.Info("delete ServiceImport", "namespace", i.Namespace, "name", i.Name) - } - - return nil -} - -func (r *CloudMapReconciler) reconcileService(ctx context.Context, svc *model.Service) error { - r.Logger.Info("syncing service", "namespace", svc.Namespace, "service", svc.Name) - - // create ServiceImport if doesn't exist - svcImport, err := r.getExistingServiceImport(ctx, svc.Namespace, svc.Name) - if err != nil { - if !errors.IsNotFound(err) { - return err - } - - if err2 := r.createServiceImport(ctx, svc.Namespace, svc.Name); err2 != nil { - return err2 - } - return nil - } - - // create derived Service if it doesn't exist - existingService, err := r.getExistingDerivedService(ctx, svc.Namespace, svcImport.Annotations[DerivedServiceAnnotation]) - if err != nil { - if 
!errors.IsNotFound(err) { - return err - } - - if err2 := r.createDerivedService(ctx, svc, svcImport); err2 != nil { - return err2 - } - return nil - } - - // update ServiceImport to match IP and port of previously created service - if err = r.updateServiceImport(ctx, svcImport, existingService); err != nil { - return err - } - - err = r.updateEndpointSlices(ctx, svc, existingService) - if err != nil { - return err - } - - return nil -} - -func (r *CloudMapReconciler) getExistingServiceImport(ctx context.Context, namespace string, name string) (*v1alpha1.ServiceImport, error) { - existingServiceImport := &v1alpha1.ServiceImport{} - err := r.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, existingServiceImport) - - return existingServiceImport, err -} - -func (r *CloudMapReconciler) createServiceImport(ctx context.Context, namespace string, name string) error { - imp := &v1alpha1.ServiceImport{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: name, - Annotations: map[string]string{DerivedServiceAnnotation: DerivedName(namespace, name)}, - }, - Spec: v1alpha1.ServiceImportSpec{ - IPs: []string{}, - Type: v1alpha1.ClusterSetIP, - Ports: []v1alpha1.ServicePort{}, - }, - } - - if err := r.Client.Create(ctx, imp); err != nil { - return err - } - r.Logger.Info("created ServiceImport", "namespace", imp.Namespace, "name", imp.Name) - - return nil -} - -func (r *CloudMapReconciler) getExistingDerivedService(ctx context.Context, namespace string, name string) (*v1.Service, error) { - existingService := &v1.Service{} - err := r.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, existingService) - - return existingService, err -} - -func (r *CloudMapReconciler) createDerivedService(ctx context.Context, svc *model.Service, svcImport *v1alpha1.ServiceImport) error { - toCreate := createDerivedServiceStruct(svc, svcImport) - if err := r.Client.Create(ctx, toCreate); err != nil { - return err - } - r.Logger.Info("created 
derived Service", - "namespace", toCreate.Namespace, "name", toCreate.Name) - - return nil -} - -func (r *CloudMapReconciler) updateEndpointSlices(ctx context.Context, cloudMapService *model.Service, svc *v1.Service) error { - existingSlicesList := v1beta1.EndpointSliceList{} - var existingSlices []*v1beta1.EndpointSlice - if err := r.Client.List(ctx, &existingSlicesList, - client.InNamespace(svc.Namespace), client.MatchingLabels{v1beta1.LabelServiceName: svc.Name}); err != nil { - return err - } - if len(existingSlicesList.Items) == 0 { - // create new endpoint slice - existingSlices = createEndpointSlicesStruct(cloudMapService, svc) - for _, slice := range existingSlices { - if err := r.Client.Create(ctx, slice); err != nil { - return err - } - r.Logger.Info("created EndpointSlice", "namespace", slice.Namespace, "name", slice.Name) - } - } - - // TODO check existing slices match Cloud Map endpoints - - return nil -} - -// DerivedName computes the "placeholder" name for the imported service -func DerivedName(namespace string, name string) string { - hash := sha256.New() - hash.Write([]byte(namespace + name)) - return "imported-" + strings.ToLower(base32.HexEncoding.WithPadding(base32.NoPadding).EncodeToString(hash.Sum(nil)))[:10] -} - -func createDerivedServiceStruct(svc *model.Service, svcImport *v1alpha1.ServiceImport) *v1.Service { - ownerRef := metav1.NewControllerRef(svcImport, schema.GroupVersionKind{ - Version: svcImport.TypeMeta.APIVersion, - Kind: svcImport.TypeMeta.Kind, - }) - - return &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: svcImport.Namespace, - Name: svcImport.Annotations[DerivedServiceAnnotation], - OwnerReferences: []metav1.OwnerReference{*ownerRef}, - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeClusterIP, - Ports: extractServicePorts(svc), - }, - } -} - -func createEndpointSlicesStruct(cloudMapSvc *model.Service, svc *v1.Service) []*v1beta1.EndpointSlice { - slices := make([]*v1beta1.EndpointSlice, 0) - - t := true - - 
endpoints := make([]v1beta1.Endpoint, 0) - for _, ep := range cloudMapSvc.Endpoints { - endpoints = append(endpoints, v1beta1.Endpoint{ - Addresses: []string{ep.IP}, - Conditions: v1beta1.EndpointConditions{ - Ready: &t, - }, - TargetRef: &v1.ObjectReference{ - Kind: "Service", - Namespace: svc.Namespace, - Name: svc.Name, - UID: svc.ObjectMeta.UID, - ResourceVersion: svc.ObjectMeta.ResourceVersion, - }, - }) - } - - // TODO split slices in case there are more than 1000 endpoints - // see https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/0752-endpointslices/README.md - - slices = append(slices, &v1beta1.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{v1beta1.LabelServiceName: svc.Name}, - GenerateName: svc.Name + "-", - OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(svc, schema.GroupVersionKind{ - Version: svc.TypeMeta.APIVersion, - Kind: svc.TypeMeta.Kind, - })}, - Namespace: svc.Namespace, - }, - AddressType: v1beta1.AddressTypeIPv4, - Endpoints: endpoints, - Ports: extractEndpointPorts(cloudMapSvc), - }) - - return slices -} - -func extractServicePorts(svc *model.Service) []v1.ServicePort { - ports := extractPorts(svc) - - servicePorts := make([]v1.ServicePort, 0, len(ports)) - for _, p := range ports { - servicePorts = append(servicePorts, v1.ServicePort{ - Protocol: v1.ProtocolTCP, - Port: p, - }) - } - - return servicePorts -} - -func extractEndpointPorts(svc *model.Service) []v1beta1.EndpointPort { - ports := extractPorts(svc) - - protocol := v1.ProtocolTCP - - endpointPorts := make([]v1beta1.EndpointPort, 0, len(ports)) - for _, p := range ports { - endpointPorts = append(endpointPorts, v1beta1.EndpointPort{ - Protocol: &protocol, - Port: &p, - }) - } - - return endpointPorts -} - -func extractPorts(svc *model.Service) []int32 { - ports := make([]int32, 0) - - portMap := make(map[int32]bool, 0) - - for _, ep := range svc.Endpoints { - portMap[ep.Port] = true - } - - for p, _ := range 
portMap { - ports = append(ports, p) - } - - return ports -} - -func (r *CloudMapReconciler) updateServiceImport(ctx context.Context, svcImport *v1alpha1.ServiceImport, svc *v1.Service) error { - if len(svcImport.Spec.IPs) != 1 || svcImport.Spec.IPs[0] != svc.Spec.ClusterIP || !portsEqual(svcImport, svc) { - svcImport.Spec.IPs = []string{svc.Spec.ClusterIP} - - svcImport.Spec.Ports = make([]v1alpha1.ServicePort, 0) - for _, p := range svc.Spec.Ports { - svcImport.Spec.Ports = append(svcImport.Spec.Ports, servicePortToServiceImport(p)) - } - if err := r.Update(ctx, svcImport); err != nil { - return err - } - r.Logger.Info("updated ServiceImport", - "namespace", svcImport.Namespace, "name", svcImport.Name, - "IP", svcImport.Spec.IPs, "ports", svcImport.Spec.Ports) - } - - return nil -} - -func portsEqual(svcImport *v1alpha1.ServiceImport, svc *v1.Service) bool { - impPorts := svcImport.Spec.Ports - svcPorts := make([]v1alpha1.ServicePort, 0) - for _, p := range svc.Spec.Ports { - svcPorts = append(svcPorts, servicePortToServiceImport(p)) - } - - return reflect.DeepEqual(impPorts, svcPorts) -} - -func servicePortToServiceImport(port v1.ServicePort) v1alpha1.ServicePort { - return v1alpha1.ServicePort{ - Name: port.Name, - Protocol: port.Protocol, - AppProtocol: port.AppProtocol, - Port: port.Port, - } -} diff --git a/pkg/controllers/multicluster/cloudmap_controller.go b/pkg/controllers/multicluster/cloudmap_controller.go new file mode 100644 index 00000000..9a945ba8 --- /dev/null +++ b/pkg/controllers/multicluster/cloudmap_controller.go @@ -0,0 +1,365 @@ +package controllers + +import ( + "context" + "fmt" + "time" + + multiclusterv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/multicluster/v1alpha1" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/cloudmap" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + v1 "k8s.io/api/core/v1" + discovery 
"k8s.io/api/discovery/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // TODO move to configuration + syncPeriod = 2 * time.Second +) + +// CloudMapReconciler reconciles state of Cloud Map services with local ServiceImport objects +type CloudMapReconciler struct { + Client client.Client + Cloudmap cloudmap.ServiceDiscoveryClient + Log common.Logger + ClusterUtils model.ClusterUtils +} + +// +kubebuilder:rbac:groups="",resources=namespaces,verbs=list;watch +// +kubebuilder:rbac:groups="",resources=services,verbs=create;get;list;watch;update;delete +// +kubebuilder:rbac:groups=about.k8s.io,resources=clusterproperties,verbs=create;get;list;watch;update;patch;delete +// +kubebuilder:rbac:groups="discovery.k8s.io",resources=endpointslices,verbs=list;get;create;watch;update;delete;deletecollection +// +kubebuilder:rbac:groups=multicluster.x-k8s.io,resources=serviceimports,verbs=create;get;list;watch;update;patch;delete + +// Start implements manager.Runnable +func (r *CloudMapReconciler) Start(ctx context.Context) error { + ticker := time.NewTicker(syncPeriod) + defer ticker.Stop() + for { + if err := r.Reconcile(ctx); err != nil { + // just log the error and continue running + r.Log.Error(err, "Cloud Map reconciliation error") + } + select { + case <-ticker.C: + case <-ctx.Done(): + r.Log.Info("terminating CloudMapReconciler") + return nil + } + } +} + +// Reconcile triggers a single reconciliation round +func (r *CloudMapReconciler) Reconcile(ctx context.Context) (err error) { + clusterProperties, err := r.ClusterUtils.GetClusterProperties(ctx) + if err != nil { + r.Log.Error(err, "unable to retrieve ClusterId and ClusterSetId") + return err + } + r.Log.Debug("clusterProperties found", "ClusterId", clusterProperties.ClusterId(), "ClusterSetId", clusterProperties.ClusterSetId()) + + namespaces := v1.NamespaceList{} + if err = r.Client.List(ctx, &namespaces); err != nil { + 
r.Log.Error(err, "unable to list cluster namespaces") + return err + } + + for _, ns := range namespaces.Items { + reconErr := r.reconcileNamespace(ctx, ns.Name) + if reconErr != nil { + err = common.Wrap(err, reconErr) + } + } + + return err +} + +func (r *CloudMapReconciler) reconcileNamespace(ctx context.Context, namespaceName string) (err error) { + r.Log.Debug("syncing namespace", "namespace", namespaceName) + + desiredServices, err := r.Cloudmap.ListServices(ctx, namespaceName) + if err != nil { + r.Log.Error(err, "failed to fetch the list Services") + return err + } + + serviceImports := multiclusterv1alpha1.ServiceImportList{} + if err = r.Client.List(ctx, &serviceImports, client.InNamespace(namespaceName)); err != nil { + r.Log.Error(err, "failed to reconcile namespace", "namespace", namespaceName) + return err + } + + existingImportsMap := make(map[string]multiclusterv1alpha1.ServiceImport) + for _, svc := range serviceImports.Items { + existingImportsMap[svc.Name] = svc + } + + for _, svc := range desiredServices { + if len(svc.Endpoints) == 0 { + // skip empty services + continue + } + + if reconErr := r.reconcileService(ctx, svc); reconErr != nil { + r.Log.Error(reconErr, "error when syncing service", "namespace", svc.Namespace, "name", svc.Name) + err = common.Wrap(err, reconErr) + } + delete(existingImportsMap, svc.Name) + } + + // delete remaining imports that have not been matched + for _, i := range existingImportsMap { + r.Log.Info("delete ServiceImport", "namespace", i.Namespace, "name", i.Name) + if deleteErr := r.Client.Delete(ctx, &i); deleteErr != nil { + r.Log.Error(deleteErr, "error deleting ServiceImport", "namespace", i.Namespace, "name", i.Name) + err = common.Wrap(err, deleteErr) + continue + } + } + + return err +} + +func (r *CloudMapReconciler) reconcileService(ctx context.Context, svc *model.Service) error { + r.Log.Debug("syncing service", "namespace", svc.Namespace, "service", svc.Name) + + importedSvcPorts := 
ExtractServicePorts(svc.Endpoints) + + clusterIdToEndpointsMap := make(map[string][]*model.Endpoint) + for _, ep := range svc.Endpoints { + clusterIdToEndpointsMap[ep.ClusterId] = append(clusterIdToEndpointsMap[ep.ClusterId], ep) + } + + clusterIds := make([]string, 0, len(clusterIdToEndpointsMap)) + for clusterId := range clusterIdToEndpointsMap { + clusterIds = append(clusterIds, clusterId) + } + + svcImport, err := r.getServiceImport(ctx, svc.Namespace, svc.Name) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + // create ServiceImport if it doesn't exist + if svcImport, err = r.createAndGetServiceImport(ctx, svc, importedSvcPorts, clusterIds); err != nil { + return err + } + } + + // get or create derived Service for each cluster the service is a member of + derivedServices := make([]*v1.Service, 0, len(clusterIds)) + for _, clusterId := range clusterIds { + endpoints := clusterIdToEndpointsMap[clusterId] + clusterImportedSvcPorts := ExtractServicePorts(endpoints) + + derivedService, err := r.getDerivedService(ctx, svc.Namespace, svc.Name, clusterId) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + // create derived Service if it doesn't exist + if derivedService, err = r.createAndGetDerivedService(ctx, svcImport, clusterId, clusterImportedSvcPorts); err != nil { + return err + } + } + + // update derived Service ports to match imported ports if necessary + if err = r.updateDerivedService(ctx, derivedService, clusterImportedSvcPorts); err != nil { + return err + } + + // update EndpointSlices of this derived Service + if err = r.updateEndpointSlices(ctx, svcImport, endpoints, derivedService, clusterId); err != nil { + return err + } + + derivedServices = append(derivedServices, derivedService) + } + + // remove any existing derived services that do not have any endpoints in cloud map + existingDerivedServices := &v1.ServiceList{} + existingDerivedSvcErr := r.Client.List(ctx, existingDerivedServices, 
client.InNamespace(svcImport.Namespace), client.MatchingLabels{LabelDerivedServiceOriginatingName: svcImport.Name}) + if existingDerivedSvcErr != nil { + r.Log.Error(existingDerivedSvcErr, "failed to list derived services") + return existingDerivedSvcErr + } + for _, derivedService := range existingDerivedServices.Items { + clusterId := derivedService.Labels[LabelSourceCluster] + if _, ok := clusterIdToEndpointsMap[clusterId]; !ok { + if err := r.DeleteDerivedServiceAndEndpointSlices(ctx, &derivedService); err != nil { + return err + } + } + } + + // update service import to match derived service clusterIPs and imported ports if necessary + return r.updateServiceImport(ctx, svcImport, derivedServices, importedSvcPorts) +} + +func (r *CloudMapReconciler) getServiceImport(ctx context.Context, namespace string, name string) (*multiclusterv1alpha1.ServiceImport, error) { + existingServiceImport := &multiclusterv1alpha1.ServiceImport{} + err := r.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, existingServiceImport) + return existingServiceImport, err +} + +func (r *CloudMapReconciler) createAndGetServiceImport(ctx context.Context, svc *model.Service, servicePorts []*model.Port, clusterIds []string) (*multiclusterv1alpha1.ServiceImport, error) { + toCreate := CreateServiceImportStruct(svc, clusterIds, servicePorts) + if err := r.Client.Create(ctx, toCreate); err != nil { + return nil, err + } + r.Log.Info("created ServiceImport", "namespace", svc.Namespace, "name", svc.Name) + + return r.getServiceImport(ctx, svc.Namespace, svc.Name) +} + +func (r *CloudMapReconciler) getDerivedService(ctx context.Context, namespace string, name string, clusterId string) (*v1.Service, error) { + derivedName := DerivedName(namespace, name, clusterId) + existingService := &v1.Service{} + err := r.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: derivedName}, existingService) + return existingService, err +} + +func (r *CloudMapReconciler) 
createAndGetDerivedService(ctx context.Context, svcImport *multiclusterv1alpha1.ServiceImport, clusterId string, svcPorts []*model.Port) (*v1.Service, error) { + toCreate := CreateDerivedServiceStruct(svcImport, svcPorts, clusterId) + if err := r.Client.Create(ctx, toCreate); err != nil { + return nil, err + } + r.Log.Info("created derived Service", "namespace", toCreate.Namespace, "name", toCreate.Name) + + return r.getDerivedService(ctx, svcImport.Namespace, svcImport.Name, clusterId) +} + +func (r *CloudMapReconciler) updateEndpointSlices(ctx context.Context, svcImport *multiclusterv1alpha1.ServiceImport, desiredEndpoints []*model.Endpoint, svc *v1.Service, clusterId string) error { + existingSlicesList := discovery.EndpointSliceList{} + if err := r.Client.List(ctx, &existingSlicesList, + client.InNamespace(svc.Namespace), client.MatchingLabels{discovery.LabelServiceName: svc.Name}); err != nil { + return err + } + + existingSlices := make([]*discovery.EndpointSlice, 0) + for _, existingSlice := range existingSlicesList.Items { + existingSlices = append(existingSlices, &existingSlice) + } + + plan := EndpointSlicePlan{ + Current: existingSlices, + Desired: desiredEndpoints, + Service: svc, + ServiceImportName: svcImport.Name, + ClusterId: clusterId, + } + + changes := plan.CalculateChanges() + + for _, sliceToUpdate := range changes.Update { + r.Log.Debug("updating EndpointSlice", "namespace", sliceToUpdate.Namespace, "name", sliceToUpdate.Name) + if err := r.Client.Update(ctx, sliceToUpdate); err != nil { + return fmt.Errorf("failed to update EndpointSlice: %w", err) + } + } + + for _, sliceToDelete := range changes.Delete { + r.Log.Debug("deleting EndpointSlice", "namespace", sliceToDelete.Namespace, "name", sliceToDelete.Name) + if err := r.Client.Delete(ctx, sliceToDelete); err != nil { + return fmt.Errorf("failed to delete EndpointSlice: %w", err) + } + } + + for _, sliceToCreate := range changes.Create { + r.Log.Debug("creating EndpointSlice", "namespace", 
sliceToCreate.Namespace) + if err := r.Client.Create(ctx, sliceToCreate); err != nil { + return fmt.Errorf("failed to create EndpointSlice: %w", err) + } + } + + return nil +} + +func (r *CloudMapReconciler) updateServiceImport(ctx context.Context, svcImport *multiclusterv1alpha1.ServiceImport, derivedServices []*v1.Service, importedSvcPorts []*model.Port) error { + updateRequired := false + + clusterIPs := GetClusterIpsFromServices(derivedServices) + if !IPsEqualIgnoreOrder(svcImport.Spec.IPs, clusterIPs) { + r.Log.Info("ServiceImport IPs need update", "ServiceImport IPs", svcImport.Spec.IPs, "cluster IPs", clusterIPs) + svcImport.Spec.IPs = clusterIPs + updateRequired = true + } + + // ServiceImport ports do not have TargetPort, exclude field for purpose of comparison + simplifiedSvcPorts := make([]*model.Port, 0) + for _, svcPort := range importedSvcPorts { + simplifiedSvcPorts = append(simplifiedSvcPorts, &model.Port{ + Name: svcPort.Name, + Port: svcPort.Port, + Protocol: svcPort.Protocol, + }) + } + + svcImportPorts := make([]*model.Port, 0) + for _, importPort := range svcImport.Spec.Ports { + port := ServiceImportPortToPort(importPort) + svcImportPorts = append(svcImportPorts, &port) + } + + if !PortsEqualIgnoreOrder(svcImportPorts, simplifiedSvcPorts) { + r.Log.Debug("ServiceImport ports need update", "ServiceImport Ports", svcImport.Spec.Ports, "imported ports", importedSvcPorts) + serviceImportPorts := make([]multiclusterv1alpha1.ServicePort, 0) + for _, port := range importedSvcPorts { + serviceImportPorts = append(serviceImportPorts, PortToServiceImportPort(*port)) + } + svcImport.Spec.Ports = serviceImportPorts + updateRequired = true + } + + if updateRequired { + if err := r.Client.Update(ctx, svcImport); err != nil { + return err + } + r.Log.Info("updated ServiceImport", + "namespace", svcImport.Namespace, "name", svcImport.Name, + "IP", svcImport.Spec.IPs, "ports", svcImport.Spec.Ports) + } + + return nil +} + +func (r *CloudMapReconciler) 
updateDerivedService(ctx context.Context, svc *v1.Service, importedSvcPorts []*model.Port) error { + svcPorts := make([]*model.Port, 0) + for _, p := range svc.Spec.Ports { + port := ServicePortToPort(p) + svcPorts = append(svcPorts, &port) + } + + portsMatch := PortsEqualIgnoreOrder(importedSvcPorts, svcPorts) + if !portsMatch { + newSvcPorts := make([]v1.ServicePort, 0) + for _, importPort := range importedSvcPorts { + newSvcPorts = append(newSvcPorts, PortToServicePort(*importPort)) + } + + svc.Spec.Ports = newSvcPorts + if err := r.Client.Update(ctx, svc); err != nil { + return err + } + r.Log.Info("updated derived Service", + "namespace", svc.Namespace, "name", svc.Name, "ports", svc.Spec.Ports) + } + + return nil +} + +func (r *CloudMapReconciler) DeleteDerivedServiceAndEndpointSlices(ctx context.Context, derivedService *v1.Service) error { + // delete EndpointSlices + if err := r.Client.DeleteAllOf(ctx, &discovery.EndpointSlice{}, client.InNamespace(derivedService.Namespace), client.MatchingLabels{discovery.LabelServiceName: derivedService.Name}); err != nil { + return err + } + // delete Service + r.Log.Info("deleting derived Service", "namespace", derivedService.Namespace, "name", derivedService.Name) + return r.Client.Delete(ctx, derivedService) +} diff --git a/pkg/controllers/multicluster/cloudmap_controller_test.go b/pkg/controllers/multicluster/cloudmap_controller_test.go new file mode 100644 index 00000000..4c8b27ee --- /dev/null +++ b/pkg/controllers/multicluster/cloudmap_controller_test.go @@ -0,0 +1,176 @@ +package controllers + +import ( + "context" + "strings" + "testing" + + aboutv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/about/v1alpha1" + "k8s.io/apimachinery/pkg/runtime" + + cloudmapMock "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/mocks/pkg/cloudmap" + multiclusterv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/multicluster/v1alpha1" + 
"github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/test" + "github.com/go-logr/logr/testr" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + discovery "k8s.io/api/discovery/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestCloudMapReconciler_Reconcile(t *testing.T) { + // create a fake controller client and add some objects + svcImportToBeDeleted := serviceImportForTest("svc1") + fakeClient := fake.NewClientBuilder().WithScheme(getCloudMapReconcilerScheme()). + WithObjects(k8sNamespaceForTest(), serviceImportForTest(test.SvcName), svcImportToBeDeleted, + test.ClusterIdForTest(), test.ClusterSetIdForTest()).Build() + + // create a mock cloudmap service discovery client + mockController := gomock.NewController(t) + defer mockController.Finish() + + mockSDClient := cloudmapMock.NewMockServiceDiscoveryClient(mockController) + // The service model in the Cloudmap + mockSDClient.EXPECT().ListServices(context.TODO(), test.HttpNsName). 
+ Return([]*model.Service{test.GetTestServiceWithEndpoint([]*model.Endpoint{test.GetTestEndpoint1()})}, nil) + + reconciler := getReconciler(t, mockSDClient, fakeClient) + + err := reconciler.Reconcile(context.TODO()) + if err != nil { + t.Fatalf("reconcile failed: (%v)", err) + } + + // assert service import object + serviceImport := &multiclusterv1alpha1.ServiceImport{} + err = fakeClient.Get(context.TODO(), types.NamespacedName{Namespace: test.HttpNsName, Name: test.SvcName}, serviceImport) + assert.NoError(t, err) + assert.Equal(t, test.SvcName, serviceImport.Name, "Service imported") + + // assert derived service is successfully created + derivedServiceList := &v1.ServiceList{} + err = fakeClient.List(context.TODO(), derivedServiceList, client.InNamespace(test.HttpNsName)) + assert.NoError(t, err) + derivedService := derivedServiceList.Items[0] + assertDerivedService(t, &derivedService, test.ServicePort1, test.Port1) + + // assert endpoint slices are created + endpointSliceList := &discovery.EndpointSliceList{} + err = fakeClient.List(context.TODO(), endpointSliceList, client.InNamespace(test.HttpNsName)) + assert.NoError(t, err) + endpointSlice := endpointSliceList.Items[0] + assertEndpointSlice(t, &endpointSlice, test.Port1, test.EndptIp1, test.ClusterId1) + + // assert svcImportToBeDeleted is not found in list + serviceImports := &multiclusterv1alpha1.ServiceImportList{} + err = fakeClient.List(context.TODO(), serviceImports, client.InNamespace(test.HttpNsName)) + assert.NoError(t, err) + assert.True(t, len(serviceImports.Items) == 1) + assert.Equal(t, serviceImports.Items[0].Name, test.SvcName) +} + +func TestCloudMapReconciler_Reconcile_MulticlusterService(t *testing.T) { + // create a fake controller client and add some objects + fakeClient := fake.NewClientBuilder().WithScheme(getCloudMapReconcilerScheme()). 
+ WithObjects(k8sNamespaceForTest(), test.ClusterIdForTest(), test.ClusterSetIdForTest()).Build() + + // create a mock cloudmap service discovery client + mockController := gomock.NewController(t) + defer mockController.Finish() + + mockSDClient := cloudmapMock.NewMockServiceDiscoveryClient(mockController) + // The service model in the Cloudmap. + mockSDClient.EXPECT().ListServices(context.TODO(), test.HttpNsName). + // The multicluster service has endpoints in different clusters (different ClusterIds) + Return([]*model.Service{test.GetTestMulticlusterService()}, nil) + + reconciler := getReconciler(t, mockSDClient, fakeClient) + + err := reconciler.Reconcile(context.TODO()) + if err != nil { + t.Fatalf("reconcile failed: (%v)", err) + } + + // assert service import object + svcImport := &multiclusterv1alpha1.ServiceImport{} + err = fakeClient.Get(context.TODO(), types.NamespacedName{Namespace: test.HttpNsName, Name: test.SvcName}, svcImport) + assert.NoError(t, err) + assert.Equal(t, test.SvcName, svcImport.Name, "Service imported") + + assert.Contains(t, svcImport.Status.Clusters, multiclusterv1alpha1.ClusterStatus{Cluster: test.ClusterId1}) + assert.Contains(t, svcImport.Status.Clusters, multiclusterv1alpha1.ClusterStatus{Cluster: test.ClusterId2}) + assert.Equal(t, 2, len(svcImport.Status.Clusters)) + + // assert derived services are successfully created + derivedServiceList := &v1.ServiceList{} + err = fakeClient.List(context.TODO(), derivedServiceList, client.InNamespace(test.HttpNsName)) + assert.NoError(t, err) + assert.Equal(t, 2, len(derivedServiceList.Items)) + + derivedServiceMap := map[string]v1.Service{} + for _, derivedService := range derivedServiceList.Items { + derivedServiceMap[derivedService.ObjectMeta.Name] = derivedService + } + + derivedService1 := derivedServiceMap[DerivedName(svcImport.Namespace, svcImport.Name, test.ClusterId1)] + assertDerivedService(t, &derivedService1, test.ServicePort1, test.Port1) + derivedService2 := 
derivedServiceMap[DerivedName(svcImport.Namespace, svcImport.Name, test.ClusterId2)] + assertDerivedService(t, &derivedService2, test.ServicePort2, test.Port2) + + // assert endpoint slices are created for each derived service + endpointSliceList := &discovery.EndpointSliceList{} + err = fakeClient.List(context.TODO(), endpointSliceList, client.InNamespace(test.HttpNsName)) + assert.NoError(t, err) + assert.Equal(t, 2, len(endpointSliceList.Items)) + + endpointSliceMap := make(map[string]discovery.EndpointSlice) + for _, endpointSlice := range endpointSliceList.Items { + endpointSliceName := endpointSlice.ObjectMeta.Name + derivedServiceName := endpointSliceName[:strings.LastIndex(endpointSliceName, "-")] + endpointSliceMap[derivedServiceName] = endpointSlice + } + + endpointSlice1 := endpointSliceMap[derivedService1.Name] + assertEndpointSlice(t, &endpointSlice1, test.Port1, test.EndptIp1, test.ClusterId1) + endpointSlice2 := endpointSliceMap[derivedService2.Name] + assertEndpointSlice(t, &endpointSlice2, test.Port2, test.EndptIp2, test.ClusterId2) +} + +func getCloudMapReconcilerScheme() *runtime.Scheme { + s := scheme.Scheme + s.AddKnownTypes(multiclusterv1alpha1.GroupVersion, &multiclusterv1alpha1.ServiceImportList{}, &multiclusterv1alpha1.ServiceImport{}) + s.AddKnownTypes(aboutv1alpha1.GroupVersion, &aboutv1alpha1.ClusterProperty{}, &aboutv1alpha1.ClusterPropertyList{}) + return s +} + +func getReconciler(t *testing.T, mockSDClient *cloudmapMock.MockServiceDiscoveryClient, client client.Client) *CloudMapReconciler { + return &CloudMapReconciler{ + Client: client, + Cloudmap: mockSDClient, + Log: common.NewLoggerWithLogr(testr.New(t)), + ClusterUtils: model.NewClusterUtils(client), + } +} + +func assertDerivedService(t *testing.T, derivedService *v1.Service, servicePort int, port int) { + assert.NotNil(t, derivedService) + assert.True(t, strings.Contains(derivedService.Name, "imported-"), "Derived service created", "service", derivedService.Name) + 
assert.Equal(t, int32(servicePort), derivedService.Spec.Ports[0].Port) + assert.Equal(t, int32(port), derivedService.Spec.Ports[0].TargetPort.IntVal) +} + +func assertEndpointSlice(t *testing.T, endpointSlice *discovery.EndpointSlice, port int, endptIp string, clusterId string) { + assert.NotNil(t, endpointSlice) + assert.Equal(t, test.SvcName, endpointSlice.Labels["multicluster.kubernetes.io/service-name"], "Endpoint slice is created") + assert.Equal(t, clusterId, endpointSlice.Labels["multicluster.kubernetes.io/source-cluster"], "Endpoint slice is created") + assert.Contains(t, endpointSlice.Labels, LabelEndpointSliceManagedBy, "Managed by label is added") + assert.Equal(t, int32(port), *endpointSlice.Ports[0].Port) + assert.Equal(t, 1, len(endpointSlice.Endpoints)) + assert.Equal(t, endptIp, endpointSlice.Endpoints[0].Addresses[0]) +} diff --git a/pkg/controllers/multicluster/controllers_common_test.go b/pkg/controllers/multicluster/controllers_common_test.go new file mode 100644 index 00000000..37dce06f --- /dev/null +++ b/pkg/controllers/multicluster/controllers_common_test.go @@ -0,0 +1,109 @@ +package controllers + +import ( + "strconv" + "time" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + + multiclusterv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/multicluster/v1alpha1" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/test" + "github.com/aws/aws-sdk-go-v2/aws" + v1 "k8s.io/api/core/v1" + discovery "k8s.io/api/discovery/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// Factory functions for testing + +func k8sNamespaceForTest() *v1.Namespace { + return &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: test.HttpNsName, + Namespace: test.HttpNsName, + }, + } +} + +func k8sServiceForTest() *v1.Service { + return &v1.Service{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: test.SvcName, + Namespace: test.HttpNsName, + }, + 
Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: test.PortName1, + Protocol: test.Protocol1, + Port: test.ServicePort1, + TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: test.Port1}, + }}, + }, + Status: v1.ServiceStatus{}, + } +} + +func serviceExportForTest() *multiclusterv1alpha1.ServiceExport { + creationTimestamp := metav1.NewTime(time.UnixMilli(test.SvcExportCreationTimestamp)) + return &multiclusterv1alpha1.ServiceExport{ + ObjectMeta: metav1.ObjectMeta{ + Name: test.SvcName, + Namespace: test.HttpNsName, + CreationTimestamp: creationTimestamp, + }, + } +} + +func serviceImportForTest(svcName string) *multiclusterv1alpha1.ServiceImport { + return &multiclusterv1alpha1.ServiceImport{ + ObjectMeta: metav1.ObjectMeta{ + Name: svcName, + Namespace: test.HttpNsName, + }, + } +} + +func endpointSliceForTest() *discovery.EndpointSlice { + port := int32(test.Port1) + protocol := v1.ProtocolTCP + nodename := test.Nodename + hostname := test.Hostname + ready, _ := strconv.ParseBool(test.EndptReadyTrue) + return &discovery.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: test.HttpNsName, + Name: test.SvcName + "-slice", + Labels: map[string]string{discovery.LabelServiceName: test.SvcName}, + }, + AddressType: discovery.AddressTypeIPv4, + Endpoints: []discovery.Endpoint{{ + Addresses: []string{test.EndptIp1}, + Conditions: discovery.EndpointConditions{ + Ready: aws.Bool(ready), + }, + NodeName: &nodename, + Hostname: &hostname, + }}, + Ports: []discovery.EndpointPort{{ + Name: aws.String(test.PortName1), + Protocol: &protocol, + Port: &port, + }}, + } +} + +func endpointSliceFromEndpointsForTest(endpts []*model.Endpoint, ports []discovery.EndpointPort) *discovery.EndpointSlice { + svc := k8sServiceForTest() + slice := CreateEndpointSliceStruct(svc, test.SvcName, test.ClusterId1, endpts[0].AddressType) + slice.Ports = ports + + testEndpoints := make([]discovery.Endpoint, 0) + for _, endpt := range endpts { + testEndpoints = 
append(testEndpoints, CreateEndpointForSlice(svc, endpt)) + } + slice.Endpoints = testEndpoints + + return slice +} diff --git a/pkg/controllers/multicluster/endpointslice_plan.go b/pkg/controllers/multicluster/endpointslice_plan.go new file mode 100644 index 00000000..1653ea83 --- /dev/null +++ b/pkg/controllers/multicluster/endpointslice_plan.go @@ -0,0 +1,195 @@ +package controllers + +import ( + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + v1 "k8s.io/api/core/v1" + discovery "k8s.io/api/discovery/v1" +) + +const defaultMaxEndpointsPerSlice = 100 + +type EndpointSliceChanges struct { + // Create: List of EndpointSlices that need to be created + Create []*discovery.EndpointSlice + // Update: List of EndpointSlices that need to be updated + Update []*discovery.EndpointSlice + // Delete: List of EndpointSlices that need to be deleted + Delete []*discovery.EndpointSlice + // Unmodified: List of EndpointSlices that do not need to be changed + Unmodified []*discovery.EndpointSlice +} + +type EndpointSlicePlan struct { + // maxEndpointsPerSlice defaults to 100 + maxEndpointsPerSlice int + + // Service to reconcile endpoints in + Service *v1.Service + + // ServiceImportName name used to create new EndpointSlices + ServiceImportName string + + // Current EndpointSlices + Current []*discovery.EndpointSlice + + // Desired Endpoints + Desired []*model.Endpoint + + // Cluster the EndpointSlice belongs to + ClusterId string +} + +// CheckAddressType TODO: Will need to improve how IP Type is determined when we implement dual stack. +func (p *EndpointSlicePlan) CheckAddressType() discovery.AddressType { + // Peek at the first endpoint for its AddressType. All endpoints in a slice will be of the same AddressType. 
+ if len(p.Desired) == 0 { + return discovery.AddressTypeIPv4 + } + return p.Desired[0].AddressType +} + +// CalculateChanges returns list of EndpointSlice Changes that need to be applied +func (p *EndpointSlicePlan) CalculateChanges() EndpointSliceChanges { + // populate map of desired endpoints for lookup efficiency + desiredEndpoints := make(map[string]*model.Endpoint) + for _, desiredEndpoint := range p.Desired { + desiredEndpoints[desiredEndpoint.IP] = desiredEndpoint + } + + desiredPorts := ExtractEndpointPorts(p.Desired) + + // Remove unwanted endpoints from slices + changes := p.trimSlices(desiredEndpoints, desiredPorts) + + // Add new endpoints to slices + for len(desiredEndpoints) > 0 { + sliceWithRoom, needsPortUpdate := p.getOrCreateUnfilledEndpointSlice(&changes, len(desiredEndpoints)) + + for key, endpointToAdd := range desiredEndpoints { + roomInSlice := p.getMaxEndpointsPerSlice() - len(sliceWithRoom.Endpoints) + if roomInSlice <= 0 { + // stop adding to slice once it is full + break + } + sliceWithRoom.Endpoints = append(sliceWithRoom.Endpoints, CreateEndpointForSlice(p.Service, endpointToAdd)) + delete(desiredEndpoints, key) + } + + if needsPortUpdate { + newPorts := portSliceToEndpointPortSlice(desiredPorts) + sliceWithRoom.Ports = newPorts + } + } + + return changes +} + +func (p *EndpointSlicePlan) trimSlices(desiredEndpoints map[string]*model.Endpoint, desiredPorts []*model.Port) (changes EndpointSliceChanges) { + // remove all undesired existing endpoints in slices + for _, existingSlice := range p.Current { + updatedEndpointList := make([]discovery.Endpoint, 0) + + sliceNeedsUpdateConditions := false + + for _, existingEndpoint := range existingSlice.Endpoints { + key := existingEndpoint.Addresses[0] + + if _, found := desiredEndpoints[key]; found { + // if different ready status, set sliceNeedsUpdateConditions to true + // NOTE(review): compare the pointed-to ready values, not pointer identity — + // comparing against a fresh &field address is always unequal + if existingEndpoint.Conditions.Ready == nil || *existingEndpoint.Conditions.Ready != desiredEndpoints[key].Ready { + sliceNeedsUpdateConditions = true + 
existingEndpoint.Conditions.Ready = &desiredEndpoints[key].Ready + } + updatedEndpointList = append(updatedEndpointList, existingEndpoint) + delete(desiredEndpoints, key) + } + } + + // mark slice for deletion if all endpoints were removed + if len(updatedEndpointList) == 0 { + changes.Delete = append(changes.Delete, existingSlice) + continue + } + + sliceNeedsUpdate := false + + // slice needs to be updated if ports do not match + if !PortsEqualIgnoreOrder(desiredPorts, endpointPortSliceToPortSlice(existingSlice.Ports)) { + existingSlice.Ports = portSliceToEndpointPortSlice(desiredPorts) + sliceNeedsUpdate = true + } + + // slice needs to be updated if endpoint list changed + if len(updatedEndpointList) != len(existingSlice.Endpoints) || sliceNeedsUpdateConditions { + existingSlice.Endpoints = updatedEndpointList + sliceNeedsUpdate = true + } + + if sliceNeedsUpdate { + changes.Update = append(changes.Update, existingSlice) + } else { + changes.Unmodified = append(changes.Unmodified, existingSlice) + } + } + + return changes +} + +func (p *EndpointSlicePlan) getOrCreateUnfilledEndpointSlice(changes *EndpointSliceChanges, requiredCapacity int) (sliceWithRoom *discovery.EndpointSlice, needsPortUpdate bool) { + // Prefer slices we are already updating + for _, sliceToUpdate := range changes.Update { + if len(sliceToUpdate.Endpoints) < p.getMaxEndpointsPerSlice() { + return sliceToUpdate, false + } + } + + // Update a slice marked for deletion if possible + if len(changes.Delete) > 0 { + sliceToReuse := changes.Delete[0] + changes.Delete = changes.Delete[1:] + changes.Update = append(changes.Update, sliceToReuse) + + // clear endpoint list that was marked for deletion before reusing + sliceToReuse.Endpoints = []discovery.Endpoint{} + return sliceToReuse, true + } + + // Update an unmodified slice if it has capacity to add all endpoints + for i, unmodifiedSlice := range changes.Unmodified { + proposedSliceLength := len(unmodifiedSlice.Endpoints) + requiredCapacity + if 
proposedSliceLength <= p.getMaxEndpointsPerSlice() { + changes.Unmodified = append(changes.Unmodified[:i], changes.Unmodified[i+1:]...) + changes.Update = append(changes.Update, unmodifiedSlice) + return unmodifiedSlice, false + } + } + + // No existing slices can fill new endpoint requirements so create a new slice + sliceToCreate := CreateEndpointSliceStruct(p.Service, p.ServiceImportName, p.ClusterId, p.CheckAddressType()) + changes.Create = append(changes.Create, sliceToCreate) + return sliceToCreate, true +} + +func (p *EndpointSlicePlan) getMaxEndpointsPerSlice() int { + if p.maxEndpointsPerSlice != 0 { + return p.maxEndpointsPerSlice + } + + return defaultMaxEndpointsPerSlice +} + +func endpointPortSliceToPortSlice(endpointPorts []discovery.EndpointPort) (ports []*model.Port) { + for _, endpointPort := range endpointPorts { + port := EndpointPortToPort(endpointPort) + ports = append(ports, &port) + } + return ports +} + +func portSliceToEndpointPortSlice(ports []*model.Port) (endpointPorts []discovery.EndpointPort) { + for _, port := range ports { + endpointPort := PortToEndpointPort(*port) + endpointPorts = append(endpointPorts, endpointPort) + } + return endpointPorts +} diff --git a/pkg/controllers/multicluster/endpointslice_plan_test.go b/pkg/controllers/multicluster/endpointslice_plan_test.go new file mode 100644 index 00000000..512252e8 --- /dev/null +++ b/pkg/controllers/multicluster/endpointslice_plan_test.go @@ -0,0 +1,245 @@ +package controllers + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/test" + "github.com/stretchr/testify/assert" + discovery "k8s.io/api/discovery/v1" +) + +func TestCheckAddressType(t *testing.T) { + tests := []struct { + name string + want discovery.AddressType + slicePlan EndpointSlicePlan + }{ + { + name: "happy case ipv4", + want: discovery.AddressTypeIPv4, + slicePlan: EndpointSlicePlan{ + 
maxEndpointsPerSlice: 0, + Service: nil, + ServiceImportName: "", + Current: nil, + Desired: []*model.Endpoint{test.GetTestEndpoint1()}, + ClusterId: "", + }, + }, + { + name: "happy case ipv6", + want: discovery.AddressTypeIPv6, + slicePlan: EndpointSlicePlan{ + maxEndpointsPerSlice: 0, + Service: nil, + ServiceImportName: "", + Current: nil, + Desired: []*model.Endpoint{test.GetTestEndpointIpv6()}, + ClusterId: "", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.slicePlan.CheckAddressType(); got != tt.want { + t.Errorf("CheckAddressType() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestEndpointSlicePlan_CalculateChanges(t *testing.T) { + type fields struct { + Current []*discovery.EndpointSlice + Desired []*model.Endpoint + } + tests := []struct { + name string + fields fields + want EndpointSliceChanges + }{ + { + name: "no changes", + fields: fields{ + Current: []*discovery.EndpointSlice{endpointSliceForTest()}, + Desired: []*model.Endpoint{test.GetTestEndpoint1()}, + }, + want: EndpointSliceChanges{ + Unmodified: []*discovery.EndpointSlice{endpointSliceForTest()}, + }, + }, + { + name: "delete slice", + fields: fields{ + Current: []*discovery.EndpointSlice{endpointSliceForTest()}, + Desired: []*model.Endpoint{}, + }, + want: EndpointSliceChanges{ + Delete: []*discovery.EndpointSlice{endpointSliceForTest()}, + }, + }, + { + name: "new slice", + fields: fields{ + Current: []*discovery.EndpointSlice{}, + Desired: []*model.Endpoint{test.GetTestEndpoint1()}, + }, + want: EndpointSliceChanges{ + Create: []*discovery.EndpointSlice{ + endpointSliceFromEndpointsForTest([]*model.Endpoint{test.GetTestEndpoint1()}, []discovery.EndpointPort{ + PortToEndpointPort(test.GetTestEndpoint1().EndpointPort), + }), + }, + }, + }, + { + name: "removed endpoint needs slice update", + fields: fields{ + Current: []*discovery.EndpointSlice{ + endpointSliceFromEndpointsForTest( + []*model.Endpoint{test.GetTestEndpoint1(), 
test.GetTestEndpoint2()}, + []discovery.EndpointPort{ + PortToEndpointPort(test.GetTestEndpoint1().EndpointPort), + }), + }, + Desired: []*model.Endpoint{test.GetTestEndpoint2()}, + }, + want: EndpointSliceChanges{ + Update: []*discovery.EndpointSlice{ + endpointSliceFromEndpointsForTest([]*model.Endpoint{test.GetTestEndpoint2()}, []discovery.EndpointPort{ + PortToEndpointPort(test.GetTestEndpoint2().EndpointPort), + }), + }, + }, + }, + { + name: "added endpoint needs slice update", + fields: fields{ + Current: []*discovery.EndpointSlice{ + endpointSliceFromEndpointsForTest([]*model.Endpoint{test.GetTestEndpoint1()}, + []discovery.EndpointPort{ + PortToEndpointPort(model.Port{Name: test.PortName1, Port: test.Port1, Protocol: test.Protocol1}), + }), + }, + Desired: []*model.Endpoint{ + test.GetTestEndpoint1(), + { + Id: test.EndptId2, + IP: test.EndptIp2, + Ready: true, + Hostname: test.Hostname, + Nodename: test.Nodename, + EndpointPort: model.Port{ + Name: test.PortName1, + Port: test.Port1, + Protocol: test.Protocol1, + }, + }, + }, + }, + want: EndpointSliceChanges{ + Update: []*discovery.EndpointSlice{ + endpointSliceFromEndpointsForTest([]*model.Endpoint{test.GetTestEndpoint1(), test.GetTestEndpoint2()}, + []discovery.EndpointPort{ + PortToEndpointPort(test.GetTestEndpoint1().EndpointPort), + }), + }, + Unmodified: []*discovery.EndpointSlice{}, + }, + }, + { + name: "swapped endpoints need slice update", + fields: fields{ + Current: []*discovery.EndpointSlice{ + endpointSliceFromEndpointsForTest([]*model.Endpoint{test.GetTestEndpoint1()}, + []discovery.EndpointPort{ + PortToEndpointPort(test.GetTestEndpoint2().EndpointPort), + }), + }, + Desired: []*model.Endpoint{ + test.GetTestEndpoint2(), + }, + }, + want: EndpointSliceChanges{ + Update: []*discovery.EndpointSlice{ + endpointSliceFromEndpointsForTest([]*model.Endpoint{test.GetTestEndpoint2()}, + []discovery.EndpointPort{ + PortToEndpointPort(test.GetTestEndpoint2().EndpointPort), + }), + }, + Delete: 
[]*discovery.EndpointSlice{}, + }, + }, + { + name: "changed ports need slice update", + fields: fields{ + Current: []*discovery.EndpointSlice{ + endpointSliceFromEndpointsForTest([]*model.Endpoint{test.GetTestEndpoint2()}, + []discovery.EndpointPort{ + PortToEndpointPort(test.GetTestEndpoint1().EndpointPort), + }), + }, + Desired: []*model.Endpoint{ + test.GetTestEndpoint2(), + }, + }, + want: EndpointSliceChanges{ + Update: []*discovery.EndpointSlice{ + endpointSliceFromEndpointsForTest([]*model.Endpoint{test.GetTestEndpoint2()}, + []discovery.EndpointPort{ + PortToEndpointPort(test.GetTestEndpoint2().EndpointPort), + }), + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := &EndpointSlicePlan{ + Service: k8sServiceForTest(), + ServiceImportName: test.SvcName, + ClusterId: test.ClusterId1, + Current: tt.fields.Current, + Desired: tt.fields.Desired, + } + if got := p.CalculateChanges(); !reflect.DeepEqual(got, tt.want) { + gotJson, _ := json.MarshalIndent(got, "", " ") + wantJson, _ := json.MarshalIndent(tt.want, "", " ") + t.Errorf("CalculateChanges() = \n%s\nwant = \n%s", gotJson, wantJson) + } + }) + } +} + +func TestEndpointSlicePlan_MultipleSliceCreation(t *testing.T) { + p := &EndpointSlicePlan{ + maxEndpointsPerSlice: 2, + Service: k8sServiceForTest(), + ServiceImportName: test.SvcName, + ClusterId: test.ClusterId1, + Current: []*discovery.EndpointSlice{}, + Desired: test.GetTestEndpoints(43), + } + changes := p.CalculateChanges() + assert.Equal(t, 22, len(changes.Create)) +} + +func TestEndpointSlicePlan_PreferCreateOverMultipleSliceUpdate(t *testing.T) { + p := &EndpointSlicePlan{ + maxEndpointsPerSlice: 2, + Service: k8sServiceForTest(), + ServiceImportName: test.SvcName, + ClusterId: test.ClusterId1, + Current: []*discovery.EndpointSlice{endpointSliceForTest()}, + Desired: []*model.Endpoint{test.GetTestEndpoint1()}, + } + p.Desired = append(p.Desired, test.GetTestEndpoints(2)...) 
+ changes := p.CalculateChanges() + assert.Equal(t, 1, len(changes.Create)) + assert.Equal(t, 1, len(changes.Unmodified)) + assert.Equal(t, 0, len(changes.Update)) + assert.Equal(t, 0, len(changes.Delete)) +} diff --git a/pkg/controllers/multicluster/serviceexport_controller.go b/pkg/controllers/multicluster/serviceexport_controller.go new file mode 100644 index 00000000..34dabdd4 --- /dev/null +++ b/pkg/controllers/multicluster/serviceexport_controller.go @@ -0,0 +1,411 @@ +package controllers + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + + aboutv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/about/v1alpha1" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/cloudmap" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/version" + "github.com/aws/aws-sdk-go-v2/aws" + v1 "k8s.io/api/core/v1" + discovery "k8s.io/api/discovery/v1" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + multiclusterv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/multicluster/v1alpha1" +) + +// ServiceExportReconciler reconciles a ServiceExport object +type ServiceExportReconciler struct { + Client client.Client + Log common.Logger + Scheme *runtime.Scheme + CloudMap cloudmap.ServiceDiscoveryClient + ClusterUtils model.ClusterUtils +} 
+ +// +kubebuilder:rbac:groups="",resources=services,verbs=get +// +kubebuilder:rbac:groups=about.k8s.io,resources=clusterproperties,verbs=create;get;list;watch;update;patch;delete +// +kubebuilder:rbac:groups="discovery.k8s.io",resources=endpointslices,verbs=list;watch;create +// +kubebuilder:rbac:groups=multicluster.x-k8s.io,resources=serviceexports,verbs=get;list;watch;update;patch +// +kubebuilder:rbac:groups=multicluster.x-k8s.io,resources=serviceexports/finalizers,verbs=get;update + +func (r *ServiceExportReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + namespace := req.Namespace + name := req.NamespacedName + r.Log.Debug("reconciling ServiceExport", "Namespace", namespace, "Name", name) + + serviceExport := multiclusterv1alpha1.ServiceExport{} + if err := r.Client.Get(ctx, name, &serviceExport); err != nil { + if errors.IsNotFound(err) { + r.Log.Debug("no ServiceExport found", + "Namespace", namespace, "Name", name) + } else { + r.Log.Error(err, "error fetching ServiceExport", + "Namespace", namespace, "Name", name) + } + return ctrl.Result{}, nil + } + + // Check whether the ServiceExport is marked for deletion, which is indicated by the deletion timestamp being set. 
+ isServiceExportMarkedForDelete := !serviceExport.GetDeletionTimestamp().IsZero() + + service := v1.Service{} + namespacedName := types.NamespacedName{Namespace: serviceExport.Namespace, Name: serviceExport.Name} + if err := r.Client.Get(ctx, namespacedName, &service); err != nil { + if errors.IsNotFound(err) { + r.Log.Info("no Service found, deleting the ServiceExport", "Namespace", serviceExport.Namespace, "Name", serviceExport.Name) + // Mark ServiceExport to be deleted, if the corresponding Service is not found + isServiceExportMarkedForDelete = true + } else { + r.Log.Error(err, "error fetching Service", "Namespace", serviceExport.Namespace, "Name", serviceExport.Name) + return ctrl.Result{}, nil + } + } + + clusterProperties, err := r.ClusterUtils.GetClusterProperties(ctx) + if err != nil { + r.Log.Error(err, "unable to retrieve ClusterId and ClusterSetId") + return ctrl.Result{}, err + } + + // Check if the service export is marked to be deleted + if isServiceExportMarkedForDelete { + return r.handleDelete(ctx, clusterProperties.ClusterId(), &serviceExport) + } + + return r.handleUpdate(ctx, clusterProperties.ClusterId(), &serviceExport, &service) +} + +func (r *ServiceExportReconciler) handleUpdate(ctx context.Context, clusterId string, serviceExport *multiclusterv1alpha1.ServiceExport, service *v1.Service) (ctrl.Result, error) { + err := r.addFinalizerAndOwnerRef(ctx, serviceExport, service) + if err != nil { + return ctrl.Result{}, err + } + + r.Log.Info("updating Cloud Map service", "namespace", service.Namespace, "name", service.Name) + cmService, err := r.createOrGetCloudMapService(ctx, service) + if err != nil { + r.Log.Error(err, "error fetching Service from Cloud Map", "namespace", service.Namespace, "name", service.Name) + return ctrl.Result{}, err + } + + endpoints, err := r.extractEndpoints(ctx, service, serviceExport) + if err != nil { + r.Log.Error(err, "error extracting Endpoints", "namespace", serviceExport.Namespace, "name", 
serviceExport.Name) + return ctrl.Result{}, err + } + + // Compute diff between Cloud Map and K8s endpoints, and apply changes + plan := model.Plan{ + Current: cmService.GetEndpoints(clusterId), + Desired: endpoints, + } + changes := plan.CalculateChanges() + + if changes.HasUpdates() { + // merge creates and updates (Cloud Map RegisterEndpoints can handle both) + upserts := changes.Create + upserts = append(upserts, changes.Update...) + + if err := r.CloudMap.RegisterEndpoints(ctx, service.Namespace, service.Name, upserts); err != nil { + r.Log.Error(err, "error registering Endpoints to Cloud Map", "namespace", service.Namespace, "name", service.Name) + return ctrl.Result{}, err + } + } + + if changes.HasDeletes() { + if err := r.CloudMap.DeleteEndpoints(ctx, service.Namespace, service.Name, changes.Delete); err != nil { + r.Log.Error(err, "error deleting Endpoints from Cloud Map", "namespace", cmService.Namespace, "name", cmService.Name) + return ctrl.Result{}, err + } + } + + if changes.IsNone() { + r.Log.Info("no changes to export to Cloud Map", "namespace", service.Namespace, "name", service.Name) + } + + return ctrl.Result{}, nil +} + +func (r *ServiceExportReconciler) addFinalizerAndOwnerRef(ctx context.Context, serviceExport *multiclusterv1alpha1.ServiceExport, service *v1.Service) error { + // Add the finalizer to the service export if not present, ensures the ServiceExport won't be deleted + if !controllerutil.ContainsFinalizer(serviceExport, ServiceExportFinalizer) { + controllerutil.AddFinalizer(serviceExport, ServiceExportFinalizer) + if err := r.Client.Update(ctx, serviceExport); err != nil { + r.Log.Error(err, "error adding finalizer", "Namespace", serviceExport.Namespace, "Name", serviceExport.Name) + return err + } + } + if len(serviceExport.GetOwnerReferences()) == 0 { + err := controllerutil.SetControllerReference(service, serviceExport, r.Scheme) + if err == nil { + err = r.Client.Update(ctx, serviceExport) + } + if err != nil { + 
r.Log.Error(err, "error setting Service as an owner of the ServiceExport", "namespace", service.Namespace, "name", service.Name) + return err + } + } + return nil +} + +func (r *ServiceExportReconciler) createOrGetCloudMapService(ctx context.Context, service *v1.Service) (*model.Service, error) { + cmService, err := r.CloudMap.GetService(ctx, service.Namespace, service.Name) + if common.IsUnknown(err) { + return nil, err + } + + if common.IsNotFound(err) { + err = r.CloudMap.CreateService(ctx, service.Namespace, service.Name) + if err != nil { + r.Log.Error(err, "error creating a new Service in Cloud Map", "namespace", service.Namespace, "name", service.Name) + return nil, err + } + if cmService, err = r.CloudMap.GetService(ctx, service.Namespace, service.Name); err != nil { + return nil, err + } + } + + return cmService, nil +} + +func (r *ServiceExportReconciler) handleDelete(ctx context.Context, clusterId string, serviceExport *multiclusterv1alpha1.ServiceExport) (ctrl.Result, error) { + if controllerutil.ContainsFinalizer(serviceExport, ServiceExportFinalizer) { + r.Log.Info("removing service export", "namespace", serviceExport.Namespace, "name", serviceExport.Name) + + cmService, err := r.CloudMap.GetService(ctx, serviceExport.Namespace, serviceExport.Name) + if common.IsUnknown(err) { + r.Log.Error(err, "error fetching Service from Cloud Map", "namespace", serviceExport.Namespace, "name", serviceExport.Name) + return ctrl.Result{}, err + } + if cmService != nil { + if err := r.CloudMap.DeleteEndpoints(ctx, cmService.Namespace, cmService.Name, cmService.GetEndpoints(clusterId)); err != nil { + r.Log.Error(err, "error deleting Endpoints from Cloud Map", "namespace", cmService.Namespace, "name", cmService.Name) + return ctrl.Result{}, err + } + } + + // Remove finalizer. Once all finalizers have been + // removed, the ServiceExport object will be deleted. 
+ controllerutil.RemoveFinalizer(serviceExport, ServiceExportFinalizer) + if err := r.Client.Update(ctx, serviceExport); err != nil { + return ctrl.Result{}, err + } + } + + return ctrl.Result{}, nil +} + +func (r *ServiceExportReconciler) extractEndpoints(ctx context.Context, svc *v1.Service, svcExport *multiclusterv1alpha1.ServiceExport) ([]*model.Endpoint, error) { + clusterProperties, err := r.ClusterUtils.GetClusterProperties(ctx) + if err != nil { + r.Log.Error(err, "unable to retrieve ClusterId and ClusterSetId") + return nil, err + } + + endpointSlices := discovery.EndpointSliceList{} + err = r.Client.List(ctx, &endpointSlices, + client.InNamespace(svc.Namespace), client.MatchingLabels{discovery.LabelServiceName: svc.Name}) + + if err != nil { + return nil, err + } + + serviceType := ExtractServiceType(svc) + + servicePortMap := make(map[string]model.Port) + for _, svcPort := range svc.Spec.Ports { + servicePortMap[svcPort.Name] = ServicePortToPort(svcPort) + } + + var svcExportCreationTimestamp int64 = 0 + if !svcExport.ObjectMeta.CreationTimestamp.IsZero() { + svcExportCreationTimestamp = svcExport.ObjectMeta.CreationTimestamp.Time.UnixMilli() + } + + attributes := make(map[string]string) + attributes[model.K8sVersionAttr] = version.GetPackageVersion() + + endpoints := make([]*model.Endpoint, 0) + for _, slice := range endpointSlices.Items { + for _, endpointPort := range slice.Ports { + for _, endpoint := range slice.Endpoints { + port := EndpointPortToPort(endpointPort) + readyCondition := aws.ToBool(endpoint.Conditions.Ready) + + for _, IP := range endpoint.Addresses { + endpoints = append(endpoints, &model.Endpoint{ + Id: model.EndpointIdFromIPAddressAndPort(IP, port), + IP: IP, + AddressType: slice.AddressType, + EndpointPort: port, + ServicePort: servicePortMap[*endpointPort.Name], + ClusterId: clusterProperties.ClusterId(), + ClusterSetId: clusterProperties.ClusterSetId(), + ServiceType: serviceType, + ServiceExportCreationTimestamp: 
svcExportCreationTimestamp, + Ready: readyCondition, + Hostname: aws.ToString(endpoint.Hostname), + Nodename: aws.ToString(endpoint.NodeName), + Attributes: attributes, + }) + } + } + } + } + + return endpoints, nil +} + +func (r *ServiceExportReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&multiclusterv1alpha1.ServiceExport{}). + // Filter-out all the events if the cluster-properties are not found + WithEventFilter(r.clusterPropertyFilter()). + // Watch for the changes to Service which have corresponding ServiceExport + Watches( + &source.Kind{Type: &v1.Service{}}, + &handler.EnqueueRequestForObject{}, + builder.WithPredicates(r.serviceExportPredicates()), + ). + // Watch for the changes to the EndpointSlice object which have corresponding ServiceExport. + // This object is bound to be updated when Deployment are updated. + Watches( + &source.Kind{Type: &discovery.EndpointSlice{}}, + handler.EnqueueRequestsFromMapFunc(r.endpointSliceMappingFunction()), + builder.WithPredicates(r.serviceExportPredicates()), + ). + // Watch for changes to ClusterProperty objects. If a ClusterProperty object is + // created, updated or deleted, the controller will reconcile all service exports + Watches( + &source.Kind{Type: &aboutv1alpha1.ClusterProperty{}}, + handler.EnqueueRequestsFromMapFunc(r.clusterPropertyMappingFunction()), + ). + WithOptions(controller.Options{ + // rate-limiting is applied to reconcile responses with an error + // We are increasing the base delay to 500ms, defaults baseDelay: 5ms, maxDelay: 1000s + RateLimiter: workqueue.NewItemExponentialFailureRateLimiter(500*time.Millisecond, 1000*time.Second), + }). 
+ Complete(r) +} + +func (r *ServiceExportReconciler) endpointSliceMappingFunction() handler.MapFunc { + return func(object client.Object) []reconcile.Request { + labels := object.GetLabels() + serviceName := labels[discovery.LabelServiceName] + return []reconcile.Request{ + {NamespacedName: types.NamespacedName{ + Name: serviceName, + Namespace: object.GetNamespace(), + }}, + } + } +} + +func (r *ServiceExportReconciler) clusterPropertyMappingFunction() handler.MapFunc { + // Return reconcile requests for all service exports + return func(object client.Object) []reconcile.Request { + // Reset clusterproperties if there is an &aboutv1alpha1.ClusterProperty{} event + err := r.ClusterUtils.LoadClusterProperties(context.TODO()) + if err != nil { + return nil + } + + serviceExports := &multiclusterv1alpha1.ServiceExportList{} + if err := r.Client.List(context.TODO(), serviceExports); err != nil { + r.Log.Error(err, "error listing ServiceExports") + return nil + } + + result := make([]reconcile.Request, 0) + for _, serviceExport := range serviceExports.Items { + result = append(result, reconcile.Request{NamespacedName: types.NamespacedName{ + Name: serviceExport.Name, + Namespace: serviceExport.Namespace, + }}) + } + return result + } +} + +func (r *ServiceExportReconciler) serviceExportPredicates() predicate.Funcs { + return predicate.Funcs{ + GenericFunc: func(e event.GenericEvent) bool { + return r.doesObjectHaveServiceExport(e.Object) + }, + CreateFunc: func(e event.CreateEvent) bool { + return r.doesObjectHaveServiceExport(e.Object) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return r.doesObjectHaveServiceExport(e.ObjectNew) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return r.doesObjectHaveServiceExport(e.Object) + }, + } +} + +func (r *ServiceExportReconciler) doesObjectHaveServiceExport(object client.Object) bool { + serviceName, ok := object.GetLabels()[discovery.LabelServiceName] + if !ok { + serviceName = object.GetName() + } + ns := 
types.NamespacedName{ + Name: serviceName, + Namespace: object.GetNamespace(), + } + svcExport := multiclusterv1alpha1.ServiceExport{} + if err := r.Client.Get(context.TODO(), ns, &svcExport); err != nil { + return false + } + return true +} + +func (r *ServiceExportReconciler) clusterPropertyFilter() predicate.Funcs { + return predicate.Funcs{ + GenericFunc: func(e event.GenericEvent) bool { + return r.doesClusterPropertyExists() + }, + CreateFunc: func(e event.CreateEvent) bool { + return r.doesClusterPropertyExists() + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return r.doesClusterPropertyExists() + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return r.doesClusterPropertyExists() + }, + } +} + +func (r *ServiceExportReconciler) doesClusterPropertyExists() bool { + clusterProperties, err := r.ClusterUtils.GetClusterProperties(context.TODO()) + if err != nil { + r.Log.Error(err, "unable to retrieve ClusterId and ClusterSetId") + return false + } + r.Log.Debug("clusterProperties found", "ClusterId", clusterProperties.ClusterId(), "ClusterSetId", clusterProperties.ClusterSetId()) + return clusterProperties.IsValid() +} diff --git a/pkg/controllers/multicluster/serviceexport_controller_test.go b/pkg/controllers/multicluster/serviceexport_controller_test.go new file mode 100644 index 00000000..5b1e6dac --- /dev/null +++ b/pkg/controllers/multicluster/serviceexport_controller_test.go @@ -0,0 +1,215 @@ +package controllers + +import ( + "context" + "fmt" + + cloudmapMock "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/mocks/pkg/cloudmap" + aboutv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/about/v1alpha1" + multiclusterv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/multicluster/v1alpha1" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/test" + 
"github.com/go-logr/logr/testr" + "github.com/golang/mock/gomock" + + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + discovery "k8s.io/api/discovery/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestServiceExportReconciler_Reconcile_NewServiceExport(t *testing.T) { + // create a fake controller client and add some objects + fakeClient := fake.NewClientBuilder(). + WithScheme(getServiceExportScheme()). + WithObjects(k8sServiceForTest(), serviceExportForTest(), test.ClusterIdForTest(), test.ClusterSetIdForTest()). + WithLists(&discovery.EndpointSliceList{ + Items: []discovery.EndpointSlice{*endpointSliceForTest()}, + }). + Build() + + // create a mock cloudmap service discovery client + mockController := gomock.NewController(t) + defer mockController.Finish() + + mock := cloudmapMock.NewMockServiceDiscoveryClient(mockController) + // expected interactions with the Cloud Map client + // The first get call is expected to return nil, then second call after the creation of service is + // supposed to return the value + first := mock.EXPECT().GetService(gomock.Any(), test.HttpNsName, test.SvcName). + Return(nil, common.NotFoundError("")) + second := mock.EXPECT().GetService(gomock.Any(), test.HttpNsName, test.SvcName). 
+ Return(&model.Service{Namespace: test.HttpNsName, Name: test.SvcName}, nil) + gomock.InOrder(first, second) + mock.EXPECT().CreateService(gomock.Any(), test.HttpNsName, test.SvcName).Return(nil).Times(1) + mock.EXPECT().RegisterEndpoints(gomock.Any(), test.HttpNsName, test.SvcName, + []*model.Endpoint{test.GetTestEndpoint1()}).Return(nil).Times(1) + + reconciler := getServiceExportReconciler(t, mock, fakeClient) + + request := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: test.HttpNsName, + Name: test.SvcName, + }, + } + + got, err := reconciler.Reconcile(context.Background(), request) + if err != nil { + t.Errorf("Reconcile() error = %v", err) + return + } + assert.Equal(t, ctrl.Result{}, got, "Result should be empty") + + serviceExport := &multiclusterv1alpha1.ServiceExport{} + err = fakeClient.Get(context.TODO(), types.NamespacedName{Namespace: test.HttpNsName, Name: test.SvcName}, serviceExport) + assert.NoError(t, err) + assert.Contains(t, serviceExport.Finalizers, ServiceExportFinalizer, "Finalizer added to the service export") +} + +func TestServiceExportReconciler_Reconcile_ExistingServiceExport(t *testing.T) { + // create a fake controller client and add some objects + fakeClient := fake.NewClientBuilder(). + WithScheme(getServiceExportScheme()). + WithObjects(k8sServiceForTest(), serviceExportForTest(), test.ClusterIdForTest(), test.ClusterSetIdForTest()). + WithLists(&discovery.EndpointSliceList{ + Items: []discovery.EndpointSlice{*endpointSliceForTest()}, + }). + Build() + + mockController := gomock.NewController(t) + defer mockController.Finish() + + mock := cloudmapMock.NewMockServiceDiscoveryClient(mockController) + + // GetService from Cloudmap returns endpoint1 and endpoint2 + mock.EXPECT().GetService(gomock.Any(), test.HttpNsName, test.SvcName). 
+ Return(test.GetTestService(), nil) + // call to delete the endpoint not present in the k8s cluster + mock.EXPECT().DeleteEndpoints(gomock.Any(), test.HttpNsName, test.SvcName, + []*model.Endpoint{test.GetTestEndpoint2()}).Return(nil).Times(1) + + request := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: test.HttpNsName, + Name: test.SvcName, + }, + } + + reconciler := getServiceExportReconciler(t, mock, fakeClient) + + got, err := reconciler.Reconcile(context.Background(), request) + if err != nil { + t.Errorf("Reconcile() error = %v", err) + return + } + assert.Equal(t, ctrl.Result{}, got, "Result should be empty") + + serviceExport := &multiclusterv1alpha1.ServiceExport{} + err = fakeClient.Get(context.TODO(), types.NamespacedName{Namespace: test.HttpNsName, Name: test.SvcName}, serviceExport) + assert.NoError(t, err) + assert.Contains(t, serviceExport.Finalizers, ServiceExportFinalizer, "Finalizer added to the service export") +} + +func TestServiceExportReconciler_Reconcile_DeleteExistingService(t *testing.T) { + // create a fake controller client and add some objects + serviceExportObj := serviceExportForTest() + // Add finalizer string to the service + serviceExportObj.Finalizers = []string{ServiceExportFinalizer} + fakeClient := fake.NewClientBuilder(). + WithScheme(getServiceExportScheme()). + WithObjects(serviceExportObj, test.ClusterIdForTest(), test.ClusterSetIdForTest()). + WithLists(&discovery.EndpointSliceList{ + Items: []discovery.EndpointSlice{*endpointSliceForTest()}, + }). + Build() + + mockController := gomock.NewController(t) + defer mockController.Finish() + + mock := cloudmapMock.NewMockServiceDiscoveryClient(mockController) + + // GetService from Cloudmap returns endpoint1 and endpoint2 + mock.EXPECT().GetService(gomock.Any(), test.HttpNsName, test.SvcName). 
+ Return(test.GetTestService(), nil) + // call to delete the endpoint in the cloudmap + mock.EXPECT().DeleteEndpoints(gomock.Any(), test.HttpNsName, test.SvcName, + test.GetTestService().GetEndpoints(test.ClusterId1)).Return(nil).Times(1) + + request := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: test.HttpNsName, + Name: test.SvcName, + }, + } + + reconciler := getServiceExportReconciler(t, mock, fakeClient) + + got, err := reconciler.Reconcile(context.Background(), request) + assert.NoError(t, err) + assert.Equal(t, ctrl.Result{}, got, "Result should be empty") + + serviceExport := &multiclusterv1alpha1.ServiceExport{} + err = fakeClient.Get(context.TODO(), types.NamespacedName{Namespace: test.HttpNsName, Name: test.SvcName}, serviceExport) + assert.NoError(t, err) + assert.Empty(t, serviceExport.Finalizers, "Finalizer removed from the service export") +} + +func TestServiceExportReconciler_Reconcile_NoClusterProperty(t *testing.T) { + // create a fake controller client and add some objects + fakeClient := fake.NewClientBuilder(). + WithScheme(getServiceExportScheme()). + // do-not add clusterId + WithObjects(k8sServiceForTest(), serviceExportForTest(), test.ClusterSetIdForTest()). 
+ WithLists(&discovery.EndpointSliceList{ + Items: []discovery.EndpointSlice{*endpointSliceForTest()}, + }).Build() + + // create a mock cloudmap service discovery client + mockController := gomock.NewController(t) + defer mockController.Finish() + + mockSDClient := cloudmapMock.NewMockServiceDiscoveryClient(mockController) + + reconciler := getServiceExportReconciler(t, mockSDClient, fakeClient) + + request := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: test.HttpNsName, + Name: test.SvcName, + }, + } + + // Reconciling should throw an error + got, err := reconciler.Reconcile(context.Background(), request) + expectedError := fmt.Errorf("ClusterProperty not found") + assert.ErrorContains(t, err, expectedError.Error()) + assert.Equal(t, ctrl.Result{}, got, "Result should be empty") +} + +func getServiceExportScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + scheme.AddKnownTypes(aboutv1alpha1.GroupVersion, &aboutv1alpha1.ClusterProperty{}, &aboutv1alpha1.ClusterPropertyList{}) + scheme.AddKnownTypes(multiclusterv1alpha1.GroupVersion, &multiclusterv1alpha1.ServiceExport{}) + scheme.AddKnownTypes(v1.SchemeGroupVersion, &v1.Service{}) + scheme.AddKnownTypes(discovery.SchemeGroupVersion, &discovery.EndpointSlice{}, &discovery.EndpointSliceList{}) + return scheme +} + +func getServiceExportReconciler(t *testing.T, mockClient *cloudmapMock.MockServiceDiscoveryClient, client client.Client) *ServiceExportReconciler { + test.SetTestVersion() + return &ServiceExportReconciler{ + Client: client, + Log: common.NewLoggerWithLogr(testr.New(t)), + Scheme: client.Scheme(), + CloudMap: mockClient, + ClusterUtils: model.NewClusterUtils(client), + } +} diff --git a/pkg/controllers/multicluster/utils.go b/pkg/controllers/multicluster/utils.go new file mode 100644 index 00000000..4a919431 --- /dev/null +++ b/pkg/controllers/multicluster/utils.go @@ -0,0 +1,310 @@ +package controllers + +import ( + "crypto/sha256" + "encoding/base32" + "encoding/json" + 
"strings" + + multiclusterv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/multicluster/v1alpha1" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + v1 "k8s.io/api/core/v1" + discovery "k8s.io/api/discovery/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + // DerivedServiceAnnotation annotates a ServiceImport with derived Service name + DerivedServiceAnnotation = "multicluster.k8s.aws/derived-service" + + // ServiceExportFinalizer finalizer to perform cloudmap resource cleanup on delete + ServiceExportFinalizer = "multicluster.k8s.aws/service-export-finalizer" + + // LabelServiceImportName indicates the name of the multi-cluster service that an EndpointSlice belongs to. + LabelServiceImportName = "multicluster.kubernetes.io/service-name" + + // LabelDerivedServiceOriginatingName indicates the name of the multi-cluster service that the derived service originated from. + LabelDerivedServiceOriginatingName = "multicluster.kubernetes.io/service-name" + + // LabelEndpointSliceManagedBy indicates the name of the entity that manages the EndpointSlice. + LabelEndpointSliceManagedBy = "endpointslice.kubernetes.io/managed-by" + + // LabelSourceCluster indicates the id of the cluster the object was created for + LabelSourceCluster = "multicluster.kubernetes.io/source-cluster" + + // ValueEndpointSliceManagedBy indicates the name of the entity that manages the EndpointSlice. 
+ ValueEndpointSliceManagedBy = "aws-cloud-map-mcs-controller-for-k8s" +) + +// ServicePortToPort converts a k8s service port to internal model port +func ServicePortToPort(svcPort v1.ServicePort) model.Port { + return model.Port{ + Name: svcPort.Name, + Port: svcPort.Port, + TargetPort: svcPort.TargetPort.String(), + Protocol: string(svcPort.Protocol), + } +} + +// ServiceImportPortToPort converts a service import port to an internal model port +func ServiceImportPortToPort(svcPort multiclusterv1alpha1.ServicePort) model.Port { + return model.Port{ + Name: svcPort.Name, + Port: svcPort.Port, + Protocol: string(svcPort.Protocol), + } +} + +// EndpointPortToPort converts a k8s endpoint port to an internal model port +func EndpointPortToPort(port discovery.EndpointPort) model.Port { + return model.Port{ + Name: *port.Name, + Port: *port.Port, + Protocol: string(*port.Protocol), + } +} + +// PortToServicePort converts an internal model port to a k8s service port +func PortToServicePort(port model.Port) v1.ServicePort { + return v1.ServicePort{ + Name: port.Name, + Protocol: v1.Protocol(port.Protocol), + Port: port.Port, + TargetPort: intstr.Parse(port.TargetPort), + } +} + +// PortToServiceImportPort converts an internal model port to a service import port +func PortToServiceImportPort(port model.Port) multiclusterv1alpha1.ServicePort { + return multiclusterv1alpha1.ServicePort{ + Name: port.Name, + Protocol: v1.Protocol(port.Protocol), + Port: port.Port, + } +} + +// PortToEndpointPort converts an internal model port to a k8s endpoint port +func PortToEndpointPort(port model.Port) discovery.EndpointPort { + protocol := v1.Protocol(port.Protocol) + return discovery.EndpointPort{ + Name: &port.Name, + Protocol: &protocol, + Port: &port.Port, + } +} + +// ExtractServicePorts extracts all unique service ports from a slice of endpoints +func ExtractServicePorts(endpoints []*model.Endpoint) (servicePorts []*model.Port) { + uniquePorts := make(map[string]model.Port) + for 
_, ep := range endpoints { + uniquePorts[ep.ServicePort.GetID()] = ep.ServicePort + } + for _, servicePort := range uniquePorts { + portRef := servicePort + servicePorts = append(servicePorts, &portRef) + } + return servicePorts +} + +// ExtractEndpointPorts extracts all unique endpoint ports from a slice of endpoints +func ExtractEndpointPorts(endpoints []*model.Endpoint) (endpointPorts []*model.Port) { + uniquePorts := make(map[string]model.Port) + for _, ep := range endpoints { + uniquePorts[ep.EndpointPort.GetID()] = ep.EndpointPort + } + for _, endpointPort := range uniquePorts { + portRef := endpointPort + endpointPorts = append(endpointPorts, &portRef) + } + return endpointPorts +} + +func PortsEqualIgnoreOrder(a, b []*model.Port) (equal bool) { + idsA := make([]string, len(a)) + idsB := make([]string, len(b)) + for i, port := range a { + idsA[i] = port.GetID() + } + for i, port := range b { + idsB[i] = port.GetID() + } + less := func(x, y string) bool { return x < y } + equalIgnoreOrder := cmp.Diff(idsA, idsB, cmpopts.SortSlices(less)) == "" + return equalIgnoreOrder +} + +func IPsEqualIgnoreOrder(a, b []string) (equal bool) { + less := func(x, y string) bool { return x < y } + equalIgnoreOrder := cmp.Diff(a, b, cmpopts.SortSlices(less)) == "" + return equalIgnoreOrder +} + +// GetClusterIpsFromServices returns list of ClusterIPs from services +func GetClusterIpsFromServices(services []*v1.Service) []string { + clusterIPs := make([]string, 0) + for _, svc := range services { + clusterIPs = append(clusterIPs, svc.Spec.ClusterIP) + } + return clusterIPs +} + +// DerivedName computes the "placeholder" name for an imported service +func DerivedName(namespace string, name string, clusterId string) string { + hash := sha256.New() + hash.Write([]byte(namespace + name + clusterId)) + return "imported-" + strings.ToLower(base32.HexEncoding.WithPadding(base32.NoPadding).EncodeToString(hash.Sum(nil)))[:10] +} + +// CreateServiceImportStruct creates struct 
representation of a ServiceImport +func CreateServiceImportStruct(svc *model.Service, clusterIds []string, servicePorts []*model.Port) *multiclusterv1alpha1.ServiceImport { + serviceImportPorts := make([]multiclusterv1alpha1.ServicePort, 0) + for _, port := range servicePorts { + serviceImportPorts = append(serviceImportPorts, PortToServiceImportPort(*port)) + } + + clusters := make([]multiclusterv1alpha1.ClusterStatus, 0) + for _, clusterId := range clusterIds { + clusters = append(clusters, multiclusterv1alpha1.ClusterStatus{ + Cluster: clusterId, + }) + } + + return &multiclusterv1alpha1.ServiceImport{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: svc.Namespace, + Name: svc.Name, + Annotations: map[string]string{ + DerivedServiceAnnotation: CreateDerivedServiceAnnotation(svc.Namespace, svc.Name, clusterIds), + }, + }, + Spec: multiclusterv1alpha1.ServiceImportSpec{ + IPs: []string{}, + Type: ServiceTypetoServiceImportType(svc.Endpoints[0].ServiceType), + Ports: serviceImportPorts, + }, + Status: multiclusterv1alpha1.ServiceImportStatus{ + Clusters: clusters, + }, + } +} + +// CreateDerivedServiceStruct creates struct representation of a derived service +func CreateDerivedServiceStruct(svcImport *multiclusterv1alpha1.ServiceImport, importedSvcPorts []*model.Port, clusterId string) *v1.Service { + ownerRef := metav1.NewControllerRef(svcImport, schema.GroupVersionKind{ + Version: svcImport.TypeMeta.APIVersion, + Kind: svcImport.TypeMeta.Kind, + }) + + svcPorts := make([]v1.ServicePort, 0) + for _, svcPort := range importedSvcPorts { + svcPorts = append(svcPorts, PortToServicePort(*svcPort)) + } + + svc := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + LabelSourceCluster: clusterId, + LabelDerivedServiceOriginatingName: svcImport.Name, + }, + Namespace: svcImport.Namespace, + Name: DerivedName(svcImport.Namespace, svcImport.Name, clusterId), + OwnerReferences: []metav1.OwnerReference{*ownerRef}, + }, + Spec: v1.ServiceSpec{ + Type: 
v1.ServiceTypeClusterIP, + Ports: svcPorts, + }, + } + + // if svcImport is Headless type, specify ClusterIP field to "None" + if svcImport.Spec.Type == multiclusterv1alpha1.Headless { + svc.Spec.ClusterIP = "None" + } + + return svc +} + +func CreateEndpointForSlice(svc *v1.Service, endpoint *model.Endpoint) discovery.Endpoint { + ep := discovery.Endpoint{ + TargetRef: &v1.ObjectReference{ + Kind: "Service", + Namespace: svc.Namespace, + Name: svc.Name, + UID: svc.ObjectMeta.UID, + ResourceVersion: svc.ObjectMeta.ResourceVersion, + }, + Addresses: []string{endpoint.IP}, + Conditions: discovery.EndpointConditions{ + Ready: &endpoint.Ready, + }, + } + if endpoint.Hostname != "" { + ep.Hostname = &endpoint.Hostname + } + if endpoint.Nodename != "" { + ep.NodeName = &endpoint.Nodename + } + return ep +} + +func CreateEndpointSliceStruct(svc *v1.Service, svcImportName string, clusterId string, addressType discovery.AddressType) *discovery.EndpointSlice { + return &discovery.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + // derived Service name + discovery.LabelServiceName: svc.Name, + // original ServiceImport name + LabelServiceImportName: svcImportName, + // 'managed-by' label set to controller + LabelEndpointSliceManagedBy: ValueEndpointSliceManagedBy, + // 'source-cluster' label set to current cluster + LabelSourceCluster: clusterId, + }, + GenerateName: svc.Name + "-", + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(svc, schema.GroupVersionKind{ + Version: svc.TypeMeta.APIVersion, + Kind: svc.TypeMeta.Kind, + })}, + Namespace: svc.Namespace, + }, + AddressType: addressType, + } +} + +// ExtractServiceType finds the ServiceType of a given service as Headless/ClusterSetIP +func ExtractServiceType(svc *v1.Service) model.ServiceType { + if svc.Spec.ClusterIP == "None" { + return model.HeadlessType + } + return model.ClusterSetIPType +} + +// CreateDerivedServiceAnnotation creates a JSON object containing a slice of 
maps of clusterIds and derived service names +func CreateDerivedServiceAnnotation(namespace string, name string, clusterIds []string) string { + clusters := make([]map[string]string, 0, len(clusterIds)) + for _, clusterId := range clusterIds { + clusters = append(clusters, map[string]string{ + "cluster": clusterId, + "derived-service": DerivedName(namespace, name, clusterId), + }) + } + // create JSON + jsonBytes, err := json.Marshal(clusters) + if err != nil { + return "" + } + return string(jsonBytes) +} + +// ServiceTypetoServiceImportType converts model service type to multicluster ServiceImport type +func ServiceTypetoServiceImportType(serviceType model.ServiceType) multiclusterv1alpha1.ServiceImportType { + if serviceType == model.HeadlessType { + return multiclusterv1alpha1.Headless + } + + return multiclusterv1alpha1.ClusterSetIP +} diff --git a/pkg/controllers/multicluster/utils_test.go b/pkg/controllers/multicluster/utils_test.go new file mode 100644 index 00000000..56157af4 --- /dev/null +++ b/pkg/controllers/multicluster/utils_test.go @@ -0,0 +1,822 @@ +package controllers + +import ( + "reflect" + "strconv" + "testing" + + multiclusterv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/multicluster/v1alpha1" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/test" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + discovery "k8s.io/api/discovery/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func TestServicePortToPort(t *testing.T) { + type args struct { + svcPort v1.ServicePort + } + tests := []struct { + name string + args args + want model.Port + }{ + { + name: "happy case", + args: args{ + svcPort: v1.ServicePort{ + Name: "http", + Protocol: v1.ProtocolTCP, + Port: 80, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 8080, + }, + }, + }, + want: model.Port{ + Name: "http", + Port: 
80, + TargetPort: "8080", + Protocol: "TCP", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ServicePortToPort(tt.args.svcPort); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ServicePortToPort() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestServiceImportPortToPort(t *testing.T) { + type args struct { + svcImportPort multiclusterv1alpha1.ServicePort + } + tests := []struct { + name string + args args + want model.Port + }{ + { + name: "happy case", + args: args{ + svcImportPort: multiclusterv1alpha1.ServicePort{ + Name: test.PortName1, + Protocol: v1.ProtocolTCP, + Port: 80, + }, + }, + want: model.Port{ + Name: test.PortName1, + Port: 80, + Protocol: test.Protocol1, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ServiceImportPortToPort(tt.args.svcImportPort); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ServiceImportPortToPort() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestEndpointPortToPort(t *testing.T) { + type args struct { + port discovery.EndpointPort + } + name := "http" + protocolTCP := v1.ProtocolTCP + port := int32(80) + tests := []struct { + name string + args args + want model.Port + }{ + { + name: "happy case", + args: args{ + port: discovery.EndpointPort{ + Name: &name, + Protocol: &protocolTCP, + Port: &port, + }, + }, + want: model.Port{ + Name: "http", + Port: 80, + TargetPort: "", + Protocol: "TCP", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := EndpointPortToPort(tt.args.port); !reflect.DeepEqual(got, tt.want) { + t.Errorf("EndpointPortToPort() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestPortToServicePort(t *testing.T) { + type args struct { + port model.Port + } + tests := []struct { + name string + args args + want v1.ServicePort + }{ + { + name: "happy case", + args: args{ + port: model.Port{ + Name: "http", + Port: 80, + TargetPort: "8080", + Protocol: "TCP", + }, + 
}, + want: v1.ServicePort{ + Name: "http", + Protocol: v1.ProtocolTCP, + Port: 80, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: 8080, + }, + }, + }, + { + name: "happy case for string targertPort", + args: args{ + port: model.Port{ + Name: "http", + Port: 80, + TargetPort: "https", + Protocol: "TCP", + }, + }, + want: v1.ServicePort{ + Name: "http", + Protocol: v1.ProtocolTCP, + Port: 80, + TargetPort: intstr.IntOrString{ + Type: intstr.String, + StrVal: "https", + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := PortToServicePort(tt.args.port); !reflect.DeepEqual(got, tt.want) { + t.Errorf("PortToServicePort() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestPortToServiceImportPort(t *testing.T) { + type args struct { + port model.Port + } + tests := []struct { + name string + args args + want multiclusterv1alpha1.ServicePort + }{ + { + name: "happy case", + args: args{ + port: model.Port{ + Name: test.PortName1, + Port: test.Port1, + TargetPort: test.PortStr2, // ignored + Protocol: test.Protocol1, + }, + }, + want: multiclusterv1alpha1.ServicePort{ + Name: test.PortName1, + Protocol: v1.ProtocolTCP, + Port: test.Port1, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := PortToServiceImportPort(tt.args.port); !reflect.DeepEqual(got, tt.want) { + t.Errorf("PortToServiceImportPort() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestPortToEndpointPort(t *testing.T) { + name := "http" + protocolTCP := v1.ProtocolTCP + port := int32(80) + type args struct { + port model.Port + } + tests := []struct { + name string + args args + want discovery.EndpointPort + }{ + { + name: "happy case", + args: args{ + port: model.Port{ + Name: "http", + Port: 80, + Protocol: "TCP", + }, + }, + want: discovery.EndpointPort{ + Name: &name, + Protocol: &protocolTCP, + Port: &port, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if 
got := PortToEndpointPort(tt.args.port); !reflect.DeepEqual(got, tt.want) { + t.Errorf("PortToEndpointPort() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestExtractServicePorts(t *testing.T) { + type args struct { + endpoints []*model.Endpoint + } + tests := []struct { + name string + args args + want []*model.Port + }{ + { + name: "unique service ports extracted", + args: args{ + endpoints: []*model.Endpoint{ + { + ServicePort: model.Port{Protocol: test.Protocol1, Port: test.Port1}, + }, + { + ServicePort: model.Port{Protocol: test.Protocol1, Port: test.Port2}, + }, + { + ServicePort: model.Port{Protocol: test.Protocol2, Port: test.Port2}, + }, + }, + }, + want: []*model.Port{ + {Protocol: test.Protocol1, Port: test.Port1}, + {Protocol: test.Protocol1, Port: test.Port2}, + {Protocol: test.Protocol2, Port: test.Port2}, + }, + }, + { + name: "duplicate and endpoint ports ignored", + args: args{ + endpoints: []*model.Endpoint{ + { + ServicePort: model.Port{Protocol: test.Protocol1, Port: test.Port1}, + EndpointPort: model.Port{Protocol: test.Protocol1, Port: test.Port1}, + }, + { + ServicePort: model.Port{Protocol: test.Protocol1, Port: test.Port1}, + EndpointPort: model.Port{Protocol: test.Protocol2, Port: test.Port2}, + }, + }, + }, + want: []*model.Port{ + {Protocol: test.Protocol1, Port: test.Port1}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ExtractServicePorts(tt.args.endpoints); !PortsEqualIgnoreOrder(got, tt.want) { + t.Errorf("ServicePortToPort() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestExtractEndpointPorts(t *testing.T) { + type args struct { + endpoints []*model.Endpoint + } + tests := []struct { + name string + args args + want []*model.Port + }{ + { + name: "unique endpoint ports extracted", + args: args{ + endpoints: []*model.Endpoint{ + { + EndpointPort: model.Port{Protocol: test.Protocol1, Port: test.Port1}, + }, + { + EndpointPort: model.Port{Protocol: test.Protocol1, Port: 
test.Port2}, + }, + { + EndpointPort: model.Port{Protocol: test.Protocol2, Port: test.Port2}, + }, + }, + }, + want: []*model.Port{ + {Protocol: test.Protocol1, Port: test.Port1}, + {Protocol: test.Protocol1, Port: test.Port2}, + {Protocol: test.Protocol2, Port: test.Port2}, + }, + }, + { + name: "duplicate and service ports ignored", + args: args{ + endpoints: []*model.Endpoint{ + { + EndpointPort: model.Port{Protocol: test.Protocol1, Port: test.Port1}, + ServicePort: model.Port{Protocol: test.Protocol1, Port: test.Port1}, + }, + { + EndpointPort: model.Port{Protocol: test.Protocol1, Port: test.Port1}, + ServicePort: model.Port{Protocol: test.Protocol2, Port: test.Port2}, + }, + }, + }, + want: []*model.Port{ + {Protocol: test.Protocol1, Port: test.Port1}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ExtractEndpointPorts(tt.args.endpoints); !PortsEqualIgnoreOrder(got, tt.want) { + t.Errorf("ServicePortToPort() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestPortsEqualIgnoreOrder(t *testing.T) { + type args struct { + portsA []*model.Port + portsB []*model.Port + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "ports equal same order", + args: args{ + portsA: []*model.Port{ + {Protocol: test.Protocol1, Port: test.Port1}, + {Protocol: test.Protocol2, Port: test.Port2}, + }, + portsB: []*model.Port{ + {Protocol: test.Protocol1, Port: test.Port1}, + {Protocol: test.Protocol2, Port: test.Port2}, + }, + }, + want: true, + }, + { + name: "ports equal different order", + args: args{ + portsA: []*model.Port{ + {Protocol: test.Protocol1, Port: test.Port1}, + {Protocol: test.Protocol2, Port: test.Port2}, + }, + portsB: []*model.Port{ + {Protocol: test.Protocol2, Port: test.Port2}, + {Protocol: test.Protocol1, Port: test.Port1}, + }, + }, + want: true, + }, + { + name: "ports not equal", + args: args{ + portsA: []*model.Port{ + {Protocol: test.Protocol1, Port: test.Port1}, + {Protocol: 
test.Protocol2, Port: test.Port2}, + }, + portsB: []*model.Port{ + {Protocol: test.Protocol1, Port: test.Port1}, + {Protocol: test.Protocol2, Port: 3}, + }, + }, + want: false, + }, + { + name: "protocols not equal", + args: args{ + portsA: []*model.Port{ + {Protocol: test.Protocol1, Port: test.Port1}, + {Protocol: test.Protocol2, Port: test.Port2}, + }, + portsB: []*model.Port{ + {Protocol: test.Protocol1, Port: test.Port1}, + {Protocol: test.Protocol1, Port: test.Port2}, + }, + }, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := PortsEqualIgnoreOrder(tt.args.portsA, tt.args.portsB); !(got == tt.want) { + t.Errorf("PortsEqualIgnoreOrder() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestIPsEqualIgnoreOrder(t *testing.T) { + type args struct { + ipsA []string + ipsB []string + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "ips equal same order", + args: args{ + ipsA: []string{ + test.ClusterIp1, + test.ClusterIp2, + }, + ipsB: []string{ + test.ClusterIp1, + test.ClusterIp2, + }, + }, + want: true, + }, + { + name: "ips equal different order", + args: args{ + ipsA: []string{ + test.ClusterIp1, + test.ClusterIp2, + }, + ipsB: []string{ + test.ClusterIp2, + test.ClusterIp1, + }, + }, + want: true, + }, + { + name: "ips not equal", + args: args{ + ipsA: []string{ + test.ClusterIp1, + test.ClusterIp2, + }, + ipsB: []string{ + test.ClusterIp1, + "10.10.10.3", + }, + }, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := IPsEqualIgnoreOrder(tt.args.ipsA, tt.args.ipsB); !(got == tt.want) { + t.Errorf("IPsEqualIgnoreOrder() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetClusterIpsFromServices(t *testing.T) { + type args struct { + services []*v1.Service + } + + tests := []struct { + name string + args args + want []string + }{ + { + name: "happy case", + args: args{ + services: []*v1.Service{ + { + ObjectMeta: 
metav1.ObjectMeta{}, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeClusterIP, + ClusterIP: test.ClusterIp1, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{}, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeClusterIP, + ClusterIP: test.ClusterIp2, + }, + }, + }}, + want: []string{ + test.ClusterIp1, test.ClusterIp2, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := GetClusterIpsFromServices(tt.args.services); !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetClusterIpsFromServices() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestDerivedService(t *testing.T) { + const numTests = 100 + derivedServiceMap := make(map[string]bool) + for i := 0; i < numTests; i++ { + namespace := test.HttpNsName + name := "test-svcname-" + strconv.Itoa(i) + clusterId := "test-clusterid-" + strconv.Itoa(i) + derivedService := DerivedName(namespace, name, clusterId) + assert.NotContains(t, derivedServiceMap, derivedService, "derived service already exists") + derivedServiceMap[derivedService] = true + } + assert.Equal(t, numTests, len(derivedServiceMap)) + assert.True(t, DerivedName(test.HttpNsName, test.SvcName, test.ClusterId1) != DerivedName(test.HttpNsName, test.SvcName, test.ClusterId2)) +} + +func TestCreateServiceImportStruct(t *testing.T) { + type args struct { + servicePorts []*model.Port + endpoints []*model.Endpoint + clusterIds []string + } + tests := []struct { + name string + args args + want multiclusterv1alpha1.ServiceImport + }{ + { + name: "happy case", + args: args{ + clusterIds: []string{test.ClusterId1, test.ClusterId2}, + servicePorts: []*model.Port{ + {Name: test.PortName1, Protocol: test.Protocol1, Port: test.Port1}, + {Name: test.PortName2, Protocol: test.Protocol2, Port: test.Port2}, + }, + endpoints: []*model.Endpoint{ + { + ServiceType: model.ClusterSetIPType, + }, + }, + }, + want: multiclusterv1alpha1.ServiceImport{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: test.HttpNsName, + Name: test.SvcName, + 
Annotations: map[string]string{ + DerivedServiceAnnotation: CreateDerivedServiceAnnotation(test.HttpNsName, test.SvcName, []string{test.ClusterId1, test.ClusterId2}), + }, + }, + Spec: multiclusterv1alpha1.ServiceImportSpec{ + IPs: []string{}, + Type: multiclusterv1alpha1.ClusterSetIP, + Ports: []multiclusterv1alpha1.ServicePort{ + {Name: test.PortName1, Protocol: v1.ProtocolTCP, Port: test.Port1}, + {Name: test.PortName2, Protocol: v1.ProtocolUDP, Port: test.Port2}, + }, + }, + Status: multiclusterv1alpha1.ServiceImportStatus{ + Clusters: []multiclusterv1alpha1.ClusterStatus{ + { + Cluster: test.ClusterId1, + }, + { + Cluster: test.ClusterId2, + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := CreateServiceImportStruct(test.GetTestServiceWithEndpoint(tt.args.endpoints), tt.args.clusterIds, tt.args.servicePorts); !reflect.DeepEqual(*got, tt.want) { + t.Errorf("CreateServiceImportStruct() = %v, want %v", *got, tt.want) + } + }) + } +} + +func TestExtractServiceType(t *testing.T) { + tests := []struct { + name string + svc *v1.Service + want model.ServiceType + }{ + { + name: "cluster ip type", + svc: &v1.Service{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: test.SvcName, + Namespace: test.HttpNsName, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: test.PortName1, + Protocol: test.Protocol1, + Port: test.ServicePort1, + TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: test.Port1}, + }}, + ClusterIP: "10.108.89.43", + }, + Status: v1.ServiceStatus{}, + }, + want: model.ClusterSetIPType, + }, + { + name: "headless type", + svc: &v1.Service{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: test.SvcName, + Namespace: test.HttpNsName, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: test.PortName1, + Protocol: test.Protocol1, + Port: test.ServicePort1, + TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: test.Port1}, + 
}}, + ClusterIP: "None", + }, + Status: v1.ServiceStatus{}, + }, + want: model.HeadlessType, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ExtractServiceType(tt.svc); got != tt.want { + t.Errorf("ExtractServiceType() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestServiceTypetoServiceImportType(t *testing.T) { + tests := []struct { + name string + svcType model.ServiceType + want multiclusterv1alpha1.ServiceImportType + }{ + { + name: "cluster ip type", + svcType: model.ClusterSetIPType, + want: multiclusterv1alpha1.ClusterSetIP, + }, + { + name: "headless type", + svcType: model.HeadlessType, + want: multiclusterv1alpha1.Headless, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ServiceTypetoServiceImportType(tt.svcType); got != tt.want { + t.Errorf("ServiceTypetoServiceImportType() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestCreateDerivedServiceStruct(t *testing.T) { + type args struct { + servicePorts []*model.Port + svcImport *multiclusterv1alpha1.ServiceImport + } + tests := []struct { + name string + args args + want *v1.ServiceSpec + }{ + { + name: "cluster ip case", + args: args{ + servicePorts: []*model.Port{ + {Name: test.PortName1, Protocol: test.Protocol1, Port: test.Port1, TargetPort: "8080"}, + {Name: test.PortName2, Protocol: test.Protocol2, Port: test.Port2, TargetPort: "8080"}, + }, + svcImport: &multiclusterv1alpha1.ServiceImport{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: test.HttpNsName, + Name: test.SvcName, + Annotations: map[string]string{DerivedServiceAnnotation: DerivedName(test.HttpNsName, test.SvcName, test.ClusterId1)}, + }, + Spec: multiclusterv1alpha1.ServiceImportSpec{ + IPs: []string{}, + Type: multiclusterv1alpha1.ClusterSetIP, + }, + }, + }, + want: &v1.ServiceSpec{ + Type: v1.ServiceTypeClusterIP, + Ports: []v1.ServicePort{ + {Name: test.PortName1, Protocol: test.Protocol1, Port: test.Port1, TargetPort: 
intstr.IntOrString{Type: intstr.Int, IntVal: 8080}}, + {Name: test.PortName2, Protocol: test.Protocol2, Port: test.Port2, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: 8080}}, + }, + }, + }, + { + name: "headless case", + args: args{ + servicePorts: []*model.Port{ + {Name: test.PortName1, Protocol: test.Protocol1, Port: test.Port1, TargetPort: "8080"}, + {Name: test.PortName2, Protocol: test.Protocol2, Port: test.Port2, TargetPort: "8080"}, + }, + svcImport: &multiclusterv1alpha1.ServiceImport{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: test.HttpNsName, + Name: test.SvcName, + Annotations: map[string]string{DerivedServiceAnnotation: DerivedName(test.HttpNsName, test.SvcName, test.ClusterId1)}, + }, + Spec: multiclusterv1alpha1.ServiceImportSpec{ + IPs: []string{}, + Type: multiclusterv1alpha1.Headless, + }, + }, + }, + want: &v1.ServiceSpec{ + Type: v1.ServiceTypeClusterIP, + Ports: []v1.ServicePort{ + {Name: test.PortName1, Protocol: test.Protocol1, Port: test.Port1, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: 8080}}, + {Name: test.PortName2, Protocol: test.Protocol2, Port: test.Port2, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: 8080}}, + }, + ClusterIP: "None", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := &CreateDerivedServiceStruct(tt.args.svcImport, tt.args.servicePorts, test.ClusterId1).Spec; !reflect.DeepEqual(got, tt.want) { + t.Errorf("CreateDerivedServiceStruct() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestCreateDerivedServiceAnnotation(t *testing.T) { + type args struct { + namespace string + name string + clusterIds []string + } + tests := []struct { + name string + args args + want string + }{ + { + name: "create derived service annotation", + args: args{ + namespace: test.HttpNsName, + name: test.SvcName, + clusterIds: []string{test.ClusterId1, test.ClusterId2}, + }, + want: 
"[{\"cluster\":\"test-mcs-clusterid-1\",\"derived-service\":\"imported-vm6pdvp7di\"},{\"cluster\":\"test-mcs-clusterid-2\",\"derived-service\":\"imported-i8hm9c3um2\"}]", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := CreateDerivedServiceAnnotation(tt.args.namespace, tt.args.name, tt.args.clusterIds); got != tt.want { + t.Errorf("CreateDerivedServiceAnnotation() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/controllers/serviceexport_controller.go b/pkg/controllers/serviceexport_controller.go deleted file mode 100644 index 8aaf0660..00000000 --- a/pkg/controllers/serviceexport_controller.go +++ /dev/null @@ -1,226 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "context" - "fmt" - "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/cloudmap" - "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" - "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/version" - v1 "k8s.io/api/core/v1" - discovery "k8s.io/api/discovery/v1beta1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - - "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - - v1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/api/v1alpha1" -) - -const ( - serviceExportFinalizer = "multicluster.k8s.aws/service-export-finalizer" -) - -// ServiceExportReconciler reconciles a ServiceExport object -type ServiceExportReconciler struct { - client.Client - Log logr.Logger - Scheme *runtime.Scheme - Cloudmap cloudmap.ServiceDiscoveryClient -} - -// +kubebuilder:rbac:groups=multicluster.x-k8s.io,resources=serviceexports,verbs=get;list;watch;update;patch -// +kubebuilder:rbac:groups=multicluster.x-k8s.io,resources=serviceexports/finalizers,verbs=get;update - -func (r *ServiceExportReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - log := r.Log.WithValues("serviceexport", req.NamespacedName) - - svcExport := v1alpha1.ServiceExport{} - if err := r.Client.Get(ctx, req.NamespacedName, &svcExport); err != nil { - return ctrl.Result{}, client.IgnoreNotFound(err) - } - - if svcExport.DeletionTimestamp.IsZero() { - return r.handleUpdate(ctx, log, &svcExport) - } else { - return r.handleDelete(ctx, log, &svcExport) - } -} - -func (r *ServiceExportReconciler) handleUpdate(ctx context.Context, log logr.Logger, svcExport *v1alpha1.ServiceExport) (ctrl.Result, error) { - // add finalizer if not present - if !controllerutil.ContainsFinalizer(svcExport, serviceExportFinalizer) { 
- controllerutil.AddFinalizer(svcExport, serviceExportFinalizer) - if err := r.Update(ctx, svcExport); err != nil { - return ctrl.Result{}, err - } - } - - svc := v1.Service{} - if err := r.Client.Get(ctx, types.NamespacedName{Namespace: svcExport.Namespace, Name: svcExport.Name}, &svc); err != nil { - log.Error(err, "no service found for ServiceExport", - "Namespace", svcExport.GetNamespace(), "Name", svcExport.Name) - return ctrl.Result{}, client.IgnoreNotFound(err) - } - - endpoints, err := r.extractEndpoints(ctx, &svc) - if err != nil { - return ctrl.Result{}, err - } - - cloudMapService := &model.Service{ - Namespace: svc.Namespace, - Name: svc.Name, - } - - changes := model.Changes{ - Create: endpoints, - } - - log.Info("updating Cloud Map service", "namespace", svc.Namespace, "name", svc.Name) - srv, err := r.Cloudmap.GetService(ctx, svc.Namespace, svc.Name) - if err != nil { - log.Error(err, "error when fetching service from Cloud Map API", "namespace", svc.Namespace, "name", svc.Name) - return ctrl.Result{}, err - } - if srv == nil { - if err := r.Cloudmap.CreateService(ctx, cloudMapService); err != nil { - log.Error(err, "error when creating new service in Cloud Map", "namespace", svc.Namespace, "name", svc.Name) - return ctrl.Result{}, err - } - } else { - // compute diff between Cloud Map and K8s and apply changes - plan := model.Plan{ - Current: srv.Endpoints, - Desired: endpoints, - } - changes = plan.CalculateChanges() - } - - createRequired := len(changes.Create) > 0 - updateRequired := len(changes.Update) > 0 - deleteRequired := len(changes.Delete) > 0 - - if createRequired || updateRequired { - // merge creates and updates (Cloud Map RegisterEndpoints can handle both) - cloudMapService.Endpoints = changes.Create - cloudMapService.Endpoints = append(cloudMapService.Endpoints, changes.Update...) 
- - if err := r.Cloudmap.RegisterEndpoints(ctx, cloudMapService); err != nil { - log.Error(err, "error when registering endpoints to Cloud Map", - "namespace", svc.Namespace, "name", svc.Name) - return ctrl.Result{}, err - } - } - - if deleteRequired { - cloudMapService.Endpoints = changes.Delete - - if err := r.Cloudmap.DeleteEndpoints(ctx, cloudMapService); err != nil { - log.Error(err, "error when deleting endpoints from Cloud Map", - "namespace", srv.Namespace, "name", srv.Name) - return ctrl.Result{}, err - } - } - - if !createRequired && !updateRequired && !deleteRequired { - log.Info("no changes to export", "namespace", svc.Namespace, "name", svc.Name) - } - - return ctrl.Result{}, nil -} - -func (r *ServiceExportReconciler) handleDelete(ctx context.Context, log logr.Logger, svcExport *v1alpha1.ServiceExport) (ctrl.Result, error) { - if controllerutil.ContainsFinalizer(svcExport, serviceExportFinalizer) { - - log.Info("removing Cloud Map service", "namespace", svcExport.Namespace, "name", svcExport.Name) - - srv, err := r.Cloudmap.GetService(ctx, svcExport.Namespace, svcExport.Name) - if err != nil { - log.Error(err, "error when fetching service from Cloud Map API", - "namespace", svcExport.Namespace, "name", svcExport.Name) - return ctrl.Result{}, err - } - if srv != nil { - if err := r.Cloudmap.DeleteEndpoints(ctx, srv); err != nil { - log.Error(err, "error when deleting endpoints from Cloud Map", - "namespace", srv.Namespace, "name", srv.Name) - return ctrl.Result{}, err - } - } - - // remove finalizer - controllerutil.RemoveFinalizer(svcExport, serviceExportFinalizer) - if err := r.Update(ctx, svcExport); err != nil { - return ctrl.Result{}, err - } - } - - return ctrl.Result{}, nil -} - -func (r *ServiceExportReconciler) extractEndpoints(ctx context.Context, svc *v1.Service) ([]*model.Endpoint, error) { - result := make([]*model.Endpoint, 0) - - endpointSlices := discovery.EndpointSliceList{} - err := r.Client.List(ctx, &endpointSlices, - 
client.InNamespace(svc.Namespace), client.MatchingLabels{discovery.LabelServiceName: svc.Name}) - - if err != nil { - return nil, err - } - - for _, slice := range endpointSlices.Items { - if slice.AddressType != discovery.AddressTypeIPv4 { - return nil, fmt.Errorf("unsupported address type %s for service %s", slice.AddressType, svc.Name) - } - - for _, port := range slice.Ports { - for _, ep := range slice.Endpoints { - for _, IP := range ep.Addresses { - attributes := make(map[string]string, 0) - if version.GetVersion() != "" { - attributes["K8S_CONTROLLER"] = version.PackageName + " " + version.GetVersion() - } - // TODO extract attributes - pod, node and other useful details if possible - - result = append(result, &model.Endpoint{ - Id: model.EndpointIdFromIPAddress(IP), - IP: IP, - Port: *port.Port, - Attributes: attributes, - }) - } - } - } - } - - return result, nil -} - -func (r *ServiceExportReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&v1alpha1.ServiceExport{}). 
- Complete(r) -} diff --git a/pkg/controllers/serviceexport_controller_test.go b/pkg/controllers/serviceexport_controller_test.go deleted file mode 100644 index 177050f5..00000000 --- a/pkg/controllers/serviceexport_controller_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package controllers - -import ( - "context" - "fmt" - cloudmapmock "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/mocks/pkg/cloudmap" - "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/api/v1alpha1" - "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/cloudmap" - "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" - testing2 "github.com/go-logr/logr/testing" - "github.com/golang/mock/gomock" - "gotest.tools/assert" - v1 "k8s.io/api/core/v1" - discovery "k8s.io/api/discovery/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "testing" -) - -func TestServiceExportReconciler_Reconcile_NewServiceExport(t *testing.T) { - mockController := gomock.NewController(t) - defer mockController.Finish() - - expectedService := model.Service{ - Namespace: "my-namespace", - Name: "exported-service", - Endpoints: []*model.Endpoint{{ - Id: "1_1_1_1", - IP: "1.1.1.1", - Port: 80, - Attributes: map[string]string{}, - }}, - } - - cloudmapMock := cloudmapmock.NewMockServiceDiscoveryClient(mockController) - fmt.Printf("test output") - // expected interactions with the Cloud Map client - cloudmapMock.EXPECT().GetService(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) - cloudmapMock.EXPECT().CreateService(gomock.Any(), gomock.Any()).Return(nil).Times(1) - cloudmapMock.EXPECT().RegisterEndpoints(gomock.Any(), gomock.Eq(&expectedService)).Return(nil).Times(1) - - reconciler := setupServiceExportReconciler(t, cloudmapMock) - - request := ctrl.Request{ - NamespacedName: 
types.NamespacedName{ - Namespace: "my-namespace", - Name: "exported-service", - }, - } - - got, err := reconciler.Reconcile(context.Background(), request) - if err != nil { - t.Errorf("Reconcile() error = %v", err) - return - } - assert.Equal(t, ctrl.Result{}, got, "Result should be empty") -} - -func TestServiceExportReconciler_Reconcile_ExistingServiceNewEndpoint(t *testing.T) { - mockController := gomock.NewController(t) - defer mockController.Finish() - - emptyService := model.Service{ - Namespace: "my-namespace", - Name: "exported-service", - } - - expectedService := model.Service{ - Namespace: "my-namespace", - Name: "exported-service", - Endpoints: []*model.Endpoint{{ - Id: "1_1_1_1", - IP: "1.1.1.1", - Port: 80, - Attributes: map[string]string{}, - }}, - } - - cloudmapMock := cloudmapmock.NewMockServiceDiscoveryClient(mockController) - - // expected interactions with the Cloud Map client - cloudmapMock.EXPECT().GetService(gomock.Any(), gomock.Any(), gomock.Any()).Return(&emptyService, nil) - cloudmapMock.EXPECT().RegisterEndpoints(gomock.Any(), gomock.Eq(&expectedService)).Return(nil).Times(1) - - request := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Namespace: "my-namespace", - Name: "exported-service", - }, - } - - reconciler := setupServiceExportReconciler(t, cloudmapMock) - - got, err := reconciler.Reconcile(context.Background(), request) - if err != nil { - t.Errorf("Reconcile() error = %v", err) - return - } - assert.Equal(t, ctrl.Result{}, got, "Result should be empty") -} - -func setupServiceExportReconciler(t *testing.T, cloudmapMock cloudmap.ServiceDiscoveryClient) *ServiceExportReconciler { - k8sClient := setupK8sClient() - - return &ServiceExportReconciler{ - Client: k8sClient, - Log: testing2.TestLogger{T: t}, - Scheme: k8sClient.Scheme(), - Cloudmap: cloudmapMock, - } -} - -func setupK8sClient() client.Client { - // ServiceExport object - serviceExport := &v1alpha1.ServiceExport{ - ObjectMeta: metav1.ObjectMeta{ - Name: 
"exported-service", - Namespace: "my-namespace", - }, - } - - // Service object - service := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "exported-service", - Namespace: "my-namespace", - }, - } - - // EndpointSlice object - port := int32(80) - endpointSlice := &discovery.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "my-namespace", - Name: "slice-id", - Labels: map[string]string{discovery.LabelServiceName: "exported-service"}, - }, - AddressType: discovery.AddressTypeIPv4, - Endpoints: []discovery.Endpoint{{ - Addresses: []string{"1.1.1.1"}, - }}, - Ports: []discovery.EndpointPort{{Port: &port}}, - } - endpointSliceList := &discovery.EndpointSliceList{ - Items: []discovery.EndpointSlice{*endpointSlice}, - } - - scheme := runtime.NewScheme() - scheme.AddKnownTypes(v1alpha1.GroupVersion, serviceExport) - scheme.AddKnownTypes(v1.SchemeGroupVersion, service) - scheme.AddKnownTypes(discovery.SchemeGroupVersion, endpointSlice) - scheme.AddKnownTypes(discovery.SchemeGroupVersion, endpointSliceList) - - return fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(serviceExport, service). - WithLists(endpointSliceList). - Build() -} diff --git a/pkg/controllers/suite_test.go b/pkg/controllers/suite_test.go deleted file mode 100644 index e4127751..00000000 --- a/pkg/controllers/suite_test.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "path/filepath" - "testing" - - . 
"github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - - multiclusterv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/api/v1alpha1" - // +kubebuilder:scaffold:imports -) - -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. - -var cfg *rest.Config -var k8sClient client.Client -var testEnv *envtest.Environment - -func TestAPIs(t *testing.T) { - RegisterFailHandler(Fail) - - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{printer.NewlineReporter{}}) -} - -var _ = BeforeSuite(func() { - logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) - - By("bootstrapping test environment") - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, - ErrorIfCRDPathMissing: true, - } - - cfg, err := testEnv.Start() - Expect(err).NotTo(HaveOccurred()) - Expect(cfg).NotTo(BeNil()) - - err = multiclusterv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = multiclusterv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - //+kubebuilder:scaffold:scheme - - k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) - Expect(err).NotTo(HaveOccurred()) - Expect(k8sClient).NotTo(BeNil()) - -}, 60) - -var _ = AfterSuite(func() { - By("tearing down the test environment") - err := testEnv.Stop() - Expect(err).NotTo(HaveOccurred()) -}) diff --git a/pkg/model/cluster.go b/pkg/model/cluster.go new file mode 100644 index 00000000..1b008733 --- /dev/null +++ b/pkg/model/cluster.go @@ -0,0 +1,86 @@ +package model + +import ( + 
"context" + "fmt" + + aboutv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/about/v1alpha1" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + ClusterIdPropertyName = "cluster.clusterset.k8s.io" + ClusterSetIdPropertyName = "clusterset.k8s.io" +) + +// Non-exported type, accessible via read-only func +type clusterProperties struct { + clusterId string + clusterSetId string +} + +func (r clusterProperties) ClusterId() string { + return r.clusterId +} + +func (r clusterProperties) ClusterSetId() string { + return r.clusterSetId +} + +func (r clusterProperties) IsValid() bool { + return r.clusterSetId != "" && r.clusterId != "" +} + +func (r clusterProperties) String() string { + return fmt.Sprintf("ClusterId: %s, ClusterSetId: %s", r.clusterId, r.clusterSetId) +} + +// ClusterUtils provides utility functions for working with clusters +type ClusterUtils struct { + client client.Client + clusterProperties clusterProperties +} + +func NewClusterUtils(client client.Client) ClusterUtils { + return ClusterUtils{ + client: client, + clusterProperties: clusterProperties{}, + } +} + +func NewClusterUtilsWithValues(clusterId string, clusterSetId string) ClusterUtils { + return ClusterUtils{ + clusterProperties: clusterProperties{clusterId: clusterId, clusterSetId: clusterSetId}, + } +} + +func (r *ClusterUtils) GetClusterProperties(ctx context.Context) (*clusterProperties, error) { + if !r.clusterProperties.IsValid() { + err := r.LoadClusterProperties(ctx) + if err != nil { + return nil, err + } + } + return &r.clusterProperties, nil +} + +func (r *ClusterUtils) LoadClusterProperties(ctx context.Context) error { + clusterPropertyList := &aboutv1alpha1.ClusterPropertyList{} + err := r.client.List(ctx, clusterPropertyList) + if err != nil { + return err + } + for _, clusterProperty := range clusterPropertyList.Items { + switch clusterProperty.Name { + case ClusterIdPropertyName: + r.clusterProperties.clusterId = clusterProperty.Spec.Value + 
case ClusterSetIdPropertyName: + r.clusterProperties.clusterSetId = clusterProperty.Spec.Value + } + } + if !r.clusterProperties.IsValid() { + return fmt.Errorf("ClusterProperty not found: %s", r.clusterProperties) + } + return nil +} diff --git a/pkg/model/cluster_test.go b/pkg/model/cluster_test.go new file mode 100644 index 00000000..2dbb6db1 --- /dev/null +++ b/pkg/model/cluster_test.go @@ -0,0 +1,108 @@ +package model + +import ( + "context" + "reflect" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + aboutv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/about/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestClusterUtils_GetClusterProperties(t *testing.T) { + type fields struct { + client client.Client + clusterProperties clusterProperties + } + type args struct { + ctx context.Context + } + clusterId := "cluster1" + clusterSetId := "clusterset1" + tests := []struct { + name string + fields fields + args args + want *clusterProperties + wantErr bool + }{ + { + name: "happy case fetch from client", + fields: fields{ + client: fake.NewClientBuilder().WithScheme(GetScheme()).WithObjects(ClusterIdForTest(clusterId), ClusterSetIdForTest(clusterSetId)).Build(), + clusterProperties: clusterProperties{}, + }, + args: args{ctx: context.TODO()}, + want: &clusterProperties{clusterId: clusterId, clusterSetId: clusterSetId}, + wantErr: false, + }, + { + name: "happy case already set", + fields: fields{ + client: nil, + clusterProperties: clusterProperties{clusterId: clusterId, clusterSetId: clusterSetId}, + }, + args: args{ctx: context.TODO()}, + want: &clusterProperties{clusterId: clusterId, clusterSetId: clusterSetId}, + wantErr: false, + }, + { + name: "error cluster properties not present", + fields: fields{ + client: fake.NewClientBuilder().WithScheme(GetScheme()).Build(), + clusterProperties: clusterProperties{}, + }, + 
args: args{ctx: context.TODO()}, + want: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &ClusterUtils{ + client: tt.fields.client, + clusterProperties: tt.fields.clusterProperties, + } + got, err := r.GetClusterProperties(tt.args.ctx) + if (err != nil) != tt.wantErr { + t.Errorf("GetClusterProperties() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetClusterProperties() got = %v, want %v", got, tt.want) + } + }) + } +} + +func ClusterIdForTest(clusterId string) *aboutv1alpha1.ClusterProperty { + return &aboutv1alpha1.ClusterProperty{ + ObjectMeta: metav1.ObjectMeta{ + Name: ClusterIdPropertyName, + }, + Spec: aboutv1alpha1.ClusterPropertySpec{ + Value: clusterId, + }, + } +} + +func ClusterSetIdForTest(clusterSetId string) *aboutv1alpha1.ClusterProperty { + return &aboutv1alpha1.ClusterProperty{ + ObjectMeta: metav1.ObjectMeta{ + Name: ClusterSetIdPropertyName, + }, + Spec: aboutv1alpha1.ClusterPropertySpec{ + Value: clusterSetId, + }, + } +} + +func GetScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + scheme.AddKnownTypes(aboutv1alpha1.GroupVersion, &aboutv1alpha1.ClusterProperty{}, &aboutv1alpha1.ClusterPropertyList{}) + return scheme +} diff --git a/pkg/model/plan.go b/pkg/model/plan.go index 7ce29d5f..618b4df2 100644 --- a/pkg/model/plan.go +++ b/pkg/model/plan.go @@ -45,3 +45,15 @@ func (p *Plan) CalculateChanges() Changes { return changes } + +func (c *Changes) HasUpdates() bool { + return len(c.Create) > 0 || len(c.Update) > 0 +} + +func (c *Changes) HasDeletes() bool { + return len(c.Delete) > 0 +} + +func (c *Changes) IsNone() bool { + return len(c.Create) == 0 && len(c.Update) == 0 && len(c.Delete) == 0 +} diff --git a/pkg/model/types.go b/pkg/model/types.go index 0dcd4dbe..85c93579 100644 --- a/pkg/model/types.go +++ b/pkg/model/types.go @@ -2,72 +2,272 @@ package model import ( "encoding/json" - "errors" "fmt" - 
"github.com/aws/aws-sdk-go-v2/service/servicediscovery/types" "reflect" "strconv" "strings" + + discovery "k8s.io/api/discovery/v1" + + "github.com/aws/aws-sdk-go-v2/service/servicediscovery/types" +) + +const ( + HttpNamespaceType NamespaceType = "HTTP" + DnsPrivateNamespaceType NamespaceType = "DNS_PRIVATE" + // UnsupportedNamespaceType Placeholder NamespaceType to denote not supported values + UnsupportedNamespaceType NamespaceType = "" ) +type NamespaceType string + +// Namespace hold namespace attributes +type Namespace struct { + Id string + Name string + Type NamespaceType +} + +// Service holds namespace and endpoint state for a named service. type Service struct { Namespace string Name string Endpoints []*Endpoint } +const ( + HeadlessType ServiceType = "Headless" + ClusterSetIPType ServiceType = "ClusterSetIP" +) + +type ServiceType string + +// Endpoint holds basic values and attributes for an endpoint. type Endpoint struct { - Id string - IP string + Id string + IP string + AddressType discovery.AddressType + EndpointPort Port + ServicePort Port + ClusterId string + ClusterSetId string + ServiceType ServiceType + ServiceExportCreationTimestamp int64 + Ready bool + Hostname string + Nodename string + Attributes map[string]string +} + +type Port struct { + Name string Port int32 - Attributes map[string]string + TargetPort string + Protocol string // TCP, UDP, SCTP } -const ipv4Attr = "AWS_INSTANCE_IPV4" -const portAttr = "AWS_INSTANCE_PORT" +// Cloudmap Instances IP and Port is supposed to be AWS_INSTANCE_IPV4 and AWS_INSTANCE_PORT +// Rest are custom attributes +const ( + EndpointIpv4Attr = "AWS_INSTANCE_IPV4" + EndpointIpv6Attr = "AWS_INSTANCE_IPV6" + EndpointPortAttr = "AWS_INSTANCE_PORT" + EndpointPortNameAttr = "ENDPOINT_PORT_NAME" + EndpointProtocolAttr = "ENDPOINT_PROTOCOL" + EndpointReadyAttr = "READY" + EndpointHostnameAttr = "HOSTNAME" + EndpointNodeNameAttr = "NODENAME" + ClusterIdAttr = "CLUSTER_ID" + ClusterSetIdAttr = "CLUSTERSET_ID" + 
ServicePortNameAttr = "SERVICE_PORT_NAME" + ServicePortAttr = "SERVICE_PORT" + ServiceTargetPortAttr = "SERVICE_TARGET_PORT" + ServiceProtocolAttr = "SERVICE_PROTOCOL" + ServiceTypeAttr = "SERVICE_TYPE" + ServiceExportCreationAttr = "SERVICE_EXPORT_CREATION_TIMESTAMP" + K8sVersionAttr = "K8S_CONTROLLER" +) -// NewEndpointFromInstance converts a Cloud Map InstanceSummary to an endpoint -func NewEndpointFromInstance(inst *types.InstanceSummary) (*Endpoint, error) { +// NewEndpointFromInstance converts a Cloud Map HttpInstanceSummary to an endpoint. +func NewEndpointFromInstance(inst *types.HttpInstanceSummary) (*Endpoint, error) { endpoint := Endpoint{ - Id: *inst.Id, - Attributes: make(map[string]string, 0), + Id: *inst.InstanceId, + Attributes: make(map[string]string), + } + attributes := make(map[string]string) + for key, value := range inst.Attributes { + attributes[key] = value } - if ipv4, hasIp := inst.Attributes[ipv4Attr]; hasIp { - endpoint.IP = ipv4 - } else { - return nil, errors.New(fmt.Sprintf("cannot convert service instance %s to endpoint without IP address", *inst.Id)) + // Remove and set the IP, Port, Service Port, ServiceType, ClusterId, ClusterSetId + + // ASSUMPTION: Endpoints have either IPV4 OR IPV6, not both. Defaults to IPV4 if both are present. + ipv4, ipv4Exists := attributes[EndpointIpv4Attr] + ipv6, ipv6Exists := attributes[EndpointIpv6Attr] + if ipv6Exists { + ip, err := removeStringAttr(attributes, EndpointIpv6Attr) + if err != nil { + return nil, err + } + endpoint.IP = ip + endpoint.AddressType = discovery.AddressTypeIPv6 + } + if ipv4Exists { + ip, err := removeStringAttr(attributes, EndpointIpv4Attr) + if err != nil { + return nil, err + } + endpoint.IP = ip + endpoint.AddressType = discovery.AddressTypeIPv4 + } + if ipv4Exists && ipv6Exists { + fmt.Printf("WARNING: Found both address types in one Endpoint... 
IPv4: %s IPv6: %s\n", ipv4, ipv6) } - if portStr, hasPort := inst.Attributes[portAttr]; hasPort { - port, parseError := strconv.Atoi(portStr) + endpointPort, err := endpointPortFromAttr(attributes) + if err != nil { + return nil, err + } + endpoint.EndpointPort = endpointPort + + servicePort, err := servicePortFromAttr(attributes) + if err != nil { + return nil, err + } + endpoint.ServicePort = servicePort + + serviceTypeStr, err := removeStringAttr(attributes, ServiceTypeAttr) + if err != nil { + return nil, err + } + endpoint.ServiceType = ServiceType(serviceTypeStr) + + if endpoint.ClusterId, err = removeStringAttr(attributes, ClusterIdAttr); err != nil { + return nil, err + } + + if endpoint.ClusterSetId, err = removeStringAttr(attributes, ClusterSetIdAttr); err != nil { + return nil, err + } + + if endpoint.Ready, err = removeBoolAttr(attributes, EndpointReadyAttr); err != nil { + return nil, err + } + + if endpoint.ServiceExportCreationTimestamp, err = removeTimestampAttr(attributes, ServiceExportCreationAttr); err != nil { + return nil, err + } + + // Hostname and Nodename are Optional attributes + endpoint.Hostname, _ = removeStringAttr(attributes, EndpointHostnameAttr) + endpoint.Nodename, _ = removeStringAttr(attributes, EndpointNodeNameAttr) + + // Add the remaining attributes + endpoint.Attributes = attributes + + return &endpoint, err +} +func endpointPortFromAttr(attributes map[string]string) (port Port, err error) { + port = Port{} + if port.Name, err = removeStringAttr(attributes, EndpointPortNameAttr); err != nil { + return port, err + } + if port.Port, err = removeIntAttr(attributes, EndpointPortAttr); err != nil { + return port, err + } + if port.Protocol, err = removeStringAttr(attributes, EndpointProtocolAttr); err != nil { + return port, err + } + return port, err +} + +func servicePortFromAttr(attributes map[string]string) (port Port, err error) { + port = Port{} + if port.TargetPort, err = removeStringAttr(attributes, ServiceTargetPortAttr); 
err != nil { + return port, err + } + if port.Name, err = removeStringAttr(attributes, ServicePortNameAttr); err != nil { + return port, err + } + if port.Port, err = removeIntAttr(attributes, ServicePortAttr); err != nil { + return port, err + } + if port.Protocol, err = removeStringAttr(attributes, ServiceProtocolAttr); err != nil { + return port, err + } + return port, err +} + +func removeStringAttr(attributes map[string]string, attr string) (string, error) { + if value, hasValue := attributes[attr]; hasValue { + delete(attributes, attr) + return value, nil + } + return "", fmt.Errorf("cannot find the attribute %s", attr) +} + +func removeIntAttr(attributes map[string]string, attr string) (int32, error) { + if value, hasValue := attributes[attr]; hasValue { + parsedValue, parseError := strconv.ParseUint(value, 10, 16) if parseError != nil { - return nil, parseError + return 0, fmt.Errorf("failed to parse the %s as int with error %s", attr, parseError.Error()) } - - endpoint.Port = int32(port) - } else { - return nil, errors.New(fmt.Sprintf("cannot convert service instance %s to endpoint without port", *inst.Id)) + delete(attributes, attr) + return int32(parsedValue), nil } + return 0, fmt.Errorf("cannot find the attribute %s", attr) +} - for key, val := range inst.Attributes { - if key != ipv4Attr && key != portAttr { - endpoint.Attributes[key] = val +func removeBoolAttr(attributes map[string]string, attr string) (bool, error) { + if value, hasValue := attributes[attr]; hasValue { + parsedValue, parseError := strconv.ParseBool(value) + if parseError != nil { + return false, fmt.Errorf("failed to parse the %s as bool with error %s", attr, parseError.Error()) } + delete(attributes, attr) + return parsedValue, nil } + return false, fmt.Errorf("cannot find the attribute %s", attr) +} - return &endpoint, nil +func removeTimestampAttr(attributes map[string]string, attr string) (int64, error) { + if value, hasValue := attributes[attr]; hasValue { + parsedValue, 
parseError := strconv.ParseInt(value, 10, 64) + if parseError != nil { + return 0, fmt.Errorf("failed to parse the %s as int with error %s", + attr, parseError.Error()) + } + delete(attributes, attr) + return parsedValue, nil + } + return 0, fmt.Errorf("cannot find the attribute %s", attr) } -// GetAttributes extracts endpoint attributes for Cloud Map service instance registration -func (e *Endpoint) GetAttributes() map[string]string { - attrs := make(map[string]string, 0) +// GetCloudMapAttributes extracts endpoint attributes for Cloud Map service instance registration. +func (e *Endpoint) GetCloudMapAttributes() map[string]string { + attrs := make(map[string]string) - attrs[ipv4Attr] = e.IP + if e.AddressType == discovery.AddressTypeIPv4 { + attrs[EndpointIpv4Attr] = e.IP + } else if e.AddressType == discovery.AddressTypeIPv6 { + attrs[EndpointIpv6Attr] = e.IP + } - port := strconv.FormatInt(int64(e.Port), 10) - attrs[portAttr] = port + attrs[ClusterIdAttr] = e.ClusterId + attrs[ClusterSetIdAttr] = e.ClusterSetId + attrs[EndpointPortAttr] = strconv.Itoa(int(e.EndpointPort.Port)) + attrs[EndpointProtocolAttr] = e.EndpointPort.Protocol + attrs[EndpointPortNameAttr] = e.EndpointPort.Name + attrs[ServicePortNameAttr] = e.ServicePort.Name + attrs[ServicePortAttr] = strconv.Itoa(int(e.ServicePort.Port)) + attrs[ServiceTargetPortAttr] = e.ServicePort.TargetPort + attrs[ServiceProtocolAttr] = e.ServicePort.Protocol + attrs[ServiceTypeAttr] = e.ServiceType.String() + attrs[ServiceExportCreationAttr] = strconv.FormatInt(e.ServiceExportCreationTimestamp, 10) + attrs[EndpointReadyAttr] = strconv.FormatBool(e.Ready) + attrs[EndpointHostnameAttr] = e.Hostname + attrs[EndpointNodeNameAttr] = e.Nodename for key, val := range e.Attributes { attrs[key] = val @@ -76,10 +276,12 @@ func (e *Endpoint) GetAttributes() map[string]string { return attrs } +// Equals evaluates if two Endpoints are "deeply equal" (including all fields). 
func (e *Endpoint) Equals(other *Endpoint) bool { return reflect.DeepEqual(e, other) } +// String gives a string representation for an endpoint. func (e *Endpoint) String() string { bytes, err := json.Marshal(e) if err != nil { @@ -89,7 +291,58 @@ func (e *Endpoint) String() string { return string(bytes) } -// EndpointIdFromIPAddress converts an IP address to human readable identifier -func EndpointIdFromIPAddress(address string) string { - return strings.Replace(address, ".", "_", -1) +// EndpointIdFromIPAddressAndPort converts an IP address to human-readable identifier. +func EndpointIdFromIPAddressAndPort(address string, port Port) string { + address = strings.ReplaceAll(address, ".", "_") + address = strings.ReplaceAll(address, ":", "_") + return fmt.Sprintf("%s-%s-%d", strings.ToLower(port.Protocol), address, port.Port) +} + +// Gives string representation for ServiceType +func (serviceType ServiceType) String() string { + return string(serviceType) +} + +func ConvertNamespaceType(nsType types.NamespaceType) (namespaceType NamespaceType) { + switch nsType { + case types.NamespaceTypeDnsPrivate: + return DnsPrivateNamespaceType + case types.NamespaceTypeHttp: + return HttpNamespaceType + default: + return UnsupportedNamespaceType + } +} + +func (svc *Service) GetEndpoints(clusterId string) (endpts []*Endpoint) { + for _, endpt := range svc.Endpoints { + if endpt.ClusterId == clusterId { + endpts = append(endpts, endpt) + } + } + return endpts +} + +func (namespaceType *NamespaceType) IsUnsupported() bool { + return *namespaceType == UnsupportedNamespaceType +} + +func (p *Port) GetID() string { + return fmt.Sprintf("%s:%d", p.Protocol, p.Port) +} + +// Equals evaluates if two Ports are "deeply equal" (including all fields). 
+func (p *Port) Equals(other *Port) bool { + return reflect.DeepEqual(p, other) +} + +func GetAddressTypeFromString(addressTypeStr string) (discovery.AddressType, error) { + switch addressTypeStr { + case string(discovery.AddressTypeIPv4): + return discovery.AddressTypeIPv4, nil + case string(discovery.AddressTypeIPv6): + return discovery.AddressTypeIPv6, nil + default: + return "", fmt.Errorf("Invalid AddressType, could not parse from string: %s", addressTypeStr) + } } diff --git a/pkg/model/types_test.go b/pkg/model/types_test.go new file mode 100644 index 00000000..8ab1ca44 --- /dev/null +++ b/pkg/model/types_test.go @@ -0,0 +1,665 @@ +package model + +import ( + "reflect" + "strconv" + "testing" + + discovery "k8s.io/api/discovery/v1" + + "github.com/aws/aws-sdk-go-v2/service/servicediscovery/types" + "github.com/google/go-cmp/cmp" +) + +var instId = "my-instance" +var ipv4 = "192.168.0.1" +var ipv6 = "2001:0db8:0001:0000:0000:0ab9:C0A8:0102" +var clusterId = "test-mcs-clusterId" +var clusterId2 = "test-mcs-clusterid-2" +var clusterId3 = "test-mcs-clusterid-3" +var namespaceName = "test-mcs-namespace" +var serviceName = "test-mcs-service" +var clusterSetId = "test-mcs-clusterSetId" +var serviceType = ClusterSetIPType.String() +var svcExportCreationTimestamp int64 = 1640995200000 + +func TestNewEndpointFromInstance(t *testing.T) { + tests := []struct { + name string + inst *types.HttpInstanceSummary + want *Endpoint + wantErr bool + }{ + { + name: "happy case ipv4", + inst: &types.HttpInstanceSummary{ + InstanceId: &instId, + Attributes: map[string]string{ + ClusterIdAttr: clusterId, + ClusterSetIdAttr: clusterSetId, + EndpointIpv4Attr: ipv4, + EndpointPortAttr: "80", + EndpointProtocolAttr: "TCP", + EndpointPortNameAttr: "http", + EndpointReadyAttr: "true", + ServicePortNameAttr: "http", + ServiceProtocolAttr: "TCP", + ServicePortAttr: "65535", + ServiceTargetPortAttr: "80", + ServiceTypeAttr: serviceType, + ServiceExportCreationAttr: 
strconv.FormatInt(svcExportCreationTimestamp, 10), + "custom-attr": "custom-val", + }, + }, + want: &Endpoint{ + Id: instId, + IP: ipv4, + AddressType: discovery.AddressTypeIPv4, + EndpointPort: Port{ + Name: "http", + Port: 80, + Protocol: "TCP", + }, + ServicePort: Port{ + Name: "http", + Port: 65535, + TargetPort: "80", + Protocol: "TCP", + }, + ClusterId: clusterId, + ClusterSetId: clusterSetId, + ServiceType: ServiceType(serviceType), + ServiceExportCreationTimestamp: svcExportCreationTimestamp, + Ready: true, + Attributes: map[string]string{ + "custom-attr": "custom-val", + }, + }, + }, + { + name: "happy case ipv6", + inst: &types.HttpInstanceSummary{ + InstanceId: &instId, + Attributes: map[string]string{ + ClusterIdAttr: clusterId, + ClusterSetIdAttr: clusterSetId, + EndpointIpv6Attr: ipv6, + EndpointPortAttr: "80", + EndpointProtocolAttr: "TCP", + EndpointPortNameAttr: "http", + EndpointReadyAttr: "true", + ServicePortNameAttr: "http", + ServiceProtocolAttr: "TCP", + ServicePortAttr: "65535", + ServiceTargetPortAttr: "80", + ServiceTypeAttr: serviceType, + ServiceExportCreationAttr: strconv.FormatInt(svcExportCreationTimestamp, 10), + "custom-attr": "custom-val", + }, + }, + want: &Endpoint{ + Id: instId, + IP: ipv6, + AddressType: discovery.AddressTypeIPv6, + EndpointPort: Port{ + Name: "http", + Port: 80, + Protocol: "TCP", + }, + ServicePort: Port{ + Name: "http", + Port: 65535, + TargetPort: "80", + Protocol: "TCP", + }, + ClusterId: clusterId, + ClusterSetId: clusterSetId, + ServiceType: ServiceType(serviceType), + ServiceExportCreationTimestamp: svcExportCreationTimestamp, + Ready: true, + Attributes: map[string]string{ + "custom-attr": "custom-val", + }, + }, + }, + { + name: "ipv4 and ipv6 defaults to ipv4", + inst: &types.HttpInstanceSummary{ + InstanceId: &instId, + Attributes: map[string]string{ + ClusterIdAttr: clusterId, + ClusterSetIdAttr: clusterSetId, + EndpointIpv4Attr: ipv4, + EndpointIpv6Attr: ipv6, + EndpointPortAttr: "80", + 
EndpointProtocolAttr: "TCP", + EndpointPortNameAttr: "http", + EndpointReadyAttr: "true", + ServicePortNameAttr: "http", + ServiceProtocolAttr: "TCP", + ServicePortAttr: "65535", + ServiceTargetPortAttr: "80", + ServiceTypeAttr: serviceType, + ServiceExportCreationAttr: strconv.FormatInt(svcExportCreationTimestamp, 10), + "custom-attr": "custom-val", + }, + }, + want: &Endpoint{ + Id: instId, + IP: ipv4, + AddressType: discovery.AddressTypeIPv4, + EndpointPort: Port{ + Name: "http", + Port: 80, + Protocol: "TCP", + }, + ServicePort: Port{ + Name: "http", + Port: 65535, + TargetPort: "80", + Protocol: "TCP", + }, + ClusterId: clusterId, + ClusterSetId: clusterSetId, + ServiceType: ServiceType(serviceType), + ServiceExportCreationTimestamp: svcExportCreationTimestamp, + Ready: true, + Attributes: map[string]string{ + "custom-attr": "custom-val", + }, + }, + }, + { + name: "invalid port", + inst: &types.HttpInstanceSummary{ + InstanceId: &instId, + Attributes: map[string]string{ + EndpointIpv4Attr: ipv4, + EndpointPortAttr: "80", + EndpointProtocolAttr: "TCP", + EndpointPortNameAttr: "http", + EndpointReadyAttr: "true", + ServicePortNameAttr: "http", + ServiceProtocolAttr: "TCP", + ServicePortAttr: "99999", + ServiceTargetPortAttr: "80", + ServiceTypeAttr: serviceType, + "custom-attr": "custom-val", + }, + }, + wantErr: true, + }, + { + name: "missing IP", + inst: &types.HttpInstanceSummary{ + InstanceId: &instId, + Attributes: map[string]string{ + EndpointPortAttr: "80", + "custom-attr": "custom-val", + }, + }, + wantErr: true, + }, + { + name: "missing port", + inst: &types.HttpInstanceSummary{ + InstanceId: &instId, + Attributes: map[string]string{ + EndpointIpv4Attr: ipv4, + "custom-attr": "custom-val", + }, + }, + wantErr: true, + }, + { + name: "missing clusterid", + inst: &types.HttpInstanceSummary{ + InstanceId: &instId, + Attributes: map[string]string{ + ClusterSetIdAttr: clusterSetId, + EndpointIpv4Attr: ipv4, + EndpointPortAttr: "80", + 
EndpointProtocolAttr: "TCP", + EndpointPortNameAttr: "http", + EndpointReadyAttr: "true", + ServicePortNameAttr: "http", + ServiceProtocolAttr: "TCP", + ServicePortAttr: "65535", + ServiceTargetPortAttr: "80", + "custom-attr": "custom-val", + }, + }, + wantErr: true, + }, + { + name: "missing clustersetid", + inst: &types.HttpInstanceSummary{ + InstanceId: &instId, + Attributes: map[string]string{ + ClusterIdAttr: clusterId, + EndpointIpv4Attr: ipv4, + EndpointPortAttr: "80", + EndpointProtocolAttr: "TCP", + EndpointPortNameAttr: "http", + EndpointReadyAttr: "true", + ServicePortNameAttr: "http", + ServiceProtocolAttr: "TCP", + ServicePortAttr: "65535", + ServiceTargetPortAttr: "80", + "custom-attr": "custom-val", + }, + }, + wantErr: true, + }, + { + name: "missing service type", + inst: &types.HttpInstanceSummary{ + InstanceId: &instId, + Attributes: map[string]string{ + ClusterIdAttr: clusterId, + ClusterSetIdAttr: clusterSetId, + EndpointIpv4Attr: ipv4, + EndpointPortAttr: "80", + EndpointProtocolAttr: "TCP", + EndpointPortNameAttr: "http", + EndpointReadyAttr: "true", + ServicePortNameAttr: "http", + ServiceProtocolAttr: "TCP", + ServicePortAttr: "65535", + ServiceTargetPortAttr: "80", + "custom-attr": "custom-val", + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := NewEndpointFromInstance(tt.inst) + if (err != nil) != tt.wantErr { + t.Errorf("NewEndpointFromInstance() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("NewEndpointFromInstance() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestEndpoint_GetAttributes(t *testing.T) { + tests := []struct { + name string + endpoint Endpoint + want map[string]string + }{ + { + name: "happy case ipv4", + endpoint: Endpoint{ + IP: ipv4, + AddressType: discovery.AddressTypeIPv4, + EndpointPort: Port{ + Name: "http", + Port: 80, + Protocol: "TCP", + }, + ServicePort: Port{ + Name: 
"http", + Port: 30, + TargetPort: "80", + Protocol: "TCP", + }, + Ready: true, + ClusterId: clusterId, + ClusterSetId: clusterSetId, + ServiceType: ServiceType(serviceType), + ServiceExportCreationTimestamp: svcExportCreationTimestamp, + Attributes: map[string]string{ + "custom-attr": "custom-val", + }, + }, + want: map[string]string{ + ClusterIdAttr: clusterId, + ClusterSetIdAttr: clusterSetId, + EndpointIpv4Attr: ipv4, + EndpointPortAttr: "80", + EndpointProtocolAttr: "TCP", + EndpointPortNameAttr: "http", + EndpointReadyAttr: "true", + EndpointHostnameAttr: "", + EndpointNodeNameAttr: "", + ServicePortNameAttr: "http", + ServiceProtocolAttr: "TCP", + ServicePortAttr: "30", + ServiceTargetPortAttr: "80", + ServiceTypeAttr: serviceType, + ServiceExportCreationAttr: strconv.FormatInt(svcExportCreationTimestamp, 10), + "custom-attr": "custom-val", + }, + }, + { + name: "happy case ipv6", + endpoint: Endpoint{ + IP: ipv6, + AddressType: discovery.AddressTypeIPv6, + EndpointPort: Port{ + Name: "http", + Port: 80, + Protocol: "TCP", + }, + ServicePort: Port{ + Name: "http", + Port: 30, + TargetPort: "80", + Protocol: "TCP", + }, + Ready: true, + ClusterId: clusterId, + ClusterSetId: clusterSetId, + ServiceType: ServiceType(serviceType), + ServiceExportCreationTimestamp: svcExportCreationTimestamp, + Attributes: map[string]string{ + "custom-attr": "custom-val", + }, + }, + want: map[string]string{ + ClusterIdAttr: clusterId, + ClusterSetIdAttr: clusterSetId, + EndpointIpv6Attr: ipv6, + EndpointPortAttr: "80", + EndpointProtocolAttr: "TCP", + EndpointPortNameAttr: "http", + EndpointReadyAttr: "true", + EndpointHostnameAttr: "", + EndpointNodeNameAttr: "", + ServicePortNameAttr: "http", + ServiceProtocolAttr: "TCP", + ServicePortAttr: "30", + ServiceTargetPortAttr: "80", + ServiceTypeAttr: serviceType, + ServiceExportCreationAttr: strconv.FormatInt(svcExportCreationTimestamp, 10), + "custom-attr": "custom-val", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, 
func(t *testing.T) { + if got := tt.endpoint.GetCloudMapAttributes(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetAttributes() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestEndpointIdFromIPAddressAndPort(t *testing.T) { + tests := []struct { + name string + address string + port Port + want string + }{ + { + name: "happy case ipv4", + address: ipv4, + port: Port{ + Name: "http", + Port: 80, + Protocol: "TCP", + }, + want: "tcp-192_168_0_1-80", + }, + { + name: "happy case ipv6", + address: ipv6, + port: Port{ + Name: "http", + Port: 80, + Protocol: "TCP", + }, + want: "tcp-2001_0db8_0001_0000_0000_0ab9_C0A8_0102-80", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := EndpointIdFromIPAddressAndPort(tt.address, tt.port); got != tt.want { + t.Errorf("EndpointIdFromIPAddressAndPort() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestEndpoint_Equals(t *testing.T) { + firstEndpointIpv4 := Endpoint{ + Id: instId, + IP: ipv4, + AddressType: discovery.AddressTypeIPv4, + ServicePort: Port{ + Port: 80, + }, + Attributes: map[string]string{ + "custom-key": "custom-val", + }, + } + + secondEndpointIpv4 := Endpoint{ + Id: instId, + IP: ipv4, + AddressType: discovery.AddressTypeIPv4, + ServicePort: Port{ + Port: 80, + Name: "", + }, + Attributes: map[string]string{ + "custom-key": "custom-val", + }, + } + + thirdEndpointIpv4 := Endpoint{ + Id: instId, + IP: ipv4, + AddressType: discovery.AddressTypeIPv4, + ServicePort: Port{ + Port: 80, + }, + Attributes: map[string]string{ + "custom-key": "different-val", + }, + } + + firstEndpointIpv6 := Endpoint{ + Id: instId, + IP: ipv6, + AddressType: discovery.AddressTypeIPv6, + ServicePort: Port{ + Port: 80, + }, + Attributes: map[string]string{ + "custom-key": "custom-val", + }, + } + + secondEndpointIpv6 := Endpoint{ + Id: instId, + IP: ipv6, + AddressType: discovery.AddressTypeIPv6, + ServicePort: Port{ + Port: 80, + Name: "", + }, + Attributes: map[string]string{ + 
"custom-key": "custom-val", + }, + } + + thirdEndpointIpv6 := Endpoint{ + Id: instId, + IP: ipv6, + AddressType: discovery.AddressTypeIPv6, + ServicePort: Port{ + Port: 80, + }, + Attributes: map[string]string{ + "custom-key": "different-val", + }, + } + + tests := []struct { + name string + x Endpoint + y Endpoint + want bool + }{ + { + name: "identical ipv4", + x: firstEndpointIpv4, + y: secondEndpointIpv4, + want: true, + }, + { + name: "identical ipv6", + x: firstEndpointIpv6, + y: secondEndpointIpv6, + want: true, + }, + { + name: "different ipv4", + x: firstEndpointIpv4, + y: thirdEndpointIpv4, + want: false, + }, + { + name: "different ipv6", + x: firstEndpointIpv6, + y: thirdEndpointIpv6, + want: false, + }, + { + name: "different ipv4 and ipv6", + x: firstEndpointIpv4, + y: firstEndpointIpv6, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.x.Equals(&tt.y); got != tt.want { + t.Errorf("Equals() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetAddressTypeFromString(t *testing.T) { + tests := []struct { + name string + addressTypeStr string + want discovery.AddressType + wantErr bool + }{ + { + name: "happy case ipv4", + addressTypeStr: "IPv4", + want: discovery.AddressTypeIPv4, + wantErr: false, + }, + { + name: "happy case ipv6", + addressTypeStr: "IPv6", + want: discovery.AddressTypeIPv6, + wantErr: false, + }, + { + name: "empty string", + addressTypeStr: "", + want: "", + wantErr: true, + }, + { + name: "case wrong", + addressTypeStr: "IPV6", + want: "", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := GetAddressTypeFromString(tt.addressTypeStr) + if (err != nil) != tt.wantErr { + t.Errorf("GetAddressTypeFromString() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("GetAddressTypeFromString() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetEndpoints(t *testing.T) { + 
firstEndpoint := Endpoint{ + Id: instId + "-1", + IP: ipv4, + AddressType: discovery.AddressTypeIPv4, + ServicePort: Port{ + Port: 80, + }, + ClusterId: clusterId, + } + secondEndpoint := Endpoint{ + Id: instId + "2", + IP: ipv4, + AddressType: discovery.AddressTypeIPv4, + ServicePort: Port{ + Port: 80, + Name: "", + }, + ClusterId: clusterId2, + } + thirdEndpoint := Endpoint{ + Id: instId + "3", + IP: ipv4, + AddressType: discovery.AddressTypeIPv4, + ServicePort: Port{ + Port: 80, + Name: "", + }, + ClusterId: clusterId2, + } + + svc := Service{ + Namespace: namespaceName, + Name: serviceName, + Endpoints: []*Endpoint{ + &firstEndpoint, &secondEndpoint, &thirdEndpoint, + }, + } + + tests := []struct { + name string + x string + wantEndpts []*Endpoint + }{ + { + name: "return-first-endpoint", + x: clusterId, + wantEndpts: []*Endpoint{&firstEndpoint}, + }, + { + name: "return-two-endpoints", + x: clusterId2, + wantEndpts: []*Endpoint{&secondEndpoint, &thirdEndpoint}, + }, + { + name: "return-nil", + x: clusterId3, + wantEndpts: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if gotEndpts := svc.GetEndpoints(tt.x); !cmp.Equal(gotEndpts, tt.wantEndpts) { + t.Errorf("Equals() = %v, Want = %v", gotEndpts, tt.wantEndpts) + } + }) + } +} diff --git a/pkg/version/version.go b/pkg/version/version.go index b446912d..96a31e99 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -22,3 +22,18 @@ func GetVersion() string { return "" } + +func GetPackageVersion() string { + return PackageName + " " + GetVersion() +} + +func GetUserAgentKey() string { + return PackageName +} + +func GetUserAgentValue() string { + if GitVersion != "" { + return strings.TrimPrefix(GitVersion, "v") + } + return "" +} diff --git a/samples/client-hello.yaml b/samples/client-hello.yaml new file mode 100644 index 00000000..ef168a6d --- /dev/null +++ b/samples/client-hello.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + name: client-hello + 
namespace: demo +spec: + containers: + - command: + - sleep + - "1d" + image: alpine + name: client-hello \ No newline at end of file diff --git a/samples/coredns-clusterrole.yaml b/samples/coredns-clusterrole.yaml new file mode 100644 index 00000000..242212fa --- /dev/null +++ b/samples/coredns-clusterrole.yaml @@ -0,0 +1,58 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + eks.amazonaws.com/component: coredns + k8s-app: kube-dns + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - multicluster.x-k8s.io + resources: + - serviceimports + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - multicluster.x-k8s.io + resources: + - serviceexports + verbs: + - create + - get + - list + - patch + - update + - watch \ No newline at end of file diff --git a/samples/coredns-configmap.yaml b/samples/coredns-configmap.yaml new file mode 100644 index 00000000..4eb3ea24 --- /dev/null +++ b/samples/coredns-configmap.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +data: + Corefile: | + .:53 { + errors + health + multicluster clusterset.local + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + forward . 
/etc/resolv.conf + cache 30 + loop + reload + loadbalance + } +kind: ConfigMap +metadata: + annotations: + labels: + eks.amazonaws.com/component: coredns + k8s-app: kube-dns + name: coredns + namespace: kube-system \ No newline at end of file diff --git a/samples/coredns-deployment.yaml b/samples/coredns-deployment.yaml new file mode 100644 index 00000000..d5e3949b --- /dev/null +++ b/samples/coredns-deployment.yaml @@ -0,0 +1,137 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + eks.amazonaws.com/component: coredns + k8s-app: kube-dns + kubernetes.io/name: CoreDNS + name: coredns + namespace: kube-system +spec: + progressDeadlineSeconds: 600 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + eks.amazonaws.com/component: coredns + k8s-app: kube-dns + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + eks.amazonaws.com/compute-type: ec2 + creationTimestamp: null + labels: + eks.amazonaws.com/component: coredns + k8s-app: kube-dns + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: k8s-app + operator: In + values: + - kube-dns + topologyKey: kubernetes.io/hostname + weight: 100 + containers: + - args: + - -conf + - /etc/coredns/Corefile + image: ghcr.io/aws/aws-cloud-map-mcs-controller-for-k8s/coredns-multicluster/coredns:v1.8.6 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + name: coredns + ports: + - containerPort: 53 + name: dns + protocol: 
UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /health + port: 8080 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/coredns + name: config-volume + readOnly: true + - mountPath: /tmp + name: tmp + dnsPolicy: Default + priorityClassName: system-cluster-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: coredns + serviceAccountName: coredns + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - key: CriticalAddonsOnly + operator: Exists + volumes: + - emptyDir: {} + name: tmp + - configMap: + defaultMode: 420 + items: + - key: Corefile + path: Corefile + name: coredns + name: config-volume diff --git a/samples/eksctl-cluster.yaml b/samples/eksctl-cluster.yaml new file mode 100644 index 00000000..31135473 --- /dev/null +++ b/samples/eksctl-cluster.yaml @@ -0,0 +1,18 @@ +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig +metadata: + name: $CLUSTER_NAME + region: $AWS_REGION + version: "1.21" +vpc: + cidr: $VPC_CIDR + autoAllocateIPv6: false + clusterEndpoints: + publicAccess: true + privateAccess: true +managedNodeGroups: +- name: $NODEGROUP_NAME + instanceType: t2.small + minSize: 1 + maxSize: 10 + desiredCapacity: 1 \ No newline at end of file diff --git a/samples/example-clusterproperty.yaml b/samples/example-clusterproperty.yaml new file mode 100644 index 00000000..4d058187 --- /dev/null +++ b/samples/example-clusterproperty.yaml @@ -0,0 +1,17 @@ +# An example 
object of `cluster.clusterset.k8s.io ClusterProperty` + +apiVersion: about.k8s.io/v1alpha1 +kind: ClusterProperty +metadata: + name: cluster.clusterset.k8s.io +spec: + value: sample-mcs-clusterid +--- +# An example object of `clusterset.k8s.io ClusterProperty`: + +apiVersion: about.k8s.io/v1alpha1 +kind: ClusterProperty +metadata: + name: clusterset.k8s.io +spec: + value: sample-mcs-clustersetid diff --git a/samples/demo-deployment.yaml b/samples/example-deployment.yaml similarity index 94% rename from samples/demo-deployment.yaml rename to samples/example-deployment.yaml index 3063ef78..79dbcd7c 100644 --- a/samples/demo-deployment.yaml +++ b/samples/example-deployment.yaml @@ -1,7 +1,7 @@ apiVersion: apps/v1 kind: Deployment metadata: - namespace: demo + namespace: example name: nginx-deployment labels: app: nginx diff --git a/samples/example-headless.yaml b/samples/example-headless.yaml new file mode 100755 index 00000000..b5dce655 --- /dev/null +++ b/samples/example-headless.yaml @@ -0,0 +1,11 @@ +kind: Service +apiVersion: v1 +metadata: + namespace: example + name: my-service +spec: + clusterIP: None + selector: + app: nginx + ports: + - port: 80 diff --git a/samples/example-service.yaml b/samples/example-service.yaml new file mode 100644 index 00000000..16744d46 --- /dev/null +++ b/samples/example-service.yaml @@ -0,0 +1,10 @@ +kind: Service +apiVersion: v1 +metadata: + namespace: example + name: my-service +spec: + selector: + app: nginx + ports: + - port: 80 diff --git a/samples/example-serviceexport.yaml b/samples/example-serviceexport.yaml new file mode 100644 index 00000000..8c094161 --- /dev/null +++ b/samples/example-serviceexport.yaml @@ -0,0 +1,5 @@ +kind: ServiceExport +apiVersion: multicluster.x-k8s.io/v1alpha1 +metadata: + namespace: example + name: my-service diff --git a/samples/mcsapi-clusterproperty.yaml b/samples/mcsapi-clusterproperty.yaml new file mode 100644 index 00000000..ac39ce8e --- /dev/null +++ b/samples/mcsapi-clusterproperty.yaml 
@@ -0,0 +1,13 @@ +apiVersion: about.k8s.io/v1alpha1 +kind: ClusterProperty +metadata: + name: cluster.clusterset.k8s.io +spec: + value: ${CLUSTER_ID} +--- +apiVersion: about.k8s.io/v1alpha1 +kind: ClusterProperty +metadata: + name: clusterset.k8s.io +spec: + value: ${CLUSTERSET_ID} \ No newline at end of file diff --git a/testconfig/e2e-deployment.yaml b/samples/nginx-deployment.yaml similarity index 55% rename from testconfig/e2e-deployment.yaml rename to samples/nginx-deployment.yaml index db54afa4..7218a551 100644 --- a/testconfig/e2e-deployment.yaml +++ b/samples/nginx-deployment.yaml @@ -1,12 +1,12 @@ -# e2e-deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: nginx-deployment + namespace: demo + name: nginx-demo labels: app: nginx spec: - replicas: 5 + replicas: 3 selector: matchLabels: app: nginx @@ -16,7 +16,7 @@ spec: app: nginx spec: containers: - - name: nginx - image: nginx:1.14.2 - ports: - - containerPort: 80 + - name: nginx + image: nginxdemos/hello:plain-text + ports: + - containerPort: 80 \ No newline at end of file diff --git a/samples/demo-service.yaml b/samples/nginx-service.yaml similarity index 62% rename from samples/demo-service.yaml rename to samples/nginx-service.yaml index 07bfeb7b..0d2c1508 100644 --- a/samples/demo-service.yaml +++ b/samples/nginx-service.yaml @@ -2,10 +2,9 @@ kind: Service apiVersion: v1 metadata: namespace: demo - name: demo-service + name: nginx-hello spec: selector: app: nginx ports: - - port: 8080 - targetPort: 80 + - port: 80 \ No newline at end of file diff --git a/samples/demo-export.yaml b/samples/nginx-serviceexport.yaml similarity index 81% rename from samples/demo-export.yaml rename to samples/nginx-serviceexport.yaml index 1930db08..da54514f 100644 --- a/samples/demo-export.yaml +++ b/samples/nginx-serviceexport.yaml @@ -2,4 +2,4 @@ kind: ServiceExport apiVersion: multicluster.x-k8s.io/v1alpha1 metadata: namespace: demo - name: demo-service + name: nginx-hello \ No newline at end of file 
diff --git a/test/test-constants.go b/test/test-constants.go new file mode 100644 index 00000000..15783070 --- /dev/null +++ b/test/test-constants.go @@ -0,0 +1,224 @@ +package test + +import ( + "fmt" + + discovery "k8s.io/api/discovery/v1" + + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/version" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + aboutv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/about/v1alpha1" + "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" +) + +const ( + HttpNsName = "http-ns-name" + DnsNsName = "dns-ns-name" + HttpNsId = "http-ns-id" + DnsNsId = "dns-ns-id" + SvcName = "svc-name" + SvcId = "svc-id" + ClusterId1 = "test-mcs-clusterid-1" + ClusterSet = "test-mcs-clustersetid" + ClusterId2 = "test-mcs-clusterid-2" + EndptId1 = "tcp-192_168_0_1-1" + EndptId2 = "tcp-192_168_0_2-2" + EndptIdIpv6 = "tcp-2001_0db8_0001_0000_0000_0ab9_C0A8:0102-1" + EndptIp1 = "192.168.0.1" + EndptIp2 = "192.168.0.2" + EndptIpv6 = "2001:0db8:0001:0000:0000:0ab9:C0A8:0102" + EndptReadyTrue = "true" + EndptReadyFalse = "false" + Port1 = 1 + PortStr1 = "1" + PortName1 = "http" + Protocol1 = "TCP" + ServicePort1 = 11 + ServicePortStr1 = "11" + Port2 = 2 + PortStr2 = "2" + PortName2 = "https" + Protocol2 = "UDP" + ServicePort2 = 22 + ServicePortStr2 = "22" + ClusterIp1 = "10.10.10.1" + ClusterIp2 = "10.10.10.2" + OpId1 = "operation-id-1" + OpId2 = "operation-id-2" + OpStart = 1 + SvcType = "ClusterSetIP" + SvcExportCreationTimestamp int64 = 1640995200000 + Hostname = "host" + Nodename = "node" + PackageVersion = "aws-cloud-map-mcs-controller-for-k8s 0.0.1 (abcd)" +) + +func SetTestVersion() { + version.GitVersion = "v0.0.1" + version.GitCommit = "abcd" +} + +func GetTestHttpNamespace() *model.Namespace { + return &model.Namespace{ + Id: HttpNsId, + Name: HttpNsName, + Type: model.HttpNamespaceType, + } +} + +func GetTestDnsNamespace() *model.Namespace { + return &model.Namespace{ + Id: DnsNsId, + Name: DnsNsName, + 
Type: model.DnsPrivateNamespaceType, + } +} + +func GetTestService() *model.Service { + return &model.Service{ + Namespace: HttpNsName, + Name: SvcName, + Endpoints: []*model.Endpoint{GetTestEndpoint1(), GetTestEndpoint2()}, + } +} + +func GetTestServiceWithEndpoint(endpoints []*model.Endpoint) *model.Service { + return &model.Service{ + Namespace: HttpNsName, + Name: SvcName, + Endpoints: endpoints, + } +} + +func GetTestMulticlusterService() *model.Service { + // Service has two endpoints belonging to two different clusters in the same clusterset + return &model.Service{ + Namespace: HttpNsName, + Name: SvcName, + Endpoints: GetMulticlusterTestEndpoints(), + } +} + +func GetTestEndpoint1() *model.Endpoint { + return &model.Endpoint{ + Id: EndptId1, + IP: EndptIp1, + AddressType: discovery.AddressTypeIPv4, + EndpointPort: model.Port{ + Name: PortName1, + Port: Port1, + Protocol: Protocol1, + }, + ServicePort: model.Port{ + Name: PortName1, + Port: ServicePort1, + TargetPort: PortStr1, + Protocol: Protocol1, + }, + Ready: true, + Hostname: Hostname, + Nodename: Nodename, + ClusterId: ClusterId1, + ClusterSetId: ClusterSet, + ServiceType: model.ClusterSetIPType, + ServiceExportCreationTimestamp: SvcExportCreationTimestamp, + Attributes: map[string]string{model.K8sVersionAttr: PackageVersion}, + } +} + +func GetTestEndpoint2() *model.Endpoint { + return &model.Endpoint{ + Id: EndptId2, + IP: EndptIp2, + AddressType: discovery.AddressTypeIPv4, + EndpointPort: model.Port{ + Name: PortName2, + Port: Port2, + Protocol: Protocol2, + }, + ServicePort: model.Port{ + Name: PortName2, + Port: ServicePort2, + TargetPort: PortStr2, + Protocol: Protocol2, + }, + Ready: true, + Hostname: Hostname, + Nodename: Nodename, + ClusterId: ClusterId1, + ClusterSetId: ClusterSet, + ServiceType: model.ClusterSetIPType, + ServiceExportCreationTimestamp: SvcExportCreationTimestamp, + Attributes: map[string]string{model.K8sVersionAttr: PackageVersion}, + } +} + +func GetTestEndpointIpv6() 
*model.Endpoint { + return &model.Endpoint{ + Id: EndptId2, + IP: EndptIpv6, + AddressType: discovery.AddressTypeIPv6, + EndpointPort: model.Port{ + Name: PortName2, + Port: Port2, + Protocol: Protocol2, + }, + ServicePort: model.Port{ + Name: PortName2, + Port: ServicePort2, + TargetPort: PortStr2, + Protocol: Protocol2, + }, + Ready: true, + Hostname: Hostname, + Nodename: Nodename, + ClusterId: ClusterId1, + ClusterSetId: ClusterSet, + ServiceType: model.ClusterSetIPType, + ServiceExportCreationTimestamp: SvcExportCreationTimestamp, + Attributes: map[string]string{model.K8sVersionAttr: PackageVersion}, + } +} + +func GetMulticlusterTestEndpoints() []*model.Endpoint { + endpoint1 := GetTestEndpoint1() + endpoint2 := GetTestEndpoint2() + // Set Different ClusterIds + endpoint2.ClusterId = ClusterId2 + return []*model.Endpoint{endpoint1, endpoint2} +} + +func GetTestEndpoints(count int) (endpts []*model.Endpoint) { + // use +3 offset go avoid collision with test endpoint 1 and 2 + for i := 3; i < count+3; i++ { + e := GetTestEndpoint1() + e.ClusterId = ClusterId1 + e.Id = fmt.Sprintf("tcp-192_168_0_%d-1", i) + e.IP = fmt.Sprintf("192.168.0.%d", i) + endpts = append(endpts, e) + } + return endpts +} + +func ClusterIdForTest() *aboutv1alpha1.ClusterProperty { + return &aboutv1alpha1.ClusterProperty{ + ObjectMeta: metav1.ObjectMeta{ + Name: model.ClusterIdPropertyName, + }, + Spec: aboutv1alpha1.ClusterPropertySpec{ + Value: ClusterId1, + }, + } +} + +func ClusterSetIdForTest() *aboutv1alpha1.ClusterProperty { + return &aboutv1alpha1.ClusterProperty{ + ObjectMeta: metav1.ObjectMeta{ + Name: model.ClusterSetIdPropertyName, + }, + Spec: aboutv1alpha1.ClusterPropertySpec{ + Value: ClusterSet, + }, + } +} diff --git a/testconfig/e2e-export.yaml b/testconfig/e2e-export.yaml deleted file mode 100644 index 07fb59b4..00000000 --- a/testconfig/e2e-export.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# e2e-export.yaml - -kind: ServiceExport -apiVersion: multicluster.x-k8s.io/v1alpha1 
-metadata: - namespace: aws-cloudmap-mcs-e2e - name: e2e-service-one diff --git a/testconfig/e2e-service-one.yaml b/testconfig/e2e-service-one.yaml deleted file mode 100644 index 4c18fb88..00000000 --- a/testconfig/e2e-service-one.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# e2e-service-one.yaml -kind: Service -apiVersion: v1 -metadata: - namespace: aws-cloudmap-mcs-e2e - name: e2e-service-one -spec: - selector: - app: nginx - ports: - - port: 8080 - targetPort: 80