diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 7f161dda6c..2c06c1bec8 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -26,4 +26,20 @@ updates: commit-message: prefix: ":seedling:" labels: - - "ok-to-test" \ No newline at end of file + - "ok-to-test" + +- package-ecosystem: "gomod" + directory: "/hack/tools" + schedule: + interval: "weekly" + ignore: + # Ignore k8s modules as they are upgraded manually + # together with controller-runtime and CAPI dependencies. + - dependency-name: "k8s.io/*" + update-types: ["version-update:semver-major", "version-update:semver-minor"] + - dependency-name: "sigs.k8s.io/*" + update-types: ["version-update:semver-major", "version-update:semver-minor"] + commit-message: + prefix: ":seedling:" + labels: + - "ok-to-test" diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml index dfdf9a69d8..3bb90ac02d 100644 --- a/.github/workflows/golangci-lint.yaml +++ b/.github/workflows/golangci-lint.yaml @@ -13,10 +13,10 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: 1.19 - name: golangci-lint uses: golangci/golangci-lint-action@v3.4.0 with: - version: v1.50.0 + version: v1.52.2 diff --git a/.golangci.yml b/.golangci.yml index 2407678648..29a141358b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -3,7 +3,6 @@ linters: enable: - asciicheck - bodyclose - - deadcode - depguard - dogsled - errcheck @@ -19,7 +18,6 @@ linters: - gosec - gosimple - govet - - ifshort - importas - ineffassign - misspell @@ -32,14 +30,12 @@ linters: - revive - rowserrcheck - staticcheck - - structcheck - stylecheck - thelper - typecheck - unconvert - unparam - unused - - varcheck - whitespace linters-settings: diff --git a/Makefile b/Makefile index a5663cf92b..964a52ffc8 100644 --- a/Makefile +++ b/Makefile @@ -315,6 +315,7 @@ generate-flavors: $(FLAVOR_DIR) go run ./packaging/flavorgen -f cluster-class > $(FLAVOR_DIR)/clusterclass-template.yaml go run ./packaging/flavorgen -f cluster-topology > $(FLAVOR_DIR)/cluster-template-topology.yaml go run ./packaging/flavorgen -f ignition > $(FLAVOR_DIR)/cluster-template-ignition.yaml + go run ./packaging/flavorgen -f node-ipam > $(FLAVOR_DIR)/cluster-template-node-ipam.yaml ## -------------------------------------- ## Release ## -------------------------------------- diff --git a/README.md b/README.md index c346c47a7c..b276fb52a9 100644 --- a/README.md +++ b/README.md @@ -54,9 +54,9 @@ Note: These OVAs are not updated for security fixes and it is recommended to alw | Kubernetes | Ubuntu 18.04 | Ubuntu 20.04 | Photon 3 | Flatcar Stable | | :--------: | :----------: | :----------: | :------: | :------------: | -| v1.23.16 | [ova](https://storage.googleapis.com/capv-templates/v1.23.16/ubuntu-1804-kube-v1.23.16.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.23.16/ubuntu-1804-kube-v1.23.16.ova.sha256) | [ova](https://storage.googleapis.com/capv-templates/v1.23.16/ubuntu-2004-kube-v1.23.16.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.23.16/ubuntu-2004-kube-v1.23.16.ova.sha256)| [ova](https://storage.googleapis.com/capv-templates/v1.23.16/photon-3-kube-v1.23.16.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.23.16/photon-3-kube-v1.23.16.ova.sha256) | n/a | -| v1.24.10 | [ova](https://storage.googleapis.com/capv-templates/v1.24.10/ubuntu-1804-kube-v1.24.10.ova), 
[sha256](https://storage.googleapis.com/capv-templates/v1.24.10/ubuntu-1804-kube-v1.24.10.ova.sha256) | [ova](https://storage.googleapis.com/capv-templates/v1.24.10/ubuntu-2004-kube-v1.24.10.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.24.10/ubuntu-2004-kube-v1.24.10.ova.sha256)| [ova](https://storage.googleapis.com/capv-templates/v1.24.10/photon-3-kube-v1.24.10.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.24.10/photon-3-kube-v1.24.10.ova.sha256) | n/a | -| v1.25.6 | [ova](https://storage.googleapis.com/capv-templates/v1.25.6/ubuntu-1804-kube-v1.25.6.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.25.6/ubuntu-1804-kube-v1.25.6.ova.sha256) | [ova](https://storage.googleapis.com/capv-templates/v1.25.6/ubuntu-2004-kube-v1.25.6.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.25.6/ubuntu-2004-kube-v1.25.6.ova.sha256)| [ova](https://storage.googleapis.com/capv-templates/v1.25.6/photon-3-kube-v1.25.6.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.25.6/photon-3-kube-v1.25.6.ova.sha256) | [ova](https://storage.googleapis.com/capv-templates/v1.25.6/flatcar-stable-3374.2.4-kube-v1.25.6.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.25.6/flatcar-stable-3374.2.4-kube-v1.25.6.ova.sha256) | +| v1.24.11 | [ova](https://storage.googleapis.com/capv-templates/v1.24.11/ubuntu-1804-kube-v1.24.11.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.24.11/ubuntu-1804-kube-v1.24.11.ova.sha256) | [ova](https://storage.googleapis.com/capv-templates/v1.24.11/ubuntu-2004-kube-v1.24.11.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.24.11/ubuntu-2004-kube-v1.24.11.ova.sha256)| [ova](https://storage.googleapis.com/capv-templates/v1.24.11/photon-3-kube-v1.24.11.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.24.11/photon-3-kube-v1.24.11.ova.sha256) | [ova](https://storage.googleapis.com/capv-templates/v1.24.11/flatcar-stable-3374.2.5-kube-v1.24.11.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.24.11/flatcar-stable-3374.2.5-kube-v1.24.11.ova.sha256) | +| v1.25.7 | [ova](https://storage.googleapis.com/capv-templates/v1.25.7/ubuntu-1804-kube-v1.25.7.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.25.7/ubuntu-1804-kube-v1.25.7.ova.sha256) | [ova](https://storage.googleapis.com/capv-templates/v1.25.7/ubuntu-2004-kube-v1.25.7.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.25.7/ubuntu-2004-kube-v1.25.7.ova.sha256)| [ova](https://storage.googleapis.com/capv-templates/v1.25.7/photon-3-kube-v1.25.7.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.25.7/photon-3-kube-v1.25.7.ova.sha256) | [ova](https://storage.googleapis.com/capv-templates/v1.25.7/flatcar-stable-3374.2.5-kube-v1.25.7.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.25.7/flatcar-stable-3374.2.5-kube-v1.25.7.ova.sha256) | +| v1.26.2 | [ova](https://storage.googleapis.com/capv-templates/v1.26.2/ubuntu-1804-kube-v1.26.2.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.26.2/ubuntu-1804-kube-v1.26.2.ova.sha256) | [ova](https://storage.googleapis.com/capv-templates/v1.26.2/ubuntu-2004-kube-v1.26.2.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.26.2/ubuntu-2004-kube-v1.26.2.ova.sha256)| [ova](https://storage.googleapis.com/capv-templates/v1.26.2/photon-3-kube-v1.26.2.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.26.2/photon-3-kube-v1.26.2.ova.sha256) | 
[ova](https://storage.googleapis.com/capv-templates/v1.26.2/flatcar-stable-3374.2.5-kube-v1.26.2.ova), [sha256](https://storage.googleapis.com/capv-templates/v1.26.2/flatcar-stable-3374.2.5-kube-v1.26.2.ova.sha256) | A full list of the published machine images for CAPV may be obtained with the following command: diff --git a/apis/v1beta1/condition_consts.go b/apis/v1beta1/condition_consts.go index 16a04775d2..a49d59df6c 100644 --- a/apis/v1beta1/condition_consts.go +++ b/apis/v1beta1/condition_consts.go @@ -203,6 +203,10 @@ const ( // from an IPAM provider. IPAddressClaimedCondition clusterv1.ConditionType = "IPAddressClaimed" + // IPAddressClaimsBeingCreatedReason (Severity=Info) documents that claims for the + // IP addresses required by the VSphereVM are being created. + IPAddressClaimsBeingCreatedReason = "IPAddressClaimsBeingCreated" + // WaitingForIPAddressReason (Severity=Info) documents that the VSphereVM is // currently waiting for an IP address to be provisioned. WaitingForIPAddressReason = "WaitingForIPAddress" @@ -210,4 +214,8 @@ const ( // IPAddressInvalidReason (Severity=Error) documents that the IP address // provided by the IPAM provider is not valid. IPAddressInvalidReason = "IPAddressInvalid" + + // IPAddressClaimNotFoundReason (Severity=Error) documents that the IPAddressClaim + // cannot be found. + IPAddressClaimNotFoundReason = "IPAddressClaimNotFound" ) diff --git a/apis/v1beta1/vspheredeploymentzone_webhook_test.go b/apis/v1beta1/vspheredeploymentzone_webhook_test.go index f682947f08..ebcd2b65f5 100644 --- a/apis/v1beta1/vspheredeploymentzone_webhook_test.go +++ b/apis/v1beta1/vspheredeploymentzone_webhook_test.go @@ -38,7 +38,7 @@ func TestVSphereDeploymentZone_Default(t *testing.T) { }, { name: "when control plane is set", - boolPtr: pointer.BoolPtr(false), + boolPtr: pointer.Bool(false), expectedVal: false, }, } diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_haproxyloadbalancers.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_haproxyloadbalancers.yaml index 5cdacf69fd..8efa139566 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_haproxyloadbalancers.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_haproxyloadbalancers.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: haproxyloadbalancers.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclusteridentities.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclusteridentities.yaml index 680906fbd1..adf1449121 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclusteridentities.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclusteridentities.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: vsphereclusteridentities.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclusters.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclusters.yaml index e5d05e2294..4881a9698d 
100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclusters.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclusters.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: vsphereclusters.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml index 154a02c149..35c8fc7796 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: vsphereclustertemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheredeploymentzones.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheredeploymentzones.yaml index b955d5e95e..ded4c1f182 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheredeploymentzones.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheredeploymentzones.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: vspheredeploymentzones.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherefailuredomains.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherefailuredomains.yaml index 9029cbb5ca..4e894a5c2b 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherefailuredomains.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherefailuredomains.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: vspherefailuredomains.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachines.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachines.yaml index bbfff217c0..4daa06b30e 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachines.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachines.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: vspheremachines.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -1263,8 +1262,8 @@ spec: description: The machine address. 
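The vspheremachines hunk continuing just below widens the status address-type description from three values to five. That matches the five `MachineAddressType` constants exported by CAPI v1beta1; a minimal sketch for reference (the constant names come from CAPI itself, not from this diff):

```go
package main

import (
	"fmt"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

func main() {
	// CAPI v1beta1 defines five address types: MachineHostName,
	// MachineExternalIP, MachineInternalIP, MachineExternalDNS and
	// MachineInternalDNS — hence the regenerated CRD description.
	addr := clusterv1.MachineAddress{
		Type:    clusterv1.MachineInternalIP,
		Address: "10.0.0.5",
	}
	fmt.Printf("%s=%s\n", addr.Type, addr.Address)
}
```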
type: string type: - description: Machine address type, one of Hostname, ExternalIP - or InternalIP. + description: Machine address type, one of Hostname, ExternalIP, + InternalIP, ExternalDNS or InternalDNS. type: string required: - address diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml index 697563c52e..116a34528c 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: vspheremachinetemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherevms.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherevms.yaml index 6324b355e8..55c451b5a6 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherevms.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherevms.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: vspherevms.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/deployments/integration-tests/crds/vmoperator.vmware.com_clustervirtualmachineimages.yaml b/config/deployments/integration-tests/crds/vmoperator.vmware.com_clustervirtualmachineimages.yaml index 78c57719e6..ed0827e9be 100644 --- a/config/deployments/integration-tests/crds/vmoperator.vmware.com_clustervirtualmachineimages.yaml +++ b/config/deployments/integration-tests/crds/vmoperator.vmware.com_clustervirtualmachineimages.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: clustervirtualmachineimages.vmoperator.vmware.com spec: group: vmoperator.vmware.com diff --git a/config/deployments/integration-tests/crds/vmoperator.vmware.com_contentlibraryproviders.yaml b/config/deployments/integration-tests/crds/vmoperator.vmware.com_contentlibraryproviders.yaml index 9687c4abaa..71e31064d1 100644 --- a/config/deployments/integration-tests/crds/vmoperator.vmware.com_contentlibraryproviders.yaml +++ b/config/deployments/integration-tests/crds/vmoperator.vmware.com_contentlibraryproviders.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: contentlibraryproviders.vmoperator.vmware.com spec: group: vmoperator.vmware.com diff --git a/config/deployments/integration-tests/crds/vmoperator.vmware.com_contentsourcebindings.yaml b/config/deployments/integration-tests/crds/vmoperator.vmware.com_contentsourcebindings.yaml index 175d93a210..db24352ae1 100644 --- a/config/deployments/integration-tests/crds/vmoperator.vmware.com_contentsourcebindings.yaml +++ 
b/config/deployments/integration-tests/crds/vmoperator.vmware.com_contentsourcebindings.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: contentsourcebindings.vmoperator.vmware.com spec: group: vmoperator.vmware.com diff --git a/config/deployments/integration-tests/crds/vmoperator.vmware.com_contentsources.yaml b/config/deployments/integration-tests/crds/vmoperator.vmware.com_contentsources.yaml index c74aa352b6..c51d4da939 100644 --- a/config/deployments/integration-tests/crds/vmoperator.vmware.com_contentsources.yaml +++ b/config/deployments/integration-tests/crds/vmoperator.vmware.com_contentsources.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: contentsources.vmoperator.vmware.com spec: group: vmoperator.vmware.com diff --git a/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachineclassbindings.yaml b/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachineclassbindings.yaml index 1e9a1246b3..a17caf6958 100644 --- a/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachineclassbindings.yaml +++ b/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachineclassbindings.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: virtualmachineclassbindings.vmoperator.vmware.com spec: group: vmoperator.vmware.com diff --git a/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachineclasses.yaml b/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachineclasses.yaml index 63127f363f..807848dae5 100644 --- a/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachineclasses.yaml +++ b/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachineclasses.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: virtualmachineclasses.vmoperator.vmware.com spec: group: vmoperator.vmware.com diff --git a/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachineimages.yaml b/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachineimages.yaml index e5ef494a0f..d0c8feee61 100644 --- a/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachineimages.yaml +++ b/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachineimages.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: virtualmachineimages.vmoperator.vmware.com spec: group: vmoperator.vmware.com diff --git a/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachinepublishrequests.yaml 
b/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachinepublishrequests.yaml index cb27a9d6ab..052dc913a6 100644 --- a/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachinepublishrequests.yaml +++ b/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachinepublishrequests.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: virtualmachinepublishrequests.vmoperator.vmware.com spec: group: vmoperator.vmware.com diff --git a/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachines.yaml b/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachines.yaml index 25124f2c58..30b54d777f 100644 --- a/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachines.yaml +++ b/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachines.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: virtualmachines.vmoperator.vmware.com spec: group: vmoperator.vmware.com diff --git a/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachineservices.yaml b/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachineservices.yaml index 27f47f4cbc..b9d855755f 100644 --- a/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachineservices.yaml +++ b/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachineservices.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: virtualmachineservices.vmoperator.vmware.com spec: group: vmoperator.vmware.com diff --git a/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachinesetresourcepolicies.yaml b/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachinesetresourcepolicies.yaml index fc98e50ba3..bec1bebbfc 100644 --- a/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachinesetresourcepolicies.yaml +++ b/config/deployments/integration-tests/crds/vmoperator.vmware.com_virtualmachinesetresourcepolicies.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: virtualmachinesetresourcepolicies.vmoperator.vmware.com spec: group: vmoperator.vmware.com diff --git a/config/deployments/integration-tests/crds/vmoperator.vmware.com_webconsolerequests.yaml b/config/deployments/integration-tests/crds/vmoperator.vmware.com_webconsolerequests.yaml index 5588ea7fa6..2cb8dbc604 100644 --- a/config/deployments/integration-tests/crds/vmoperator.vmware.com_webconsolerequests.yaml +++ b/config/deployments/integration-tests/crds/vmoperator.vmware.com_webconsolerequests.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + 
controller-gen.kubebuilder.io/version: v0.11.4 name: webconsolerequests.vmoperator.vmware.com spec: group: vmoperator.vmware.com diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 61ccd72267..99a4a0582e 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -18,9 +18,8 @@ spec: containers: - args: - --leader-elect - - --logtostderr - --v=4 - - "--feature-gates=NodeAntiAffinity=${EXP_NODE_ANTI_AFFINITY:=false},NodeLabeling=${EXP_NODE_LABELING:=false}" + - "--feature-gates=NodeAntiAffinity=${EXP_NODE_ANTI_AFFINITY:=false}" image: gcr.io/cluster-api-provider-vsphere/release/manager:latest imagePullPolicy: IfNotPresent name: manager @@ -36,6 +35,17 @@ spec: httpGet: path: /healthz port: healthz + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsUser: 65532 + runAsGroup: 65532 + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault terminationGracePeriodSeconds: 10 tolerations: - effect: NoSchedule diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 907492f2c6..51dec08bf0 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -2,7 +2,6 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null name: manager-role rules: - apiGroups: @@ -106,15 +105,6 @@ rules: - get - list - watch -- apiGroups: - - cluster.x-k8s.io - resources: - - clusters - - machines - verbs: - - get - - list - - watch - apiGroups: - cluster.x-k8s.io resources: @@ -173,9 +163,6 @@ rules: - delete - get - list - - patch - - update - - watch - apiGroups: - "" resources: @@ -316,7 +303,7 @@ rules: - create - get - list - - update + - patch - watch - apiGroups: - ipam.cluster.x-k8s.io diff --git a/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_providerserviceaccounts.yaml b/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_providerserviceaccounts.yaml index 0f3aa994fb..3f7112437b 100644 --- a/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_providerserviceaccounts.yaml +++ b/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_providerserviceaccounts.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: providerserviceaccounts.vmware.infrastructure.cluster.x-k8s.io spec: group: vmware.infrastructure.cluster.x-k8s.io diff --git a/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vsphereclusters.yaml b/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vsphereclusters.yaml index ddaf97b2da..6bbe41f6a9 100644 --- a/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vsphereclusters.yaml +++ b/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vsphereclusters.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: vsphereclusters.vmware.infrastructure.cluster.x-k8s.io spec: group: vmware.infrastructure.cluster.x-k8s.io diff --git a/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml b/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml index ac521192c2..5c475d731f 100644 --- 
a/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml +++ b/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: vsphereclustertemplates.vmware.infrastructure.cluster.x-k8s.io spec: group: vmware.infrastructure.cluster.x-k8s.io diff --git a/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vspheremachines.yaml b/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vspheremachines.yaml index fd90d36738..9a8aeaf82b 100644 --- a/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vspheremachines.yaml +++ b/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vspheremachines.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: vspheremachines.vmware.infrastructure.cluster.x-k8s.io spec: group: vmware.infrastructure.cluster.x-k8s.io diff --git a/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml b/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml index c3be90998d..e20ce5edaf 100644 --- a/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml +++ b/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.4 name: vspheremachinetemplates.vmware.infrastructure.cluster.x-k8s.io spec: group: vmware.infrastructure.cluster.x-k8s.io diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 51b80ae52a..7c867e393e 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -2,7 +2,6 @@ apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: - creationTimestamp: null name: mutating-webhook-configuration webhooks: - admissionReviewVersions: @@ -93,7 +92,6 @@ webhooks: apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: - creationTimestamp: null name: validating-webhook-configuration webhooks: - admissionReviewVersions: diff --git a/controllers/clustermodule_reconciler.go b/controllers/clustermodule_reconciler.go index 54c8fa85eb..bfcf0479c9 100644 --- a/controllers/clustermodule_reconciler.go +++ b/controllers/clustermodule_reconciler.go @@ -198,7 +198,7 @@ func (r Reconciler) PopulateWatchesOnController(controller controller.Controller return err } - if err := controller.Watch( + return controller.Watch( &source.Kind{Type: &clusterv1.MachineDeployment{}}, handler.EnqueueRequestsFromMapFunc(r.toAffinityInput), predicate.Funcs{ @@ -209,21 +209,18 @@ func (r Reconciler) PopulateWatchesOnController(controller controller.Controller return false }, }, - ); err != nil { - return err - } - return nil + ) } func (r Reconciler) fetchMachineOwnerObjects(ctx *context.ClusterContext) (map[string]clustermodule.Wrapper, error) { objects := map[string]clustermodule.Wrapper{} - name, ok := 
ctx.VSphereCluster.GetLabels()[clusterv1.ClusterLabelName] + name, ok := ctx.VSphereCluster.GetLabels()[clusterv1.ClusterNameLabel] if !ok { return nil, errors.Errorf("missing CAPI cluster label") } - labels := map[string]string{clusterv1.ClusterLabelName: name} + labels := map[string]string{clusterv1.ClusterNameLabel: name} kcpList := &controlplanev1.KubeadmControlPlaneList{} if err := r.Client.List( ctx, kcpList, diff --git a/controllers/clustermodule_reconciler_test.go b/controllers/clustermodule_reconciler_test.go index 1c2b6637c3..e5f3ad188c 100644 --- a/controllers/clustermodule_reconciler_test.go +++ b/controllers/clustermodule_reconciler_test.go @@ -374,7 +374,7 @@ func machineDeployment(name, namespace, cluster string) *clusterv1.MachineDeploy ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, - Labels: map[string]string{clusterv1.ClusterLabelName: cluster}, + Labels: map[string]string{clusterv1.ClusterNameLabel: cluster}, }, } } @@ -388,7 +388,7 @@ func controlPlane(name, namespace, cluster string) *controlplanev1.KubeadmContro ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, - Labels: map[string]string{clusterv1.ClusterLabelName: cluster}, + Labels: map[string]string{clusterv1.ClusterNameLabel: cluster}, }, } } diff --git a/controllers/node_controller.go b/controllers/node_controller.go deleted file mode 100644 index a280bc85df..0000000000 --- a/controllers/node_controller.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - goctx "context" - "fmt" - "strings" - - apiv1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/controllers/remote" - clusterutilv1 "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/patch" - "sigs.k8s.io/cluster-api/util/predicates" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/constants" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/record" -) - -// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;watch;update;patch -// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;clusters,verbs=get;watch;list - -const ( - nodeLabelControllerNameShort = "node-label-controller" -) - -// AddNodeLabelControllerToManager adds the VM controller to the provided manager. 
-func AddNodeLabelControllerToManager(ctx *context.ControllerManagerContext, mgr manager.Manager) error { - var ( - controllerNameLong = fmt.Sprintf("%s/%s/%s", ctx.Namespace, ctx.Name, nodeLabelControllerNameShort) - ) - - controllerContext := &context.ControllerContext{ - ControllerManagerContext: ctx, - Name: nodeLabelControllerNameShort, - Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)), - Logger: ctx.Logger.WithName(nodeLabelControllerNameShort), - } - r := nodeLabelReconciler{ - ControllerContext: controllerContext, - remoteClientGetter: remote.NewClusterClient, - } - if _, err := ctrl.NewControllerManagedBy(mgr). - For(&clusterv1.Machine{}). - WithOptions(controller.Options{MaxConcurrentReconciles: ctx.MaxConcurrentReconciles}). - WithEventFilter(predicates.ResourceNotPaused(ctrl.LoggerFrom(ctx))). - WithEventFilter(predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - return false - }, - DeleteFunc: func(deleteEvent event.DeleteEvent) bool { - return false - }, - }). - Build(r); err != nil { - return err - } - return nil -} - -type nodeLabelReconciler struct { - *context.ControllerContext - - remoteClientGetter remote.ClusterClientGetter -} - -type nodeContext struct { - Cluster *clusterv1.Cluster - Machine *clusterv1.Machine -} - -func (r nodeLabelReconciler) Reconcile(ctx goctx.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { - logger := r.Logger.WithName(req.Namespace).WithName(req.Name) - - machine := &clusterv1.Machine{} - key := client.ObjectKey{ - Namespace: req.Namespace, - Name: req.Name, - } - if err := r.Client.Get(ctx, key, machine); err != nil { - if apierrors.IsNotFound(err) { - logger.Info("Machine not found, won't reconcile", "machine", key) - return reconcile.Result{}, nil - } - return reconcile.Result{}, err - } - - cluster, err := clusterutilv1.GetClusterFromMetadata(r.ControllerContext, r.Client, machine.ObjectMeta) - if err == nil { - if annotations.IsPaused(cluster, machine) { - logger.V(4).Info("Machine linked to a cluster that is paused") - return reconcile.Result{}, nil - } - } - - if !machine.DeletionTimestamp.IsZero() { - return reconcile.Result{}, nil - } - - nodeCtx := &nodeContext{ - Cluster: cluster, - Machine: machine, - } - return r.reconcileNormal(nodeCtx) -} - -func (r nodeLabelReconciler) reconcileNormal(ctx *nodeContext) (reconcile.Result, error) { - logger := r.Logger.WithName(ctx.Machine.Namespace).WithName(ctx.Machine.Name) - logger = logger.WithValues("cluster", ctx.Cluster.Name, "machine", ctx.Machine.Name) - - // Check the current labels on the machine - labels := ctx.Machine.GetLabels() - nodePrefixLabels := map[string]string{} - for key, value := range labels { - if strings.HasPrefix(key, constants.NodeLabelPrefix) { - nodePrefixLabels[key] = value - } - } - - if len(nodePrefixLabels) == 0 { - return reconcile.Result{}, nil - } - - clusterClient, err := r.remoteClientGetter(r, nodeLabelControllerNameShort, r.Client, client.ObjectKeyFromObject(ctx.Cluster)) - if err != nil { - logger.Info("The control plane is not ready yet", "err", err) - return reconcile.Result{RequeueAfter: clusterNotReadyRequeueTime}, nil - } - - node := &apiv1.Node{} - if err := clusterClient.Get(r, client.ObjectKey{Name: ctx.Machine.GetName()}, node); err != nil { - logger.Error(err, "unable to get node object", "node", ctx.Machine.GetName()) - return reconcile.Result{}, err - } - - patchHelper, err := patch.NewHelper(node, clusterClient) - if err != nil { - return reconcile.Result{}, err - } - - nodeLabels := node.GetLabels() 
- for k, v := range nodePrefixLabels { - nodeLabels[k] = v - } - node.Labels = nodeLabels - if err := patchHelper.Patch(r, node); err != nil { - logger.Error(err, "unable to patch node object", "node", node.Name) - return reconcile.Result{}, err - } - - logger.V(4).Info("Marked node with prefixed labels", "node", node.Name, "number-of-labels", len(nodePrefixLabels)) - return reconcile.Result{}, nil -} diff --git a/controllers/serviceaccount_controller.go b/controllers/serviceaccount_controller.go index 167e579aa9..93eeca1fdc 100644 --- a/controllers/serviceaccount_controller.go +++ b/controllers/serviceaccount_controller.go @@ -103,7 +103,7 @@ type ServiceAccountReconciler struct { remoteClientGetter remote.ClusterClientGetter } -func (r ServiceAccountReconciler) Reconcile(ctx goctx.Context, req reconcile.Request) (_ reconcile.Result, reterr error) { +func (r ServiceAccountReconciler) Reconcile(_ goctx.Context, req reconcile.Request) (_ reconcile.Result, reterr error) { r.ControllerContext.Logger.V(4).Info("Starting Reconcile") // Get the vSphereCluster for this request. diff --git a/controllers/serviceaccount_controller_intg_test.go b/controllers/serviceaccount_controller_intg_test.go index 7ab9860361..c2ffde7a5f 100644 --- a/controllers/serviceaccount_controller_intg_test.go +++ b/controllers/serviceaccount_controller_intg_test.go @@ -127,7 +127,7 @@ var _ = Describe("ProviderServiceAccount controller integration tests", func() { Context("With non-existent Cluster object", func() { It("cannot reconcile the ProviderServiceAccount object", func() { By("Deleting the CAPI cluster object", func() { - clusterName, ok := intCtx.VSphereCluster.GetLabels()[clusterv1.ClusterLabelName] + clusterName, ok := intCtx.VSphereCluster.GetLabels()[clusterv1.ClusterNameLabel] Expect(ok).To(BeTrue()) cluster := &clusterv1.Cluster{} key := client.ObjectKey{Namespace: intCtx.Namespace, Name: clusterName} @@ -153,7 +153,7 @@ var _ = Describe("ProviderServiceAccount controller integration tests", func() { Context("With non-existent Cluster credentials secret", func() { It("cannot reconcile the ProviderServiceAccount object", func() { By("Deleting the CAPI kubeconfig secret object", func() { - clusterName, ok := intCtx.VSphereCluster.GetLabels()[clusterv1.ClusterLabelName] + clusterName, ok := intCtx.VSphereCluster.GetLabels()[clusterv1.ClusterNameLabel] Expect(ok).To(BeTrue()) secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ diff --git a/controllers/servicediscovery_controller.go b/controllers/servicediscovery_controller.go index 3bcf0634d4..fed6cf18e7 100644 --- a/controllers/servicediscovery_controller.go +++ b/controllers/servicediscovery_controller.go @@ -126,7 +126,7 @@ type serviceDiscoveryReconciler struct { remoteClientGetter remote.ClusterClientGetter } -func (r serviceDiscoveryReconciler) Reconcile(ctx goctx.Context, req reconcile.Request) (_ reconcile.Result, reterr error) { +func (r serviceDiscoveryReconciler) Reconcile(_ goctx.Context, req reconcile.Request) (_ reconcile.Result, reterr error) { logger := r.Logger.WithName(req.Namespace).WithName(req.Name) logger.V(4).Info("Starting Reconcile") diff --git a/controllers/vmware/test/controllers_suite_test.go b/controllers/vmware/test/controllers_suite_test.go index b43a1e1cb7..a871506337 100644 --- a/controllers/vmware/test/controllers_suite_test.go +++ b/controllers/vmware/test/controllers_suite_test.go @@ -32,7 +32,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/klog/v2" - clusterv1 
"sigs.k8s.io/cluster-api/api/v1alpha3" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" diff --git a/controllers/vmware/test/controllers_test.go b/controllers/vmware/test/controllers_test.go index 1d58916157..224fa86425 100644 --- a/controllers/vmware/test/controllers_test.go +++ b/controllers/vmware/test/controllers_test.go @@ -30,7 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" ctrlmgr "sigs.k8s.io/controller-runtime/pkg/manager" @@ -156,8 +156,8 @@ func deployCAPIMachine(namespace string, cluster *clusterv1.Cluster, k8sClient c Namespace: namespace, Finalizers: []string{"test"}, Labels: map[string]string{ - clusterv1.ClusterLabelName: cluster.Name, - clusterv1.MachineControlPlaneLabelName: "true", + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "", }, OwnerReferences: []metav1.OwnerReference{ { @@ -236,10 +236,7 @@ func getManager(cfg *rest.Config, networkProvider string) manager.Manager { return err } - if err := controllers.AddMachineControllerToManager(ctx, mgr, &vmwarev1.VSphereMachine{}); err != nil { - return err - } - return nil + return controllers.AddMachineControllerToManager(ctx, mgr, &vmwarev1.VSphereMachine{}) } mgr, err := manager.New(opts) diff --git a/controllers/vmware/vspherecluster_reconciler.go b/controllers/vmware/vspherecluster_reconciler.go index c7dbd5384c..1324a4e31a 100644 --- a/controllers/vmware/vspherecluster_reconciler.go +++ b/controllers/vmware/vspherecluster_reconciler.go @@ -63,7 +63,7 @@ type ClusterReconciler struct { // +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;update;create;delete // +kubebuilder:rbac:groups="",resources=persistentvolumeclaims/status,verbs=get;update;patch -func (r ClusterReconciler) Reconcile(ctx goctx.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { +func (r ClusterReconciler) Reconcile(_ goctx.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { logger := r.Logger.WithName(req.Namespace).WithName(req.Name) logger.V(3).Info("Starting Reconcile vsphereCluster") @@ -266,7 +266,7 @@ func (r *ClusterReconciler) reconcileAPIEndpoints(ctx *vmware.ClusterContext) er if err != nil { return errors.Wrapf(err, "failed to get Machines for Cluster %s/%s", - ctx.VSphereCluster.Namespace, ctx.VSphereCluster.Name) + ctx.Cluster.Namespace, ctx.Cluster.Name) } // Define a variable to assign the API endpoints of control plane diff --git a/controllers/vmware/vspherecluster_reconciler_test.go b/controllers/vmware/vspherecluster_reconciler_test.go index 875e6dc5a0..182fc973d6 100644 --- a/controllers/vmware/vspherecluster_reconciler_test.go +++ b/controllers/vmware/vspherecluster_reconciler_test.go @@ -43,7 +43,7 @@ var _ = Describe("Cluster Controller Tests", func() { const ( clusterName = "test-cluster" machineName = "test-machine" - controlPlaneLabelTrue = "true" + controlPlaneLabelTrue = true className = "test-className" imageName = "test-imageName" storageClass = "test-storageClass" @@ -62,7 +62,7 @@ var _ = Describe("Cluster Controller Tests", func() { cluster = util.CreateCluster(clusterName) vsphereCluster = util.CreateVSphereCluster(clusterName) ctx = 
util.CreateClusterContext(cluster, vsphereCluster) - vsphereMachine = util.CreateVSphereMachine(machineName, clusterName, controlPlaneLabelTrue, className, imageName, storageClass) + vsphereMachine = util.CreateVSphereMachine(machineName, clusterName, className, imageName, storageClass, controlPlaneLabelTrue) reconciler = &ClusterReconciler{ ControllerContext: ctx.ControllerContext, diff --git a/controllers/vspherecluster_controller.go b/controllers/vspherecluster_controller.go index 5a285dde06..335f93d4fe 100644 --- a/controllers/vspherecluster_controller.go +++ b/controllers/vspherecluster_controller.go @@ -37,6 +37,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" "sigs.k8s.io/cluster-api-provider-vsphere/controllers/vmware" + "sigs.k8s.io/cluster-api-provider-vsphere/feature" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" inframanager "sigs.k8s.io/cluster-api-provider-vsphere/pkg/manager" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/record" @@ -163,8 +164,10 @@ func AddClusterControllerToManager(ctx *context.ControllerManagerContext, mgr ma return err } - // TODO (srm09): Need to figure out how to add the watches only when the anti-affinity feature flag is enabled. - return reconciler.clusterModuleReconciler.PopulateWatchesOnController(c) + if feature.Gates.Enabled(feature.NodeAntiAffinity) { + return reconciler.clusterModuleReconciler.PopulateWatchesOnController(c) + } + return nil } func clusterToInfrastructureMapFunc(managerContext *context.ControllerManagerContext) handler.MapFunc { diff --git a/controllers/vspherecluster_reconciler.go b/controllers/vspherecluster_reconciler.go index b38764d27a..bf071ffb4e 100644 --- a/controllers/vspherecluster_reconciler.go +++ b/controllers/vspherecluster_reconciler.go @@ -60,7 +60,7 @@ type clusterReconciler struct { } // Reconcile ensures the back-end state reflects the Kubernetes resource state intent. -func (r clusterReconciler) Reconcile(ctx goctx.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { +func (r clusterReconciler) Reconcile(_ goctx.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { // Get the VSphereCluster resource for this request. 
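The `AddClusterControllerToManager` change above resolves the TODO by gating the cluster-module watches on `feature.Gates.Enabled(feature.NodeAntiAffinity)`. CAPV's `feature` package is not part of this diff; below is a sketch of the usual component-base pattern such a gate follows — every name other than `NodeAntiAffinity` is an assumption:

```go
package feature

import (
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/component-base/featuregate"
)

// NodeAntiAffinity gates the cluster-module watches wired up in
// AddClusterControllerToManager.
const NodeAntiAffinity featuregate.Feature = "NodeAntiAffinity"

// Gates is the gate queried via feature.Gates.Enabled(...).
var Gates featuregate.MutableFeatureGate = featuregate.NewFeatureGate()

func init() {
	runtime.Must(Gates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		// Off by default, matching EXP_NODE_ANTI_AFFINITY:=false in manager.yaml.
		NodeAntiAffinity: {Default: false, PreRelease: featuregate.Alpha},
	}))
}
```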
vsphereCluster := &infrav1.VSphereCluster{} if err := r.Client.Get(r, req.NamespacedName, vsphereCluster); err != nil { diff --git a/controllers/vspherecluster_reconciler_test.go b/controllers/vspherecluster_reconciler_test.go index 1c7f932885..8f7123f758 100644 --- a/controllers/vspherecluster_reconciler_test.go +++ b/controllers/vspherecluster_reconciler_test.go @@ -303,6 +303,9 @@ var _ = Describe("VIM based VSphere ClusterReconciler", func() { ObjectMeta: metav1.ObjectMeta{ Name: "vsphere-test1", Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + {Kind: "Cluster", APIVersion: clusterv1.GroupVersion.String(), Name: capiCluster.Name, UID: "blah"}, + }, }, Spec: infrav1.VSphereClusterSpec{ IdentityRef: &infrav1.VSphereIdentityReference{ @@ -320,39 +323,36 @@ var _ = Describe("VIM based VSphere ClusterReconciler", func() { return testEnv.Get(ctx, key, instance) }, timeout).Should(BeNil()) - By("setting the OwnerRef on the VSphereCluster") - Eventually(func() bool { - ph, err := patch.NewHelper(instance, testEnv) - Expect(err).ShouldNot(HaveOccurred()) - instance.OwnerReferences = append(instance.OwnerReferences, metav1.OwnerReference{Kind: "Cluster", APIVersion: clusterv1.GroupVersion.String(), Name: capiCluster.Name, UID: "blah"}) - Expect(ph.Patch(ctx, instance, patch.WithStatusObservedGeneration{})).ShouldNot(HaveOccurred()) - return true - }, timeout).Should(BeTrue()) - - By("setting the VSphereCluster's VCenterAvailableCondition to true") + By("checking that the finalizers on the object are set") Eventually(func() bool { if err := testEnv.Get(ctx, key, instance); err != nil { return false } - return conditions.IsTrue(instance, infrav1.VCenterAvailableCondition) + return len(instance.Finalizers) > 0 }, timeout).Should(BeTrue()) By("deleting the vspherecluster which has the secret with legacy finalizer") - Eventually(func() error { - return testEnv.Delete(ctx, instance) - }, timeout).Should(BeNil()) + Expect(testEnv.Delete(ctx, instance)).To(Succeed()) + + By("checking that the secret is deleted") + secretKey := client.ObjectKey{Namespace: secret.Namespace, Name: secret.Name} + Eventually(func() bool { + err := testEnv.Get(ctx, secretKey, secret) + return apierrors.IsNotFound(err) + }, timeout).Should(BeTrue()) + // confirm that the VSphereCluster is deleted Eventually(func() bool { err := testEnv.Get(ctx, key, instance) return apierrors.IsNotFound(err) }, timeout).Should(BeTrue()) - By("checking that the secret is deleted") + /*By("checking that the secret is deleted") secretKey := client.ObjectKey{Namespace: secret.Namespace, Name: secret.Name} Eventually(func() bool { err := testEnv.Get(ctx, secretKey, secret) return apierrors.IsNotFound(err) - }, timeout).Should(BeTrue()) + }, timeout).Should(BeTrue())*/ }) }) @@ -611,7 +611,7 @@ var _ = Describe("VIM based VSphere ClusterReconciler", func() { Eventually(func() int { machines := &infrav1.VSphereMachineList{} if err := testEnv.List(ctx, machines, client.InNamespace(namespace), - client.MatchingLabels(map[string]string{clusterv1.ClusterLabelName: capiCluster.Name})); err != nil { + client.MatchingLabels(map[string]string{clusterv1.ClusterNameLabel: capiCluster.Name})); err != nil { return -1 } return len(machines.Items) @@ -621,7 +621,7 @@ var _ = Describe("VIM based VSphere ClusterReconciler", func() { Eventually(func() int { machines := &infrav1.VSphereMachineList{} if err := testEnv.List(ctx, machines, client.InNamespace(namespace), - client.MatchingLabels(map[string]string{clusterv1.ClusterLabelName: 
capiCluster.Name})); err != nil { + client.MatchingLabels(map[string]string{clusterv1.ClusterNameLabel: capiCluster.Name})); err != nil { return 0 } ownerRefSet := 0 @@ -646,7 +646,7 @@ func createVsphereMachine(ctx context.Context, env *helpers.TestEnvironment, nam ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-vsp", Namespace: namespace, - Labels: map[string]string{clusterv1.ClusterLabelName: clusterName}, + Labels: map[string]string{clusterv1.ClusterNameLabel: clusterName}, }, Spec: infrav1.VSphereMachineSpec{ VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ diff --git a/controllers/vspheredeploymentzone_controller_test.go b/controllers/vspheredeploymentzone_controller_test.go index 7377f47f09..baf7ed1918 100644 --- a/controllers/vspheredeploymentzone_controller_test.go +++ b/controllers/vspheredeploymentzone_controller_test.go @@ -484,7 +484,7 @@ func createMachine(machineName, clusterName, namespace string, isControlPlane bo Name: machineName, Namespace: namespace, Labels: map[string]string{ - clusterv1.ClusterLabelName: clusterName, + clusterv1.ClusterNameLabel: clusterName, }, }, Spec: clusterv1.MachineSpec{ @@ -504,7 +504,7 @@ func createMachine(machineName, clusterName, namespace string, isControlPlane bo }, } if isControlPlane { - m.Labels[clusterv1.MachineControlPlaneLabelName] = "" + m.Labels[clusterv1.MachineControlPlaneLabel] = "" } return m } diff --git a/controllers/vspheremachine_controller.go b/controllers/vspheremachine_controller.go index 5e8163b861..7efeecab39 100644 --- a/controllers/vspheremachine_controller.go +++ b/controllers/vspheremachine_controller.go @@ -179,7 +179,7 @@ type machineReconciler struct { } // Reconcile ensures the back-end state reflects the Kubernetes resource state intent. -func (r machineReconciler) Reconcile(ctx goctx.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { +func (r machineReconciler) Reconcile(_ goctx.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { var machineContext context.MachineContext logger := r.Logger.WithName(req.Namespace).WithName(req.Name) logger.V(3).Info("Starting Reconcile VSphereMachine") diff --git a/controllers/vspheremachine_controller_test.go b/controllers/vspheremachine_controller_test.go index 7505018dd6..5219fc4053 100644 --- a/controllers/vspheremachine_controller_test.go +++ b/controllers/vspheremachine_controller_test.go @@ -95,7 +95,7 @@ var _ = Describe("VsphereMachineReconciler", func() { Namespace: testNs.Name, Finalizers: []string{clusterv1.MachineFinalizer}, Labels: map[string]string{ - clusterv1.ClusterLabelName: capiCluster.Name, + clusterv1.ClusterNameLabel: capiCluster.Name, }, }, Spec: clusterv1.MachineSpec{ @@ -114,8 +114,8 @@ var _ = Describe("VsphereMachineReconciler", func() { Name: "vsphere-machine-1", Namespace: testNs.Name, Labels: map[string]string{ - clusterv1.ClusterLabelName: capiCluster.Name, - clusterv1.MachineControlPlaneLabelName: "", + clusterv1.ClusterNameLabel: capiCluster.Name, + clusterv1.MachineControlPlaneLabel: "", }, OwnerReferences: []metav1.OwnerReference{ { @@ -181,7 +181,7 @@ var _ = Describe("VsphereMachineReconciler", func() { Eventually(func() bool { vms := infrav1.VSphereVMList{} Expect(testEnv.List(ctx, &vms, client.InNamespace(testNs.Name), client.MatchingLabels{ - clusterv1.ClusterLabelName: capiCluster.Name, + clusterv1.ClusterNameLabel: capiCluster.Name, })).To(Succeed()) return isPresentAndFalseWithReason(infraMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForBootstrapDataReason) && len(vms.Items) == 0 diff --git 
a/controllers/vspherevm_controller.go b/controllers/vspherevm_controller.go index cd36fbf1c9..9f07cfc825 100644 --- a/controllers/vspherevm_controller.go +++ b/controllers/vspherevm_controller.go @@ -65,8 +65,6 @@ import ( // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=kubeadmcontrolplanes,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch // +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;delete -// +kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddressclaims,verbs=get;create;update;watch;list -// +kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddresses,verbs=get;list;watch // AddVMControllerToManager adds the VM controller to the provided manager. // @@ -146,6 +144,13 @@ func AddVMControllerToManager(ctx *context.ControllerManagerContext, mgr manager if err != nil { return err } + + err = controller.Watch( + &source.Kind{Type: &ipamv1.IPAddressClaim{}}, + handler.EnqueueRequestsFromMapFunc(r.ipAddressClaimToVSphereVM)) + if err != nil { + return err + } return nil } @@ -251,8 +256,9 @@ func (r vmReconciler) Reconcile(ctx goctx.Context, req ctrl.Request) (_ ctrl.Res // always update the readyCondition. conditions.SetSummary(vmContext.VSphereVM, conditions.WithConditions( - infrav1.VMProvisionedCondition, infrav1.VCenterAvailableCondition, + infrav1.IPAddressClaimedCondition, + infrav1.VMProvisionedCondition, ), ) @@ -323,34 +329,12 @@ func (r vmReconciler) reconcileDelete(ctx *context.VMContext) (reconcile.Result, } // Attempt to delete the node corresponding to the vsphere VM - err = r.deleteNode(ctx, vm.Name) - if err != nil { + if err := r.deleteNode(ctx, vm.Name); err != nil { r.Logger.V(6).Info("unable to delete node", "err", err) } - // Remove finalizers from any ipam claims - for devIdx, device := range ctx.VSphereVM.Spec.Network.Devices { - for poolRefIdx := range device.AddressesFromPools { - // check if claim exists - ipAddrClaim := &ipamv1.IPAddressClaim{} - ipAddrClaimName := govmomi.IPAddressClaimName(ctx.VSphereVM.Name, devIdx, poolRefIdx) - ctx.Logger.Info("removing finalizer", "IPAddressClaim", ipAddrClaimName) - ipAddrClaimKey := apitypes.NamespacedName{ - Namespace: ctx.VSphereVM.Namespace, - Name: ipAddrClaimName, - } - if err := ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim); err != nil { - if apierrors.IsNotFound(err) { - continue - } - return reconcile.Result{}, errors.Wrapf(err, fmt.Sprintf("failed to find IPAddressClaim %q to remove the finalizer", ipAddrClaimName)) - } - if ctrlutil.RemoveFinalizer(ipAddrClaim, infrav1.IPAddressClaimFinalizer) { - if err := ctx.Client.Update(ctx, ipAddrClaim); err != nil { - return reconcile.Result{}, errors.Wrapf(err, fmt.Sprintf("failed to update IPAddressClaim %q", ipAddrClaimName)) - } - } - } + if err := r.deleteIPAddressClaims(ctx); err != nil { + return reconcile.Result{}, err } // The VM is deleted so remove the finalizer. @@ -380,10 +364,7 @@ func (r vmReconciler) deleteNode(ctx *context.VMContext, name string) error { Name: name, }, } - if err := clusterClient.Delete(ctx, node); err != nil { - return err - } - return nil + return clusterClient.Delete(ctx, node) } func (r vmReconciler) reconcileNormal(ctx *context.VMContext) (reconcile.Result, error) { @@ -400,6 +381,10 @@ func (r vmReconciler) reconcileNormal(ctx *context.VMContext) (reconcile.Result, return reconcile.Result{}, nil } + if err := r.reconcileIPAddressClaims(ctx); err != nil { + return reconcile.Result{}, err + } + // Get or create the VM. 
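With the hunks above, `reconcileNormal` now creates IP address claims before the VM exists, and the condition summary folds in `IPAddressClaimedCondition`. A hedged sketch of how the reasons added in `condition_consts.go` pair with CAPI's `conditions` helpers — illustrative only, since the real logic lives in the new `vspherevm_ipaddress_reconciler.go`, whose body is truncated at the end of this diff:

```go
package controllers

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"

	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
)

// markClaimProgress is a hypothetical helper showing the intended
// lifecycle of IPAddressClaimedCondition on a VSphereVM.
func markClaimProgress(vm *infrav1.VSphereVM, created, total int) {
	switch {
	case created < total:
		// Claims are still being created for the VM's devices.
		conditions.MarkFalse(vm, infrav1.IPAddressClaimedCondition,
			infrav1.IPAddressClaimsBeingCreatedReason, clusterv1.ConditionSeverityInfo,
			"%d of %d claims created", created, total)
	case total > 0:
		// All claims exist; wait for the IPAM provider to allocate addresses.
		conditions.MarkFalse(vm, infrav1.IPAddressClaimedCondition,
			infrav1.WaitingForIPAddressReason, clusterv1.ConditionSeverityInfo,
			"waiting for IP addresses to be provisioned")
	default:
		conditions.MarkTrue(vm, infrav1.IPAddressClaimedCondition)
	}
}
```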
vm, err := r.VMService.ReconcileVM(ctx) if err != nil { @@ -439,7 +424,6 @@ func (r vmReconciler) reconcileNormal(ctx *context.VMContext) (reconcile.Result, ctx.VSphereVM.Status.Ready = true conditions.MarkTrue(ctx.VSphereVM, infrav1.VMProvisionedCondition) ctx.Logger.Info("VSphereVM is ready") - return reconcile.Result{}, nil } @@ -473,7 +457,7 @@ func (r vmReconciler) clusterToVSphereVMs(a ctrlclient.Object) []reconcile.Reque vms := &infrav1.VSphereVMList{} err := r.Client.List(goctx.Background(), vms, ctrlclient.MatchingLabels( map[string]string{ - clusterv1.ClusterLabelName: a.GetName(), + clusterv1.ClusterNameLabel: a.GetName(), }, )) if err != nil { @@ -496,7 +480,7 @@ func (r vmReconciler) vsphereClusterToVSphereVMs(a ctrlclient.Object) []reconcil if !ok { return nil } - clusterName, ok := vsphereCluster.Labels[clusterv1.ClusterLabelName] + clusterName, ok := vsphereCluster.Labels[clusterv1.ClusterNameLabel] if !ok { return nil } @@ -505,7 +489,7 @@ func (r vmReconciler) vsphereClusterToVSphereVMs(a ctrlclient.Object) []reconcil vms := &infrav1.VSphereVMList{} err := r.Client.List(goctx.Background(), vms, ctrlclient.MatchingLabels( map[string]string{ - clusterv1.ClusterLabelName: clusterName, + clusterv1.ClusterNameLabel: clusterName, }, )) if err != nil { @@ -523,6 +507,29 @@ func (r vmReconciler) vsphereClusterToVSphereVMs(a ctrlclient.Object) []reconcil return requests } +func (r vmReconciler) ipAddressClaimToVSphereVM(a ctrlclient.Object) []reconcile.Request { + ipAddressClaim, ok := a.(*ipamv1.IPAddressClaim) + if !ok { + return nil + } + + requests := []reconcile.Request{} + if clusterutilv1.HasOwner(ipAddressClaim.OwnerReferences, infrav1.GroupVersion.String(), []string{"VSphereVM"}) { + for _, ref := range ipAddressClaim.OwnerReferences { + if ref.Kind == "VSphereVM" { + requests = append(requests, reconcile.Request{ + NamespacedName: apitypes.NamespacedName{ + Name: ref.Name, + Namespace: ipAddressClaim.Namespace, + }, + }) + break + } + } + } + return requests +} + func (r vmReconciler) retrieveVcenterSession(ctx goctx.Context, vsphereVM *infrav1.VSphereVM) (*session.Session, error) { // Get cluster object and then get VSphereCluster object diff --git a/controllers/vspherevm_controller_test.go b/controllers/vspherevm_controller_test.go index f591528b38..c404710cee 100644 --- a/controllers/vspherevm_controller_test.go +++ b/controllers/vspherevm_controller_test.go @@ -98,7 +98,7 @@ func TestReconcileNormal_WaitingForIPAddrAllocation(t *testing.T) { Name: "foo", Namespace: "test", Labels: map[string]string{ - clusterv1.ClusterLabelName: "valid-cluster", + clusterv1.ClusterNameLabel: "valid-cluster", }, }, } @@ -109,7 +109,7 @@ func TestReconcileNormal_WaitingForIPAddrAllocation(t *testing.T) { Name: "foo-vm", Namespace: "test", Labels: map[string]string{ - clusterv1.ClusterLabelName: "valid-cluster", + clusterv1.ClusterNameLabel: "valid-cluster", }, OwnerReferences: []metav1.OwnerReference{{APIVersion: clusterv1.GroupVersion.String(), Kind: "Machine", Name: "foo"}}, }, @@ -126,7 +126,7 @@ func TestReconcileNormal_WaitingForIPAddrAllocation(t *testing.T) { infrav1.VMFinalizer, }, Labels: map[string]string{ - clusterv1.ClusterLabelName: "valid-cluster", + clusterv1.ClusterNameLabel: "valid-cluster", }, OwnerReferences: []metav1.OwnerReference{{APIVersion: infrav1.GroupVersion.String(), Kind: "VSphereMachine", Name: "foo-vm"}}, // To make sure PatchHelper does not error out @@ -406,7 +406,7 @@ func TestRetrievingVCenterCredentialsFromCluster(t *testing.T) { Name: "foo", 
Namespace: "test", Labels: map[string]string{ - clusterv1.ClusterLabelName: "valid-cluster", + clusterv1.ClusterNameLabel: "valid-cluster", }, }, } @@ -418,7 +418,7 @@ func TestRetrievingVCenterCredentialsFromCluster(t *testing.T) { Name: "foo-vm", Namespace: "test", Labels: map[string]string{ - clusterv1.ClusterLabelName: "valid-cluster", + clusterv1.ClusterNameLabel: "valid-cluster", }, OwnerReferences: []metav1.OwnerReference{{APIVersion: clusterv1.GroupVersion.String(), Kind: "Machine", Name: "foo"}}, }, @@ -432,7 +432,7 @@ func TestRetrievingVCenterCredentialsFromCluster(t *testing.T) { Name: "foo", Namespace: "test", Labels: map[string]string{ - clusterv1.ClusterLabelName: "valid-cluster", + clusterv1.ClusterNameLabel: "valid-cluster", }, OwnerReferences: []metav1.OwnerReference{{APIVersion: infrav1.GroupVersion.String(), Kind: "VSphereMachine", Name: "foo-vm"}}, // To make sure PatchHelper does not error out @@ -488,7 +488,7 @@ func Test_reconcile(t *testing.T) { Name: "foo", Namespace: ns, Labels: map[string]string{ - clusterv1.ClusterLabelName: "valid-cluster", + clusterv1.ClusterNameLabel: "valid-cluster", }, }, } @@ -501,7 +501,7 @@ func Test_reconcile(t *testing.T) { Name: "foo", Namespace: ns, Labels: map[string]string{ - clusterv1.ClusterLabelName: "valid-cluster", + clusterv1.ClusterNameLabel: "valid-cluster", }, OwnerReferences: []metav1.OwnerReference{{APIVersion: infrav1.GroupVersion.String(), Kind: "VSphereMachine", Name: "foo-vm"}}, Finalizers: []string{infrav1.VMFinalizer}, @@ -645,7 +645,7 @@ func createMachineOwnerHierarchy(machine *clusterv1.Machine) []client.Object { var ( objs []client.Object - clusterName, _ = machine.Labels[clusterv1.ClusterLabelName] + clusterName, _ = machine.Labels[clusterv1.ClusterNameLabel] ) objs = append(objs, &clusterv1.MachineSet{ @@ -653,7 +653,7 @@ func createMachineOwnerHierarchy(machine *clusterv1.Machine) []client.Object { Name: fmt.Sprintf("%s-ms", machine.Name), Namespace: machine.Namespace, Labels: map[string]string{ - clusterv1.ClusterLabelName: clusterName, + clusterv1.ClusterNameLabel: clusterName, }, OwnerReferences: []metav1.OwnerReference{ { @@ -670,7 +670,7 @@ func createMachineOwnerHierarchy(machine *clusterv1.Machine) []client.Object { Name: fmt.Sprintf("%s-md", machine.Name), Namespace: machine.Namespace, Labels: map[string]string{ - clusterv1.ClusterLabelName: clusterName, + clusterv1.ClusterNameLabel: clusterName, }, }, }) diff --git a/controllers/vspherevm_ipaddress_reconciler.go b/controllers/vspherevm_ipaddress_reconciler.go new file mode 100644 index 0000000000..77c5cb1fac --- /dev/null +++ b/controllers/vspherevm_ipaddress_reconciler.go @@ -0,0 +1,229 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package controllers
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1alpha1"
+	clusterutilv1 "sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/conditions"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
+	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
+	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"
+)
+
+// +kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddressclaims,verbs=get;create;patch;watch;list
+// +kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddresses,verbs=get;list;watch
+
+// reconcileIPAddressClaims ensures that VSphereVMs that are configured with
+// .spec.network.devices.addressesFromPools have corresponding IPAddressClaims.
+func (r vmReconciler) reconcileIPAddressClaims(ctx *context.VMContext) error {
+	totalClaims, claimsCreated := 0, 0
+	claimsFulfilled := 0
+
+	var (
+		claims  []conditions.Getter
+		errList []error
+	)
+
+	for devIdx, device := range ctx.VSphereVM.Spec.Network.Devices {
+		for poolRefIdx, poolRef := range device.AddressesFromPools {
+			totalClaims++
+			ipAddrClaimName := util.IPAddressClaimName(ctx.VSphereVM.Name, devIdx, poolRefIdx)
+			ipAddrClaim := &ipamv1.IPAddressClaim{}
+			ipAddrClaimKey := client.ObjectKey{
+				Namespace: ctx.VSphereVM.Namespace,
+				Name:      ipAddrClaimName,
+			}
+			err := ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim)
+			if err != nil && !apierrors.IsNotFound(err) {
+				ctx.Logger.Error(err, "fetching IPAddressClaim failed", "name", ipAddrClaimName)
+				return err
+			}
+			ipAddrClaim, created, err := createOrPatchIPAddressClaim(ctx, ipAddrClaimName, poolRef)
+			if err != nil {
+				ctx.Logger.Error(err, "createOrPatchIPAddressClaim failed", "name", ipAddrClaimName)
+				errList = append(errList, err)
+				continue
+			}
+			if created {
+				claimsCreated++
+			}
+			if ipAddrClaim.Status.AddressRef.Name != "" {
+				claimsFulfilled++
+			}
+
+			// Claims that report a Ready condition are collected, since they are
+			// eventually used to calculate the status of the IPAddressClaimed
+			// condition for the VSphereVM object.
+			if conditions.Has(ipAddrClaim, clusterv1.ReadyCondition) {
+				claims = append(claims, ipAddrClaim)
+			}
+		}
+	}
+
+	if len(errList) > 0 {
+		aggregatedErr := kerrors.NewAggregate(errList)
+		conditions.MarkFalse(ctx.VSphereVM,
+			infrav1.IPAddressClaimedCondition,
+			infrav1.IPAddressClaimNotFoundReason,
+			clusterv1.ConditionSeverityError,
+			aggregatedErr.Error())
+		return aggregatedErr
+	}
+
+	// Calculating the IPAddressClaimedCondition from the Ready Condition of the individual IPAddressClaims.
+	// This will not work if the IPAM provider does not set the Ready condition on the IPAddressClaim.
+	// To correctly calculate the status of the condition, we would want all the IPAddressClaim objects
+	// to report the Ready Condition.
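A minimal, self-contained sketch of the three-way fallback implemented just below may help readers follow the counters above. The function name and the plain-string rendering are illustrative only; the real code marks conditions on the VSphereVM through the `conditions` package:

```go
package main

import "fmt"

// claimSummary mirrors the fallback used when not every IPAddressClaim
// reports a Ready condition: every claim fulfilled means the condition is
// True; claims still being created are reported as such; otherwise the
// controller is waiting on the IPAM provider to allocate addresses.
func claimSummary(total, fulfilled, created int) string {
	switch {
	case total == fulfilled:
		return "IPAddressClaimed=True"
	case created > 0:
		return fmt.Sprintf("IPAddressClaimed=False (%d/%d claims being created)", created, total)
	default:
		return fmt.Sprintf("IPAddressClaimed=False (%d/%d claims being processed)", total-fulfilled, total)
	}
}

func main() {
	fmt.Println(claimSummary(3, 3, 0)) // all claims fulfilled
	fmt.Println(claimSummary(3, 0, 3)) // 3/3 claims being created
	fmt.Println(claimSummary(3, 1, 0)) // 2/3 claims being processed
}
```

The unit tests added later in this diff exercise exactly these branches, for example expecting the message "3/3 claims being created" when no claims existed before reconciliation.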
+ if len(claims) == totalClaims { + conditions.SetAggregate(ctx.VSphereVM, + infrav1.IPAddressClaimedCondition, + claims, + conditions.AddSourceRef(), + conditions.WithStepCounter()) + return nil + } + + // Fallback logic to calculate the state of the IPAddressClaimed condition + switch { + case totalClaims == claimsFulfilled: + conditions.MarkTrue(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) + case claimsFulfilled < totalClaims && claimsCreated > 0: + conditions.MarkFalse(ctx.VSphereVM, infrav1.IPAddressClaimedCondition, + infrav1.IPAddressClaimsBeingCreatedReason, clusterv1.ConditionSeverityInfo, + "%d/%d claims being created", claimsCreated, totalClaims) + case claimsFulfilled < totalClaims && claimsCreated == 0: + conditions.MarkFalse(ctx.VSphereVM, infrav1.IPAddressClaimedCondition, + infrav1.WaitingForIPAddressReason, clusterv1.ConditionSeverityInfo, + "%d/%d claims being processed", totalClaims-claimsFulfilled, totalClaims) + } + return nil +} + +// createOrPatchIPAddressClaim creates/patches an IPAddressClaim object for a device requesting an address +// from an externally managed IPPool. Ensures that the claim has a reference to the cluster of the VM to +// support pausing reconciliation. +// The responsibility of the IP address resolution is handled by an external IPAM provider. +func createOrPatchIPAddressClaim(ctx *context.VMContext, name string, poolRef corev1.TypedLocalObjectReference) (*ipamv1.IPAddressClaim, bool, error) { + claim := &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ctx.VSphereVM.Namespace, + }, + } + mutateFn := func() (err error) { + claim.SetOwnerReferences(clusterutilv1.EnsureOwnerRef( + claim.OwnerReferences, + metav1.OwnerReference{ + APIVersion: ctx.VSphereVM.APIVersion, + Kind: ctx.VSphereVM.Kind, + Name: ctx.VSphereVM.Name, + UID: ctx.VSphereVM.UID, + })) + + ctrlutil.AddFinalizer(claim, infrav1.IPAddressClaimFinalizer) + + if claim.Labels == nil { + claim.Labels = make(map[string]string) + } + claim.Labels[clusterv1.ClusterNameLabel] = ctx.VSphereVM.Labels[clusterv1.ClusterNameLabel] + + claim.Spec.PoolRef.APIGroup = poolRef.APIGroup + claim.Spec.PoolRef.Kind = poolRef.Kind + claim.Spec.PoolRef.Name = poolRef.Name + return nil + } + + result, err := ctrlutil.CreateOrPatch(ctx, ctx.Client, claim, mutateFn) + if err != nil { + ctx.Logger.Error( + err, + "failed to CreateOrPatch IPAddressClaim", + "namespace", + claim.Namespace, + "name", + claim.Name, + ) + return nil, false, err + } + key := types.NamespacedName{ + Namespace: claim.Namespace, + Name: claim.Name, + } + switch result { + case ctrlutil.OperationResultCreated: + ctx.Logger.Info( + "created claim", + "claim", + key, + ) + return claim, true, nil + case ctrlutil.OperationResultUpdated: + ctx.Logger.Info( + "updated claim", + "claim", + key, + ) + case ctrlutil.OperationResultNone, ctrlutil.OperationResultUpdatedStatus, ctrlutil.OperationResultUpdatedStatusOnly: + ctx.Logger.V(5).Info( + "no change required for claim", + "claim", key, + "operation", result, + ) + } + return claim, false, nil +} + +// deleteIPAddressClaims removes the finalizers from the IPAddressClaim objects +// thus freeing them up for garbage collection. 
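Both the create/patch path above and the deletion helper that follows address claims by the same deterministic name from `util.IPAddressClaimName`, one claim per (device index, pool reference index) pair, as the test fixtures later in this diff show. A tiny illustrative stand-in; the exact format string is an assumption of this sketch, and the canonical implementation lives in `pkg/util`:

```go
package main

import "fmt"

// ipAddressClaimName is an illustrative stand-in for util.IPAddressClaimName.
// One IPAddressClaim exists per (device index, pool reference index) pair of
// a VSphereVM; the "%s-%d-%d" layout is assumed here, not a contract.
func ipAddressClaimName(vmName string, devIdx, poolRefIdx int) string {
	return fmt.Sprintf("%s-%d-%d", vmName, devIdx, poolRefIdx)
}

func main() {
	// Second pool reference on the second device of "test-vm".
	fmt.Println(ipAddressClaimName("test-vm", 1, 1))
}
```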
+func (r vmReconciler) deleteIPAddressClaims(ctx *context.VMContext) error { + for devIdx, device := range ctx.VSphereVM.Spec.Network.Devices { + for poolRefIdx := range device.AddressesFromPools { + // check if claim exists + ipAddrClaim := &ipamv1.IPAddressClaim{} + ipAddrClaimName := util.IPAddressClaimName(ctx.VSphereVM.Name, devIdx, poolRefIdx) + ctx.Logger.Info("removing finalizer", "IPAddressClaim", ipAddrClaimName) + ipAddrClaimKey := client.ObjectKey{ + Namespace: ctx.VSphereVM.Namespace, + Name: ipAddrClaimName, + } + if err := ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim); err != nil { + if apierrors.IsNotFound(err) { + continue + } + return errors.Wrapf(err, fmt.Sprintf("failed to find IPAddressClaim %q to remove the finalizer", ipAddrClaimName)) + } + if ctrlutil.RemoveFinalizer(ipAddrClaim, infrav1.IPAddressClaimFinalizer) { + if err := ctx.Client.Update(ctx, ipAddrClaim); err != nil { + return errors.Wrapf(err, fmt.Sprintf("failed to update IPAddressClaim %q", ipAddrClaimName)) + } + } + } + } + return nil +} diff --git a/controllers/vspherevm_ipaddress_reconciler_test.go b/controllers/vspherevm_ipaddress_reconciler_test.go new file mode 100644 index 0000000000..1c5223fb9d --- /dev/null +++ b/controllers/vspherevm_ipaddress_reconciler_test.go @@ -0,0 +1,253 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + goctx "context" + "testing" + + "github.com/go-logr/logr" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1alpha1" + "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/fake" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" +) + +func Test_vmReconciler_reconcileIPAddressClaims(t *testing.T) { + name, namespace := "test-vm", "my-namespace" + setup := func(vsphereVM *infrav1.VSphereVM, initObjects ...client.Object) *context.VMContext { + ctx := fake.NewControllerContext(fake.NewControllerManagerContext(initObjects...)) + return &context.VMContext{ + ControllerContext: ctx, + VSphereVM: vsphereVM, + Logger: logr.Discard(), + } + } + + t.Run("when VSphereVM Spec has address pool references", func(t *testing.T) { + vsphereVM := &infrav1.VSphereVM{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "my-cluster", + }, + }, + Spec: infrav1.VSphereVMSpec{ + VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ + Network: infrav1.NetworkSpec{ + Devices: []infrav1.NetworkDeviceSpec{{ + AddressesFromPools: []corev1.TypedLocalObjectReference{ + poolRef("my-pool-1"), + }}, + { + AddressesFromPools: []corev1.TypedLocalObjectReference{ + poolRef("my-pool-2"), + poolRef("my-pool-3"), + }, + }, + }, + }, + }, + }, + } + + t.Run("when no claims exist", func(t *testing.T) { + g := gomega.NewWithT(t) + + testCtx := setup(vsphereVM) + err := vmReconciler{}.reconcileIPAddressClaims(testCtx) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + ipAddrClaimList := &ipamv1.IPAddressClaimList{} + g.Expect(testCtx.Client.List(goctx.TODO(), ipAddrClaimList)).To(gomega.Succeed()) + g.Expect(ipAddrClaimList.Items).To(gomega.HaveLen(3)) + + for idx := range ipAddrClaimList.Items { + claim := ipAddrClaimList.Items[idx] + g.Expect(claim.Finalizers).To(gomega.HaveLen(1)) + g.Expect(ctrlutil.ContainsFinalizer(&claim, infrav1.IPAddressClaimFinalizer)).To(gomega.BeTrue()) + + g.Expect(claim.OwnerReferences).To(gomega.HaveLen(1)) + g.Expect(claim.OwnerReferences[0].Name).To(gomega.Equal(vsphereVM.Name)) + g.Expect(claim.Labels).To(gomega.HaveKeyWithValue(clusterv1.ClusterNameLabel, "my-cluster")) + } + + claimedCondition := conditions.Get(testCtx.VSphereVM, infrav1.IPAddressClaimedCondition) + g.Expect(claimedCondition).NotTo(gomega.BeNil()) + g.Expect(claimedCondition.Status).To(gomega.Equal(corev1.ConditionFalse)) + g.Expect(claimedCondition.Reason).To(gomega.Equal(infrav1.IPAddressClaimsBeingCreatedReason)) + g.Expect(claimedCondition.Message).To(gomega.Equal("3/3 claims being created")) + }) + + ipAddrClaim := func(name, poolName string) *ipamv1.IPAddressClaim { + return &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: ipamv1.IPAddressClaimSpec{PoolRef: poolRef(poolName)}, + Status: ipamv1.IPAddressClaimStatus{}, + } + } + + t.Run("when all claims exist", func(t *testing.T) { + g := gomega.NewWithT(t) + + testCtx := setup(vsphereVM, + ipAddrClaim(util.IPAddressClaimName(name, 0, 0), 
"my-pool-1"), + ipAddrClaim(util.IPAddressClaimName(name, 1, 0), "my-pool-2"), + ipAddrClaim(util.IPAddressClaimName(name, 1, 1), "my-pool-3"), + ) + err := vmReconciler{}.reconcileIPAddressClaims(testCtx) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + claimedCondition := conditions.Get(testCtx.VSphereVM, infrav1.IPAddressClaimedCondition) + g.Expect(claimedCondition).NotTo(gomega.BeNil()) + g.Expect(claimedCondition.Status).To(gomega.Equal(corev1.ConditionFalse)) + g.Expect(claimedCondition.Reason).To(gomega.Equal(infrav1.WaitingForIPAddressReason)) + g.Expect(claimedCondition.Message).To(gomega.Equal("3/3 claims being processed")) + + ipAddrClaimList := &ipamv1.IPAddressClaimList{} + g.Expect(testCtx.Client.List(goctx.TODO(), ipAddrClaimList)).To(gomega.Succeed()) + + for idx := range ipAddrClaimList.Items { + claim := ipAddrClaimList.Items[idx] + g.Expect(claim.Finalizers).To(gomega.HaveLen(1)) + g.Expect(ctrlutil.ContainsFinalizer(&claim, infrav1.IPAddressClaimFinalizer)).To(gomega.BeTrue()) + + g.Expect(claim.OwnerReferences).To(gomega.HaveLen(1)) + g.Expect(claim.OwnerReferences[0].Name).To(gomega.Equal(vsphereVM.Name)) + g.Expect(claim.Labels).To(gomega.HaveKeyWithValue(clusterv1.ClusterNameLabel, "my-cluster")) + } + }) + + t.Run("when all claims exist and are realized", func(t *testing.T) { + g := gomega.NewWithT(t) + + realizedIPAddrClaimOne := ipAddrClaim(util.IPAddressClaimName(name, 0, 0), "my-pool-1") + realizedIPAddrClaimOne.Status.AddressRef.Name = "blah-one" + + realizedIPAddrClaimTwo := ipAddrClaim(util.IPAddressClaimName(name, 1, 0), "my-pool-2") + realizedIPAddrClaimTwo.Status.AddressRef.Name = "blah-two" + + realizedIPAddrClaimThree := ipAddrClaim(util.IPAddressClaimName(name, 1, 1), "my-pool-3") + realizedIPAddrClaimThree.Status.AddressRef.Name = "blah-three" + + testCtx := setup(vsphereVM, realizedIPAddrClaimOne, realizedIPAddrClaimTwo, realizedIPAddrClaimThree) + err := vmReconciler{}.reconcileIPAddressClaims(testCtx) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + claimedCondition := conditions.Get(testCtx.VSphereVM, infrav1.IPAddressClaimedCondition) + g.Expect(claimedCondition).NotTo(gomega.BeNil()) + g.Expect(claimedCondition.Status).To(gomega.Equal(corev1.ConditionTrue)) + + ipAddrClaimList := &ipamv1.IPAddressClaimList{} + g.Expect(testCtx.Client.List(goctx.TODO(), ipAddrClaimList)).To(gomega.Succeed()) + + for idx := range ipAddrClaimList.Items { + claim := ipAddrClaimList.Items[idx] + g.Expect(claim.Finalizers).To(gomega.HaveLen(1)) + g.Expect(ctrlutil.ContainsFinalizer(&claim, infrav1.IPAddressClaimFinalizer)).To(gomega.BeTrue()) + + g.Expect(claim.OwnerReferences).To(gomega.HaveLen(1)) + g.Expect(claim.OwnerReferences[0].Name).To(gomega.Equal(vsphereVM.Name)) + g.Expect(claim.Labels).To(gomega.HaveKeyWithValue(clusterv1.ClusterNameLabel, "my-cluster")) + } + }) + + t.Run("when all existing claims have Ready Condition set", func(t *testing.T) { + g := gomega.NewWithT(t) + + ipAddrClaimWithReadyConditionTrue := ipAddrClaim(util.IPAddressClaimName(name, 0, 0), "my-pool-1") + ipAddrClaimWithReadyConditionTrue.Status.Conditions = clusterv1.Conditions{ + *conditions.TrueCondition(clusterv1.ReadyCondition), + } + + ipAddrClaimWithReadyConditionFalse := ipAddrClaim(util.IPAddressClaimName(name, 1, 0), "my-pool-2") + ipAddrClaimWithReadyConditionFalse.Status.Conditions = clusterv1.Conditions{ + *conditions.FalseCondition(clusterv1.ReadyCondition, "IPAddressFetchProgress", clusterv1.ConditionSeverityInfo, ""), + } + + secondIPAddrClaimWithReadyConditionTrue := 
ipAddrClaim(util.IPAddressClaimName(name, 1, 1), "my-pool-3") + secondIPAddrClaimWithReadyConditionTrue.Status.Conditions = clusterv1.Conditions{ + *conditions.TrueCondition(clusterv1.ReadyCondition), + } + + testCtx := setup(vsphereVM, + ipAddrClaimWithReadyConditionTrue, + ipAddrClaimWithReadyConditionFalse, + secondIPAddrClaimWithReadyConditionTrue, + ) + err := vmReconciler{}.reconcileIPAddressClaims(testCtx) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + claimedCondition := conditions.Get(testCtx.VSphereVM, infrav1.IPAddressClaimedCondition) + g.Expect(claimedCondition).NotTo(gomega.BeNil()) + g.Expect(claimedCondition.Status).To(gomega.Equal(corev1.ConditionFalse)) + }) + + t.Run("when some existing claims have Ready Condition set", func(t *testing.T) { + g := gomega.NewWithT(t) + + ipAddrClaimWithReadyConditionTrue := ipAddrClaim(util.IPAddressClaimName(name, 0, 0), "my-pool-1") + ipAddrClaimWithReadyConditionTrue.Status.Conditions = clusterv1.Conditions{ + *conditions.TrueCondition(clusterv1.ReadyCondition), + } + ipAddrClaimWithReadyConditionTrue.Status.AddressRef.Name = "blah-one" + + ipAddrClaimWithReadyConditionFalse := ipAddrClaim(util.IPAddressClaimName(name, 1, 0), "my-pool-2") + ipAddrClaimWithReadyConditionFalse.Status.Conditions = clusterv1.Conditions{ + *conditions.FalseCondition(clusterv1.ReadyCondition, "IPAddressFetchProgress", clusterv1.ConditionSeverityInfo, ""), + } + + iPAddrClaimWithNoReadyCondition := ipAddrClaim(util.IPAddressClaimName(name, 1, 1), "my-pool-3") + + testCtx := setup(vsphereVM, + ipAddrClaimWithReadyConditionTrue, + ipAddrClaimWithReadyConditionFalse, + iPAddrClaimWithNoReadyCondition, + ) + err := vmReconciler{}.reconcileIPAddressClaims(testCtx) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + claimedCondition := conditions.Get(testCtx.VSphereVM, infrav1.IPAddressClaimedCondition) + g.Expect(claimedCondition).NotTo(gomega.BeNil()) + g.Expect(claimedCondition.Status).To(gomega.Equal(corev1.ConditionFalse)) + g.Expect(claimedCondition.Reason).To(gomega.Equal(infrav1.WaitingForIPAddressReason)) + g.Expect(claimedCondition.Message).To(gomega.Equal("2/3 claims being processed")) + }) + }) +} + +func poolRef(name string) corev1.TypedLocalObjectReference { + return corev1.TypedLocalObjectReference{ + APIGroup: pointer.String("test.ipam.provider.io/v1"), + Name: name, + Kind: "my-pool-kind", + } +} diff --git a/docs/node-ipam-demo.md b/docs/node-ipam-demo.md index 7958dc8bda..7653e5b256 100644 --- a/docs/node-ipam-demo.md +++ b/docs/node-ipam-demo.md @@ -151,10 +151,15 @@ export VSPHERE_DATASTORE="the-datastore" export VSPHERE_NETWORK="VM Network" export VSPHERE_RESOURCE_POOL="*/Resources" export VSPHERE_FOLDER="vm" -export VSPHERE_TEMPLATE="ubuntu-2004-kube-v1.25.5" +export VSPHERE_TEMPLATE="ubuntu-2004-kube-v1.26.2" export VSPHERE_SSH_AUTHORIZED_KEY="ssh-rsa AAAAB3N..." export VSPHERE_TLS_THUMBPRINT="97:48:03:8D:78:A9..." export VSPHERE_STORAGE_POLICY="policy-one" +export CPI_IMAGE_K8S_VERSION=v1.26.0 +export NODE_IPAM_POOL_NAME=example-pool +export NODE_IPAM_POOL_API_GROUP=ipam.cluster.x-k8s.io +export NODE_IPAM_POOL_KIND=InClusterIPPool +export NAMESERVER="8.8.8.8" export CONTROL_PLANE_ENDPOINT_IP="1.2.3.4" ``` @@ -163,9 +168,8 @@ export CONTROL_PLANE_ENDPOINT_IP="1.2.3.4" The CAPV README.md file includes links to ovas. 
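One orientation note before importing the template: the `NODE_IPAM_POOL_*` and `NAMESERVER` variables exported above feed the `node-ipam` flavor used later in this demo, and they map onto CAPV's `addressesFromPools` device configuration. A sketch using the repository's own Go types, with values mirroring the exports (illustrative only):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"

	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
)

// nodeIPAMDevice sketches the network device the node-ipam flavor is expected
// to render: DHCP disabled, addresses drawn from the referenced pool, and an
// explicit nameserver. Values mirror the exported demo variables.
func nodeIPAMDevice() infrav1.NetworkDeviceSpec {
	return infrav1.NetworkDeviceSpec{
		NetworkName: "VM Network",
		DHCP4:       false,
		Nameservers: []string{"8.8.8.8"},
		AddressesFromPools: []corev1.TypedLocalObjectReference{{
			APIGroup: pointer.String("ipam.cluster.x-k8s.io"),
			Kind:     "InClusterIPPool",
			Name:     "example-pool",
		}},
	}
}

func main() {
	fmt.Printf("%+v\n", nodeIPAMDevice())
}
```

Users of the demo never write this Go directly; `clusterctl` renders the equivalent YAML into `cluster.yaml` via the flavor referenced below.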
```bash -wget https://storage.googleapis.com/capv-templates/v1.25.5/ubuntu-2004-kube-v1.25.5.ova -govc import.ova ubuntu-2004-kube-v1.25.5.ova -govc vm.markastemplate ubuntu-2004-kube-v1.25.5 +govc import.ova https://storage.googleapis.com/capv-templates/v1.26.2/ubuntu-2004-kube-v1.26.2.ova +govc vm.markastemplate ubuntu-2004-kube-v1.26.2 ``` ## Generate a Workload Cluster Config @@ -176,74 +180,13 @@ to the same namespace as the IP pool created in earlier steps. ```bash clusterctl generate cluster ipam-example \ --infrastructure vsphere \ + --flavor node-ipam \ --target-namespace cluster-ns \ - --kubernetes-version v1.25.5 \ + --kubernetes-version v1.26.2 \ --control-plane-machine-count 1 \ --worker-machine-count 1 > cluster.yaml ``` -## Edit the Generated cluster.yaml to use the IP Pool - -The generated `cluster.yaml` file contains `VSphereMachineTemplate` -declarations that describe network device settings of nodes in the cluster. - -By default, DHCP is enabled. Disable DHCP. - -Because DHCP is disabled, nameserver configuration must be added for the Node -to function correctly. - -To turn off DHCP and also add a reference to the pool created in prior steps, -modify *both* the control plane and worker `VSphereMachineTemplate` -configurations as follows: - -```yaml ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: VSphereMachineTemplate -metadata: - name: ipam-example - namespace: cluster-ns # Same namespace as the IP pool -spec: - template: - spec: - cloneMode: linkedClone - datacenter: dc0 - datastore: sharedVmfs-0 - diskGiB: 25 - folder: folder0 - memoryMiB: 8192 - network: - devices: - - networkName: port-group-vlan-101 - # BEGIN NEW CONFIGURATION - dhcp4: false # SET THIS TO FALSE - addressesFromPools: # add reference to pool created earlier - - apiGroup: ipam.cluster.x-k8s.io - kind: InClusterIPPool - name: example-pool - nameservers: - - "8.8.8.8" - # END NEW CONFIGURATION - numCPUs: 2 - os: Linux - resourcePool: rp0 - server: vsphere-server-url.com - storagePolicyName: "" - template: ubuntu-2004-kube-v1.25.5 - thumbprint: B8:03:5B:35:93:1... -``` - -The `addressesFromPools` is an array of `TypedLocalObjectReference` that refer -to pool objects. The `apiGroup` and `kind` in this example refer to the -`InClusterIPPool` CRD, the object type created in earlier steps. The name -matches the pool instance this cluster should use. Also note the namespace of -`VsphereMachineTemplate` matches the `InClusterIPPool`. - -At the time of writing this doc, the `InClusterIPPool` and its associated IPAM -provider is the only implementation choice available. As other IPAM providers -become available, the `apiGroup` and `kind` variables can be changed to point a -provider of choice. - ## Create the Workload Cluster Apply the cluster YAML to the management cluster. Obtain the cluster's diff --git a/docs/proposal/node-affinity-and-anti-affinity.md b/docs/proposal/node-affinity-and-anti-affinity.md index 114255787f..ee2a0d6469 100644 --- a/docs/proposal/node-affinity-and-anti-affinity.md +++ b/docs/proposal/node-affinity-and-anti-affinity.md @@ -260,7 +260,7 @@ A new controller is added which is responsible for propagating the labels with a Two new feature flags are introduced to control the availability of these new behaviors: 1. **NodeAntiAffinity** which is set to `false` by default. This controls the creation of cluster modules to dictate anti affinity for VM placement. -2. **NodeLabeling** which is set to `false` by default. 
This controls the propagation of labels with a special prefix from Machine to Node objects. +2. **NodeLabeling** which is set to `false` by default. This controls the propagation of labels with a special prefix from Machine to Node objects. Starting from v1.7.0 release, this feature flag is deprecated and this functionality will not be provided by CAPV. CAPI v1.4.0 natively supports this feature. ## Upgrade Strategy diff --git a/feature/feature.go b/feature/feature.go index c900f4de28..cf96b4185d 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -33,13 +33,17 @@ const ( // alpha: v1.4 NodeAntiAffinity featuregate.Feature = "NodeAntiAffinity" + //nolint: gocritic // NodeLabeling is a feature gate for the functionality to propagate Machine labels // with the prefix to the Node objects. // This is a stop-gap measure which will be removed when we have this functionality // present in CAPI. // See https://github.com/kubernetes-sigs/cluster-api/pull/6255 + // Since this is natively supported in CAPI with the v1.4 release, this feature is being + // deprecated. // // alpha: v1.4 + // deprecated: v1.7 NodeLabeling featuregate.Feature = "NodeLabeling" ) @@ -52,5 +56,5 @@ func init() { var defaultCAPVFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ // Every feature should be initiated here: NodeAntiAffinity: {Default: false, PreRelease: featuregate.Alpha}, - NodeLabeling: {Default: false, PreRelease: featuregate.Alpha}, + NodeLabeling: {Default: false, PreRelease: featuregate.Deprecated}, } diff --git a/go.mod b/go.mod index 2dcaa119f8..cfbce561d2 100644 --- a/go.mod +++ b/go.mod @@ -4,39 +4,38 @@ go 1.19 require ( github.com/antihax/optional v1.0.0 - github.com/go-logr/logr v1.2.3 + github.com/go-logr/logr v1.2.4 github.com/google/gofuzz v1.2.0 github.com/google/uuid v1.3.0 github.com/hashicorp/go-version v1.3.0 - github.com/onsi/ginkgo/v2 v2.8.3 - github.com/onsi/gomega v1.27.1 + github.com/onsi/ginkgo/v2 v2.9.2 + github.com/onsi/gomega v1.27.6 github.com/pkg/errors v0.9.1 - github.com/spf13/cobra v1.6.1 + github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.8.2 github.com/vmware-tanzu/net-operator-api v0.0.0-20210401185409-b0dc6c297707 github.com/vmware-tanzu/vm-operator/api v0.0.0-20221204041501-ec03d8bb733c github.com/vmware-tanzu/vm-operator/external/ncp v0.0.0-20211209213435-0f4ab286f64f github.com/vmware-tanzu/vm-operator/external/tanzu-topology v0.0.0-20211209213435-0f4ab286f64f - github.com/vmware/govmomi v0.30.2 - golang.org/x/crypto v0.6.0 + github.com/vmware/govmomi v0.30.4 + golang.org/x/crypto v0.8.0 golang.org/x/exp v0.0.0-20221002003631-540bb7301a08 - golang.org/x/mod v0.8.0 - golang.org/x/net v0.7.0 - golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 - golang.org/x/text v0.7.0 + golang.org/x/mod v0.10.0 + golang.org/x/oauth2 v0.7.0 + golang.org/x/text v0.9.0 gopkg.in/gcfg.v1 v1.2.3 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.25.0 - k8s.io/apiextensions-apiserver v0.25.0 - k8s.io/apimachinery v0.25.0 - k8s.io/client-go v0.25.0 + k8s.io/api v0.26.1 + k8s.io/apiextensions-apiserver v0.26.1 + k8s.io/apimachinery v0.26.1 + k8s.io/client-go v0.26.1 k8s.io/cluster-bootstrap v0.25.0 - k8s.io/component-base v0.25.0 + k8s.io/component-base v0.26.1 k8s.io/klog/v2 v2.80.1 - k8s.io/utils v0.0.0-20220823124924-e9cbc92d1a73 - sigs.k8s.io/cluster-api v1.3.3 - sigs.k8s.io/cluster-api/test v1.3.5 - sigs.k8s.io/controller-runtime v0.13.1 + k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 + sigs.k8s.io/cluster-api v1.4.1 + sigs.k8s.io/cluster-api/test v1.4.1 + 
sigs.k8s.io/controller-runtime v0.14.5 sigs.k8s.io/kind v0.17.0 sigs.k8s.io/yaml v1.3.0 ) @@ -45,22 +44,22 @@ require ( github.com/blang/semver/v4 v4.0.0 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/go-logr/zapr v1.2.3 // indirect - github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/google/gnostic v0.6.9 // indirect - github.com/google/go-github/v45 v45.2.0 // indirect + github.com/google/go-github/v48 v48.2.0 // indirect github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect - github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pelletier/go-toml/v2 v2.0.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.6 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/objx v0.5.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.8.0 // indirect - go.uber.org/zap v1.22.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/term v0.5.0 // indirect - golang.org/x/tools v0.6.0 // indirect + go.uber.org/zap v1.24.0 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/term v0.7.0 // indirect + golang.org/x/tools v0.7.0 // indirect ) require ( @@ -71,7 +70,7 @@ require ( github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/Microsoft/go-winio v0.5.0 // indirect github.com/alessio/shellescape v1.4.1 // indirect - github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220816024939-bc8df83d7b9d // indirect + github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.1+incompatible @@ -90,23 +89,23 @@ require ( github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/swag v0.22.3 // indirect - github.com/gobuffalo/flect v0.3.0 // indirect + github.com/gobuffalo/flect v1.0.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/cel-go v0.12.5 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/cel-go v0.12.6 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/huandu/xstrings v1.3.3 // indirect github.com/imdario/mergo v0.3.13 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/magiconair/properties v1.8.6 // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-isatty v0.0.16 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.5.0 
// indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -115,32 +114,32 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/prometheus/client_golang v1.13.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/client_golang v1.14.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/afero v1.9.2 // indirect + github.com/spf13/afero v1.9.3 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.13.0 // indirect + github.com/spf13/viper v1.15.0 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect - github.com/subosito/gotenv v1.4.1 // indirect - github.com/valyala/fastjson v1.6.3 // indirect - golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 // indirect + github.com/subosito/gotenv v1.4.2 // indirect + github.com/valyala/fastjson v1.6.4 // indirect + golang.org/x/time v0.3.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc // indirect + google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.25.0 // indirect - k8s.io/kube-openapi v0.0.0-20220803164354-a70c9af30aea // indirect + k8s.io/apiserver v0.26.1 // indirect + k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) diff --git a/go.sum b/go.sum index f969db827f..70ed12d784 100644 --- a/go.sum +++ b/go.sum @@ -39,7 +39,6 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= @@ -84,8 +83,8 @@ github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVK github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220816024939-bc8df83d7b9d 
h1:0xIrH2lJbraclvJT3pvTf3u2oCAL60cAqiv4qRpz4EI= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220816024939-bc8df83d7b9d/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= +github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves= +github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -207,8 +206,8 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= @@ -231,10 +230,11 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gobuffalo/flect v0.3.0 h1:erfPWM+K1rFNIQeRPdeEXxo8yFr/PO17lhRnS8FUrtk= -github.com/gobuffalo/flect v0.3.0/go.mod h1:5pf3aGnsvqvCj50AVni7mJJF8ICxGZ8HomberC3pXLE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= +github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -271,12 +271,13 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 
h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/cel-go v0.12.5 h1:DmzaiSgoaqGCjtpPQWl26/gND+yRpim56H1jCVev6d8= -github.com/google/cel-go v0.12.5/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= +github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M= +github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -291,8 +292,8 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-github/v45 v45.2.0 h1:5oRLszbrkvxDDqBCNj2hjDZMKmvexaZ1xw/FCD+K3FI= -github.com/google/go-github/v45 v45.2.0/go.mod h1:FObaZJEDSTa/WGCzZ2Z3eoCDXWJKMenWWTrd8jrta28= +github.com/google/go-github/v48 v48.2.0 h1:68puzySE6WqUY9KWmpOsDEQfDZsso98rT6pZcz9HqcE= +github.com/google/go-github/v48 v48.2.0/go.mod h1:dDlehKBDo850ZPvCTK0sEqTCVWcrGl2LcDiajkYi89Y= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= @@ -375,8 +376,8 @@ github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -413,8 +414,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.6 
h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= -github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -425,12 +426,13 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= +github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -451,7 +453,6 @@ github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI= -github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -469,7 +470,6 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod 
h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -480,16 +480,15 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.8.3 h1:RpbK1G8nWPNaCVFBWsOGnEQQGgASi6b8fxcWBvDYjxQ= -github.com/onsi/ginkgo/v2 v2.8.3/go.mod h1:6OaUA8BCi0aZfmzYT/q9AacwTzDpNbxILUT+TlBq6MY= +github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= +github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754= -github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw= +github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -499,8 +498,8 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= -github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= +github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= +github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -518,13 +517,14 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.7.1/go.mod 
h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -567,8 +567,8 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= -github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= +github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= @@ -577,8 +577,8 @@ github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3 github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= 
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -588,8 +588,8 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.13.0 h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU= -github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw= +github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -607,16 +607,17 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= -github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4xc= -github.com/valyala/fastjson v1.6.3/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= +github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= +github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/vmware-tanzu/net-operator-api v0.0.0-20210401185409-b0dc6c297707 h1:2onys8tWlQh7DFiOz6+68AwJdW9EBOEv6RTKzwh1x7A= github.com/vmware-tanzu/net-operator-api v0.0.0-20210401185409-b0dc6c297707/go.mod h1:pDB0pUiFYufuP3lUkQX9fZ67PYnKvqBpDcJN3mSrw5U= github.com/vmware-tanzu/vm-operator/api v0.0.0-20221204041501-ec03d8bb733c h1:7UAqqa9GTxMTtCmgkGWk0JdWZoZssgdbSywpIurgjC0= @@ -625,8 +626,8 @@ github.com/vmware-tanzu/vm-operator/external/ncp v0.0.0-20211209213435-0f4ab286f github.com/vmware-tanzu/vm-operator/external/ncp v0.0.0-20211209213435-0f4ab286f64f/go.mod h1:5rqRJ9zGR+KnKbkGx373WgN8xJpvAj99kHnfoDYRO5I= github.com/vmware-tanzu/vm-operator/external/tanzu-topology v0.0.0-20211209213435-0f4ab286f64f 
h1:wwYUf16/g8bLywQMQJB5VHbDtuf6aOFH24Ar2/yA7+I= github.com/vmware-tanzu/vm-operator/external/tanzu-topology v0.0.0-20211209213435-0f4ab286f64f/go.mod h1:dfYrWS8DMRN+XZfhu8M4LVHmeGvYB29Ipd7j4uIq+mU= -github.com/vmware/govmomi v0.30.2 h1:zPMmLTtAfBgOVsTgwKOzVVahQIOC4A2oyFQFSsn/0ag= -github.com/vmware/govmomi v0.30.2/go.mod h1:F7adsVewLNHsW/IIm7ziFURaXDaHEwcc+ym4r3INMdY= +github.com/vmware/govmomi v0.30.4 h1:BCKLoTmiBYRuplv3GxKEMBLtBaJm8PA56vo9bddIpYQ= +github.com/vmware/govmomi v0.30.4/go.mod h1:F7adsVewLNHsW/IIm7ziFURaXDaHEwcc+ym4r3INMdY= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= @@ -653,7 +654,7 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= @@ -661,8 +662,8 @@ go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95a go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.22.0 h1:Zcye5DUgBloQ9BaT4qc9BnjOFog5TvBSAGkJ3Nf70c0= -go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -679,8 +680,8 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp 
v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -718,8 +719,8 @@ golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hM golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -771,8 +772,8 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -784,8 +785,8 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 h1:lxqLZaMad/dJHMFZH0NiNpiEZI/nhgWhe4wgzpE+MuA= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -856,7 +857,6 @@ golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -865,15 +865,15 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -885,16 +885,16 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 h1:ftMN5LMiBFjbzleLqtoBZk7KdJwhuybIU+FckUHgoyQ= -golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -955,8 +955,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1030,8 +1030,8 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc h1:Nf+EdcTLHR8qDNN/KfkQL0u0ssxt9OhbaWCl5C0ucEI= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1087,7 +1087,6 @@ gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= @@ -1120,29 +1119,29 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= -k8s.io/api v0.25.0 h1:H+Q4ma2U/ww0iGB78ijZx6DRByPz6/733jIuFpX70e0= -k8s.io/api v0.25.0/go.mod h1:ttceV1GyV1i1rnmvzT3BST08N6nGt+dudGrquzVQWPk= +k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= +k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg= k8s.io/apiextensions-apiserver v0.21.1/go.mod h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA= -k8s.io/apiextensions-apiserver v0.25.0 h1:CJ9zlyXAbq0FIW8CD7HHyozCMBpDSiH7EdrSTCZcZFY= -k8s.io/apiextensions-apiserver v0.25.0/go.mod h1:3pAjZiN4zw7R8aZC5gR0y3/vCkGlAjCazcg1me8iB/E= +k8s.io/apiextensions-apiserver v0.26.1 h1:cB8h1SRk6e/+i3NOrQgSFij1B2S0Y0wDoNl66bn8RMI= +k8s.io/apiextensions-apiserver v0.26.1/go.mod h1:AptjOSXDGuE0JICx/Em15PaoO7buLwTs0dGleIHixSM= k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/apimachinery v0.25.0 h1:MlP0r6+3XbkUG2itd6vp3oxbtdQLQI94fD5gCS+gnoU= -k8s.io/apimachinery v0.25.0/go.mod h1:qMx9eAk0sZQGsXGu86fab8tZdffHbwUfsvzqKn4mfB0= +k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= +k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= -k8s.io/apiserver v0.25.0 h1:8kl2ifbNffD440MyvHtPaIz1mw4mGKVgWqM0nL+oyu4= -k8s.io/apiserver v0.25.0/go.mod h1:BKwsE+PTC+aZK+6OJQDPr0v6uS91/HWxX7evElAH6xo= +k8s.io/apiserver v0.26.1 h1:6vmnAqCDO194SVCPU3MU8NcDgSqsUA62tBUSWrFXhsc= +k8s.io/apiserver v0.26.1/go.mod h1:wr75z634Cv+sifswE9HlAo5FQ7UoUauIICRlOE+5dCg= k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI= k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= -k8s.io/client-go v0.25.0 h1:CVWIaCETLMBNiTUta3d5nzRbXvY5Hy9Dpl+VvREpu5E= -k8s.io/client-go v0.25.0/go.mod h1:lxykvypVfKilxhTklov0wz1FoaUZ8X4EwbhS6rpRfN8= +k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU= +k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE= k8s.io/cluster-bootstrap v0.25.0 h1:KJ2/r0dV+bLfTK5EBobAVKvjGel3N4Qqh3bvnzh9qPk= k8s.io/cluster-bootstrap v0.25.0/go.mod h1:x/TCtY3EiuR/rODkA3SvVQT3uSssQLf9cXcmSjdDTe0= k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= -k8s.io/component-base v0.25.0 h1:haVKlLkPCFZhkcqB6WCvpVxftrg6+FK5x1ZuaIDaQ5Y= -k8s.io/component-base v0.25.0/go.mod h1:F2Sumv9CnbBlqrpdf7rKZTmmd2meJq0HizeyY/yAFxk= +k8s.io/component-base v0.26.1 h1:4ahudpeQXHZL5kko+iDHqLj/FSGAEUnSVO0EBbgDd+4= +k8s.io/component-base v0.26.1/go.mod h1:VHrLR0b58oC035w6YQiBSbtsf0ThuSwXP+p5dD/kAWU= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo 
v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -1156,25 +1155,25 @@ k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= -k8s.io/kube-openapi v0.0.0-20220803164354-a70c9af30aea h1:3QOH5+2fGsY8e1qf+GIFpg+zw/JGNrgyZRQR7/m6uWg= -k8s.io/kube-openapi v0.0.0-20220803164354-a70c9af30aea/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200821003339-5e75c0163111/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220823124924-e9cbc92d1a73 h1:H9TCJUUx+2VA0ZiD9lvtaX8fthFsMoD+Izn93E/hm8U= -k8s.io/utils v0.0.0-20220823124924-e9cbc92d1a73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= +k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/cluster-api v1.3.3 h1:sHRAbuev6+bz3OAySmdmT62md/D/UUIy0EYwvP38H/4= -sigs.k8s.io/cluster-api v1.3.3/go.mod h1:nnXmR51rHshpMEXmB4LZIwdiXWKXV6yaooB1KzrL0Qs= -sigs.k8s.io/cluster-api/test v1.3.5 h1:+uY+oMwIK5NOH1S/VPCl5gl9TuUBRRdr/qiji4r1B7I= -sigs.k8s.io/cluster-api/test v1.3.5/go.mod h1:oRc0fAaj3ZPMuV0GTRQbseZ6IrNyDUB8ReKil5lkAtw= +sigs.k8s.io/cluster-api v1.4.1 h1:GtA7OJGhLvgJMgEIxKIoGLxXezM3THI/Yi10QpQ0EN4= +sigs.k8s.io/cluster-api v1.4.1/go.mod h1:IIebZTsqyXU8CHbINV2zuMh0/wykqdr+vEXxQNeteEU= +sigs.k8s.io/cluster-api/test v1.4.1 h1:4ezS3IVGqL+GmsPx4gzo2e4hs2GOrMBsgch+RQPBEWs= +sigs.k8s.io/cluster-api/test v1.4.1/go.mod h1:RHazilXiXNuRYYh/aiX6ZvpUaXOzwNjmaEou/TltSOs= sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8= -sigs.k8s.io/controller-runtime v0.13.1 h1:tUsRCSJVM1QQOOeViGeX3GMT3dQF1eePPw6sEE3xSlg= -sigs.k8s.io/controller-runtime v0.13.1/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI= +sigs.k8s.io/controller-runtime v0.14.5 h1:6xaWFqzT5KuAQ9ufgUaj1G/+C4Y1GRkhrxl+BJ9i+5s= +sigs.k8s.io/controller-runtime v0.14.5/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= 
sigs.k8s.io/kind v0.17.0 h1:CScmGz/wX66puA06Gj8OZb76Wmk7JIjgWf5JDvY7msM= diff --git a/hack/ensure-golangci-lint.sh b/hack/ensure-golangci-lint.sh new file mode 100755 index 0000000000..fef55ba8cb --- /dev/null +++ b/hack/ensure-golangci-lint.sh @@ -0,0 +1,427 @@ +#!/usr/bin/env bash + +# Copyright 2021 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# NOTE: This script is copied from https://raw.githubusercontent.com/golangci/golangci-lint/main/install.sh. + +set -e + +if [[ "${TRACE-0}" == "1" ]]; then + set -o xtrace +fi + +usage() { + this=$1 + cat </dev/null +} +echoerr() { + echo "$@" 1>&2 +} +log_prefix() { + # Invoked indirectly + # shellcheck disable=SC2317 + echo "$0" +} +_logp=6 +log_set_priority() { + _logp="$1" +} +log_priority() { + if test -z "$1"; then + echo "$_logp" + return + fi + [ "$1" -le "$_logp" ] +} +log_tag() { + case $1 in + 0) echo "emerg" ;; + 1) echo "alert" ;; + 2) echo "crit" ;; + 3) echo "err" ;; + 4) echo "warning" ;; + 5) echo "notice" ;; + 6) echo "info" ;; + 7) echo "debug" ;; + *) echo "$1" ;; + esac +} +log_debug() { + log_priority 7 || return 0 + echoerr "$(log_prefix)" "$(log_tag 7)" "$@" +} +log_info() { + log_priority 6 || return 0 + echoerr "$(log_prefix)" "$(log_tag 6)" "$@" +} +log_err() { + log_priority 3 || return 0 + echoerr "$(log_prefix)" "$(log_tag 3)" "$@" +} +log_crit() { + log_priority 2 || return 0 + echoerr "$(log_prefix)" "$(log_tag 2)" "$@" +} +uname_os() { + os=$(uname -s | tr '[:upper:]' '[:lower:]') + case "$os" in + cygwin_nt*) os="windows" ;; + mingw*) os="windows" ;; + msys_nt*) os="windows" ;; + esac + echo "$os" +} +uname_arch() { + arch=$(uname -m) + case $arch in + x86_64) arch="amd64" ;; + x86) arch="386" ;; + i686) arch="386" ;; + i386) arch="386" ;; + aarch64) arch="arm64" ;; + armv5*) arch="armv5" ;; + armv6*) arch="armv6" ;; + armv7*) arch="armv7" ;; + esac + echo "${arch}" +} +uname_os_check() { + os=$(uname_os) + case "$os" in + darwin) return 0 ;; + dragonfly) return 0 ;; + freebsd) return 0 ;; + linux) return 0 ;; + android) return 0 ;; + nacl) return 0 ;; + netbsd) return 0 ;; + openbsd) return 0 ;; + plan9) return 0 ;; + solaris) return 0 ;; + windows) return 0 ;; + esac + log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. Please file bug at https://github.com/client9/shlib" + return 1 +} +uname_arch_check() { + arch=$(uname_arch) + case "$arch" in + 386) return 0 ;; + amd64) return 0 ;; + arm64) return 0 ;; + armv5) return 0 ;; + armv6) return 0 ;; + armv7) return 0 ;; + ppc64) return 0 ;; + ppc64le) return 0 ;; + mips) return 0 ;; + mipsle) return 0 ;; + mips64) return 0 ;; + mips64le) return 0 ;; + s390x) return 0 ;; + amd64p32) return 0 ;; + esac + log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. 
Please file bug report at https://github.com/client9/shlib" + return 1 +} +untar() { + tarball=$1 + case "${tarball}" in + *.tar.gz | *.tgz) tar --no-same-owner -xzf "${tarball}" ;; + *.tar) tar --no-same-owner -xf "${tarball}" ;; + *.zip) unzip "${tarball}" ;; + *) + log_err "untar unknown archive format for ${tarball}" + return 1 + ;; + esac +} +http_download_curl() { + local_file=$1 + source_url=$2 + header=$3 + if [ -z "$header" ]; then + code=$(curl -w '%{http_code}' -sL -o "$local_file" "$source_url") + else + code=$(curl -w '%{http_code}' -sL -H "$header" -o "$local_file" "$source_url") + fi + if [ "$code" != "200" ]; then + log_debug "http_download_curl received HTTP status $code" + return 1 + fi + return 0 +} +http_download_wget() { + local_file=$1 + source_url=$2 + header=$3 + if [ -z "$header" ]; then + wget -q -O "$local_file" "$source_url" + else + wget -q --header "$header" -O "$local_file" "$source_url" + fi +} +http_download() { + log_debug "http_download $2" + if is_command curl; then + http_download_curl "$@" + return + elif is_command wget; then + http_download_wget "$@" + return + fi + log_crit "http_download unable to find wget or curl" + return 1 +} +http_copy() { + tmp=$(mktemp) + http_download "${tmp}" "$1" "$2" || return 1 + body=$(cat "$tmp") + rm -f "${tmp}" + echo "$body" +} +github_release() { + owner_repo=$1 + version=$2 + test -z "$version" && version="latest" + giturl="https://github.com/${owner_repo}/releases/${version}" + json=$(http_copy "$giturl" "Accept:application/json") + test -z "$json" && return 1 + version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//') + test -z "$version" && return 1 + echo "$version" +} +hash_sha256() { + TARGET=${1:-/dev/stdin} + if is_command gsha256sum; then + hash=$(gsha256sum "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command sha256sum; then + hash=$(sha256sum "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command shasum; then + hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command openssl; then + # openssl dgst prints "SHA256(file)= <hash>", so the hash is field 2 + hash=$(openssl dgst -sha256 "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f 2 + else + log_crit "hash_sha256 unable to find command to compute sha-256 hash" + return 1 + fi +} +hash_sha256_verify() { + TARGET=$1 + checksums=$2 + if [ -z "$checksums" ]; then + log_err "hash_sha256_verify checksum file not specified in arg2" + return 1 + fi + BASENAME=${TARGET##*/} + want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1) + if [ -z "$want" ]; then + log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'" + return 1 + fi + got=$(hash_sha256 "$TARGET") + if [ "$want" != "$got" ]; then + log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got" + return 1 + fi +} +cat /dev/null < 0 { + state[ipamDeviceConfig.MACAddress] = infrav1.NetworkDeviceSpec{ + IPAddrs: prefixesAsStrings(addressWithPrefixes), + Gateway4: ipamDeviceConfig.IPAMConfigGateway4, + Gateway6: ipamDeviceConfig.IPAMConfigGateway6, + } + } + } + + if len(errs) > 0 { + var msgs []string + for _, err := range errs { + msgs = append(msgs, err.Error()) + } + msg := strings.Join(msgs, "\n") + return state, errors.New(msg) + } + return state, nil +} + +// buildIPAMDeviceConfigs checks that all the IPAddressClaims have been satisfied.
+// If each IPAddressClaim has an associated IPAddress, a slice of ipamDeviceConfig +// is returned, one for each device with addressesFromPools. +// If any of the IPAddressClaims do not have an associated IPAddress yet, +// a custom error is returned. +func buildIPAMDeviceConfigs(ctx context.VMContext, networkStatus []infrav1.NetworkStatus) ([]ipamDeviceConfig, error) { + boundClaims, totalClaims := 0, 0 + ipamDeviceConfigs := []ipamDeviceConfig{} + for devIdx, networkSpecDevice := range ctx.VSphereVM.Spec.Network.Devices { + if len(networkStatus) == 0 || + len(networkStatus) <= devIdx || + networkStatus[devIdx].MACAddr == "" { + return ipamDeviceConfigs, errors.New("waiting for devices to have MAC address set") + } + + ipamDeviceConfig := ipamDeviceConfig{ + IPAMAddresses: []*ipamv1.IPAddress{}, + MACAddress: networkStatus[devIdx].MACAddr, + NetworkSpecGateway4: networkSpecDevice.Gateway4, + NetworkSpecGateway6: networkSpecDevice.Gateway6, + DeviceIndex: devIdx, + } + + for poolRefIdx := range networkSpecDevice.AddressesFromPools { + totalClaims++ + ipAddrClaimName := util.IPAddressClaimName(ctx.VSphereVM.Name, ipamDeviceConfig.DeviceIndex, poolRefIdx) + ipAddrClaim, err := getIPAddrClaim(ctx, ipAddrClaimName) + if err != nil { + ctx.Logger.Error(err, "error fetching IPAddressClaim", "name", ipAddrClaimName) + if apierrors.IsNotFound(err) { + // This is not expected: a find-or-create for this claim ran in a previous step. + continue + } + return nil, err + } + + ctx.Logger.V(5).Info("fetched IPAddressClaim", "name", ipAddrClaimName, "namespace", ctx.VSphereVM.Namespace) + ipAddrName := ipAddrClaim.Status.AddressRef.Name + if ipAddrName == "" { + ctx.Logger.V(5).Info("IPAddress not yet bound to IPAddressClaim", "name", ipAddrClaimName, "namespace", ctx.VSphereVM.Namespace) + continue + } + + ipAddr := &ipamv1.IPAddress{} + ipAddrKey := apitypes.NamespacedName{ + Namespace: ctx.VSphereVM.Namespace, + Name: ipAddrName, + } + if err := ctx.Client.Get(ctx, ipAddrKey, ipAddr); err != nil { + // Because the ref was set on the claim, this error is not expected to occur. + return nil, err + } + ipamDeviceConfig.IPAMAddresses = append(ipamDeviceConfig.IPAMAddresses, ipAddr) + boundClaims++ + } + ipamDeviceConfigs = append(ipamDeviceConfigs, ipamDeviceConfig) + } + if boundClaims < totalClaims { + ctx.Logger.Info("waiting for ip address claims to be bound", + "total claims", totalClaims, + "claims bound", boundClaims) + return nil, ErrWaitingForIPAddr + } + return ipamDeviceConfigs, nil +} + +// getIPAddrClaim fetches an IPAddressClaim from the API with the given name. +func getIPAddrClaim(ctx context.VMContext, ipAddrClaimName string) (*ipamv1.IPAddressClaim, error) { + ipAddrClaim := &ipamv1.IPAddressClaim{} + ipAddrClaimKey := apitypes.NamespacedName{ + Namespace: ctx.VSphereVM.Namespace, + Name: ipAddrClaimName, + } + + ctx.Logger.V(5).Info("fetching IPAddressClaim", "name", ipAddrClaimKey.String()) + if err := ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim); err != nil { + return nil, err + } + return ipAddrClaim, nil +} diff --git a/pkg/services/govmomi/ipam/status_test.go b/pkg/services/govmomi/ipam/status_test.go new file mode 100644 index 0000000000..e4728d1c85 --- /dev/null +++ b/pkg/services/govmomi/ipam/status_test.go @@ -0,0 +1,854 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipam + +import ( + "testing" + + "github.com/onsi/gomega" + "gopkg.in/yaml.v2" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apitypes "k8s.io/apimachinery/pkg/types" + ipamv1a1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1alpha1" + + infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/fake" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" +) + +var ( + devMAC = "0:0:0:0:a" + myAPIGroup = "my-pool-api-group" +) + +func Test_buildIPAMDeviceConfigs(t *testing.T) { + var ( + ctx context.VMContext + networkStatus []infrav1.NetworkStatus + claim1, claim2, claim3 *ipamv1a1.IPAddressClaim + address1, address2, address3 *ipamv1a1.IPAddress + g *gomega.WithT + ) + + before := func() { + ctx = *fake.NewVMContext(fake.NewControllerContext(fake.NewControllerManagerContext())) + networkStatus = []infrav1.NetworkStatus{ + {Connected: true, MACAddr: devMAC}, + } + + g = gomega.NewWithT(t) + namespace := "my-namespace" + + claim1 = &ipamv1a1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-0-0", + Namespace: namespace, + }, + } + + claim2 = &ipamv1a1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-0-1", + Namespace: namespace, + }, + } + + claim3 = &ipamv1a1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-0-2", + Namespace: namespace, + }, + } + + address1 = &ipamv1a1.IPAddress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-0-0-address0", + Namespace: namespace, + }, + } + address2 = &ipamv1a1.IPAddress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-0-1-address1", + Namespace: namespace, + }, + } + + address3 = &ipamv1a1.IPAddress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-0-2-address2", + Namespace: namespace, + }, + } + } + + t.Run("when a device has a IPAddressPool", func(_ *testing.T) { + before() + ctx.VSphereVM = &infrav1.VSphereVM{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1", + Namespace: "my-namespace", + }, + Spec: infrav1.VSphereVMSpec{ + VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ + Network: infrav1.NetworkSpec{ + Devices: []infrav1.NetworkDeviceSpec{ + { + MACAddr: devMAC, + AddressesFromPools: []corev1.TypedLocalObjectReference{ + { + APIGroup: &myAPIGroup, + Name: "my-pool-1", + Kind: "my-pool-kind", + }, + { + APIGroup: &myAPIGroup, + Name: "my-pool-1", + Kind: "my-pool-kind", + }, + { + APIGroup: &myAPIGroup, + Name: "my-pool-ipv6", + Kind: "my-pool-kind", + }, + }, + }, + }, + }, + }, + }, + } + + // Creates ip address claims + g.Expect(ctx.Client.Create(ctx, claim1)).NotTo(gomega.HaveOccurred()) + g.Expect(ctx.Client.Create(ctx, claim2)).NotTo(gomega.HaveOccurred()) + g.Expect(ctx.Client.Create(ctx, claim3)).NotTo(gomega.HaveOccurred()) + + // IP provider has not provided Addresses yet + _, err := buildIPAMDeviceConfigs(ctx, networkStatus) + g.Expect(err).To(gomega.Equal(ErrWaitingForIPAddr)) + + // Simulate IP provider reconciling one claim + g.Expect(ctx.Client.Create(ctx, address3)).NotTo(gomega.HaveOccurred()) + 
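+ // No IPAM provider controller runs against the fake client used by these tests,
+ // so the test plays the provider's part: it binds the claim by setting its
+ // Status.AddressRef by hand below.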
+ ipAddrClaim := &ipamv1a1.IPAddressClaim{} + ipAddrClaimKey := apitypes.NamespacedName{ + Namespace: ctx.VSphereVM.Namespace, + Name: "vsphereVM1-0-2", + } + g.Expect(ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim)).NotTo(gomega.HaveOccurred()) + ipAddrClaim.Status.AddressRef.Name = "vsphereVM1-0-2-address2" + g.Expect(ctx.Client.Update(ctx, ipAddrClaim)).NotTo(gomega.HaveOccurred()) + + // Only the last claim has been bound + _, err = buildIPAMDeviceConfigs(ctx, networkStatus) + g.Expect(err).To(gomega.Equal(ErrWaitingForIPAddr)) + + // Simulate IP provider reconciling remaining claims + g.Expect(ctx.Client.Create(ctx, address1)).NotTo(gomega.HaveOccurred()) + g.Expect(ctx.Client.Create(ctx, address2)).NotTo(gomega.HaveOccurred()) + + ipAddrClaimKey = apitypes.NamespacedName{ + Namespace: ctx.VSphereVM.Namespace, + Name: "vsphereVM1-0-0", + } + g.Expect(ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim)).NotTo(gomega.HaveOccurred()) + ipAddrClaim.Status.AddressRef.Name = "vsphereVM1-0-0-address0" + g.Expect(ctx.Client.Update(ctx, ipAddrClaim)).NotTo(gomega.HaveOccurred()) + + ipAddrClaimKey = apitypes.NamespacedName{ + Namespace: ctx.VSphereVM.Namespace, + Name: "vsphereVM1-0-1", + } + g.Expect(ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim)).NotTo(gomega.HaveOccurred()) + ipAddrClaim.Status.AddressRef.Name = "vsphereVM1-0-1-address1" + g.Expect(ctx.Client.Update(ctx, ipAddrClaim)).NotTo(gomega.HaveOccurred()) + + // Now that claims are fulfilled, reconciling should update + // ipAddrs on network spec + configs, err := buildIPAMDeviceConfigs(ctx, networkStatus) + g.Expect(err).NotTo(gomega.HaveOccurred()) + g.Expect(configs).To(gomega.HaveLen(1)) + + config := configs[0] + g.Expect(config.MACAddress).To(gomega.Equal(devMAC)) + g.Expect(config.DeviceIndex).To(gomega.Equal(0)) + g.Expect(config.IPAMAddresses).To(gomega.HaveLen(3)) + }) + + t.Run("when a device has no pools", func(_ *testing.T) { + before() + ctx.VSphereVM = &infrav1.VSphereVM{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1", + Namespace: "my-namespace", + }, + Spec: infrav1.VSphereVMSpec{ + VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ + Network: infrav1.NetworkSpec{ + Devices: []infrav1.NetworkDeviceSpec{ + { + MACAddr: devMAC, + DHCP4: true, + }, + }, + }, + }, + }, + } + + // A device without pools should yield a config with no IPAM addresses + config, err := buildIPAMDeviceConfigs(ctx, networkStatus) + g.Expect(err).NotTo(gomega.HaveOccurred()) + g.Expect(config[0].IPAMAddresses).To(gomega.HaveLen(0)) + }) +} + +func Test_BuildState(t *testing.T) { + var ( + ctx context.VMContext + networkStatus []infrav1.NetworkStatus + claim1, claim2, claim3 *ipamv1a1.IPAddressClaim + address1, address2, address3 *ipamv1a1.IPAddress + g *gomega.WithT + ) + type nameservers struct { + Addresses []string `json:"addresses"` + } + type ethernet struct { + Addresses []string `json:"addresses"` + DHCP4 bool `json:"dhcp4"` + DHCP6 bool `json:"dhcp6"` + Gateway4 string `json:"gateway4"` + Match map[string]string `json:"match"` + Nameservers nameservers `json:"nameservers"` + } + type network struct { + Ethernets map[string]ethernet `json:"ethernets"` + } + type vmMetadata struct { + Network network `json:"network"` + } + + before := func() { + ctx = *fake.NewVMContext(fake.NewControllerContext(fake.NewControllerManagerContext())) + networkStatus = []infrav1.NetworkStatus{ + {Connected: true, MACAddr: devMAC}, + } + + g = gomega.NewWithT(t) + + claim1 = &ipamv1a1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-0-0", + 
Namespace: "my-namespace", + }, + } + + claim2 = &ipamv1a1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-0-1", + Namespace: "my-namespace", + }, + } + + claim3 = &ipamv1a1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-0-2", + Namespace: "my-namespace", + }, + } + + address1 = &ipamv1a1.IPAddress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-0-0-address0", + Namespace: "my-namespace", + }, + Spec: ipamv1a1.IPAddressSpec{ + Address: "10.0.0.50", + Prefix: 24, + Gateway: "10.0.0.1", + }, + } + address2 = &ipamv1a1.IPAddress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-0-1-address1", + Namespace: "my-namespace", + }, + Spec: ipamv1a1.IPAddressSpec{ + Address: "10.0.1.50", + Prefix: 30, + Gateway: "10.0.0.1", + }, + } + + address3 = &ipamv1a1.IPAddress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-0-2-address2", + Namespace: "my-namespace", + }, + Spec: ipamv1a1.IPAddressSpec{ + Address: "fe80::cccc:12", + Prefix: 64, + Gateway: "fe80::cccc:1", + }, + } + } + + t.Run("when a device has a IPAddressPool", func(_ *testing.T) { + before() + ctx.VSphereVM = &infrav1.VSphereVM{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1", + Namespace: "my-namespace", + }, + Spec: infrav1.VSphereVMSpec{ + VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ + Network: infrav1.NetworkSpec{ + Devices: []infrav1.NetworkDeviceSpec{ + { + MACAddr: devMAC, + AddressesFromPools: []corev1.TypedLocalObjectReference{ + { + APIGroup: &myAPIGroup, + Name: "my-pool-1", + Kind: "my-pool-kind", + }, + { + APIGroup: &myAPIGroup, + Name: "my-pool-1", + Kind: "my-pool-kind", + }, + { + APIGroup: &myAPIGroup, + Name: "my-pool-ipv6", + Kind: "my-pool-kind", + }, + }, + }, + }, + }, + }, + }, + } + + // Creates ip address claims + g.Expect(ctx.Client.Create(ctx, claim1)).NotTo(gomega.HaveOccurred()) + g.Expect(ctx.Client.Create(ctx, claim2)).NotTo(gomega.HaveOccurred()) + g.Expect(ctx.Client.Create(ctx, claim3)).NotTo(gomega.HaveOccurred()) + + // IP provider has not provided Addresses yet + _, err := BuildState(ctx, networkStatus) + g.Expect(err).To(gomega.Equal(ErrWaitingForIPAddr)) + + // Simulate IP provider reconciling one claim + g.Expect(ctx.Client.Create(ctx, address3)).NotTo(gomega.HaveOccurred()) + + ipAddrClaim := &ipamv1a1.IPAddressClaim{} + ipAddrClaimKey := apitypes.NamespacedName{ + Namespace: ctx.VSphereVM.Namespace, + Name: "vsphereVM1-0-2", + } + g.Expect(ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim)).NotTo(gomega.HaveOccurred()) + ipAddrClaim.Status.AddressRef.Name = "vsphereVM1-0-2-address2" + g.Expect(ctx.Client.Update(ctx, ipAddrClaim)).NotTo(gomega.HaveOccurred()) + + // Only the last claim has been bound + _, err = BuildState(ctx, networkStatus) + g.Expect(err).To(gomega.Equal(ErrWaitingForIPAddr)) + + // Simulate IP provider reconciling remaining claims + g.Expect(ctx.Client.Create(ctx, address1)).NotTo(gomega.HaveOccurred()) + g.Expect(ctx.Client.Create(ctx, address2)).NotTo(gomega.HaveOccurred()) + + ipAddrClaimKey = apitypes.NamespacedName{ + Namespace: ctx.VSphereVM.Namespace, + Name: "vsphereVM1-0-0", + } + g.Expect(ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim)).NotTo(gomega.HaveOccurred()) + ipAddrClaim.Status.AddressRef.Name = "vsphereVM1-0-0-address0" + g.Expect(ctx.Client.Update(ctx, ipAddrClaim)).NotTo(gomega.HaveOccurred()) + + ipAddrClaimKey = apitypes.NamespacedName{ + Namespace: ctx.VSphereVM.Namespace, + Name: "vsphereVM1-0-1", + } + g.Expect(ctx.Client.Get(ctx, ipAddrClaimKey, 
ipAddrClaim)).NotTo(gomega.HaveOccurred()) + ipAddrClaim.Status.AddressRef.Name = "vsphereVM1-0-1-address1" + g.Expect(ctx.Client.Update(ctx, ipAddrClaim)).NotTo(gomega.HaveOccurred()) + + // Now that claims are fulfilled, reconciling should update + // ipAddrs on network spec + state, err := BuildState(ctx, networkStatus) + g.Expect(err).NotTo(gomega.HaveOccurred()) + g.Expect(state).To(gomega.HaveLen(1)) + + g.Expect(state[devMAC].IPAddrs).To(gomega.HaveLen(3)) + g.Expect(state[devMAC].IPAddrs[0]).To(gomega.Equal("10.0.0.50/24")) + g.Expect(state[devMAC].Gateway4).To(gomega.Equal("10.0.0.1")) + g.Expect(state[devMAC].IPAddrs[1]).To(gomega.Equal("10.0.1.50/30")) + g.Expect(state[devMAC].Gateway4).To(gomega.Equal("10.0.0.1")) + g.Expect(state[devMAC].IPAddrs[2]).To(gomega.Equal("fe80::cccc:12/64")) + g.Expect(state[devMAC].Gateway6).To(gomega.Equal("fe80::cccc:1")) + }) + + t.Run("when a device has no pools", func(_ *testing.T) { + before() + ctx.VSphereVM = &infrav1.VSphereVM{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1", + Namespace: "my-namespace", + }, + Spec: infrav1.VSphereVMSpec{ + VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ + Network: infrav1.NetworkSpec{ + Devices: []infrav1.NetworkDeviceSpec{ + { + MACAddr: devMAC, + DHCP4: true, + }, + }, + }, + }, + }, + } + + state, err := BuildState(ctx, networkStatus) + g.Expect(err).NotTo(gomega.HaveOccurred()) + g.Expect(state).To(gomega.HaveLen(0)) + }) + + t.Run("when one device has no pool and is DHCP true, and one device has a IPAddressPool", func(_ *testing.T) { + before() + devMAC0 := "0:0:0:0:a" + devMAC1 := "0:0:0:0:b" + + claim := &ipamv1a1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-1-0", + Namespace: "my-namespace", + }, + } + address := &ipamv1a1.IPAddress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-1-0-address", + Namespace: "my-namespace", + }, + Spec: ipamv1a1.IPAddressSpec{ + Address: "10.0.0.50", + Prefix: 24, + Gateway: "10.0.0.1", + }, + } + + ctx.VSphereVM = &infrav1.VSphereVM{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1", + Namespace: "my-namespace", + }, + Spec: infrav1.VSphereVMSpec{ + VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ + Network: infrav1.NetworkSpec{ + Devices: []infrav1.NetworkDeviceSpec{ + { + DHCP4: true, + }, + { + AddressesFromPools: []corev1.TypedLocalObjectReference{ + { + APIGroup: &myAPIGroup, + Name: "my-pool-1", + Kind: "my-pool-kind", + }, + }, + Nameservers: []string{"1.1.1.1"}, + }, + }, + }, + }, + }, + } + + networkStatus = []infrav1.NetworkStatus{ + {Connected: true}, + {Connected: true}, + } + + // Creates ip address claims + g.Expect(ctx.Client.Create(ctx, claim)).NotTo(gomega.HaveOccurred()) + + // VSphere has not yet assigned MAC addresses to the machine's devices + _, err := BuildState(ctx, networkStatus) + g.Expect(err).To(gomega.MatchError("waiting for devices to have MAC address set")) + + networkStatus = []infrav1.NetworkStatus{ + {Connected: true, MACAddr: devMAC0}, + {Connected: true, MACAddr: devMAC1}, + } + + // IP provider has not provided Addresses yet + _, err = BuildState(ctx, networkStatus) + g.Expect(err).To(gomega.MatchError("waiting for IP address claims to be bound")) + + // Simulate IP provider reconciling one claim + g.Expect(ctx.Client.Create(ctx, address)).NotTo(gomega.HaveOccurred()) + + ipAddrClaim := &ipamv1a1.IPAddressClaim{} + ipAddrClaimKey := apitypes.NamespacedName{ + Namespace: ctx.VSphereVM.Namespace, + Name: "vsphereVM1-1-0", + } + g.Expect(ctx.Client.Get(ctx, 
ipAddrClaimKey, ipAddrClaim)).NotTo(gomega.HaveOccurred()) + + ipAddrClaim.Status.AddressRef.Name = "vsphereVM1-1-0-address" + g.Expect(ctx.Client.Update(ctx, ipAddrClaim)).NotTo(gomega.HaveOccurred()) + + // Now that claims are fulfilled, reconciling should update + // ipAddrs on network spec + ipamState, err := BuildState(ctx, networkStatus) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + g.Expect(ipamState).To(gomega.HaveLen(1)) + + _, found := ipamState[devMAC0] + g.Expect(found).To(gomega.BeFalse()) + + g.Expect(ipamState[devMAC1].IPAddrs).To(gomega.HaveLen(1)) + g.Expect(ipamState[devMAC1].IPAddrs[0]).To(gomega.Equal("10.0.0.50/24")) + g.Expect(ipamState[devMAC1].Gateway4).To(gomega.Equal("10.0.0.1")) + + // Compute the new metadata from the context to see if the addresses are rendered correctly + metadataBytes, err := util.GetMachineMetadata(ctx.VSphereVM.Name, *ctx.VSphereVM, ipamState, networkStatus...) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + metadata := vmMetadata{} + g.Expect(yaml.Unmarshal(metadataBytes, &metadata)).To(gomega.Succeed()) + + g.Expect(metadata.Network.Ethernets["id0"].Addresses).To(gomega.BeNil()) + g.Expect(metadata.Network.Ethernets["id0"].DHCP4).To(gomega.BeTrue()) + + g.Expect(metadata.Network.Ethernets["id1"].Addresses).To(gomega.ConsistOf("10.0.0.50/24")) + g.Expect(metadata.Network.Ethernets["id1"].DHCP4).To(gomega.BeFalse()) + g.Expect(metadata.Network.Ethernets["id1"].Gateway4).To(gomega.Equal("10.0.0.1")) + g.Expect(metadata.Network.Ethernets["id1"].Nameservers.Addresses).To(gomega.ConsistOf("1.1.1.1")) + }) + + t.Run("when realized IP addresses are incorrect", func(t *testing.T) { + var ( + devMAC0 = "0:0:0:0:a" + devMAC1 = "0:0:0:0:b" + ) + + beforeWithClaimsAndAddressCreated := func() { + before() + + claim1 = &ipamv1a1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-0-0", + Namespace: "my-namespace", + }, + Status: ipamv1a1.IPAddressClaimStatus{ + AddressRef: corev1.LocalObjectReference{ + Name: "vsphereVM1-0-0", + }, + }, + } + + claim2 = &ipamv1a1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-0-1", + Namespace: "my-namespace", + }, + Status: ipamv1a1.IPAddressClaimStatus{ + AddressRef: corev1.LocalObjectReference{ + Name: "vsphereVM1-0-1", + }, + }, + } + + claim3 = &ipamv1a1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-1-0", + Namespace: "my-namespace", + }, + Status: ipamv1a1.IPAddressClaimStatus{ + AddressRef: corev1.LocalObjectReference{ + Name: "vsphereVM1-1-0", + }, + }, + } + + address1 = &ipamv1a1.IPAddress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-0-0", + Namespace: "my-namespace", + }, + Spec: ipamv1a1.IPAddressSpec{ + Address: "10.0.1.50", + Prefix: 24, + Gateway: "10.0.0.1", + }, + } + + address2 = &ipamv1a1.IPAddress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-0-1", + Namespace: "my-namespace", + }, + Spec: ipamv1a1.IPAddressSpec{ + Address: "10.0.1.51", + Prefix: 24, + Gateway: "10.0.0.1", + }, + } + + address3 = &ipamv1a1.IPAddress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1-1-0", + Namespace: "my-namespace", + }, + Spec: ipamv1a1.IPAddressSpec{ + Address: "11.0.1.50", + Prefix: 24, + Gateway: "11.0.0.1", + }, + } + + ctx.VSphereVM = &infrav1.VSphereVM{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vsphereVM1", + Namespace: "my-namespace", + }, + Spec: infrav1.VSphereVMSpec{ + VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ + Network: infrav1.NetworkSpec{ + Devices: []infrav1.NetworkDeviceSpec{ + { + MACAddr: devMAC0, 
+ AddressesFromPools: []corev1.TypedLocalObjectReference{ + { + APIGroup: &myAPIGroup, + Name: "my-pool-1", + Kind: "my-pool-kind", + }, + { + APIGroup: &myAPIGroup, + Name: "my-pool-2", + Kind: "my-pool-kind", + }, + }, + }, + { + MACAddr: devMAC1, + AddressesFromPools: []corev1.TypedLocalObjectReference{ + { + APIGroup: &myAPIGroup, + Name: "my-pool-3", + Kind: "my-pool-kind", + }, + }, + }, + }, + }, + }, + }, + } + + networkStatus = []infrav1.NetworkStatus{ + {Connected: true, MACAddr: devMAC0}, + {Connected: true, MACAddr: devMAC1}, + } + + g.Expect(ctx.Client.Create(ctx, claim1)).NotTo(gomega.HaveOccurred()) + g.Expect(ctx.Client.Create(ctx, claim2)).NotTo(gomega.HaveOccurred()) + g.Expect(ctx.Client.Create(ctx, claim3)).NotTo(gomega.HaveOccurred()) + + g.Expect(ctx.Client.Create(ctx, address1)).NotTo(gomega.HaveOccurred()) + g.Expect(ctx.Client.Create(ctx, address2)).NotTo(gomega.HaveOccurred()) + g.Expect(ctx.Client.Create(ctx, address3)).NotTo(gomega.HaveOccurred()) + } + + t.Run("when a provider assigns an IPAddress without an Address field", func(_ *testing.T) { + beforeWithClaimsAndAddressCreated() + + address1.Spec.Address = "" + g.Expect(ctx.Client.Update(ctx, address1)).NotTo(gomega.HaveOccurred()) + + _, err := BuildState(ctx, networkStatus) + g.Expect(err).To(gomega.HaveOccurred()) + g.Expect(err).To(gomega.MatchError("IPAddress my-namespace/vsphereVM1-0-0 has invalid ip address: \"/24\"")) + }) + + t.Run("when a provider assigns an IPAddress with an invalid IP in the Address field", func(_ *testing.T) { + beforeWithClaimsAndAddressCreated() + // Simulate an invalid ip address was provided: the address is not a valid ip + address1.Spec.Address = "invalid-ip" + g.Expect(ctx.Client.Update(ctx, address1)).NotTo(gomega.HaveOccurred()) + + _, err := BuildState(ctx, networkStatus) + g.Expect(err).To(gomega.HaveOccurred()) + g.Expect(err).To(gomega.MatchError("IPAddress my-namespace/vsphereVM1-0-0 has invalid ip address: \"invalid-ip/24\"")) + }) + + t.Run("when a provider assigns an IPAddress with an invalid value in the Prefix field", func(_ *testing.T) { + beforeWithClaimsAndAddressCreated() + // Simulate an invalid prefix address was provided: the prefix is out of bounds + address1.Spec.Prefix = 200 + g.Expect(ctx.Client.Update(ctx, address1)).NotTo(gomega.HaveOccurred()) + + _, err := BuildState(ctx, networkStatus) + g.Expect(err).To(gomega.HaveOccurred()) + g.Expect(err).To(gomega.MatchError("IPAddress my-namespace/vsphereVM1-0-0 has invalid ip address: \"10.0.1.50/200\"")) + }) + + t.Run("when a provider assigns an IPAddress with an invalid value in the Gateway field", func(_ *testing.T) { + beforeWithClaimsAndAddressCreated() + // Simulate an invalid gateway was provided: the gateway is an invalid ip + address1.Spec.Gateway = "invalid-gateway" + g.Expect(ctx.Client.Update(ctx, address1)).NotTo(gomega.HaveOccurred()) + + _, err := BuildState(ctx, networkStatus) + g.Expect(err).To(gomega.HaveOccurred()) + g.Expect(err).To(gomega.MatchError("IPAddress my-namespace/vsphereVM1-0-0 has invalid gateway: \"invalid-gateway\"")) + }) + + t.Run("when a provider assigns an IPAddress where the Gateway and Address fields are mismatched", func(_ *testing.T) { + beforeWithClaimsAndAddressCreated() + // Simulate mismatch address and gateways were provided + address1.Spec.Address = "10.0.1.50" + address1.Spec.Gateway = "fd01::1" + g.Expect(ctx.Client.Update(ctx, address1)).NotTo(gomega.HaveOccurred()) + + _, err := BuildState(ctx, networkStatus) + g.Expect(err).To(gomega.HaveOccurred()) + 
g.Expect(err).To(gomega.MatchError("IPAddress my-namespace/vsphereVM1-0-0 has mismatched gateway and address IP families")) + + // Simulate mismatch address and gateways were provided + address1.Spec.Address = "fd00:cccc::1" + address1.Spec.Gateway = "10.0.0.1" + g.Expect(ctx.Client.Update(ctx, address1)).NotTo(gomega.HaveOccurred()) + + _, err = BuildState(ctx, networkStatus) + g.Expect(err).To(gomega.HaveOccurred()) + g.Expect(err).To(gomega.MatchError("IPAddress my-namespace/vsphereVM1-0-0 has mismatched gateway and address IP families")) + }) + + t.Run("when there are multiple IPAddresses for a device with different Gateways", func(_ *testing.T) { + beforeWithClaimsAndAddressCreated() + // Simulate multiple gateways were provided + address1.Spec.Address = "10.0.1.50" + address1.Spec.Gateway = "10.0.0.2" + g.Expect(ctx.Client.Update(ctx, address1)).NotTo(gomega.HaveOccurred()) + address2.Spec.Address = "10.0.1.51" + address2.Spec.Gateway = "10.0.0.3" + g.Expect(ctx.Client.Update(ctx, address2)).NotTo(gomega.HaveOccurred()) + + _, err := BuildState(ctx, networkStatus) + g.Expect(err).To(gomega.HaveOccurred()) + g.Expect(err).To(gomega.MatchError("the IPv4 IPAddresses assigned to the same device (index 0) do not have the same gateway")) + + // Simulate multiple gateways were provided + address1.Spec.Address = "fd00:cccc::2" + address1.Spec.Gateway = "fd00::1" + g.Expect(ctx.Client.Update(ctx, address1)).NotTo(gomega.HaveOccurred()) + address2.Spec.Address = "fd00:cccc::3" + address2.Spec.Gateway = "fd00::2" + g.Expect(ctx.Client.Update(ctx, address2)).NotTo(gomega.HaveOccurred()) + + _, err = BuildState(ctx, networkStatus) + g.Expect(err).To(gomega.HaveOccurred()) + g.Expect(err).To(gomega.MatchError("the IPv6 IPAddresses assigned to the same device (index 0) do not have the same gateway")) + }) + + t.Run("when a user specified gateway does not match the gateway provided by IPAM", func(_ *testing.T) { + beforeWithClaimsAndAddressCreated() + + ctx.VSphereVM.Spec.VirtualMachineCloneSpec.Network.Devices[0].Gateway4 = "10.10.10.1" + ctx.VSphereVM.Spec.VirtualMachineCloneSpec.Network.Devices[0].Gateway6 = "fd00::2" + address2.Spec.Address = "fd00:cccc::1" + address2.Spec.Gateway = "fd00::1" + g.Expect(ctx.Client.Update(ctx, address2)).NotTo(gomega.HaveOccurred()) + + _, err := BuildState(ctx, networkStatus) + g.Expect(err).To(gomega.HaveOccurred()) + g.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("the IPv4 Gateway for IPAddress vsphereVM1-0-0 does not match the Gateway4 already configured on device (index 0)"))) + + // Fix the Gateway4 for dev0 + ctx.VSphereVM.Spec.VirtualMachineCloneSpec.Network.Devices[0].Gateway4 = "10.0.0.1" + _, err = BuildState(ctx, networkStatus) + g.Expect(err).To(gomega.HaveOccurred()) + g.Expect(err).To(gomega.MatchError("the IPv6 Gateway for IPAddress vsphereVM1-0-1 does not match the Gateway6 already configured on device (index 0)")) + }) + + t.Run("when there are multiple IPAM ip configuration issues on one vm, it notes all of the problems", func(_ *testing.T) { + beforeWithClaimsAndAddressCreated() + + beforeWithClaimsAndAddressCreated() + + address1.Spec.Address = "10.10.10.10.10" + address2.Spec.Address = "11.11.11.11.11" + address3.Spec.Address = "12.12.12.12.12" + g.Expect(ctx.Client.Update(ctx, address1)).NotTo(gomega.HaveOccurred()) + g.Expect(ctx.Client.Update(ctx, address2)).NotTo(gomega.HaveOccurred()) + g.Expect(ctx.Client.Update(ctx, address3)).NotTo(gomega.HaveOccurred()) + + _, err := BuildState(ctx, networkStatus) + 
g.Expect(err).To(gomega.HaveOccurred()) + g.Expect(err).To(gomega.MatchError( + gomega.ContainSubstring("IPAddress my-namespace/vsphereVM1-0-0 has invalid ip address: \"10.10.10.10.10/24\""))) + g.Expect(err).To(gomega.MatchError( + gomega.ContainSubstring("IPAddress my-namespace/vsphereVM1-0-1 has invalid ip address: \"11.11.11.11.11/24\""))) + g.Expect(err).To(gomega.MatchError( + gomega.ContainSubstring("IPAddress my-namespace/vsphereVM1-1-0 has invalid ip address: \"12.12.12.12.12/24\""))) + }) + + t.Run("when there are multiple IPAM gateway configuration issues on one vm, it notes all of the problems", func(_ *testing.T) { + beforeWithClaimsAndAddressCreated() + + address1.Spec.Gateway = "10.10.10.10.10" + address2.Spec.Gateway = "11.11.11.11.11" + address3.Spec.Gateway = "12.12.12.12.12" + g.Expect(ctx.Client.Update(ctx, address1)).NotTo(gomega.HaveOccurred()) + g.Expect(ctx.Client.Update(ctx, address2)).NotTo(gomega.HaveOccurred()) + g.Expect(ctx.Client.Update(ctx, address3)).NotTo(gomega.HaveOccurred()) + + _, err := BuildState(ctx, networkStatus) + g.Expect(err).To(gomega.HaveOccurred()) + g.Expect(err).To(gomega.MatchError( + gomega.ContainSubstring("IPAddress my-namespace/vsphereVM1-0-0 has invalid gateway: \"10.10.10.10.10\""))) + g.Expect(err).To(gomega.MatchError( + gomega.ContainSubstring("IPAddress my-namespace/vsphereVM1-0-1 has invalid gateway: \"11.11.11.11.11\""))) + g.Expect(err).To(gomega.MatchError( + gomega.ContainSubstring("IPAddress my-namespace/vsphereVM1-1-0 has invalid gateway: \"12.12.12.12.12\""))) + }) + + t.Run("when there are duplicate IPAddresses", func(_ *testing.T) { + beforeWithClaimsAndAddressCreated() + + address1.Spec.Address = "10.0.0.50" + address2.Spec.Address = "10.0.0.50" + g.Expect(ctx.Client.Update(ctx, address1)).NotTo(gomega.HaveOccurred()) + g.Expect(ctx.Client.Update(ctx, address2)).NotTo(gomega.HaveOccurred()) + + _, err := BuildState(ctx, networkStatus) + g.Expect(err).To(gomega.HaveOccurred()) + g.Expect(err).To(gomega.MatchError( + gomega.ContainSubstring("IPAddress my-namespace/vsphereVM1-0-1 is a duplicate of another address: \"10.0.0.50/24\""))) + }) + }) +} diff --git a/pkg/services/govmomi/pci/device.go b/pkg/services/govmomi/pci/device.go index e48958a633..3079135f0f 100644 --- a/pkg/services/govmomi/pci/device.go +++ b/pkg/services/govmomi/pci/device.go @@ -17,11 +17,11 @@ limitations under the License. 
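The aggregate assertions in the tests above match several substrings against one returned error, which implies BuildState keeps validating after the first bad IPAddress and folds every failure into a single error. A minimal sketch of that collect-then-aggregate pattern, assuming the implementation uses k8s.io/apimachinery's error aggregation (the helper name and message format here are illustrative, not taken from this PR):

package ipam

import (
	"fmt"

	kerrors "k8s.io/apimachinery/pkg/util/errors"
)

// validateAddresses is a hypothetical helper: it keeps scanning after the
// first failure so a single returned error can report every bad address.
func validateAddresses(addrs map[string]string) error {
	var errs []error
	for name, addr := range addrs {
		if addr == "" {
			errs = append(errs, fmt.Errorf("IPAddress %s has invalid ip address: %q", name, addr))
		}
	}
	// NewAggregate returns nil for an empty slice, otherwise one error whose
	// message contains each collected failure, which is what lets the tests
	// assert on several substrings at once.
	return kerrors.NewAggregate(errs)
}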
package pci

import (
+ "context"
"fmt"

"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
- "golang.org/x/net/context"

infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
)
diff --git a/pkg/services/govmomi/service.go b/pkg/services/govmomi/service.go
index 9a2e61813c..84e2b43870 100644
--- a/pkg/services/govmomi/service.go
+++ b/pkg/services/govmomi/service.go
@@ -19,7 +19,6 @@ package govmomi
import (
"encoding/base64"
"fmt"
- "net/netip"

"github.com/pkg/errors"
"github.com/vmware/govmomi/object"
@@ -28,16 +27,12 @@ import (
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
- "golang.org/x/exp/slices"
corev1 "k8s.io/api/core/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apitypes "k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
capierrors "sigs.k8s.io/cluster-api/errors"
- ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1alpha1"
"sigs.k8s.io/cluster-api/util/conditions"

infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
@@ -45,6 +40,7 @@ import (
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi/cluster"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi/clustermodules"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi/extra"
+ "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi/ipam"
govmominet "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi/net"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi/pci"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"
@@ -77,10 +73,6 @@ func (vms *VMService) ReconcileVM(ctx *context.VMContext) (vm infrav1.VirtualMac
// event is triggered.
defer reconcileVSphereVMOnTaskCompletion(ctx)

- if ok, err := vms.reconcileIPAddressClaims(ctx); err != nil || !ok {
- return vm, err
- }
-
// Before going further, we need the VM's managed object reference.
vmRef, err := findVM(ctx)
//nolint:nestif
@@ -92,12 +84,12 @@ func (vms *VMService) ReconcileVM(ctx *context.VMContext) (vm infrav1.VirtualMac
// If the machine was not found by BIOS UUID it means that it got deleted from vcenter directly
if wasNotFoundByBIOSUUID(err) {
ctx.VSphereVM.Status.FailureReason = capierrors.MachineStatusErrorPtr(capierrors.UpdateMachineError)
- ctx.VSphereVM.Status.FailureMessage = pointer.StringPtr(fmt.Sprintf("Unable to find VM by BIOS UUID %s. The vm was removed from infra", ctx.VSphereVM.Spec.BiosUUID))
+ ctx.VSphereVM.Status.FailureMessage = pointer.String(fmt.Sprintf("Unable to find VM by BIOS UUID %s. The vm was removed from infra", ctx.VSphereVM.Spec.BiosUUID))
return vm, err
}

- // Otherwise, this is a new machine and the the VM should be created.
- // NOTE: We are setting this condition only in case it does not exists so we avoid to get flickering LastConditionTime
+ // Otherwise, this is a new machine and the VM should be created.
+ // NOTE: We set this condition only if it does not already exist, to avoid a flickering LastConditionTime
+ // in case of cloning errors or powering on errors.
if !conditions.Has(ctx.VSphereVM, infrav1.VMProvisionedCondition) {
conditions.MarkFalse(ctx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.CloningReason, clusterv1.ConditionSeverityInfo, "")
@@ -274,182 +266,20 @@ func (vms *VMService) reconcileNetworkStatus(ctx *virtualMachineContext) error {
return nil
}

-// reconcileIPAddressClaims ensures that VSphereVMs that are configured with
-// .spec.network.devices.addressFromPools have corresponding IPAddressClaims.
-func (vms *VMService) reconcileIPAddressClaims(ctx *context.VMContext) (bool, error) {
- for devIdx, device := range ctx.VSphereVM.Spec.Network.Devices {
- for poolRefIdx, poolRef := range device.AddressesFromPools {
- // check if claim exists
- ipAddrClaim := &ipamv1.IPAddressClaim{}
- ipAddrClaimName := IPAddressClaimName(ctx.VSphereVM.Name, devIdx, poolRefIdx)
- ipAddrClaimKey := apitypes.NamespacedName{
- Namespace: ctx.VSphereVM.Namespace,
- Name: ipAddrClaimName,
- }
- var err error
- if err = ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim); err != nil && !apierrors.IsNotFound(err) {
- return false, err
- }
- if err == nil {
- ctx.Logger.V(5).Info("IPAddressClaim found", "name", ipAddrClaimName)
- }
- if apierrors.IsNotFound(err) {
- if err = createIPAddressClaim(ctx, ipAddrClaimName, poolRef); err != nil {
- return false, err
- }
- msg := "Waiting for IPAddressClaim to have an IPAddress bound"
- markIPAddressClaimedConditionWaitingForClaimAddress(ctx.VSphereVM, msg)
- }
- }
- }
- return true, nil
-}
-
-// createIPAddressClaim sets up the ipam IPAddressClaim object and creates it in
-// the API.
-func createIPAddressClaim(ctx *context.VMContext, ipAddrClaimName string, poolRef corev1.TypedLocalObjectReference) error {
- ctx.Logger.Info("creating IPAddressClaim", "name", ipAddrClaimName)
- claim := &ipamv1.IPAddressClaim{
- ObjectMeta: metav1.ObjectMeta{
- Name: ipAddrClaimName,
- Namespace: ctx.VSphereVM.Namespace,
- OwnerReferences: []metav1.OwnerReference{
- {
- APIVersion: ctx.VSphereVM.APIVersion,
- Kind: ctx.VSphereVM.Kind,
- Name: ctx.VSphereVM.Name,
- UID: ctx.VSphereVM.UID,
- },
- },
- Finalizers: []string{infrav1.IPAddressClaimFinalizer},
- },
- Spec: ipamv1.IPAddressClaimSpec{PoolRef: poolRef},
- }
- return ctx.Client.Create(ctx, claim)
-}
-
-// reconcileIPAddresses prevents successful reconcilliation of a VSphereVM
-// until an IPAM Provider updates each IPAddressClaim associated to the
-// VSphereVM with a reference to an IPAddress. This function is a no-op if the
-// VSphereVM has no associated IPAddressClaims. A discovered IPAddress is
-// expected to contain a valid IP, Prefix and Gateway.
+// reconcileIPAddresses checks that all IPAddressClaim objects for the
+// VSphereVM have been bound.
+// This function is a no-op if the VSphereVM has no associated IPAddressClaims.
+// A discovered IPAddress is expected to contain a valid IP, Prefix and Gateway.
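// For reference, a sketch of an IPAddress that satisfies this contract,
// with field values taken from the ipam package's test fixtures (ipamv1 is
// CAPI's exp/ipam/api/v1alpha1):
//
//	ipamv1.IPAddress{
//		Spec: ipamv1.IPAddressSpec{
//			Address: "10.0.1.50", // must parse as a bare IP
//			Prefix:  24,          // joined with Address as "10.0.1.50/24"
//			Gateway: "10.0.0.1",  // must parse and share Address's IP family
//		},
//	}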
func (vms *VMService) reconcileIPAddresses(ctx *virtualMachineContext) (bool, error) { - ctx.IPAMState = map[string]infrav1.NetworkDeviceSpec{} - for devIdx, device := range ctx.VSphereVM.Spec.Network.Devices { - var ipAddrs []string - var gateway4 string - var gateway6 string - - //TODO: Break this up into smaller functions - for poolRefIdx := range device.AddressesFromPools { - // check if claim exists - ipAddrClaim := &ipamv1.IPAddressClaim{} - ipAddrClaimName := IPAddressClaimName(ctx.VSphereVM.Name, devIdx, poolRefIdx) - ipAddrClaimKey := apitypes.NamespacedName{ - Namespace: ctx.VSphereVM.Namespace, - Name: ipAddrClaimName, - } - var err error - ctx.Logger.V(5).Info("fetching IPAddressClaim", "name", ipAddrClaimKey.String()) - if err = ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim); err != nil && !apierrors.IsNotFound(err) { - ctx.Logger.Error(err, "error fetching IPAddressClaim", "name", ipAddrClaimName) - return false, err - } - - ipAddrName := ipAddrClaim.Status.AddressRef.Name - ctx.Logger.V(5).Info("fetched IPAddressClaim", "name", ipAddrClaimName, "IPAddressClaim.Status.AddressRef.Name", ipAddrName) - if ipAddrName == "" { - ctx.Logger.V(5).Info("IPAddress name was empty on IPAddressClaim", "name", ipAddrClaimName, "IPAddressClaim.Status.AddressRef.Name", ipAddrName) - msg := "Waiting for IPAddressClaim to have an IPAddress bound" - markIPAddressClaimedConditionWaitingForClaimAddress(ctx.VSphereVM, msg) - return false, errors.New(msg) - } - - ipAddr := &ipamv1.IPAddress{} - ipAddrKey := apitypes.NamespacedName{ - Namespace: ctx.VSphereVM.Namespace, - Name: ipAddrName, - } - if err = ctx.Client.Get(ctx, ipAddrKey, ipAddr); err != nil { - return false, err - } - - toAdd := fmt.Sprintf("%s/%d", ipAddr.Spec.Address, ipAddr.Spec.Prefix) - parsedPrefix, err := netip.ParsePrefix(toAdd) - if err != nil { - msg := fmt.Sprintf("IPAddress %s/%s has invalid ip address: %q", - ipAddrKey.Namespace, - ipAddrKey.Name, - toAdd, - ) - return markIPAddressClaimedConditionInvalidIPWithError(ctx.VSphereVM, msg) - } - - if !slices.Contains(ipAddrs, toAdd) { - ipAddrs = append(ipAddrs, toAdd) - - gatewayAddr, err := netip.ParseAddr(ipAddr.Spec.Gateway) - if err != nil { - msg := fmt.Sprintf("IPAddress %s/%s has invalid gateway: %q", - ipAddrKey.Namespace, - ipAddrKey.Name, - ipAddr.Spec.Gateway, - ) - return markIPAddressClaimedConditionInvalidIPWithError(ctx.VSphereVM, msg) - } - - if parsedPrefix.Addr().Is4() != gatewayAddr.Is4() { - msg := fmt.Sprintf("IPAddress %s/%s has mismatched gateway and address IP families", - ipAddrKey.Namespace, - ipAddrKey.Name, - ) - return markIPAddressClaimedConditionInvalidIPWithError(ctx.VSphereVM, msg) - } - - if gatewayAddr.Is4() { - if device.Gateway4 != "" && device.Gateway4 != ipAddr.Spec.Gateway { - msg := fmt.Sprintf("The IPv4 Gateway for IPAddress %s does not match the Gateway4 already configured on device (index %d)", - ipAddrName, - devIdx, - ) - return markIPAddressClaimedConditionInvalidIPWithError(ctx.VSphereVM, msg) - } - if gateway4 != "" && gateway4 != ipAddr.Spec.Gateway { - msg := fmt.Sprintf("The IPv4 IPAddresses assigned to the same device (index %d) do not have the same gateway", - devIdx, - ) - return markIPAddressClaimedConditionInvalidIPWithError(ctx.VSphereVM, msg) - } - gateway4 = ipAddr.Spec.Gateway - } else { - if device.Gateway6 != "" && device.Gateway6 != ipAddr.Spec.Gateway { - msg := fmt.Sprintf("The IPv6 Gateway for IPAddress %s does not match the Gateway6 already configured on device (index %d)", - ipAddrName, - devIdx, - ) - 
return markIPAddressClaimedConditionInvalidIPWithError(ctx.VSphereVM, msg) - } - if gateway6 != "" && gateway6 != ipAddr.Spec.Gateway { - msg := fmt.Sprintf("The IPv6 IPAddresses assigned to the same device (index %d) do not have the same gateway", - devIdx, - ) - return markIPAddressClaimedConditionInvalidIPWithError(ctx.VSphereVM, msg) - } - gateway6 = ipAddr.Spec.Gateway - } - } - ctx.IPAMState[device.MACAddr] = infrav1.NetworkDeviceSpec{ - IPAddrs: ipAddrs, - Gateway4: gateway4, - Gateway6: gateway6, - } - } + ipamState, err := ipam.BuildState(ctx.VMContext, ctx.State.Network) + if err != nil && !errors.Is(err, ipam.ErrWaitingForIPAddr) { + return false, err } - - if len(ctx.IPAMState) > 0 { - conditions.MarkTrue(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) + if errors.Is(err, ipam.ErrWaitingForIPAddr) { + conditions.MarkFalse(ctx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.WaitingForIPAddressReason, clusterv1.ConditionSeverityInfo, err.Error()) + return false, nil } - + ctx.IPAMState = ipamState return true, nil } @@ -853,26 +683,3 @@ func (vms *VMService) reconcileClusterModuleMembership(ctx *virtualMachineContex } return nil } - -func markIPAddressClaimedConditionInvalidIPWithError(vm *infrav1.VSphereVM, msg string) (bool, error) { - conditions.MarkFalse(vm, - infrav1.IPAddressClaimedCondition, - infrav1.IPAddressInvalidReason, - clusterv1.ConditionSeverityError, - msg) - return false, errors.New(msg) -} - -func markIPAddressClaimedConditionWaitingForClaimAddress(vm *infrav1.VSphereVM, msg string) { - conditions.MarkFalse(vm, - infrav1.IPAddressClaimedCondition, - infrav1.WaitingForIPAddressReason, - clusterv1.ConditionSeverityInfo, - msg) -} - -// IPAddressClaimName returns a name given a VsphereVM name, deviceIndex, and -// poolIndex. 
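The slimmed-down reconcileIPAddresses above hinges on a sentinel error exported by the new ipam package: ErrWaitingForIPAddr separates the retryable "claims not bound yet" state from hard validation failures. A minimal sketch of the pattern, assuming the sentinel is declared with errors.New (only the variable name comes from this diff; the message text is invented):

package ipam

import "errors"

// ErrWaitingForIPAddr signals that reconciliation should requeue rather than
// fail: the IPAddressClaims exist but no IPAddress has been bound yet.
var ErrWaitingForIPAddr = errors.New("waiting for IPAddressClaims to be bound")

// Callers branch on the sentinel with errors.Is, mirroring the
// reconcileIPAddresses body above.
func classify(err error) (requeue bool, hardErr error) {
	switch {
	case err == nil:
		return false, nil
	case errors.Is(err, ErrWaitingForIPAddr):
		return true, nil // benign: mark a condition and wait
	default:
		return false, err // real failure, surface it
	}
}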
-func IPAddressClaimName(vmName string, deviceIndex, poolIndex int) string { - return fmt.Sprintf("%s-%d-%d", vmName, deviceIndex, poolIndex) -} diff --git a/pkg/services/govmomi/service_test.go b/pkg/services/govmomi/service_test.go index bf8b1fd0da..b6115cb69d 100644 --- a/pkg/services/govmomi/service_test.go +++ b/pkg/services/govmomi/service_test.go @@ -26,681 +26,14 @@ import ( "github.com/vmware/govmomi/simulator" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/types" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - apitypes "k8s.io/apimachinery/pkg/types" "k8s.io/utils/pointer" - ipamv1a1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1alpha1" - "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" ) -var myAPIGroup = "my-pool-api-group" - -//nolint:errcheck -func Test_reconcileIPAddressClaims_ShouldGenerateIPAddressClaims(t *testing.T) { - scheme := runtime.NewScheme() - _ = ipamv1a1.AddToScheme(scheme) - - var ctx *context.VMContext - var g *WithT - var vms *VMService - - before := func() { - ctx = emptyVMContext() - ctx.Client = fake.NewClientBuilder().WithScheme(scheme).Build() - - vms = &VMService{} - g = NewWithT(t) - } - - t.Run("when a device has a IPAddressPool", func(_ *testing.T) { - before() - ctx.VSphereVM = &infrav1.VSphereVM{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vsphereVM1", - Namespace: "my-namespace", - }, - Spec: infrav1.VSphereVMSpec{ - VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ - Network: infrav1.NetworkSpec{ - Devices: []infrav1.NetworkDeviceSpec{ - { - AddressesFromPools: []corev1.TypedLocalObjectReference{ - { - APIGroup: &myAPIGroup, - Name: "my-pool-1", - Kind: "my-pool-kind", - }, - }, - }, - { - AddressesFromPools: []corev1.TypedLocalObjectReference{ - { - APIGroup: &myAPIGroup, - Name: "my-pool-2", - Kind: "my-pool-kind", - }, - { - APIGroup: &myAPIGroup, - Name: "my-pool-3", - Kind: "my-pool-kind", - }, - }, - }, - }, - }, - }, - }, - } - - reconciled, err := vms.reconcileIPAddressClaims(ctx) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(reconciled).To(BeTrue()) - - ipAddrClaimKey := apitypes.NamespacedName{ - Name: "vsphereVM1-0-0", - Namespace: "my-namespace", - } - ipAddrClaim := &ipamv1a1.IPAddressClaim{} - ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim) - g.Expect(ipAddrClaim.Spec.PoolRef.Name).To(Equal("my-pool-1")) - - ipAddrClaimKey = apitypes.NamespacedName{ - Name: "vsphereVM1-1-0", - Namespace: "my-namespace", - } - ipAddrClaim = &ipamv1a1.IPAddressClaim{} - ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim) - g.Expect(ipAddrClaim.Spec.PoolRef.Name).To(Equal("my-pool-2")) - - ipAddrClaimKey = apitypes.NamespacedName{ - Name: "vsphereVM1-1-1", - Namespace: "my-namespace", - } - ipAddrClaim = &ipamv1a1.IPAddressClaim{} - ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim) - g.Expect(ipAddrClaim.Spec.PoolRef.Name).To(Equal("my-pool-3")) - - // Ensure that duplicate claims are not created - reconciled, err = vms.reconcileIPAddressClaims(ctx) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(reconciled).To(BeTrue()) - - ipAddrClaims := &ipamv1a1.IPAddressClaimList{} - ctx.Client.List(ctx, ipAddrClaims) - g.Expect(ipAddrClaims.Items).To(HaveLen(3)) - - // Ensure that the VM has a IPAddressClaimed condition set to False - // for the WaitingForIPAddress reason. 
- claimedCondition := conditions.Get(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) - g.Expect(claimedCondition).NotTo(BeNil()) - g.Expect(claimedCondition.Reason).To(Equal(infrav1.WaitingForIPAddressReason)) - }) - - t.Run("when there are no FromPools it does not set the IPAddressClaimedCondition", func(_ *testing.T) { - before() - ctx.VSphereVM = &infrav1.VSphereVM{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vsphereVM1", - Namespace: "my-namespace", - }, - Spec: infrav1.VSphereVMSpec{ - VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ - Network: infrav1.NetworkSpec{ - Devices: []infrav1.NetworkDeviceSpec{ - { - DHCP4: true, - }, - { - DHCP6: true, - }, - }, - }, - }, - }, - } - - reconciled, err := vms.reconcileIPAddressClaims(ctx) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(reconciled).To(BeTrue()) - - ipAddrClaims := &ipamv1a1.IPAddressClaimList{} - ctx.Client.List(ctx, ipAddrClaims) - g.Expect(ipAddrClaims.Items).To(HaveLen(0)) - - // The condition should not appear when there are no Claims - claimedCondition := conditions.Get(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) - g.Expect(claimedCondition).To(BeNil()) - }) -} - -//nolint:errcheck -func Test_reconcileIPAddresses_ShouldUpdateVMDevicesWithAddresses(t *testing.T) { - scheme := runtime.NewScheme() - _ = ipamv1a1.AddToScheme(scheme) - - var ctx *virtualMachineContext - var claim1, claim2, claim3 *ipamv1a1.IPAddressClaim - var address1, address2, address3 *ipamv1a1.IPAddress - var g *WithT - var vms *VMService - - before := func() { - ctx = emptyVirtualMachineContext() - ctx.Client = fake.NewClientBuilder().WithScheme(scheme).Build() - - vms = &VMService{} - g = NewWithT(t) - - claim1 = &ipamv1a1.IPAddressClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vsphereVM1-0-0", - Namespace: "my-namespace", - }, - } - - claim2 = &ipamv1a1.IPAddressClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vsphereVM1-0-1", - Namespace: "my-namespace", - }, - } - - claim3 = &ipamv1a1.IPAddressClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vsphereVM1-0-2", - Namespace: "my-namespace", - }, - } - - address1 = &ipamv1a1.IPAddress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vsphereVM1-0-0-address0", - Namespace: "my-namespace", - }, - Spec: ipamv1a1.IPAddressSpec{ - Address: "10.0.0.50", - Prefix: 24, - Gateway: "10.0.0.1", - }, - } - address2 = &ipamv1a1.IPAddress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vsphereVM1-0-1-address1", - Namespace: "my-namespace", - }, - Spec: ipamv1a1.IPAddressSpec{ - Address: "10.0.1.50", - Prefix: 30, - Gateway: "10.0.0.1", - }, - } - - address3 = &ipamv1a1.IPAddress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vsphereVM1-0-2-address2", - Namespace: "my-namespace", - }, - Spec: ipamv1a1.IPAddressSpec{ - Address: "fe80::cccc:12", - Prefix: 64, - Gateway: "fe80::cccc:1", - }, - } - } - - t.Run("when a device has a IPAddressPool", func(_ *testing.T) { - before() - devMAC := "0:0:0:0:a" - ctx.VSphereVM = &infrav1.VSphereVM{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vsphereVM1", - Namespace: "my-namespace", - }, - Spec: infrav1.VSphereVMSpec{ - VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ - Network: infrav1.NetworkSpec{ - Devices: []infrav1.NetworkDeviceSpec{ - { - MACAddr: devMAC, - AddressesFromPools: []corev1.TypedLocalObjectReference{ - { - APIGroup: &myAPIGroup, - Name: "my-pool-1", - Kind: "my-pool-kind", - }, - { - APIGroup: &myAPIGroup, - Name: "my-pool-1", - Kind: "my-pool-kind", - }, - { - APIGroup: &myAPIGroup, - Name: "my-pool-ipv6", - Kind: "my-pool-kind", - }, - }, - }, - }, - }, - 
}, - }, - } - - // Creates ip address claims - ctx.Client.Create(ctx, claim1) - ctx.Client.Create(ctx, claim2) - ctx.Client.Create(ctx, claim3) - - // IP provider has not provided Addresses yet - reconciled, err := vms.reconcileIPAddresses(ctx) - g.Expect(err).To(MatchError("Waiting for IPAddressClaim to have an IPAddress bound")) - g.Expect(reconciled).To(BeFalse()) - - // Ensure that the VM has a IPAddressClaimed condition set to False - // for the WaitingForIPAddress reason. - claimedCondition := conditions.Get(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) - g.Expect(claimedCondition).NotTo(BeNil()) - g.Expect(claimedCondition.Reason).To(Equal(infrav1.WaitingForIPAddressReason)) - g.Expect(claimedCondition.Message).To(Equal("Waiting for IPAddressClaim to have an IPAddress bound")) - g.Expect(claimedCondition.Status).To(Equal(corev1.ConditionFalse)) - - // Simulate IP provider reconciling claim - ctx.Client.Create(ctx, address1) - ctx.Client.Create(ctx, address2) - ctx.Client.Create(ctx, address3) - - ipAddrClaim := &ipamv1a1.IPAddressClaim{} - ipAddrClaimKey := apitypes.NamespacedName{ - Namespace: ctx.VSphereVM.Namespace, - Name: "vsphereVM1-0-0", - } - err = ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim) - g.Expect(err).NotTo(HaveOccurred()) - - ipAddrClaim.Status.AddressRef.Name = "vsphereVM1-0-0-address0" - - ctx.Client.Update(ctx, ipAddrClaim) - - ipAddrClaimKey = apitypes.NamespacedName{ - Namespace: ctx.VSphereVM.Namespace, - Name: "vsphereVM1-0-1", - } - err = ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim) - g.Expect(err).NotTo(HaveOccurred()) - - ipAddrClaim.Status.AddressRef.Name = "vsphereVM1-0-1-address1" - - ctx.Client.Update(ctx, ipAddrClaim) - - ipAddrClaimKey = apitypes.NamespacedName{ - Namespace: ctx.VSphereVM.Namespace, - Name: "vsphereVM1-0-2", - } - err = ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim) - g.Expect(err).NotTo(HaveOccurred()) - - ipAddrClaim.Status.AddressRef.Name = "vsphereVM1-0-2-address2" - - ctx.Client.Update(ctx, ipAddrClaim) - - // Now that claims are fulfilled, reconciling should update - // ipAddrs on network spec - reconciled, err = vms.reconcileIPAddresses(ctx) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(reconciled).To(BeTrue()) - g.Expect(ctx.IPAMState).To(HaveLen(1)) - g.Expect(ctx.IPAMState[devMAC].IPAddrs).To(HaveLen(3)) - g.Expect(ctx.IPAMState[devMAC].IPAddrs[0]).To(Equal("10.0.0.50/24")) - g.Expect(ctx.IPAMState[devMAC].Gateway4).To(Equal("10.0.0.1")) - g.Expect(ctx.IPAMState[devMAC].IPAddrs[1]).To(Equal("10.0.1.50/30")) - g.Expect(ctx.IPAMState[devMAC].Gateway4).To(Equal("10.0.0.1")) - g.Expect(ctx.IPAMState[devMAC].IPAddrs[2]).To(Equal("fe80::cccc:12/64")) - g.Expect(ctx.IPAMState[devMAC].Gateway6).To(Equal("fe80::cccc:1")) - claimedCondition = conditions.Get(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) - g.Expect(claimedCondition).NotTo(BeNil()) - g.Expect(claimedCondition.Status).To(Equal(corev1.ConditionTrue)) - }) - - t.Run("when a device has no pools", func(_ *testing.T) { - before() - devMAC := "0:0:0:0:a" - ctx.VSphereVM = &infrav1.VSphereVM{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vsphereVM1", - Namespace: "my-namespace", - }, - Spec: infrav1.VSphereVMSpec{ - VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ - Network: infrav1.NetworkSpec{ - Devices: []infrav1.NetworkDeviceSpec{ - { - MACAddr: devMAC, - DHCP4: true, - }, - }, - }, - }, - }, - } - - // The IPAddressClaimed condition should not be added - reconciled, err := vms.reconcileIPAddresses(ctx) - g.Expect(err).NotTo(HaveOccurred()) - 
g.Expect(reconciled).To(BeTrue()) - - g.Expect(ctx.IPAMState).To(BeEmpty()) - - claimedCondition := conditions.Get(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) - g.Expect(claimedCondition).To(BeNil()) - }) -} - -//nolint:errcheck -func Test_reconcileIPAddresses_ShouldUpdateTheStatusOnValidationIssues(t *testing.T) { - scheme := runtime.NewScheme() - _ = ipamv1a1.AddToScheme(scheme) - - var ctx *virtualMachineContext - var claim1, claim2 *ipamv1a1.IPAddressClaim - var address1, address2 *ipamv1a1.IPAddress - var g *WithT - var vms *VMService - - before := func() { - ctx = emptyVirtualMachineContext() - ctx.Client = fake.NewClientBuilder().WithScheme(scheme).Build() - - claim1 = &ipamv1a1.IPAddressClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vsphereVM1-0-0", - Namespace: "my-namespace", - }, - Status: ipamv1a1.IPAddressClaimStatus{ - AddressRef: corev1.LocalObjectReference{ - Name: "vsphereVM1-0-0", - }, - }, - } - - claim2 = &ipamv1a1.IPAddressClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vsphereVM1-0-1", - Namespace: "my-namespace", - }, - Status: ipamv1a1.IPAddressClaimStatus{ - AddressRef: corev1.LocalObjectReference{ - Name: "vsphereVM1-0-1", - }, - }, - } - - address1 = &ipamv1a1.IPAddress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vsphereVM1-0-0", - Namespace: "my-namespace", - }, - Spec: ipamv1a1.IPAddressSpec{ - Address: "10.0.1.50", - Prefix: 24, - Gateway: "10.0.0.1", - }, - } - - address2 = &ipamv1a1.IPAddress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vsphereVM1-0-1", - Namespace: "my-namespace", - }, - Spec: ipamv1a1.IPAddressSpec{ - Address: "10.0.1.51", - Prefix: 24, - Gateway: "10.0.0.1", - }, - } - - ctx.VSphereVM = &infrav1.VSphereVM{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vsphereVM1", - Namespace: "my-namespace", - }, - Spec: infrav1.VSphereVMSpec{ - VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ - Network: infrav1.NetworkSpec{ - Devices: []infrav1.NetworkDeviceSpec{ - { - AddressesFromPools: []corev1.TypedLocalObjectReference{ - { - APIGroup: &myAPIGroup, - Name: "my-pool-1", - Kind: "my-pool-kind", - }, - { - APIGroup: &myAPIGroup, - Name: "my-pool-2", - Kind: "my-pool-kind", - }, - }, - }, - }, - }, - }, - }, - } - - vms = &VMService{} - - g = NewWithT(t) - - // Creates ip address claims - ctx.Client.Create(ctx, claim1) - ctx.Client.Create(ctx, claim2) - - // Simulate an invalid ip address was provided: the address is empty - ctx.Client.Create(ctx, address1) - ctx.Client.Create(ctx, address2) - } - - t.Run("when a provider assigns an IPAdress without an Address field", func(_ *testing.T) { - before() - address1.Spec.Address = "" - ctx.Client.Update(ctx, address1) - // IP provider has not provided Addresses yet - reconciled, err := vms.reconcileIPAddresses(ctx) - g.Expect(err).To(HaveOccurred()) - g.Expect(reconciled).To(BeFalse()) - - // Ensure that the VM has a IPAddressClaimed condition set to False - // because the simulated ip address is missing the spec address. 
- claimedCondition := conditions.Get(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) - g.Expect(claimedCondition).NotTo(BeNil()) - g.Expect(claimedCondition.Reason).To(Equal(infrav1.IPAddressInvalidReason)) - g.Expect(claimedCondition.Message).To(Equal("IPAddress my-namespace/vsphereVM1-0-0 has invalid ip address: \"/24\"")) - g.Expect(claimedCondition.Status).To(Equal(corev1.ConditionFalse)) - }) - - t.Run("when a provider assigns an IPAddress with an invalid IP in the Address field", func(_ *testing.T) { - before() - // Simulate an invalid ip address was provided: the address is not a valid ip - address1.Spec.Address = "invalid-ip" - ctx.Client.Update(ctx, address1) - - // IP provider has not provided Addresses yet - reconciled, err := vms.reconcileIPAddresses(ctx) - g.Expect(err).To(HaveOccurred()) - g.Expect(reconciled).To(BeFalse()) - - // Ensure that the VM has a IPAddressClaimed condition set to False - // because the simulated ip address is missing the spec address. - claimedCondition := conditions.Get(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) - g.Expect(claimedCondition).NotTo(BeNil()) - g.Expect(claimedCondition.Reason).To(Equal(infrav1.IPAddressInvalidReason)) - g.Expect(claimedCondition.Message).To(Equal("IPAddress my-namespace/vsphereVM1-0-0 has invalid ip address: \"invalid-ip/24\"")) - g.Expect(claimedCondition.Status).To(Equal(corev1.ConditionFalse)) - }) - - t.Run("when a provider assigns an IPAddress with an invalid value in the Prefix field", func(_ *testing.T) { - before() - // Simulate an invalid prefix address was provided: the prefix is out of bounds - address1.Spec.Prefix = 200 - ctx.Client.Update(ctx, address1) - - // IP provider has not provided Addresses yet - reconciled, err := vms.reconcileIPAddresses(ctx) - g.Expect(err).To(HaveOccurred()) - g.Expect(reconciled).To(BeFalse()) - - // Ensure that the VM has a IPAddressClaimed condition set to False - // because the simulated ip address is missing the spec address. - claimedCondition := conditions.Get(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) - g.Expect(claimedCondition).NotTo(BeNil()) - g.Expect(claimedCondition.Reason).To(Equal(infrav1.IPAddressInvalidReason)) - g.Expect(claimedCondition.Message).To(Equal("IPAddress my-namespace/vsphereVM1-0-0 has invalid ip address: \"10.0.1.50/200\"")) - g.Expect(claimedCondition.Status).To(Equal(corev1.ConditionFalse)) - }) - - t.Run("when a provider assigns an IPAddress with an invalid value in the Gateway field", func(_ *testing.T) { - before() - // Simulate an invalid gateway was provided: the gateway is an invalid ip - address1.Spec.Gateway = "invalid-gateway" - ctx.Client.Update(ctx, address1) - - // IP provider has not provided Addresses yet - reconciled, err := vms.reconcileIPAddresses(ctx) - g.Expect(err).To(HaveOccurred()) - g.Expect(reconciled).To(BeFalse()) - - // Ensure that the VM has a IPAddressClaimed condition set to False - // because the simulated ip address is missing the spec address. 
- claimedCondition := conditions.Get(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) - g.Expect(claimedCondition).NotTo(BeNil()) - g.Expect(claimedCondition.Reason).To(Equal(infrav1.IPAddressInvalidReason)) - g.Expect(claimedCondition.Message).To(Equal("IPAddress my-namespace/vsphereVM1-0-0 has invalid gateway: \"invalid-gateway\"")) - g.Expect(claimedCondition.Status).To(Equal(corev1.ConditionFalse)) - }) - - t.Run("when a provider assigns an IPAddress where the Gateway and Address fields are mismatched", func(_ *testing.T) { - before() - // Simulate mismatch address and gateways were provided - address1.Spec.Address = "10.0.1.50" - address1.Spec.Gateway = "fd01::1" - ctx.Client.Update(ctx, address1) - - // IP provider has not provided Addresses yet - reconciled, err := vms.reconcileIPAddresses(ctx) - g.Expect(err).To(HaveOccurred()) - g.Expect(reconciled).To(BeFalse()) - - // Ensure that the VM has a IPAddressClaimed condition set to False - // because the simulated ip address is missing the spec address. - claimedCondition := conditions.Get(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) - g.Expect(claimedCondition).NotTo(BeNil()) - g.Expect(claimedCondition.Reason).To(Equal(infrav1.IPAddressInvalidReason)) - g.Expect(claimedCondition.Message).To(Equal("IPAddress my-namespace/vsphereVM1-0-0 has mismatched gateway and address IP families")) - g.Expect(claimedCondition.Status).To(Equal(corev1.ConditionFalse)) - - // Simulate mismatch address and gateways were provided - address1.Spec.Address = "fd00:cccc::1" - address1.Spec.Gateway = "10.0.0.1" - ctx.Client.Update(ctx, address1) - - // IP provider has not provided Addresses yet - reconciled, err = vms.reconcileIPAddresses(ctx) - g.Expect(err).To(HaveOccurred()) - g.Expect(reconciled).To(BeFalse()) - - // Ensure that the VM has a IPAddressClaimed condition set to False - // because the simulated ip address is missing the spec address. - claimedCondition = conditions.Get(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) - g.Expect(claimedCondition).NotTo(BeNil()) - g.Expect(claimedCondition.Reason).To(Equal(infrav1.IPAddressInvalidReason)) - g.Expect(claimedCondition.Message).To(Equal("IPAddress my-namespace/vsphereVM1-0-0 has mismatched gateway and address IP families")) - g.Expect(claimedCondition.Status).To(Equal(corev1.ConditionFalse)) - }) - - t.Run("when there are multiple IPAddresses for a device with different Gateways", func(_ *testing.T) { - before() - // Simulate multiple gateways were provided - address1.Spec.Address = "10.0.1.50" - address1.Spec.Gateway = "10.0.0.2" - ctx.Client.Update(ctx, address1) - address2.Spec.Address = "10.0.1.51" - address2.Spec.Gateway = "10.0.0.3" - ctx.Client.Update(ctx, address2) - - // IP provider has not provided Addresses yet - reconciled, err := vms.reconcileIPAddresses(ctx) - g.Expect(err).To(HaveOccurred()) - g.Expect(reconciled).To(BeFalse()) - - // Ensure that the VM has a IPAddressClaimed condition set to False - // because the simulated ip address is missing the spec address. 
- claimedCondition := conditions.Get(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) - g.Expect(claimedCondition).NotTo(BeNil()) - g.Expect(claimedCondition.Reason).To(Equal(infrav1.IPAddressInvalidReason)) - g.Expect(claimedCondition.Message).To(Equal("The IPv4 IPAddresses assigned to the same device (index 0) do not have the same gateway")) - g.Expect(claimedCondition.Status).To(Equal(corev1.ConditionFalse)) - - // Simulate multiple gateways were provided - address1.Spec.Address = "fd00:cccc::2" - address1.Spec.Gateway = "fd00::1" - ctx.Client.Update(ctx, address1) - address2.Spec.Address = "fd00:cccc::3" - address2.Spec.Gateway = "fd00::2" - ctx.Client.Update(ctx, address2) - - // IP provider has not provided Addresses yet - reconciled, err = vms.reconcileIPAddresses(ctx) - g.Expect(err).To(HaveOccurred()) - g.Expect(reconciled).To(BeFalse()) - - // Ensure that the VM has a IPAddressClaimed condition set to False - // because the simulated ip address is missing the spec address. - claimedCondition = conditions.Get(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) - g.Expect(claimedCondition).NotTo(BeNil()) - g.Expect(claimedCondition.Reason).To(Equal(infrav1.IPAddressInvalidReason)) - g.Expect(claimedCondition.Message).To(Equal("The IPv6 IPAddresses assigned to the same device (index 0) do not have the same gateway")) - g.Expect(claimedCondition.Status).To(Equal(corev1.ConditionFalse)) - }) - - t.Run("when a user specified gateway does not match the gateway provided by IPAM", func(_ *testing.T) { - before() - - ctx.VSphereVM.Spec.VirtualMachineCloneSpec.Network.Devices[0].Gateway4 = "10.10.10.1" - ctx.VSphereVM.Spec.VirtualMachineCloneSpec.Network.Devices[0].Gateway6 = "fd00::2" - address2.Spec.Address = "fd00:cccc::1" - address2.Spec.Gateway = "fd00::1" - ctx.Client.Update(ctx, address2) - - reconciled, err := vms.reconcileIPAddresses(ctx) - g.Expect(err).To(MatchError("The IPv4 Gateway for IPAddress vsphereVM1-0-0 does not match the Gateway4 already configured on device (index 0)")) - g.Expect(reconciled).To(BeFalse()) - - claimedCondition := conditions.Get(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) - g.Expect(claimedCondition).NotTo(BeNil()) - g.Expect(claimedCondition.Reason).To(Equal(infrav1.IPAddressInvalidReason)) - g.Expect(claimedCondition.Message).To(Equal("The IPv4 Gateway for IPAddress vsphereVM1-0-0 does not match the Gateway4 already configured on device (index 0)")) - g.Expect(claimedCondition.Status).To(Equal(corev1.ConditionFalse)) - - // Fix the Gateway4 for dev0 - ctx.VSphereVM.Spec.VirtualMachineCloneSpec.Network.Devices[0].Gateway4 = "10.0.0.1" - reconciled, err = vms.reconcileIPAddresses(ctx) - g.Expect(err).To(MatchError("The IPv6 Gateway for IPAddress vsphereVM1-0-1 does not match the Gateway6 already configured on device (index 0)")) - g.Expect(reconciled).To(BeFalse()) - - claimedCondition = conditions.Get(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) - g.Expect(claimedCondition).NotTo(BeNil()) - g.Expect(claimedCondition.Reason).To(Equal(infrav1.IPAddressInvalidReason)) - g.Expect(claimedCondition.Message).To(Equal("The IPv6 Gateway for IPAddress vsphereVM1-0-1 does not match the Gateway6 already configured on device (index 0)")) - g.Expect(claimedCondition.Status).To(Equal(corev1.ConditionFalse)) - }) -} - func emptyVirtualMachineContext() *virtualMachineContext { return &virtualMachineContext{ VMContext: context.VMContext{ @@ -714,15 +47,6 @@ func emptyVirtualMachineContext() *virtualMachineContext { } } -func emptyVMContext() *context.VMContext 
{ - return &context.VMContext{ - Logger: logr.Discard(), - ControllerContext: &context.ControllerContext{ - ControllerManagerContext: &context.ControllerManagerContext{}, - }, - } -} - func Test_reconcilePCIDevices(t *testing.T) { var vmCtx *virtualMachineContext var g *WithT diff --git a/pkg/services/network/dummy_provider.go b/pkg/services/network/dummy_provider.go index 5bf7484a2c..f5f89517d8 100644 --- a/pkg/services/network/dummy_provider.go +++ b/pkg/services/network/dummy_provider.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +//nolint:revive package network import ( diff --git a/pkg/services/network/netop_provider.go b/pkg/services/network/netop_provider.go index 005d9026ac..df2a5f5629 100644 --- a/pkg/services/network/netop_provider.go +++ b/pkg/services/network/netop_provider.go @@ -123,7 +123,7 @@ func (np *netopNetworkProvider) ConfigureVirtualMachine(ctx *vmware.ClusterConte return nil } -func (np *netopNetworkProvider) VerifyNetworkStatus(ctx *vmware.ClusterContext, obj runtime.Object) error { +func (np *netopNetworkProvider) VerifyNetworkStatus(_ *vmware.ClusterContext, obj runtime.Object) error { if _, ok := obj.(*netopv1.Network); !ok { return fmt.Errorf("expected Net Operator Network but got %T", obj) } diff --git a/pkg/services/vimmachine.go b/pkg/services/vimmachine.go index ce96dd1eef..93679cee51 100644 --- a/pkg/services/vimmachine.go +++ b/pkg/services/vimmachine.go @@ -121,7 +121,7 @@ func (v *VimMachineService) ReconcileNormal(c context.MachineContext) (bool, err return false, err } - vm, err := v.createOrPatchVSPhereVM(ctx, vsphereVM) + vm, err := v.createOrPatchVSphereVM(ctx, vsphereVM) if err != nil { ctx.Logger.Error(err, "error creating or patching VM", "vsphereVM", vsphereVM) return false, err @@ -329,7 +329,7 @@ func (v *VimMachineService) reconcileNetwork(ctx *context.VIMMachineContext, vm return true, nil } -func (v *VimMachineService) createOrPatchVSPhereVM(ctx *context.VIMMachineContext, vsphereVM *infrav1.VSphereVM) (runtime.Object, error) { +func (v *VimMachineService) createOrPatchVSphereVM(ctx *context.VIMMachineContext, vsphereVM *infrav1.VSphereVM) (runtime.Object, error) { // Create or update the VSphereVM resource. vm := &infrav1.VSphereVM{ ObjectMeta: metav1.ObjectMeta{ @@ -364,12 +364,12 @@ func (v *VimMachineService) createOrPatchVSPhereVM(ctx *context.VIMMachineContex // Ensure the VSphereVM has a label that can be used when searching for // resources associated with the target cluster. - vm.Labels[clusterv1.ClusterLabelName] = ctx.Machine.Labels[clusterv1.ClusterLabelName] + vm.Labels[clusterv1.ClusterNameLabel] = ctx.Machine.Labels[clusterv1.ClusterNameLabel] // For convenience, add a label that makes it easy to figure out if the // VSphereVM resource is part of some control plane. 
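The label rewrites in this file and the files below track Cluster API's v1.4 renames: ClusterLabelName becomes ClusterNameLabel, and MachineControlPlaneLabelName becomes MachineControlPlaneLabel. The control-plane label is now presence-based (the testutil changes later in this diff set its value to an empty string), so membership checks only look for the key. A small sketch, assuming clusterv1 is sigs.k8s.io/cluster-api/api/v1beta1:

package main

import (
	"fmt"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

func main() {
	labels := map[string]string{
		clusterv1.ClusterNameLabel: "my-cluster", // ties the object to its Cluster
	}
	// Presence alone marks a control-plane machine; the value is ignored.
	labels[clusterv1.MachineControlPlaneLabel] = ""

	_, isControlPlane := labels[clusterv1.MachineControlPlaneLabel]
	fmt.Println(isControlPlane) // true
}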
- if val, ok := ctx.Machine.Labels[clusterv1.MachineControlPlaneLabelName]; ok { - vm.Labels[clusterv1.MachineControlPlaneLabelName] = val + if val, ok := ctx.Machine.Labels[clusterv1.MachineControlPlaneLabel]; ok { + vm.Labels[clusterv1.MachineControlPlaneLabel] = val } // Copy the VSphereMachine's VM clone spec into the VSphereVM's diff --git a/pkg/services/vimmachine_test.go b/pkg/services/vimmachine_test.go index 23e55ded59..7f4abf3f22 100644 --- a/pkg/services/vimmachine_test.go +++ b/pkg/services/vimmachine_test.go @@ -238,7 +238,7 @@ var _ = Describe("VimMachineService_GetHostInfo", func() { }) -var _ = Describe("VimMachineService_createOrPatchVSPhereVM", func() { +var _ = Describe("VimMachineService_createOrPatchVSphereVM", func() { var ( controllerCtx *context.ControllerContext machineCtx *context.VIMMachineContext @@ -274,7 +274,7 @@ var _ = Describe("VimMachineService_createOrPatchVSPhereVM", func() { machineCtx.VSphereMachine.Spec.OS = infrav1.Windows }) It("returns a renamed vspherevm object", func() { - vm, err := vimMachineService.createOrPatchVSPhereVM(machineCtx, getVSphereVM(hostAddr, corev1.ConditionTrue)) + vm, err := vimMachineService.createOrPatchVSphereVM(machineCtx, getVSphereVM(hostAddr, corev1.ConditionTrue)) vmName := vm.(*infrav1.VSphereVM).GetName() Expect(err).NotTo(HaveOccurred()) Expect(vmName).To(Equal("fake-long-rname")) @@ -286,7 +286,7 @@ var _ = Describe("VimMachineService_createOrPatchVSPhereVM", func() { machineCtx.VSphereMachine.Spec.OS = infrav1.Linux }) It("returns the same vspherevm name", func() { - vm, err := vimMachineService.createOrPatchVSPhereVM(machineCtx, getVSphereVM(hostAddr, corev1.ConditionTrue)) + vm, err := vimMachineService.createOrPatchVSphereVM(machineCtx, getVSphereVM(hostAddr, corev1.ConditionTrue)) vmName := vm.(*infrav1.VSphereVM).GetName() Expect(err).NotTo(HaveOccurred()) Expect(vmName).To(Equal(fakeLongClusterName)) diff --git a/pkg/services/vmoperator/control_plane_endpoint_test.go b/pkg/services/vmoperator/control_plane_endpoint_test.go index 0b02ba6432..3653b00e8c 100644 --- a/pkg/services/vmoperator/control_plane_endpoint_test.go +++ b/pkg/services/vmoperator/control_plane_endpoint_test.go @@ -35,7 +35,7 @@ import ( "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" ) -func getVirtualMachineService(cpService CPService, ctx *vmware.ClusterContext) *vmoprv1.VirtualMachineService { +func getVirtualMachineService(_ CPService, ctx *vmware.ClusterContext) *vmoprv1.VirtualMachineService { vms := newVirtualMachineService(ctx) nsname := types.NamespacedName{ Namespace: vms.Namespace, diff --git a/pkg/services/vmoperator/vmopmachine_test.go b/pkg/services/vmoperator/vmopmachine_test.go index 574a83aaee..7f1fc77b69 100644 --- a/pkg/services/vmoperator/vmopmachine_test.go +++ b/pkg/services/vmoperator/vmopmachine_test.go @@ -60,7 +60,7 @@ var _ = Describe("VirtualMachine tests", func() { const ( machineName = "test-machine" clusterName = "test-cluster" - controlPlaneLabelTrue = "true" + controlPlaneLabelTrue = true k8sVersion = "test-k8sVersion" className = "test-className" imageName = "test-imageName" @@ -106,8 +106,8 @@ var _ = Describe("VirtualMachine tests", func() { // Create all necessary dependencies cluster = util.CreateCluster(clusterName) vsphereCluster = util.CreateVSphereCluster(clusterName) - machine = util.CreateMachine(machineName, clusterName, controlPlaneLabelTrue, k8sVersion) - vsphereMachine = util.CreateVSphereMachine(machineName, clusterName, controlPlaneLabelTrue, className, imageName, storageClass) + machine 
= util.CreateMachine(machineName, clusterName, k8sVersion, controlPlaneLabelTrue) + vsphereMachine = util.CreateVSphereMachine(machineName, clusterName, className, imageName, storageClass, controlPlaneLabelTrue) clusterContext := util.CreateClusterContext(cluster, vsphereCluster) ctx = util.CreateMachineContext(clusterContext, machine, vsphereMachine) ctx.ControllerContext = clusterContext.ControllerContext diff --git a/pkg/util/cluster.go b/pkg/util/cluster.go index 29c5e160e7..bd4303e21c 100644 --- a/pkg/util/cluster.go +++ b/pkg/util/cluster.go @@ -31,7 +31,7 @@ import ( // GetVSphereClusterFromVMwareMachine gets the vmware.infrastructure.cluster.x-k8s.io.VSphereCluster resource for the given VSphereMachine. // TODO (srm09): Rename this to a more appropriate name. func GetVSphereClusterFromVMwareMachine(ctx context.Context, c client.Client, machine *vmwarev1b1.VSphereMachine) (*vmwarev1b1.VSphereCluster, error) { - clusterName := machine.Labels[clusterv1.ClusterLabelName] + clusterName := machine.Labels[clusterv1.ClusterNameLabel] if clusterName == "" { return nil, errors.Errorf("error getting VSphereCluster name from VSphereMachine %s/%s", machine.Namespace, machine.Name) @@ -56,7 +56,7 @@ func GetVSphereClusterFromVMwareMachine(ctx context.Context, c client.Client, ma // GetVSphereClusterFromVSphereMachine gets the infrastructure.cluster.x-k8s.io.VSphereCluster resource for the given VSphereMachine. func GetVSphereClusterFromVSphereMachine(ctx context.Context, c client.Client, machine *infrav1.VSphereMachine) (*infrav1.VSphereCluster, error) { - clusterName := machine.Labels[clusterv1.ClusterLabelName] + clusterName := machine.Labels[clusterv1.ClusterNameLabel] if clusterName == "" { return nil, errors.Errorf("error getting VSphereCluster name from VSphereMachine %s/%s", machine.Namespace, machine.Name) diff --git a/pkg/util/ipaddress_claim.go b/pkg/util/ipaddress_claim.go new file mode 100644 index 0000000000..9e6b3dbb24 --- /dev/null +++ b/pkg/util/ipaddress_claim.go @@ -0,0 +1,9 @@ +package util + +import "fmt" + +// IPAddressClaimName returns a name given a VsphereVM name, deviceIndex, and +// poolIndex. +func IPAddressClaimName(vmName string, deviceIndex, poolIndex int) string { + return fmt.Sprintf("%s-%d-%d", vmName, deviceIndex, poolIndex) +} diff --git a/pkg/util/machines.go b/pkg/util/machines.go index ff8737e5e4..38e5fbce90 100644 --- a/pkg/util/machines.go +++ b/pkg/util/machines.go @@ -41,7 +41,7 @@ func GetVSphereMachinesInCluster( ctx context.Context, controllerClient client.Client, namespace, clusterName string) ([]*infrav1.VSphereMachine, error) { - labels := map[string]string{clusterv1.ClusterLabelName: clusterName} + labels := map[string]string{clusterv1.ClusterNameLabel: clusterName} machineList := &infrav1.VSphereMachineList{} if err := controllerClient.List( @@ -107,7 +107,7 @@ func GetMachinePreferredIPAddress(machine *infrav1.VSphereMachine) (string, erro // IsControlPlaneMachine returns true if the provided resource is // a member of the control plane. 
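The new pkg/util/ipaddress_claim.go above gives the claim-name scheme a single home: vmName-deviceIndex-poolIndex, which is exactly the shape the ipam tests assert on (vsphereVM1-0-0, vsphereVM1-1-0, and so on). A quick usage sketch:

package main

import (
	"fmt"

	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"
)

func main() {
	// Second device (index 1), second pool reference on that device (index 1).
	fmt.Println(util.IPAddressClaimName("vsphereVM1", 1, 1)) // "vsphereVM1-1-1"
}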
func IsControlPlaneMachine(machine metav1.Object) bool { - _, ok := machine.GetLabels()[clusterv1.MachineControlPlaneLabelName] + _, ok := machine.GetLabels()[clusterv1.MachineControlPlaneLabel] return ok } @@ -121,6 +121,12 @@ func GetMachineMetadata(hostname string, vsphereVM infrav1.VSphereVM, ipamState var waitForIPv4, waitForIPv6 bool for i := range vsphereVM.Spec.Network.Devices { vsphereVM.Spec.Network.Devices[i].DeepCopyInto(&devices[i]) + + // Add the MAC Address to the network device + if len(networkStatuses) > i { + devices[i].MACAddr = networkStatuses[i].MACAddr + } + if state, ok := ipamState[devices[i].MACAddr]; ok { devices[i].IPAddrs = append(devices[i].IPAddrs, state.IPAddrs...) devices[i].Gateway4 = state.Gateway4 @@ -153,6 +159,8 @@ func GetMachineMetadata(hostname string, vsphereVM infrav1.VSphereVM, ipamState } // Add the MAC Address to the network device + // networkStatuses may be longer than devices + // and we want to add all the networks for i, status := range networkStatuses { devices[i].MACAddr = status.MACAddr } diff --git a/pkg/util/machines_test.go b/pkg/util/machines_test.go index 3c70ca0305..892683b55f 100644 --- a/pkg/util/machines_test.go +++ b/pkg/util/machines_test.go @@ -834,6 +834,63 @@ network: addresses: - "fe80::3/64" gateway6: "fe80::1" +`, + }, + { + name: "more-network-statuses-than-spec-devices", + machine: &infrav1.VSphereVM{ + Spec: infrav1.VSphereVMSpec{ + VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ + Network: infrav1.NetworkSpec{ + Devices: []infrav1.NetworkDeviceSpec{ + { + NetworkName: "network1", + MACAddr: "00:00:00:00:00", + DHCP4: true, + }, + { + NetworkName: "network12", + MACAddr: "00:00:00:00:01", + DHCP6: true, + }, + }, + }, + }, + }, + }, + networkStatuses: []infrav1.NetworkStatus{ + {MACAddr: "00:00:00:00:ab"}, + {MACAddr: "00:00:00:00:cd"}, + {MACAddr: "00:00:00:00:ef"}, + }, + expected: ` +instance-id: "test-vm" +local-hostname: "test-vm" +wait-on-network: + ipv4: true + ipv6: true +network: + version: 2 + ethernets: + id0: + match: + macaddress: "00:00:00:00:ab" + set-name: "eth0" + wakeonlan: true + dhcp4: true + dhcp6: false + id1: + match: + macaddress: "00:00:00:00:cd" + set-name: "eth1" + wakeonlan: true + dhcp4: false + dhcp6: true + id2: + match: + macaddress: "00:00:00:00:ef" + set-name: "eth2" + wakeonlan: true `, }, } @@ -999,7 +1056,7 @@ func Test_GetVSphereClusterFromVSphereMachine(t *testing.T) { } machine := &vmwarev1.VSphereMachine{ ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{clusterv1.ClusterLabelName: "foo"}, + Labels: map[string]string{clusterv1.ClusterNameLabel: "foo"}, Name: "foo-machine-1", Namespace: ns, }, diff --git a/pkg/util/testutil.go b/pkg/util/testutil.go index 0d326e1236..456f11cb12 100644 --- a/pkg/util/testutil.go +++ b/pkg/util/testutil.go @@ -76,8 +76,8 @@ func CreateVSphereCluster(clusterName string) *infrav1.VSphereCluster { } } -func CreateMachine(machineName, clusterName, controlPlaneLabel, k8sVersion string) *clusterv1.Machine { - return &clusterv1.Machine{ +func CreateMachine(machineName, clusterName, k8sVersion string, controlPlaneLabel bool) *clusterv1.Machine { + machine := &clusterv1.Machine{ TypeMeta: metav1.TypeMeta{ APIVersion: clusterv1.GroupVersion.String(), Kind: machineKind, @@ -85,8 +85,7 @@ func CreateMachine(machineName, clusterName, controlPlaneLabel, k8sVersion strin ObjectMeta: metav1.ObjectMeta{ Name: machineName, Labels: map[string]string{ - clusterv1.MachineControlPlaneLabelName: controlPlaneLabel, - clusterNameLabelName: clusterName, 
+ clusterNameLabelName: clusterName, }, }, Spec: clusterv1.MachineSpec{ @@ -104,10 +103,16 @@ func CreateMachine(machineName, clusterName, controlPlaneLabel, k8sVersion strin }, }, } + if controlPlaneLabel { + labels := machine.GetLabels() + labels[clusterv1.MachineControlPlaneLabel] = "" + machine.SetLabels(labels) + } + return machine } -func CreateVSphereMachine(machineName, clusterName, controlPlaneLabel, className, imageName, storageClass string) *infrav1.VSphereMachine { - return &infrav1.VSphereMachine{ +func CreateVSphereMachine(machineName, clusterName, className, imageName, storageClass string, controlPlaneLabel bool) *infrav1.VSphereMachine { + vsphereMachine := &infrav1.VSphereMachine{ TypeMeta: metav1.TypeMeta{ APIVersion: infrav1.GroupVersion.String(), Kind: infraMachineKind, @@ -115,8 +120,7 @@ func CreateVSphereMachine(machineName, clusterName, controlPlaneLabel, className ObjectMeta: metav1.ObjectMeta{ Name: machineName, Labels: map[string]string{ - clusterv1.MachineControlPlaneLabelName: controlPlaneLabel, - clusterv1.ClusterLabelName: clusterName, + clusterv1.ClusterNameLabel: clusterName, }, }, Spec: infrav1.VSphereMachineSpec{ @@ -125,6 +129,12 @@ func CreateVSphereMachine(machineName, clusterName, controlPlaneLabel, className StorageClass: storageClass, }, } + if controlPlaneLabel { + labels := vsphereMachine.GetLabels() + labels[clusterv1.MachineControlPlaneLabel] = "" + vsphereMachine.SetLabels(labels) + } + return vsphereMachine } func createScheme() *runtime.Scheme { diff --git a/templates/cluster-template-ignition.yaml b/templates/cluster-template-ignition.yaml index 01fc05bac7..4865d5b36c 100644 --- a/templates/cluster-template-ignition.yaml +++ b/templates/cluster-template-ignition.yaml @@ -106,7 +106,7 @@ spec: value: "10" - name: vip_retryperiod value: "2" - image: ghcr.io/kube-vip/kube-vip:v0.5.5 + image: ghcr.io/kube-vip/kube-vip:v0.5.11 imagePullPolicy: IfNotPresent name: kube-vip resources: {} diff --git a/templates/cluster-template-node-ipam.yaml b/templates/cluster-template-node-ipam.yaml new file mode 100644 index 0000000000..8eb82e8e33 --- /dev/null +++ b/templates/cluster-template-node-ipam.yaml @@ -0,0 +1,1005 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: '${CLUSTER_NAME}' + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereCluster + name: '${CLUSTER_NAME}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereCluster +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + controlPlaneEndpoint: + host: ${CONTROL_PLANE_ENDPOINT_IP} + port: 6443 + identityRef: + kind: Secret + name: '${CLUSTER_NAME}' + server: '${VSPHERE_SERVER}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: '${VSPHERE_DATACENTER}' + datastore: '${VSPHERE_DATASTORE}' + diskGiB: 25 + folder: '${VSPHERE_FOLDER}' + memoryMiB: 8192 + network: + devices: + - addressesFromPools: + - apiGroup: ${NODE_IPAM_POOL_API_GROUP} + kind: ${NODE_IPAM_POOL_KIND} + name: ${NODE_IPAM_POOL_NAME} + nameservers: + - 
${NAMESERVER} + networkName: '${VSPHERE_NETWORK}' + numCPUs: 2 + os: Linux + resourcePool: '${VSPHERE_RESOURCE_POOL}' + server: '${VSPHERE_SERVER}' + storagePolicyName: '${VSPHERE_STORAGE_POLICY}' + template: '${VSPHERE_TEMPLATE}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: ${CLUSTER_NAME}-worker + namespace: '${NAMESPACE}' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: '${VSPHERE_DATACENTER}' + datastore: '${VSPHERE_DATASTORE}' + diskGiB: 25 + folder: '${VSPHERE_FOLDER}' + memoryMiB: 8192 + network: + devices: + - addressesFromPools: + - apiGroup: ${NODE_IPAM_POOL_API_GROUP} + kind: ${NODE_IPAM_POOL_KIND} + name: ${NODE_IPAM_POOL_NAME} + nameservers: + - ${NAMESERVER} + networkName: '${VSPHERE_NETWORK}' + numCPUs: 2 + os: Linux + resourcePool: '${VSPHERE_RESOURCE_POOL}' + server: '${VSPHERE_SERVER}' + storagePolicyName: '${VSPHERE_STORAGE_POLICY}' + template: '${VSPHERE_TEMPLATE}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + controllerManager: + extraArgs: + cloud-provider: external + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: cp_enable + value: "true" + - name: vip_interface + value: ${VIP_NETWORK_INTERFACE=""} + - name: address + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: port + value: "6443" + - name: vip_arp + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + image: ghcr.io/kube-vip/kube-vip:v0.5.11 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + type: FileOrCreate + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback localhost6 localhost6.localdomain6" + >/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost + localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts + users: + - name: capv + sshAuthorizedKeys: + - '${VSPHERE_SSH_AUTHORIZED_KEY}' + sudo: ALL=(ALL) NOPASSWD:ALL + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: '${CLUSTER_NAME}' + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: '${KUBERNETES_VERSION}' +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: 
+ name: '${CLUSTER_NAME}-md-0' + namespace: '${NAMESPACE}' +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback localhost6 localhost6.localdomain6" + >/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost + localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts + users: + - name: capv + sshAuthorizedKeys: + - '${VSPHERE_SSH_AUTHORIZED_KEY}' + sudo: ALL=(ALL) NOPASSWD:ALL +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: '${CLUSTER_NAME}-md-0' + namespace: '${NAMESPACE}' +spec: + clusterName: '${CLUSTER_NAME}' + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: '${CLUSTER_NAME}-md-0' + clusterName: '${CLUSTER_NAME}' + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: ${CLUSTER_NAME}-worker + version: '${KUBERNETES_VERSION}' +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: ${CLUSTER_NAME}-crs-0 + namespace: '${NAMESPACE}' +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + resources: + - kind: Secret + name: vsphere-csi-controller + - kind: ConfigMap + name: vsphere-csi-controller-role + - kind: ConfigMap + name: vsphere-csi-controller-binding + - kind: Secret + name: csi-vsphere-config + - kind: ConfigMap + name: csi.vsphere.vmware.com + - kind: ConfigMap + name: vsphere-csi-node + - kind: ConfigMap + name: vsphere-csi-controller + - kind: Secret + name: cloud-controller-manager + - kind: Secret + name: cloud-provider-vsphere-credentials + - kind: ConfigMap + name: cpi-manifests +--- +apiVersion: v1 +kind: Secret +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +stringData: + password: ${VSPHERE_PASSWORD} + username: ${VSPHERE_USERNAME} +--- +apiVersion: v1 +kind: Secret +metadata: + name: vsphere-csi-controller + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-controller + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-controller-role + rules: + - apiGroups: + - storage.k8s.io + resources: + - csidrivers + verbs: + - create + - delete + - apiGroups: + - "" + resources: + - nodes + - pods + - secrets + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - create + - delete + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update + - apiGroups: 
+ - storage.k8s.io + resources: + - storageclasses + - csinodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list +kind: ConfigMap +metadata: + name: vsphere-csi-controller-role + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vsphere-csi-controller-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-controller-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: kube-system +kind: ConfigMap +metadata: + name: vsphere-csi-controller-binding + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +kind: Secret +metadata: + name: csi-vsphere-config + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: csi-vsphere-config + namespace: kube-system + stringData: + csi-vsphere.conf: |+ + [Global] + cluster-id = "${NAMESPACE}/${CLUSTER_NAME}" + + [VirtualCenter "${VSPHERE_SERVER}"] + user = "${VSPHERE_USERNAME}" + password = "${VSPHERE_PASSWORD}" + datacenters = "${VSPHERE_DATACENTER}" + + [Network] + public-network = "${VSPHERE_NETWORK}" + + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + apiVersion: storage.k8s.io/v1 + kind: CSIDriver + metadata: + name: csi.vsphere.vmware.com + spec: + attachRequired: true +kind: ConfigMap +metadata: + name: csi.vsphere.vmware.com + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node + namespace: kube-system + spec: + selector: + matchLabels: + app: vsphere-csi-node + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock + name: node-driver-registrar + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: node + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + 
securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SYS_ADMIN + privileged: true + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + - mountPath: /csi + name: plugin-dir + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: pods-mount-dir + - mountPath: /dev + name: device-dir + - args: + - --csi-address=/csi/csi.sock + image: quay.io/k8scsi/livenessprobe:v2.1.0 + name: liveness-probe + resources: {} + volumeMounts: + - mountPath: /csi + name: plugin-dir + dnsPolicy: Default + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: csi-vsphere-config + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: /dev + name: device-dir + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: vsphere-csi-node + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: vsphere-csi-controller + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + app: vsphere-csi-controller + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + containers: + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + env: + - name: ADDRESS + value: /csi/csi.sock + image: quay.io/k8scsi/csi-attacher:v3.0.0 + name: csi-attacher + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + - env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: X_CSI_MODE + value: controller + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-controller + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: quay.io/k8scsi/livenessprobe:v2.1.0 + name: liveness-probe + resources: {} + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --leader-election + env: + - name: X_CSI_FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.1.0 + name: vsphere-syncer + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /csi/csi.sock + image: quay.io/k8scsi/csi-provisioner:v2.0.0 + name: csi-provisioner + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + 
dnsPolicy: Default + serviceAccountName: vsphere-csi-controller + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: csi-vsphere-config + - emptyDir: {} + name: socket-dir +kind: ConfigMap +metadata: + name: vsphere-csi-controller + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-controller-manager + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: service-account + name: cloud-controller-manager + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-provider-vsphere-credentials + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: secret + name: cloud-provider-vsphere-credentials + namespace: kube-system + stringData: + ${VSPHERE_SERVER}.password: ${VSPHERE_PASSWORD} + ${VSPHERE_SERVER}.username: ${VSPHERE_USERNAME} + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: role + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - update + - create + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: cluster-role-binding + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + port: 443 + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' + vcenter: + ${VSPHERE_SERVER}: + datacenters: + - '${VSPHERE_DATACENTER}' + server: '${VSPHERE_SERVER}' + kind: ConfigMap + metadata: + name: vsphere-cloud-config + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: role-binding + name: 
servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + component: cloud-controller-manager + tier: control-plane + name: vsphere-cloud-controller-manager + namespace: kube-system + spec: + selector: + matchLabels: + name: vsphere-cloud-controller-manager + template: + metadata: + labels: + component: cloud-controller-manager + name: vsphere-cloud-controller-manager + tier: control-plane + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf + image: gcr.io/cloud-provider-vsphere/cpi/release/manager:${CPI_IMAGE_K8S_VERSION} + name: vsphere-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + hostNetwork: true + priorityClassName: system-node-critical + securityContext: + runAsUser: 1001 + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + volumes: + - configMap: + name: vsphere-cloud-config + name: vsphere-config-volume + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: cpi-manifests + namespace: '${NAMESPACE}' diff --git a/templates/cluster-template-topology.yaml b/templates/cluster-template-topology.yaml index de2ff024c1..85188ae57e 100644 --- a/templates/cluster-template-topology.yaml +++ b/templates/cluster-template-topology.yaml @@ -48,7 +48,7 @@ spec: value: "10" - name: vip_retryperiod value: "2" - image: ghcr.io/kube-vip/kube-vip:v0.5.5 + image: ghcr.io/kube-vip/kube-vip:v0.5.11 imagePullPolicy: IfNotPresent name: kube-vip resources: {} diff --git a/templates/cluster-template.yaml b/templates/cluster-template.yaml index f2b5cf10a7..5588258ba5 100644 --- a/templates/cluster-template.yaml +++ b/templates/cluster-template.yaml @@ -132,7 +132,7 @@ spec: value: "10" - name: vip_retryperiod value: "2" - image: ghcr.io/kube-vip/kube-vip:v0.5.5 + image: ghcr.io/kube-vip/kube-vip:v0.5.11 imagePullPolicy: IfNotPresent name: kube-vip resources: {} diff --git a/test/e2e/anti_affinity_test.go b/test/e2e/anti_affinity_test.go index 24521b9d5d..db8c4ab287 100644 --- a/test/e2e/anti_affinity_test.go +++ b/test/e2e/anti_affinity_test.go @@ -199,13 +199,13 @@ func FetchWorkerVMsForCluster(ctx context.Context, bootstrapClusterProxy framewo vms, client.InNamespace(ns), client.MatchingLabels{ - clusterv1.ClusterLabelName: clusterName, + clusterv1.ClusterNameLabel: clusterName, }) Expect(err).ToNot(HaveOccurred()) workerVMs := []infrav1.VSphereVM{} for _, vm := range vms.Items { - if _, ok := vm.Labels[clusterv1.MachineControlPlaneLabelName]; !ok { + if _, ok := 
vm.Labels[clusterv1.MachineControlPlaneLabel]; !ok { workerVMs = append(workerVMs, vm) } } diff --git a/test/e2e/config/vsphere-ci.yaml b/test/e2e/config/vsphere-ci.yaml index 5bead4fc92..1bcc32db04 100644 --- a/test/e2e/config/vsphere-ci.yaml +++ b/test/e2e/config/vsphere-ci.yaml @@ -8,19 +8,19 @@ # For creating local images, run ./hack/e2e.sh images: - - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.3.0 + - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.4.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.3.0 + - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.4.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.3.0 + - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.4.1 loadBehavior: tryLoad - name: gcr.io/k8s-staging-cluster-api/capv-manager:e2e loadBehavior: mustLoad - - name: quay.io/jetstack/cert-manager-cainjector:v1.10.0 + - name: quay.io/jetstack/cert-manager-cainjector:v1.11.0 loadBehavior: tryLoad - - name: quay.io/jetstack/cert-manager-webhook:v1.10.0 + - name: quay.io/jetstack/cert-manager-webhook:v1.11.0 loadBehavior: tryLoad - - name: quay.io/jetstack/cert-manager-controller:v1.10.0 + - name: quay.io/jetstack/cert-manager-controller:v1.11.0 loadBehavior: tryLoad providers: @@ -33,16 +33,17 @@ providers: value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.7/core-components.yaml" type: "url" files: - - sourcePath: "../data/shared/metadata.yaml" + - sourcePath: "../data/shared/v1beta1/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: v1.3.0 + - name: v1.4.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.0/core-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.1/core-components.yaml" type: "url" + contract: v1beta1 files: - - sourcePath: "../data/shared/metadata.yaml" + - sourcePath: "../data/shared/v1beta1/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -55,16 +56,17 @@ providers: type: "url" contract: v1alpha4 files: - - sourcePath: "../data/shared/metadata.yaml" + - sourcePath: "../data/shared/v1beta1/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: v1.3.0 + - name: v1.4.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.0/bootstrap-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.1/bootstrap-components.yaml" type: "url" + contract: v1beta1 files: - - sourcePath: "../data/shared/metadata.yaml" + - sourcePath: "../data/shared/v1beta1/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -77,16 +79,17 @@ providers: type: "url" contract: v1alpha4 files: - - sourcePath: "../data/shared/metadata.yaml" + - sourcePath: "../data/shared/v1beta1/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: v1.3.0 + - name: v1.4.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.0/control-plane-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.1/control-plane-components.yaml" type: "url" + contract: v1beta1 files: 
- - sourcePath: "../data/shared/metadata.yaml" + - sourcePath: "../data/shared/v1beta1/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -101,7 +104,7 @@ providers: files: - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/capi-upgrades/v1alpha4/cluster-template.yaml" - sourcePath: "../../../metadata.yaml" - - name: v1.6.0 + - name: v1.7.0 # Use manifest from source files value: ../../../../cluster-api-provider-vsphere/config/default contract: v1beta1 @@ -125,7 +128,7 @@ providers: - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/cluster-template-dhcp-overrides.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/clusterclass-quick-start.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/cluster-template-ignition.yaml" - - sourcePath: "../../../metadata.yaml" + - sourcePath: "../data/shared/v1beta1_provider/metadata.yaml" variables: KUBERNETES_VERSION: "v1.25.6" @@ -137,6 +140,7 @@ variables: WORKER_MACHINE_COUNT: 1 IP_FAMILY: "IPv4" CLUSTER_CLASS_NAME: "quick-start" + VSPHERE_TLS_THUMBPRINT: "18:EC:35:60:54:68:92:F6:F8:92:3E:4D:11:A1:0D:13:9C:E9:3E:B6" VSPHERE_DATACENTER: "SDDC-Datacenter" VSPHERE_FOLDER: "clusterapi" VSPHERE_RESOURCE_POOL: "clusterapi" @@ -154,7 +158,6 @@ variables: VENDOR_ID: 4318 # CAPV feature flags EXP_NODE_ANTI_AFFINITY: "true" - EXP_NODE_LABELING: "true" intervals: default/wait-controllers: ["5m", "10s"] diff --git a/test/e2e/config/vsphere-dev.yaml b/test/e2e/config/vsphere-dev.yaml index eebebc42f6..9b26e86b93 100644 --- a/test/e2e/config/vsphere-dev.yaml +++ b/test/e2e/config/vsphere-dev.yaml @@ -11,19 +11,19 @@ # - from the CAPV repository root, `make e2e` to build the vsphere provider image and run e2e tests. images: - - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.3.0 + - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.4.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.3.0 + - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.4.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.3.0 + - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.4.1 loadBehavior: tryLoad - name: gcr.io/k8s-staging-cluster-api/capv-manager:e2e loadBehavior: mustLoad - - name: quay.io/jetstack/cert-manager-cainjector:v1.10.0 + - name: quay.io/jetstack/cert-manager-cainjector:v1.11.0 loadBehavior: tryLoad - - name: quay.io/jetstack/cert-manager-webhook:v1.10.0 + - name: quay.io/jetstack/cert-manager-webhook:v1.11.0 loadBehavior: tryLoad - - name: quay.io/jetstack/cert-manager-controller:v1.10.0 + - name: quay.io/jetstack/cert-manager-controller:v1.11.0 loadBehavior: tryLoad providers: @@ -40,9 +40,9 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: v1.3.0 + - name: v1.4.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.0/core-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.1/core-components.yaml" type: "url" contract: v1beta1 files: @@ -63,9 +63,9 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: v1.3.0 + - name: v1.4.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.0/bootstrap-components.yaml" + value: 
"https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.1/bootstrap-components.yaml" type: "url" contract: "v1beta1" files: @@ -86,9 +86,9 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: v1.3.0 + - name: v1.4.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.0/control-plane-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.1/control-plane-components.yaml" type: "url" files: - sourcePath: "../data/shared/metadata.yaml" @@ -106,7 +106,7 @@ providers: files: - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/capi-upgrades/v1alpha4/cluster-template.yaml" - sourcePath: "../../../metadata.yaml" - - name: v1.6.0 + - name: v1.7.0 # Use manifest from source files value: ../../../../cluster-api-provider-vsphere/config/default contract: v1beta1 @@ -170,7 +170,17 @@ variables: VENDOR_ID: 4318 # CAPV feature flags EXP_NODE_ANTI_AFFINITY: "true" - EXP_NODE_LABELING: "true" + # Following CAPV variables is used for multivc_test.go. This is the second VSphere and should be set if multivc test is enabled. + VSPHERE2_SERVER: "vcenter2.vmware.com" + VSPHERE2_TLS_THUMBPRINT: "AA:BB:CC:DD:11:22:33:44:EE:FF" + VSPHERE2_RESOURCE_POOL: "ResourcePool" + VSPHERE2_TEMPLATE: "ubuntu-2004-kube-v1.25.6" + # Dedicated IP to be used by kube-vip + VSPHERE2_CONTROL_PLANE_ENDPOINT_IP: + # Following variables are also required and please use env variables to avoid disclosure of sensitive data + VSPHERE2_USERNAME: + VSPHERE2_PASSWORD: + intervals: default/wait-controllers: ["5m", "10s"] diff --git a/test/e2e/data/shared/metadata.yaml b/test/e2e/data/shared/v1beta1/metadata.yaml similarity index 93% rename from test/e2e/data/shared/metadata.yaml rename to test/e2e/data/shared/v1beta1/metadata.yaml index dfe7948ad7..d8e8461d1e 100644 --- a/test/e2e/data/shared/metadata.yaml +++ b/test/e2e/data/shared/v1beta1/metadata.yaml @@ -6,6 +6,9 @@ apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 kind: Metadata releaseSeries: + - major: 1 + minor: 4 + contract: v1beta1 - major: 1 minor: 3 contract: v1beta1 diff --git a/test/e2e/data/shared/v1beta1_provider/metadata.yaml b/test/e2e/data/shared/v1beta1_provider/metadata.yaml new file mode 100644 index 0000000000..6edf676aba --- /dev/null +++ b/test/e2e/data/shared/v1beta1_provider/metadata.yaml @@ -0,0 +1,20 @@ +# maps release series of major.minor to cluster-api contract version +# the contract version may change between minor or major versions, but *not* +# between patch versions. 
+# +# update this file only when a new major or minor version is released +apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 +kind: Metadata +releaseSeries: + - major: 0 + minor: 5 + contract: v1alpha2 + - major: 0 + minor: 7 + contract: v1alpha3 + - major: 0 + minor: 8 + contract: v1alpha4 + - major: 1 + minor: 7 + contract: v1beta1 diff --git a/test/e2e/dhcp_overrides_test.go b/test/e2e/dhcp_overrides_test.go index 70e6809298..a49f59605b 100644 --- a/test/e2e/dhcp_overrides_test.go +++ b/test/e2e/dhcp_overrides_test.go @@ -79,8 +79,8 @@ var _ = Describe("DHCPOverrides configuration test", func() { Namespace: namespace.Name, ClusterName: clusterName, KubernetesVersion: e2eConfig.GetVariable(KubernetesVersion), - ControlPlaneMachineCount: pointer.Int64Ptr(1), - WorkerMachineCount: pointer.Int64Ptr(1), + ControlPlaneMachineCount: pointer.Int64(1), + WorkerMachineCount: pointer.Int64(1), }, WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"), WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"), diff --git a/test/e2e/gpu_pci_passthrough_test.go b/test/e2e/gpu_pci_passthrough_test.go index 2a3141d098..9166322fe5 100644 --- a/test/e2e/gpu_pci_passthrough_test.go +++ b/test/e2e/gpu_pci_passthrough_test.go @@ -55,8 +55,8 @@ var _ = Describe("Cluster creation with GPU devices as PCI passthrough [speciali Namespace: namespace.Name, ClusterName: clusterName, KubernetesVersion: e2eConfig.GetVariable(KubernetesVersion), - ControlPlaneMachineCount: pointer.Int64Ptr(1), - WorkerMachineCount: pointer.Int64Ptr(1), + ControlPlaneMachineCount: pointer.Int64(1), + WorkerMachineCount: pointer.Int64(1), }, WaitForClusterIntervals: e2eConfig.GetIntervals("", "wait-cluster"), WaitForControlPlaneIntervals: e2eConfig.GetIntervals("", "wait-control-plane"), @@ -75,7 +75,7 @@ var _ = Describe("Cluster creation with GPU devices as PCI passthrough [speciali func verifyPCIDeviceOnWorkerNodes(clusterName, namespace string) { list := getVSphereVMsForCluster(clusterName, namespace) for _, vm := range list.Items { - if _, ok := vm.GetLabels()[v1beta1.MachineControlPlaneLabelName]; !ok { + if _, ok := vm.GetLabels()[v1beta1.MachineControlPlaneLabel]; !ok { finder := find.NewFinder(vsphereClient.Client, false) dc, err := finder.Datacenter(ctx, vm.Spec.Datacenter) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/log_collector.go b/test/e2e/log_collector.go index 345254a3d6..cc4e658909 100644 --- a/test/e2e/log_collector.go +++ b/test/e2e/log_collector.go @@ -40,7 +40,7 @@ const ( type LogCollector struct{} -func (collector LogCollector) CollectMachinePoolLog(ctx context.Context, managementClusterClient client.Client, m *expv1.MachinePool, outputPath string) error { +func (collector LogCollector) CollectMachinePoolLog(_ context.Context, _ client.Client, _ *expv1.MachinePool, _ string) error { return nil } diff --git a/test/e2e/mhc_remediation_test.go b/test/e2e/mhc_remediation_test.go index 7277753c03..0e72d2b6bd 100644 --- a/test/e2e/mhc_remediation_test.go +++ b/test/e2e/mhc_remediation_test.go @@ -21,9 +21,21 @@ import ( capi_e2e "sigs.k8s.io/cluster-api/test/e2e" ) -var _ = Describe("When testing unhealthy machines remediation", func() { - capi_e2e.MachineRemediationSpec(ctx, func() capi_e2e.MachineRemediationSpecInput { - return capi_e2e.MachineRemediationSpecInput{ +var _ = XDescribe("When testing control plane node remediation", func() { + capi_e2e.KCPRemediationSpec(ctx, func() capi_e2e.KCPRemediationSpecInput { + return capi_e2e.KCPRemediationSpecInput{ + 
E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + } + }) +}) + +var _ = Describe("When testing unhealthy worker node remediation", func() { + capi_e2e.MachineDeploymentRemediationSpec(ctx, func() capi_e2e.MachineDeploymentRemediationSpecInput { + return capi_e2e.MachineDeploymentRemediationSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, diff --git a/test/e2e/multivc_test.go b/test/e2e/multivc_test.go new file mode 100644 index 0000000000..bd5c92e9a0 --- /dev/null +++ b/test/e2e/multivc_test.go @@ -0,0 +1,211 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "path/filepath" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/controller-runtime/pkg/client" + + infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + "sigs.k8s.io/cluster-api-provider-vsphere/test/helpers" +) + +type MultiVCenterSpecInput struct { + InfraClients + Global GlobalInput + Namespace *corev1.Namespace + Datacenter string +} + +var _ = Describe("Cluster creation with multivc [specialized-infra]", func() { + var namespace *corev1.Namespace + + BeforeEach(func() { + Expect(bootstrapClusterProxy).NotTo(BeNil(), "BootstrapClusterProxy can't be nil") + namespace = setupSpecNamespace("capv-e2e") + }) + + AfterEach(func() { + cleanupSpecNamespace(namespace) + }) + + It("should create a cluster successfully", func() { + VerifyMultiVC(ctx, MultiVCenterSpecInput{ + Namespace: namespace, + Datacenter: vsphereDatacenter, + InfraClients: InfraClients{ + Client: vsphereClient, + RestClient: restClient, + Finder: vsphereFinder, + }, + Global: GlobalInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + ClusterctlConfigPath: clusterctlConfigPath, + E2EConfig: e2eConfig, + ArtifactFolder: artifactFolder, + }, + }) + }) +}) + +func VerifyMultiVC(ctx context.Context, input MultiVCenterSpecInput) { + var ( + specName = "" // default template + namespace = input.Namespace + clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult) + + mgmtClusterProxy framework.ClusterProxy + selfHostedNamespace *corev1.Namespace + selfHostedCancelWatches context.CancelFunc + ) + + clusterName := fmt.Sprintf("%s-%s", "mgmtcluster", util.RandomString(6)) + Expect(namespace).NotTo(BeNil()) + + By("creating a workload cluster") + configCluster := defaultConfigCluster(clusterName, namespace.Name, specName, 1, 1, GlobalInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + ClusterctlConfigPath: clusterctlConfigPath, + E2EConfig: e2eConfig, + ArtifactFolder: 
artifactFolder, + }) + + clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + ClusterProxy: input.Global.BootstrapClusterProxy, + ConfigCluster: configCluster, + WaitForClusterIntervals: input.Global.E2EConfig.GetIntervals("", "wait-cluster"), + WaitForControlPlaneIntervals: input.Global.E2EConfig.GetIntervals("", "wait-control-plane"), + WaitForMachineDeployments: input.Global.E2EConfig.GetIntervals("", "wait-worker-nodes"), + }, clusterResources) + + vms := getVSphereVMsForCluster(clusterName, namespace.Name) + Expect(len(vms.Items)).To(BeNumerically(">", 0)) + + _, err := vsphereFinder.DatacenterOrDefault(ctx, input.Datacenter) + Expect(err).ShouldNot(HaveOccurred()) + + By("Turning the workload cluster into a management cluster") + + // Get a ClusterProxy so we can interact with the workload cluster. + mgmtClusterProxy = input.Global.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, clusterName) + + Byf("Creating a namespace for hosting the %s test spec", specName) + selfHostedNamespace, selfHostedCancelWatches = framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{ + Creator: mgmtClusterProxy.GetClient(), + ClientSet: mgmtClusterProxy.GetClientSet(), + Name: namespace.Name, + LogFolder: filepath.Join(artifactFolder, "clusters", "bootstrap"), + }) + + By("Initializing the workload cluster") + helpers.InitBootstrapCluster(mgmtClusterProxy, e2eConfig, clusterctlConfigPath, artifactFolder) + + By("Ensure API servers are stable before doing move") + // Nb. This check was introduced to prevent moving to self-hosted too aggressively, and thus to avoid flakes. + // More specifically, we were observing the test failing to get objects from the API server during move, so we + // now verify that the API servers are stable before starting the move. + Consistently(func() error { + kubeSystem := &corev1.Namespace{} + return input.Global.BootstrapClusterProxy.GetClient().Get(ctx, client.ObjectKey{Name: "kube-system"}, kubeSystem) + }, "5s", "100ms").Should(BeNil(), "Failed to assert bootstrap API server stability") + Consistently(func() error { + kubeSystem := &corev1.Namespace{} + return mgmtClusterProxy.GetClient().Get(ctx, client.ObjectKey{Name: "kube-system"}, kubeSystem) + }, "5s", "100ms").Should(BeNil(), "Failed to assert self-hosted API server stability") + + // Get the machines of the workload cluster before it is moved to become self-hosted to make sure that the move did not trigger + // any unexpected rollouts.
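+ // Machines are selected by the cluster-name label, so only objects belonging to this workload cluster are listed.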
+ preMoveMachineList := &unstructured.UnstructuredList{} + preMoveMachineList.SetGroupVersionKind(clusterv1.GroupVersion.WithKind("MachineList")) + err = input.Global.BootstrapClusterProxy.GetClient().List( + ctx, + preMoveMachineList, + client.InNamespace(namespace.Name), + client.MatchingLabels{clusterv1.ClusterNameLabel: clusterName}, + ) + Expect(err).NotTo(HaveOccurred(), "Failed to list machines before move") + + By("Moving the cluster to self hosted") + clusterctl.Move(ctx, clusterctl.MoveInput{ + LogFolder: filepath.Join(input.Global.ArtifactFolder, "clusters", "bootstrap"), + ClusterctlConfigPath: input.Global.ClusterctlConfigPath, + FromKubeconfigPath: input.Global.BootstrapClusterProxy.GetKubeconfigPath(), + ToKubeconfigPath: mgmtClusterProxy.GetKubeconfigPath(), + Namespace: namespace.Name, + }) + + Expect(selfHostedNamespace.Name).ShouldNot(BeEmpty(), "namespace should have a name") + + wlClusterName := fmt.Sprintf("%s-%s", "wlcluster", util.RandomString(6)) + + _ = os.Setenv("VSPHERE_SERVER", e2eConfig.GetVariable("VSPHERE2_SERVER")) + _ = os.Setenv("VSPHERE_TLS_THUMBPRINT", e2eConfig.GetVariable("VSPHERE2_TLS_THUMBPRINT")) + _ = os.Setenv("VSPHERE_USERNAME", os.Getenv("VSPHERE2_USERNAME")) + _ = os.Setenv("VSPHERE_PASSWORD", os.Getenv("VSPHERE2_PASSWORD")) + _ = os.Setenv("VSPHERE_RESOURCE_POOL", e2eConfig.GetVariable("VSPHERE2_RESOURCE_POOL")) + _ = os.Setenv("VSPHERE_TEMPLATE", e2eConfig.GetVariable("VSPHERE2_TEMPLATE")) + _ = os.Setenv("CONTROL_PLANE_ENDPOINT_IP", e2eConfig.GetVariable("VSPHERE2_CONTROL_PLANE_ENDPOINT_IP")) + + By("creating a workload cluster from the vSphere-hosted management cluster") + wlConfigCluster := defaultConfigCluster(wlClusterName, namespace.Name, specName, 1, 1, GlobalInput{ + BootstrapClusterProxy: mgmtClusterProxy, + ClusterctlConfigPath: clusterctlConfigPath, + E2EConfig: e2eConfig, + ArtifactFolder: artifactFolder, + }) + + clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + ClusterProxy: mgmtClusterProxy, + ConfigCluster: wlConfigCluster, + WaitForClusterIntervals: input.Global.E2EConfig.GetIntervals("", "wait-cluster"), + WaitForControlPlaneIntervals: input.Global.E2EConfig.GetIntervals("", "wait-control-plane"), + WaitForMachineDeployments: input.Global.E2EConfig.GetIntervals("", "wait-worker-nodes"), + }, clusterResources) + + vms = getVSphereVMs(mgmtClusterProxy, wlClusterName, namespace.Name) + Expect(len(vms.Items)).To(BeNumerically(">", 0)) + if selfHostedCancelWatches != nil { + selfHostedCancelWatches() + } +} + +func getVSphereVMs(clusterProxy framework.ClusterProxy, clusterName, namespace string) *infrav1.VSphereVMList { + var vms infrav1.VSphereVMList + err := clusterProxy.GetClient().List( + ctx, + &vms, + client.InNamespace(namespace), + client.MatchingLabels{ + clusterv1.ClusterNameLabel: clusterName, + }, + ) + Expect(err).NotTo(HaveOccurred()) + + return &vms +} diff --git a/test/e2e/storage_policy_test.go b/test/e2e/storage_policy_test.go index 5bbe65f083..8d09f83c2d 100644 --- a/test/e2e/storage_policy_test.go +++ b/test/e2e/storage_policy_test.go @@ -146,7 +146,7 @@ func getVSphereVMsForCluster(clusterName, namespace string) *infrav1.VSphereVMLi &vms, client.InNamespace(namespace), client.MatchingLabels{ - v1beta1.ClusterLabelName: clusterName, + v1beta1.ClusterNameLabel: clusterName, }, ) Expect(err).NotTo(HaveOccurred()) diff --git a/test/helpers/envtest.go b/test/helpers/envtest.go index e964f90348..10cba13f9d 100644 --- a/test/helpers/envtest.go +++ b/test/helpers/envtest.go @@
-155,11 +155,7 @@ func NewTestEnvironment() *TestEnvironment { return err } - if err := (&infrav1.VSphereFailureDomain{}).SetupWebhookWithManager(mgr); err != nil { - return err - } - - return nil + return (&infrav1.VSphereFailureDomain{}).SetupWebhookWithManager(mgr) } mgr, err := manager.New(managerOpts) diff --git a/test/helpers/vmware/intg_test_context.go b/test/helpers/vmware/intg_test_context.go index c2876c3067..c726019ff1 100644 --- a/test/helpers/vmware/intg_test_context.go +++ b/test/helpers/vmware/intg_test_context.go @@ -193,7 +193,7 @@ func createVSphereCluster(ctx context.Context, integrationTestClient client.Clie ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: name, - Labels: map[string]string{clusterv1.ClusterLabelName: capiClusterName}, + Labels: map[string]string{clusterv1.ClusterNameLabel: capiClusterName}, }, } Expect(integrationTestClient.Create(ctx, vsphereCluster)).To(Succeed()) diff --git a/test/integration/cluster_lifecycle_test.go b/test/integration/cluster_lifecycle_test.go index 83d0748919..52f4b49d2f 100644 --- a/test/integration/cluster_lifecycle_test.go +++ b/test/integration/cluster_lifecycle_test.go @@ -79,8 +79,8 @@ var _ = Describe("Cluster lifecycle tests", func() { // Cluster. cluster := assertEventuallyExists(clustersResource, mf.ClusterComponents.Cluster.Name, testNamespace, nil) clusterOwnerRef := toOwnerRef(cluster) - clusterOwnerRef.Controller = pointer.BoolPtr(true) - clusterOwnerRef.BlockOwnerDeletion = pointer.BoolPtr(true) + clusterOwnerRef.Controller = pointer.Bool(true) + clusterOwnerRef.BlockOwnerDeletion = pointer.Bool(true) assertEventuallyExists(vsphereclustersResource, mf.ClusterComponents.Cluster.Name, testNamespace, clusterOwnerRef) }) @@ -124,8 +124,8 @@ var _ = Describe("Cluster lifecycle tests", func() { // controller OwnerRef that points to the VSphereMachine. machine := assertEventuallyExists(machinesResource, controlPlane.Machine.Name, testNamespace, nil) machineOwnerRef := toOwnerRef(machine) - machineOwnerRef.Controller = pointer.BoolPtr(true) - machineOwnerRef.BlockOwnerDeletion = pointer.BoolPtr(true) + machineOwnerRef.Controller = pointer.Bool(true) + machineOwnerRef.BlockOwnerDeletion = pointer.Bool(true) assertEventuallyExists(kubeadmconfigResources, controlPlane.Machine.Name, testNamespace, machineOwnerRef) assertEventuallyExists(virtualmachinesResource, controlPlane.Machine.Name, testNamespace, nil) diff --git a/test/integration/integration-dev.yaml b/test/integration/integration-dev.yaml index 27e91dc2be..da60261750 100644 --- a/test/integration/integration-dev.yaml +++ b/test/integration/integration-dev.yaml @@ -11,32 +11,32 @@ # - from the CAPV repository root, `make e2e` to build the vsphere provider image and run e2e tests. 
images: - - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.1.0 + - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.4.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.1.0 + - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.4.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.1.0 + - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.4.1 loadBehavior: tryLoad - name: gcr.io/k8s-staging-cluster-api/capv-manager:e2e loadBehavior: mustLoad - - name: quay.io/jetstack/cert-manager-cainjector:v1.5.3 + - name: quay.io/jetstack/cert-manager-cainjector:v1.11.0 loadBehavior: tryLoad - - name: quay.io/jetstack/cert-manager-webhook:v1.5.3 + - name: quay.io/jetstack/cert-manager-webhook:v1.11.0 loadBehavior: tryLoad - - name: quay.io/jetstack/cert-manager-controller:v1.5.3 + - name: quay.io/jetstack/cert-manager-controller:v1.11.0 loadBehavior: tryLoad providers: - name: cluster-api type: CoreProvider versions: - - name: v1.1.0 + - name: v1.4.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.0.0/core-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.1/core-components.yaml" type: "url" contract: v1beta1 files: - - sourcePath: "./data/shared/metadata.yaml" + - sourcePath: "../e2e/data/shared/v1beta1/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -44,13 +44,13 @@ providers: - name: kubeadm type: BootstrapProvider versions: - - name: v1.1.0 + - name: v1.4.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.0.0/bootstrap-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.1/bootstrap-components.yaml" type: "url" - contract: "v1beta1" + contract: v1beta1 files: - - sourcePath: "./data/shared/metadata.yaml" + - sourcePath: "../e2e/data/shared/v1beta1/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -58,12 +58,13 @@ providers: - name: kubeadm type: ControlPlaneProvider versions: - - name: v1.1.0 + - name: v1.4.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.0.0/control-plane-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.1/control-plane-components.yaml" type: "url" + contract: v1beta1 files: - - sourcePath: "./data/shared/metadata.yaml" + - sourcePath: "../e2e/data/shared/v1beta1/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -71,12 +72,12 @@ providers: - name: vsphere type: InfrastructureProvider versions: - - name: v1.6.0 + - name: v1.7.0 # Use manifest from source files value: ../../../cluster-api-provider-vsphere/config/deployments/integration-tests contract: v1beta1 files: - - sourcePath: "../../metadata.yaml" + - sourcePath: "../e2e/data/shared/v1beta1_provider/metadata.yaml" replacements: - old: gcr.io/cluster-api-provider-vsphere/release/manager:latest new: gcr.io/k8s-staging-cluster-api/capv-manager:e2e diff --git a/test/integration/integration_suite_test.go b/test/integration/integration_suite_test.go index 3f1d53216c..ee3d8b4cbe 100644 --- a/test/integration/integration_suite_test.go +++ b/test/integration/integration_suite_test.go @@ -14,7 +14,6 @@ See the License 
for the specific language governing permissions and limitations under the License. */ -//nolint package integration import ( @@ -58,12 +57,11 @@ const ( dummyVirtualMachineImageName = "dummy-image" dummyDistributionVersion = "dummy-distro.123" dummyImageRepository = "vmware" - dummyDnsVersion = "v1.3.1_vmware.1" + dummyDNSVersion = "v1.3.1_vmware.1" dummyEtcdVersion = "v3.3.10_vmware.1" numControlPlaneMachines = 1 controlPlaneMachineClassName = "dummy-control-plane-class" controlPlaneMachineStorageClass = "dummy-control-plane-storage-class" - controlPlaneEndPoint = "https://dummy-lb:6443" numWorkerMachines = 1 VirtualMachineDistributionProperty = "vmware-system.guest.kubernetes.distribution.image.version" ) @@ -135,18 +133,6 @@ var ( Resource: "namespaces", } - configmapsResource = schema.GroupVersionResource{ - Group: "", - Version: "v1", - Resource: "configmaps", - } - - eventsResource = schema.GroupVersionResource{ - Group: "", - Version: "v1", - Resource: "events", - } - virtualmachinesResource = schema.GroupVersionResource{ Group: vmoprv1.SchemeGroupVersion.Group, Version: vmoprv1.SchemeGroupVersion.Version, @@ -195,7 +181,6 @@ func TestCAPV(t *testing.T) { RunSpecs(t, "CAPV Supervisor integration tests") } -// Test suite flags var ( // configPath is the path to the e2e config file. configPath string @@ -210,7 +195,6 @@ var ( skipCleanup bool ) -// Test suite global vars var ( // e2eConfig to be used for this test, read from configPath. e2eConfig *clusterctl.E2EConfig @@ -337,7 +321,7 @@ func generateVirtualMachineImage() *vmoprv1.VirtualMachineImage { }, CoreDNS: ImageVersion{ ImageRepository: dummyImageRepository, - Version: dummyDnsVersion, + Version: dummyDNSVersion, }, } @@ -426,7 +410,6 @@ func createClusterComponents(testNamespace string) *ClusterComponents { } func createControlPlaneComponentsList(testNamespace string) []*ControlPlaneComponents { - cpMachineNameFmt := "%s-control-plane-%d" var controlPlaneComponentsList []*ControlPlaneComponents @@ -443,8 +426,8 @@ func createControlPlaneComponentsList(testNamespace string) []*ControlPlaneCompo Name: fmt.Sprintf(cpMachineNameFmt, testClusterName, i), Namespace: testNamespace, Labels: map[string]string{ - clusterv1.MachineControlPlaneLabelName: "true", - clusterv1.ClusterLabelName: testClusterName, + clusterv1.MachineControlPlaneLabel: "", + clusterv1.ClusterNameLabel: testClusterName, }, }, Spec: infrav1.VSphereMachineSpec{ @@ -486,8 +469,8 @@ func createControlPlaneComponentsList(testNamespace string) []*ControlPlaneCompo Name: fmt.Sprintf(cpMachineNameFmt, testClusterName, i), Namespace: testNamespace, Labels: map[string]string{ - clusterv1.MachineControlPlaneLabelName: "true", - clusterv1.ClusterLabelName: testClusterName, + clusterv1.MachineControlPlaneLabel: "", + clusterv1.ClusterNameLabel: testClusterName, }, }, Spec: clusterv1.MachineSpec{ @@ -532,7 +515,7 @@ func createWorkerComponents(testNamespace string) *WorkerComponents { Name: fmt.Sprintf(workerMachineDeploymentNameFmt, testClusterName), Namespace: testNamespace, Labels: map[string]string{ - clusterv1.ClusterLabelName: testClusterName, + clusterv1.ClusterNameLabel: testClusterName, }, }, } @@ -547,7 +530,7 @@ func createWorkerComponents(testNamespace string) *WorkerComponents { Name: fmt.Sprintf(workerMachineDeploymentNameFmt, testClusterName), Namespace: testNamespace, Labels: map[string]string{ - clusterv1.ClusterLabelName: testClusterName, + clusterv1.ClusterNameLabel: testClusterName, }, }, } @@ -563,7 +546,7 @@ func createWorkerComponents(testNamespace 
string) *WorkerComponents { Name: fmt.Sprintf(workerMachineDeploymentNameFmt, testClusterName), Namespace: testNamespace, Labels: map[string]string{ - clusterv1.ClusterLabelName: testClusterName, + clusterv1.ClusterNameLabel: testClusterName, }, }, Spec: clusterv1.MachineDeploymentSpec{ @@ -571,13 +554,13 @@ func createWorkerComponents(testNamespace string) *WorkerComponents { Replicas: &numWorker, Selector: metav1.LabelSelector{ MatchLabels: map[string]string{ - clusterv1.ClusterLabelName: testClusterName, + clusterv1.ClusterNameLabel: testClusterName, }, }, Template: clusterv1.MachineTemplateSpec{ ObjectMeta: clusterv1.ObjectMeta{ Labels: map[string]string{ - clusterv1.ClusterLabelName: testClusterName, + clusterv1.ClusterNameLabel: testClusterName, }, }, Spec: clusterv1.MachineSpec{ @@ -624,6 +607,7 @@ func createResource(resource schema.GroupVersionResource, obj runtimeObjectWithN Expect(err).NotTo(HaveOccurred(), "Error creating %s %s/%s", resource, obj.GetNamespace(), obj.GetName()) } +//nolint:unparam func deleteResource(resource schema.GroupVersionResource, name, namespace string, propagationPolicy *metav1.DeletionPropagation) { deleteOptions := metav1.DeleteOptions{PropagationPolicy: propagationPolicy} err := k8sClient.Resource(resource).Namespace(namespace).Delete(ctx, name, deleteOptions) @@ -648,6 +632,7 @@ func updateResourceStatus(resource schema.GroupVersionResource, obj runtimeObjec Expect(err).NotTo(HaveOccurred(), "Error updating status of %s %s/%s", resource, obj.GetNamespace(), obj.GetName()) } +//nolint:gocritic func assertEventuallyExists(resource schema.GroupVersionResource, name, ns string, ownerRef *metav1.OwnerReference) *unstructuredv1.Unstructured { var obj *unstructuredv1.Unstructured EventuallyWithOffset(1, func() (bool, error) { @@ -731,7 +716,7 @@ func assertVirtualMachineState(machine *clusterv1.Machine, vm *vmoprv1.VirtualMa } // assertClusterEventuallyGetsControlPlaneEndpoint ensures that the cluster -// receives a control plane endpoint that matches the expected IP address +// receives a control plane endpoint that matches the expected IP address. func assertClusterEventuallyGetsControlPlaneEndpoint(clusterName, clusterNs string, ipAddress string) { EventuallyWithOffset(1, func() bool { vsphereCluster := &infrav1.VSphereCluster{} @@ -786,18 +771,6 @@ func toOwnerRef(obj canBeReferenced) *metav1.OwnerReference { } } -func toControllerOwnerRef(obj canBeReferenced) *metav1.OwnerReference { - ptrBool := true - return &metav1.OwnerReference{ - APIVersion: obj.GroupVersionKind().GroupVersion().String(), - Kind: obj.GroupVersionKind().Kind, - Name: obj.GetName(), - UID: obj.GetUID(), - Controller: &ptrBool, - BlockOwnerDeletion: &ptrBool, - } -} - func setIPAddressOnMachine(machineName, machineNs, ipAddress string) { vsphereMachine := &infrav1.VSphereMachine{} getResource(vspheremachinesResource, machineName, machineNs, vsphereMachine) diff --git a/test/integration/sanity_test.go b/test/integration/sanity_test.go index ef0d98cc55..a377143439 100644 --- a/test/integration/sanity_test.go +++ b/test/integration/sanity_test.go @@ -70,8 +70,8 @@ var _ = Describe("Sanity tests", func() { // and that the VSphereCluster has an OwnerRef that points to the CAPI Cluster. 
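// The OwnerRef asserted below is a controller reference, hence Controller and BlockOwnerDeletion are set to true.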
cluster := assertEventuallyExists(clustersResource, mf.ClusterComponents.Cluster.Name, mf.ClusterComponents.Cluster.Namespace, nil) clusterOwnerRef := toOwnerRef(cluster) - clusterOwnerRef.Controller = pointer.BoolPtr(true) - clusterOwnerRef.BlockOwnerDeletion = pointer.BoolPtr(true) + clusterOwnerRef.Controller = pointer.Bool(true) + clusterOwnerRef.BlockOwnerDeletion = pointer.Bool(true) assertEventuallyExists(vsphereclustersResource, mf.ClusterComponents.Cluster.Name, mf.ClusterComponents.Cluster.Namespace, clusterOwnerRef) // CREATE the CAPI Machine, VSphereMachine, and KubeadmConfig resources for @@ -86,8 +86,8 @@ var _ = Describe("Sanity tests", func() { // the CAPI Machine. machine := assertEventuallyExists(machinesResource, controlPlane.Machine.Name, controlPlane.Machine.Namespace, nil) machineOwnerRef := toOwnerRef(machine) - machineOwnerRef.Controller = pointer.BoolPtr(true) - machineOwnerRef.BlockOwnerDeletion = pointer.BoolPtr(true) + machineOwnerRef.Controller = pointer.Bool(true) + machineOwnerRef.BlockOwnerDeletion = pointer.Bool(true) assertEventuallyExists(vspheremachinesResource, controlPlane.Machine.Name, controlPlane.Machine.Namespace, machineOwnerRef) assertEventuallyExists(kubeadmconfigResources, controlPlane.Machine.Name, controlPlane.Machine.Namespace, machineOwnerRef) })