From 05659db4f9173bc6def04c857e1d1944183a793d Mon Sep 17 00:00:00 2001 From: Rajashree Mandaogane Date: Tue, 16 Nov 2021 08:58:18 -0800 Subject: [PATCH] Accept service account issuer for Pod based IAM EKS-A users can leverage the IAM Roles for Service Accounts (IRSA) feature by following the [special instructions for DIY Kubernetes](https://github.com/aws/amazon-eks-pod-identity-webhook/blob/master/SELF_HOSTED_SETUP.md) to enable AWS authentication. This commit enables that by allowing the service-account-issuer flag for kube-apiserver to be configured with the identity issuer URL, as described in the self-hosted setup guide. An example cluster spec using the new field is sketched after the patch. --- .../anywhere.eks.amazonaws.com_clusters.yaml | 7 + pkg/api/v1alpha1/cluster.go | 11 + pkg/api/v1alpha1/cluster_types.go | 15 + pkg/api/v1alpha1/cluster_types_test.go | 49 + pkg/providers/docker/config/template-cp.yaml | 3 + pkg/providers/docker/docker.go | 3 + pkg/providers/docker/docker_test.go | 35 + .../valid_deployment_cp_pod_iam_expected.yaml | 274 ++++++ pkg/providers/vsphere/config/template-cp.yaml | 3 + .../expected_results_pod_iam_config.yaml | 889 ++++++++++++++++++ pkg/providers/vsphere/vsphere.go | 4 + pkg/providers/vsphere/vsphere_test.go | 32 + 12 files changed, 1325 insertions(+) create mode 100644 pkg/providers/docker/testdata/valid_deployment_cp_pod_iam_expected.yaml create mode 100644 pkg/providers/vsphere/testdata/expected_results_pod_iam_config.yaml diff --git a/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml b/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml index 08f30330198a..6bd893fe05b2 100644 --- a/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml +++ b/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml @@ -166,6 +166,13 @@ spec: description: 'Deprecated: This field has no function and is going to be removed in a future release.'
type: string + podIamConfig: + properties: + serviceAccountIssuer: + type: string + required: + - serviceAccountIssuer + type: object proxyConfiguration: properties: httpProxy: diff --git a/pkg/api/v1alpha1/cluster.go b/pkg/api/v1alpha1/cluster.go index 0fa97df2fa8c..302d917947e4 100644 --- a/pkg/api/v1alpha1/cluster.go +++ b/pkg/api/v1alpha1/cluster.go @@ -163,6 +163,7 @@ var clusterConfigValidations = []func(*Cluster) error{ validateIdentityProviderRefs, validateProxyConfig, validateMirrorConfig, + validatePodIAMConfig, } func GetClusterConfig(fileName string) (*Cluster, error) { @@ -465,3 +466,13 @@ func validateGitOps(clusterConfig *Cluster) error { } return nil } + +func validatePodIAMConfig(clusterConfig *Cluster) error { + if clusterConfig.Spec.PodIAMConfig == nil { + return nil + } + if clusterConfig.Spec.PodIAMConfig.ServiceAccountIssuer == "" { + return errors.New("ServiceAccount Issuer can't be empty while configuring IAM roles for pods") + } + return nil +} diff --git a/pkg/api/v1alpha1/cluster_types.go b/pkg/api/v1alpha1/cluster_types.go index 902132649389..cd68ae15d04b 100644 --- a/pkg/api/v1alpha1/cluster_types.go +++ b/pkg/api/v1alpha1/cluster_types.go @@ -47,6 +47,7 @@ type ClusterSpec struct { ProxyConfiguration *ProxyConfiguration `json:"proxyConfiguration,omitempty"` RegistryMirrorConfiguration *RegistryMirrorConfiguration `json:"registryMirrorConfiguration,omitempty"` ManagementCluster ManagementCluster `json:"managementCluster,omitempty"` + PodIAMConfig *PodIAMConfig `json:"podIamConfig,omitempty"` } func (n *Cluster) Equal(o *Cluster) bool { @@ -363,6 +364,20 @@ func (n *ManagementCluster) Equal(o ManagementCluster) bool { return n.Name == o.Name } +type PodIAMConfig struct { + ServiceAccountIssuer string `json:"serviceAccountIssuer"` +} + +func (n *PodIAMConfig) Equal(o *PodIAMConfig) bool { + if n == o { + return true + } + if n == nil || o == nil { + return false + } + return n.ServiceAccountIssuer == o.ServiceAccountIssuer +} + // +kubebuilder:object:root=true // Cluster is the Schema for the clusters API type Cluster struct { diff --git a/pkg/api/v1alpha1/cluster_types_test.go b/pkg/api/v1alpha1/cluster_types_test.go index a5dcfac7f939..45253acefd7b 100644 --- a/pkg/api/v1alpha1/cluster_types_test.go +++ b/pkg/api/v1alpha1/cluster_types_test.go @@ -1159,6 +1159,55 @@ func TestRegistryMirrorConfigurationEqual(t *testing.T) { } } +func TestPodIAMServiceAccountIssuerHasNotChanged(t *testing.T) { + testCases := []struct { + testName string + cluster1PodIAMConfig, cluster2PodIAMConfig *v1alpha1.PodIAMConfig + want bool + }{ + { + testName: "both nil", + cluster1PodIAMConfig: nil, + cluster2PodIAMConfig: nil, + want: true, + }, + { + testName: "one nil, one exists", + cluster1PodIAMConfig: &v1alpha1.PodIAMConfig{ + ServiceAccountIssuer: "https://test", + }, + cluster2PodIAMConfig: nil, + want: false, + }, + { + testName: "both exist, same", + cluster1PodIAMConfig: &v1alpha1.PodIAMConfig{ + ServiceAccountIssuer: "https://test", + }, + cluster2PodIAMConfig: &v1alpha1.PodIAMConfig{ + ServiceAccountIssuer: "https://test", + }, + want: true, + }, + { + testName: "both exist, service account issuer different", + cluster1PodIAMConfig: &v1alpha1.PodIAMConfig{ + ServiceAccountIssuer: "https://test1", + }, + cluster2PodIAMConfig: &v1alpha1.PodIAMConfig{ + ServiceAccountIssuer: "https://test2", + }, + want: false, + }, + } + for _, tt := range testCases { + t.Run(tt.testName, func(t *testing.T) { + g := NewWithT(t) + 
g.Expect(tt.cluster1PodIAMConfig.Equal(tt.cluster2PodIAMConfig)).To(Equal(tt.want)) + }) + } +} + func setSelfManaged(c *v1alpha1.Cluster, s bool) { if s { c.SetSelfManaged() diff --git a/pkg/providers/docker/config/template-cp.yaml b/pkg/providers/docker/config/template-cp.yaml index 21425de89ab8..cf1ada10b547 100644 --- a/pkg/providers/docker/config/template-cp.yaml +++ b/pkg/providers/docker/config/template-cp.yaml @@ -88,6 +88,9 @@ spec: audit-log-maxbackup: "10" audit-log-maxsize: "512" profiling: "false" +{{- if .serviceAccountIssuer }} + service-account-issuer: {{ .serviceAccountIssuer }} +{{- end }} {{- if .apiserverExtraArgs }} {{ .apiserverExtraArgs.ToYaml | indent 10 }} {{- end }} diff --git a/pkg/providers/docker/docker.go b/pkg/providers/docker/docker.go index 266dacb7dbab..e26f8c6856ca 100644 --- a/pkg/providers/docker/docker.go +++ b/pkg/providers/docker/docker.go @@ -199,6 +199,9 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) map[string]interface{} { values["controlPlaneTaints"] = clusterSpec.Spec.ControlPlaneConfiguration.Taints } + if clusterSpec.Spec.PodIAMConfig != nil { + values["serviceAccountIssuer"] = clusterSpec.Spec.PodIAMConfig.ServiceAccountIssuer + } return values } diff --git a/pkg/providers/docker/docker_test.go b/pkg/providers/docker/docker_test.go index 893a44d3bae4..da06587b5e09 100644 --- a/pkg/providers/docker/docker_test.go +++ b/pkg/providers/docker/docker_test.go @@ -416,3 +416,38 @@ func TestChangeDiffWithChange(t *testing.T) { tt.Expect(tt.provider.ChangeDiff(clusterSpec, newClusterSpec)).To(Equal(wantDiff)) } + +func TestProviderGenerateCAPISpecForCreateWithPodIAMConfig(t *testing.T) { + mockCtrl := gomock.NewController(t) + ctx := context.Background() + client := dockerMocks.NewMockProviderClient(mockCtrl) + kubectl := dockerMocks.NewMockProviderKubectlClient(mockCtrl) + provider := docker.NewProvider(&v1alpha1.DockerDatacenterConfig{}, client, kubectl, test.FakeNow) + clusterObj := &types.Cluster{ + Name: "test-cluster", + } + clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) { + s.Name = "test-cluster" + s.Spec.KubernetesVersion = "1.19" + s.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} + s.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} + s.Spec.ControlPlaneConfiguration.Count = 1 + s.VersionsBundle = versionsBundle + }) + clusterSpec.Spec.PodIAMConfig = &v1alpha1.PodIAMConfig{ServiceAccountIssuer: "https://test"} + + if provider == nil { + t.Fatalf("provider object is nil") + } + + err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec) + if err != nil { + t.Fatalf("failed to setup and validate: %v", err) + } + + cp, _, err := provider.GenerateCAPISpecForCreate(context.Background(), clusterObj, clusterSpec) + if err != nil { + t.Fatalf("failed to generate cluster api spec contents: %v", err) + } + test.AssertContentToFile(t, string(cp), "testdata/valid_deployment_cp_pod_iam_expected.yaml") +} diff --git a/pkg/providers/docker/testdata/valid_deployment_cp_pod_iam_expected.yaml b/pkg/providers/docker/testdata/valid_deployment_cp_pod_iam_expected.yaml new file mode 100644 index 000000000000..a7e19f0be797 --- /dev/null +++ b/pkg/providers/docker/testdata/valid_deployment_cp_pod_iam_expected.yaml @@ -0,0 +1,274 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: test-cluster + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + serviceDomain: cluster.local + services: + cidrBlocks: [10.128.0.0/12] + controlPlaneRef: + 
apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: KubeadmControlPlane + name: test-cluster + namespace: eksa-system + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: DockerCluster + name: test-cluster + namespace: eksa-system +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerCluster +metadata: + name: test-cluster + namespace: eksa-system +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: DockerMachineTemplate +metadata: + name: test-cluster-control-plane-template-1234567890000 + namespace: eksa-system +spec: + template: + spec: + extraMounts: + - containerPath: /var/run/docker.sock + hostPath: /var/run/docker.sock + customImage: public.ecr.aws/eks-distro/kubernetes-sigs/kind/node:v1.18.16-eks-1-18-4-216edda697a37f8bf16651af6c23b7e2bb7ef42f-62681885fe3a97ee4f2b110cc277e084e71230fa +--- +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +kind: KubeadmControlPlane +metadata: + name: test-cluster + namespace: eksa-system +spec: + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: DockerMachineTemplate + name: test-cluster-control-plane-template-1234567890000 + namespace: eksa-system + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-19-2 + dns: + type: CoreDNS + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-2 + apiServer: + certSANs: + - localhost + - 127.0.0.1 + extraArgs: + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + service-account-issuer: https://test + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + - hostPath: /var/log/kubernetes/api-audit.log + mountPath: /var/log/kubernetes/api-audit.log + name: audit-log + pathType: FileOrCreate + readOnly: false + controllerManager: + extraArgs: + enable-hostpath-provisioner: "true" + profiling: "false" + scheduler: + extraArgs: + profiling: "false" + files: + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. 
+ - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + taints: [] + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + taints: [] + replicas: 1 + version: v1.19.6-eks-1-19-2 diff --git a/pkg/providers/vsphere/config/template-cp.yaml b/pkg/providers/vsphere/config/template-cp.yaml index a0ffd50c1900..da6d3772435c 100644 --- a/pkg/providers/vsphere/config/template-cp.yaml +++ b/pkg/providers/vsphere/config/template-cp.yaml @@ -153,6 +153,9 @@ spec: audit-log-maxbackup: "10" audit-log-maxsize: "512" profiling: "false" +{{- if .serviceAccountIssuer }} + service-account-issuer: {{ .serviceAccountIssuer }} +{{- end }} {{- if .apiserverExtraArgs }} {{ .apiserverExtraArgs.ToYaml | indent 10 }} {{- end }} diff --git a/pkg/providers/vsphere/testdata/expected_results_pod_iam_config.yaml b/pkg/providers/vsphere/testdata/expected_results_pod_iam_config.yaml new file mode 100644 index 000000000000..94dc5e019797 --- /dev/null +++ b/pkg/providers/vsphere/testdata/expected_results_pod_iam_config.yaml @@ -0,0 +1,889 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: VSphereCluster + name: test + managedExternalEtcdRef: + apiVersion: etcdcluster.cluster.x-k8s.io/v1alpha3 + kind: EtcdadmCluster + name: test-etcd +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: VSphereCluster +metadata: + name: test + namespace: eksa-system +spec: + cloudProviderConfiguration: + global: + 
secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: 'ABCDEFG' + insecure: false + network: + name: /SDDC-Datacenter/network/sddc-cgw-network-1 + providerConfig: + cloud: + controllerImage: public.ecr.aws/l0g8r8j6/kubernetes/cloud-provider-vsphere/cpi/manager:v1.18.1-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + virtualCenter: + vsphere_server: + datacenters: SDDC-Datacenter + thumbprint: 'ABCDEFG' + workspace: + datacenter: SDDC-Datacenter + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + folder: '/SDDC-Datacenter/vm' + resourcePool: '*/Resources' + server: vsphere_server + controlPlaneEndpoint: + host: 1.2.3.4 + port: 6443 + server: vsphere_server + thumbprint: 'ABCDEFG' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: VSphereMachineTemplate +metadata: + name: test-control-plane-template-1234567890000 + namespace: eksa-system +spec: + template: + spec: + cloneMode: linkedClone + datacenter: SDDC-Datacenter + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 2 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: VSphereMachineTemplate + name: test-control-plane-template-1234567890000 + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + external: + endpoints: [] + caFile: "/etc/kubernetes/pki/etcd/ca.crt" + certFile: "/etc/kubernetes/pki/apiserver-etcd-client.crt" + keyFile: "/etc/kubernetes/pki/apiserver-etcd-client.key" + dns: + type: CoreDNS + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + apiServer: + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + service-account-issuer: https://test + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + - hostPath: /var/log/kubernetes/api-audit.log + mountPath: /var/log/kubernetes/api-audit.log + name: audit-log + pathType: FileOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + scheduler: + extraArgs: + profiling: "false" + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - start + env: + - name: vip_arp + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_address + value: 1.2.3.4 + - name: vip_interface + value: eth0 + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + image: 
public.ecr.aws/l0g8r8j6/plunder-app/kube-vip:v0.3.2-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + type: FileOrCreate + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data.hostname }}' + taints: [] + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data.hostname }}' + taints: [] + preKubeadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + useExperimentalRetryJoin: true + users: + - name: capv + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + replicas: 3 + version: v1.19.8-eks-1-19-4 +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha3 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-crs-0 + namespace: eksa-system +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: test + resources: + - kind: Secret + name: vsphere-csi-controller + - kind: ConfigMap + name: vsphere-csi-controller-role + - kind: ConfigMap + name: vsphere-csi-controller-binding + - kind: Secret + name: csi-vsphere-config + - kind: ConfigMap + name: csi.vsphere.vmware.com + - kind: ConfigMap + name: 
vsphere-csi-node + - kind: ConfigMap + name: vsphere-csi-controller +--- +kind: EtcdadmCluster +apiVersion: etcdcluster.cluster.x-k8s.io/v1alpha3 +metadata: + name: test-etcd + namespace: eksa-system +spec: + replicas: 3 + etcdadmConfigSpec: + etcdadmBuiltin: true + format: cloud-config + cloudInitConfig: + version: 3.4.14 + installDir: "/usr/bin" + preEtcdadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + users: + - name: capv + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: VSphereMachineTemplate + name: test-etcd-template-1234567890000 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: VSphereMachineTemplate +metadata: + name: test-etcd-template-1234567890000 + namespace: 'eksa-system' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: SDDC-Datacenter + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 3 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: v1 +kind: Secret +metadata: + name: vsphere-csi-controller + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-controller + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-controller-role + rules: + - apiGroups: + - storage.k8s.io + resources: + - csidrivers + verbs: + - create + - delete + - apiGroups: + - "" + resources: + - nodes + - pods + - secrets + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - create + - delete + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + - csinodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - 
list + - watch + - create + - update + - patch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list +kind: ConfigMap +metadata: + name: vsphere-csi-controller-role + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vsphere-csi-controller-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-controller-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: kube-system +kind: ConfigMap +metadata: + name: vsphere-csi-controller-binding + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: storage.k8s.io/v1 + kind: CSIDriver + metadata: + name: csi.vsphere.vmware.com + spec: + attachRequired: true +kind: ConfigMap +metadata: + name: csi.vsphere.vmware.com + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node + namespace: kube-system + spec: + selector: + matchLabels: + app: vsphere-csi-node + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar:v2.1.0-eks-1-19-4 + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock + name: node-driver-registrar + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: node + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: public.ecr.aws/l0g8r8j6/kubernetes-sigs/vsphere-csi-driver/csi/driver:v2.2.0-7c2690c880c6521afdd9ffa8d90443a11c6b817b + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SYS_ADMIN + privileged: true + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + - mountPath: /csi + name: plugin-dir + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: pods-mount-dir + - mountPath: /dev + name: device-dir + - args: + - --csi-address=/csi/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe:v2.2.0-eks-1-19-4 + name: liveness-probe + resources: {} + volumeMounts: + - mountPath: /csi + name: plugin-dir + dnsPolicy: Default + tolerations: + - effect: NoSchedule + operator: Exists + - effect: 
NoExecute + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: csi-vsphere-config + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: /dev + name: device-dir + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: vsphere-csi-node + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: vsphere-csi-controller + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + app: vsphere-csi-controller + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + containers: + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + env: + - name: ADDRESS + value: /csi/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/external-attacher:v3.1.0-eks-1-19-4 + name: csi-attacher + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + - env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: X_CSI_MODE + value: controller + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + image: public.ecr.aws/l0g8r8j6/kubernetes-sigs/vsphere-csi-driver/csi/driver:v2.2.0-7c2690c880c6521afdd9ffa8d90443a11c6b817b + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-controller + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe:v2.2.0-eks-1-19-4 + name: liveness-probe + resources: {} + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --leader-election + env: + - name: X_CSI_FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + image: public.ecr.aws/l0g8r8j6/kubernetes-sigs/vsphere-csi-driver/csi/syncer:v2.2.0-7c2690c880c6521afdd9ffa8d90443a11c6b817b + name: vsphere-syncer + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /csi/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner:v2.1.1-eks-1-19-4 + name: csi-provisioner + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + dnsPolicy: Default + serviceAccountName: vsphere-csi-controller + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: csi-vsphere-config + - emptyDir: {} + name: socket-dir +kind: ConfigMap +metadata: + name: vsphere-csi-controller + namespace: eksa-system +--- 
+apiVersion: v1 +data: + data: | + apiVersion: v1 + data: + csi-migration: "false" + kind: ConfigMap + metadata: + name: internal-feature-states.csi.vsphere.vmware.com + namespace: kube-system +kind: ConfigMap +metadata: + name: internal-feature-states.csi.vsphere.vmware.com + namespace: eksa-system diff --git a/pkg/providers/vsphere/vsphere.go b/pkg/providers/vsphere/vsphere.go index 416930084cac..940372fab5a9 100644 --- a/pkg/providers/vsphere/vsphere.go +++ b/pkg/providers/vsphere/vsphere.go @@ -1245,6 +1245,10 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec, datacenterSpec v1alpha1.VSphe values["awsIamAuth"] = true } + if clusterSpec.Spec.PodIAMConfig != nil { + values["serviceAccountIssuer"] = clusterSpec.Spec.PodIAMConfig.ServiceAccountIssuer + } + return values } diff --git a/pkg/providers/vsphere/vsphere_test.go b/pkg/providers/vsphere/vsphere_test.go index c8d8eca7c8c8..a55059d616a8 100644 --- a/pkg/providers/vsphere/vsphere_test.go +++ b/pkg/providers/vsphere/vsphere_test.go @@ -2671,3 +2671,35 @@ func TestProviderUpgradeNeeded(t *testing.T) { }) } } + +func TestProviderGenerateCAPISpecForCreateWithPodIAMConfig(t *testing.T) { + mockCtrl := gomock.NewController(t) + var tctx testContext + tctx.SaveContext() + defer tctx.RestoreContext() + ctx := context.Background() + kubectl := mocks.NewMockProviderKubectlClient(mockCtrl) + cluster := &types.Cluster{ + Name: "test", + } + clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename) + clusterSpec.Spec.PodIAMConfig = &v1alpha1.PodIAMConfig{ServiceAccountIssuer: "https://test"} + + datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename) + machineConfigs := givenMachineConfigs(t, testClusterConfigMainFilename) + provider := newProviderWithKubectl(t, datacenterConfig, machineConfigs, clusterSpec.Cluster, kubectl) + if provider == nil { + t.Fatalf("provider object is nil") + } + + err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec) + if err != nil { + t.Fatalf("failed to setup and validate: %v", err) + } + + cp, _, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec) + if err != nil { + t.Fatalf("failed to generate cluster api spec contents: %v", err) + } + test.AssertContentToFile(t, string(cp), "testdata/expected_results_pod_iam_config.yaml") +}
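For reference, a minimal sketch of the user-facing configuration this patch enables, assuming the anywhere.eks.amazonaws.com/v1alpha1 Cluster API indicated by the CRD above; the cluster name and issuer URL are placeholders, and the issuer would typically be the public URL hosting the OIDC discovery documents per the self-hosted setup guide (all other required Cluster spec fields are omitted for brevity):

apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Cluster
metadata:
  name: my-cluster                 # placeholder cluster name
spec:
  # ... kubernetesVersion, controlPlaneConfiguration, clusterNetwork, etc. ...
  podIamConfig:
    serviceAccountIssuer: "https://my-irsa-oidc-bucket.s3.amazonaws.com"   # placeholder issuer URL

With podIamConfig set, buildTemplateMapCP passes the value into the control plane templates as serviceAccountIssuer, which renders under apiServer.extraArgs as service-account-issuer (see the expected testdata files above); setting podIamConfig with an empty serviceAccountIssuer is rejected by validatePodIAMConfig.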