diff --git a/test/infrastructure/docker/api/v1alpha3/conversion.go b/test/infrastructure/docker/api/v1alpha3/conversion.go index 6bdb64825187..ba133d350891 100644 --- a/test/infrastructure/docker/api/v1alpha3/conversion.go +++ b/test/infrastructure/docker/api/v1alpha3/conversion.go @@ -45,6 +45,10 @@ func (src *DockerCluster) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.LoadBalancer.ImageTag = restored.Spec.LoadBalancer.ImageTag } + if restored.Spec.LoadBalancer.CustomHAProxyConfigTemplateRef != nil { + dst.Spec.LoadBalancer.CustomHAProxyConfigTemplateRef = restored.Spec.LoadBalancer.CustomHAProxyConfigTemplateRef + } + return nil } diff --git a/test/infrastructure/docker/api/v1alpha4/conversion.go b/test/infrastructure/docker/api/v1alpha4/conversion.go index a1b42d8f4d53..e8c9ae8d8236 100644 --- a/test/infrastructure/docker/api/v1alpha4/conversion.go +++ b/test/infrastructure/docker/api/v1alpha4/conversion.go @@ -27,13 +27,35 @@ import ( func (src *DockerCluster) ConvertTo(dstRaw conversion.Hub) error { dst := dstRaw.(*infrav1.DockerCluster) - return Convert_v1alpha4_DockerCluster_To_v1beta1_DockerCluster(src, dst, nil) + if err := Convert_v1alpha4_DockerCluster_To_v1beta1_DockerCluster(src, dst, nil); err != nil { + return err + } + + // Manually restore data. 
+ restored := &infrav1.DockerCluster{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + + if restored.Spec.LoadBalancer.CustomHAProxyConfigTemplateRef != nil { + dst.Spec.LoadBalancer.CustomHAProxyConfigTemplateRef = restored.Spec.LoadBalancer.CustomHAProxyConfigTemplateRef + } + + return nil } func (dst *DockerCluster) ConvertFrom(srcRaw conversion.Hub) error { src := srcRaw.(*infrav1.DockerCluster) - return Convert_v1beta1_DockerCluster_To_v1alpha4_DockerCluster(src, dst, nil) + if err := Convert_v1beta1_DockerCluster_To_v1alpha4_DockerCluster(src, dst, nil); err != nil { + return err + } + + if err := utilconversion.MarshalData(src, dst); err != nil { + return err + } + + return nil } func (src *DockerClusterList) ConvertTo(dstRaw conversion.Hub) error { @@ -63,6 +85,10 @@ func (src *DockerClusterTemplate) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Template.ObjectMeta = restored.Spec.Template.ObjectMeta + if restored.Spec.Template.Spec.LoadBalancer.CustomHAProxyConfigTemplateRef != nil { + dst.Spec.Template.Spec.LoadBalancer.CustomHAProxyConfigTemplateRef = restored.Spec.Template.Spec.LoadBalancer.CustomHAProxyConfigTemplateRef + } + return nil } @@ -171,3 +197,7 @@ func Convert_v1beta1_DockerMachineTemplateResource_To_v1alpha4_DockerMachineTemp // NOTE: custom conversion func is required because spec.template.metadata has been added in v1beta1. 
return autoConvert_v1beta1_DockerMachineTemplateResource_To_v1alpha4_DockerMachineTemplateResource(in, out, s) } + +func Convert_v1beta1_DockerLoadBalancer_To_v1alpha4_DockerLoadBalancer(in *infrav1.DockerLoadBalancer, out *DockerLoadBalancer, s apiconversion.Scope) error { + return autoConvert_v1beta1_DockerLoadBalancer_To_v1alpha4_DockerLoadBalancer(in, out, s) +} diff --git a/test/infrastructure/docker/api/v1alpha4/zz_generated.conversion.go b/test/infrastructure/docker/api/v1alpha4/zz_generated.conversion.go index 9594641da789..71075f02d170 100644 --- a/test/infrastructure/docker/api/v1alpha4/zz_generated.conversion.go +++ b/test/infrastructure/docker/api/v1alpha4/zz_generated.conversion.go @@ -128,11 +128,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DockerLoadBalancer)(nil), (*DockerLoadBalancer)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DockerLoadBalancer_To_v1alpha4_DockerLoadBalancer(a.(*v1beta1.DockerLoadBalancer), b.(*DockerLoadBalancer), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*DockerMachine)(nil), (*v1beta1.DockerMachine)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha4_DockerMachine_To_v1beta1_DockerMachine(a.(*DockerMachine), b.(*v1beta1.DockerMachine), scope) }); err != nil { @@ -233,6 +228,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta1.DockerLoadBalancer)(nil), (*DockerLoadBalancer)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DockerLoadBalancer_To_v1alpha4_DockerLoadBalancer(a.(*v1beta1.DockerLoadBalancer), b.(*DockerLoadBalancer), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta1.DockerMachineTemplateResource)(nil), (*DockerMachineTemplateResource)(nil), func(a, b interface{}, 
scope conversion.Scope) error { return Convert_v1beta1_DockerMachineTemplateResource_To_v1alpha4_DockerMachineTemplateResource(a.(*v1beta1.DockerMachineTemplateResource), b.(*DockerMachineTemplateResource), scope) }); err != nil { @@ -589,14 +589,10 @@ func autoConvert_v1beta1_DockerLoadBalancer_To_v1alpha4_DockerLoadBalancer(in *v if err := Convert_v1beta1_ImageMeta_To_v1alpha4_ImageMeta(&in.ImageMeta, &out.ImageMeta, s); err != nil { return err } + // WARNING: in.CustomHAProxyConfigTemplateRef requires manual conversion: does not exist in peer-type return nil } -// Convert_v1beta1_DockerLoadBalancer_To_v1alpha4_DockerLoadBalancer is an autogenerated conversion function. -func Convert_v1beta1_DockerLoadBalancer_To_v1alpha4_DockerLoadBalancer(in *v1beta1.DockerLoadBalancer, out *DockerLoadBalancer, s conversion.Scope) error { - return autoConvert_v1beta1_DockerLoadBalancer_To_v1alpha4_DockerLoadBalancer(in, out, s) -} - func autoConvert_v1alpha4_DockerMachine_To_v1beta1_DockerMachine(in *DockerMachine, out *v1beta1.DockerMachine, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1alpha4_DockerMachineSpec_To_v1beta1_DockerMachineSpec(&in.Spec, &out.Spec, s); err != nil { diff --git a/test/infrastructure/docker/api/v1beta1/dockercluster_types.go b/test/infrastructure/docker/api/v1beta1/dockercluster_types.go index b4c138bb69f4..617938a6ee67 100644 --- a/test/infrastructure/docker/api/v1beta1/dockercluster_types.go +++ b/test/infrastructure/docker/api/v1beta1/dockercluster_types.go @@ -17,6 +17,7 @@ limitations under the License. package v1beta1 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -53,6 +54,17 @@ type DockerClusterSpec struct { type DockerLoadBalancer struct { // ImageMeta allows customizing the image used for the cluster load balancer. 
ImageMeta `json:",inline"` + + // CustomHAProxyConfigTemplateRef allows you to replace the default HAProxy config file. + // This field is a reference to a config map that contains the configuration template. The key of the config map should be equal to 'value'. + // The content of the config map will be processed and will replace the default HAProxy config file. Please use it with caution, as there are + // no checks to ensure the validity of the configuration. This template will support the following variables that will be passed by the controller: + // $IPv6 (bool) indicates if the cluster is IPv6, $FrontendControlPlanePort (string) indicates the frontend control plane port, + // $BackendControlPlanePort (string) indicates the backend control plane port, $BackendServers (map[string]string) indicates the backend server + // where the key is the server name and the value is the address. This map is dynamic and is updated every time a new control plane + // node is added or removed. The template will also support the JoinHostPort function to join the host and port of the backend server. + // +optional + CustomHAProxyConfigTemplateRef *corev1.LocalObjectReference `json:"customHAProxyConfigTemplateRef,omitempty"` } // ImageMeta allows customizing the image used for components that are not diff --git a/test/infrastructure/docker/api/v1beta1/zz_generated.deepcopy.go b/test/infrastructure/docker/api/v1beta1/zz_generated.deepcopy.go index a409b01f326f..b73abe209a17 100644 --- a/test/infrastructure/docker/api/v1beta1/zz_generated.deepcopy.go +++ b/test/infrastructure/docker/api/v1beta1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ limitations under the License. 
package v1beta1 import ( + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" ) @@ -111,7 +112,7 @@ func (in *DockerClusterSpec) DeepCopyInto(out *DockerClusterSpec) { (*out)[key] = *val.DeepCopy() } } - out.LoadBalancer = in.LoadBalancer + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerClusterSpec. @@ -248,6 +249,11 @@ func (in *DockerClusterTemplateSpec) DeepCopy() *DockerClusterTemplateSpec { func (in *DockerLoadBalancer) DeepCopyInto(out *DockerLoadBalancer) { *out = *in out.ImageMeta = in.ImageMeta + if in.CustomHAProxyConfigTemplateRef != nil { + in, out := &in.CustomHAProxyConfigTemplateRef, &out.CustomHAProxyConfigTemplateRef + *out = new(v1.LocalObjectReference) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerLoadBalancer. diff --git a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockerclusters.yaml b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockerclusters.yaml index a2b88acf0c29..fff0a46a253f 100644 --- a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockerclusters.yaml +++ b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockerclusters.yaml @@ -385,6 +385,31 @@ spec: description: LoadBalancer allows defining configurations for the cluster load balancer. properties: + customHAProxyConfigTemplateRef: + description: 'CustomHAProxyConfigTemplateRef allows you to replace + the default HAProxy config file. This field is a reference to + a config map that contains the configuration template. The key + of the config map should be equal to ''value''. The content + of the config map will be processed and will replace the default + HAProxy config file. 
Please use it with caution, as there are + no checks to ensure the validity of the configuration. This + template will support the following variables that will be passed + by the controller: $IPv6 (bool) indicates if the cluster is + IPv6, $FrontendControlPlanePort (string) indicates the frontend + control plane port, $BackendControlPlanePort (string) indicates + the backend control plane port, $BackendServers (map[string]string) + indicates the backend server where the key is the server name + and the value is the address. This map is dynamic and is updated + every time a new control plane node is added or removed. The + template will also support the JoinHostPort function to join + the host and port of the backend server.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic imageRepository: description: ImageRepository sets the container registry to pull the haproxy image from. if not set, "kindest" will be used instead. diff --git a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockerclustertemplates.yaml b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockerclustertemplates.yaml index 43bdd93fc62b..585b832ce921 100644 --- a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockerclustertemplates.yaml +++ b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockerclustertemplates.yaml @@ -216,6 +216,34 @@ spec: description: LoadBalancer allows defining configurations for the cluster load balancer. properties: + customHAProxyConfigTemplateRef: + description: 'CustomHAProxyConfigTemplateRef allows you + to replace the default HAProxy config file. This field + is a reference to a config map that contains the configuration + template. 
The key of the config map should be equal + to ''value''. The content of the config map will be + processed and will replace the default HAProxy config + file. Please use it with caution, as there are no checks + to ensure the validity of the configuration. This template + will support the following variables that will be passed + by the controller: $IPv6 (bool) indicates if the cluster + is IPv6, $FrontendControlPlanePort (string) indicates + the frontend control plane port, $BackendControlPlanePort + (string) indicates the backend control plane port, $BackendServers + (map[string]string) indicates the backend server where + the key is the server name and the value is the address. + This map is dynamic and is updated every time a new + control plane node is added or removed. The template + will also support the JoinHostPort function to join + the host and port of the backend server.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic imageRepository: description: ImageRepository sets the container registry to pull the haproxy image from. 
if not set, "kindest" diff --git a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go index 8936020ebbb6..4d3a2cf3588b 100644 --- a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go @@ -25,6 +25,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -167,11 +168,11 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques // Handle deleted machines if !dockerMachine.ObjectMeta.DeletionTimestamp.IsZero() { - return ctrl.Result{}, r.reconcileDelete(ctx, machine, dockerMachine, externalMachine, externalLoadBalancer) + return ctrl.Result{}, r.reconcileDelete(ctx, dockerCluster, machine, dockerMachine, externalMachine, externalLoadBalancer) } // Handle non-deleted machines - res, err := r.reconcileNormal(ctx, cluster, machine, dockerMachine, externalMachine, externalLoadBalancer) + res, err := r.reconcileNormal(ctx, cluster, dockerCluster, machine, dockerMachine, externalMachine, externalLoadBalancer) // Requeue if the reconcile failed because the ClusterCacheTracker was locked for // the current cluster because of concurrent access. 
if errors.Is(err, remote.ErrClusterLocked) { @@ -204,7 +205,7 @@ func patchDockerMachine(ctx context.Context, patchHelper *patch.Helper, dockerMa ) } -func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, dockerMachine *infrav1.DockerMachine, externalMachine *docker.Machine, externalLoadBalancer *docker.LoadBalancer) (res ctrl.Result, retErr error) { +func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, dockerCluster *infrav1.DockerCluster, machine *clusterv1.Machine, dockerMachine *infrav1.DockerMachine, externalMachine *docker.Machine, externalLoadBalancer *docker.LoadBalancer) (res ctrl.Result, retErr error) { log := ctrl.LoggerFrom(ctx) // Check if the infrastructure is ready, otherwise return and wait for the cluster object to be updated @@ -271,7 +272,11 @@ func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster * // we should only do this once, as reconfiguration more or less ensures // node ref setting fails if util.IsControlPlaneMachine(machine) && !dockerMachine.Status.LoadBalancerConfigured { - if err := externalLoadBalancer.UpdateConfiguration(ctx); err != nil { + unsafeLoadBalancerConfigTemplate, err := r.getUnsafeLoadBalancerConfigTemplate(ctx, dockerCluster) + if err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to retrieve HAProxy configuration from CustomHAProxyConfigTemplateRef") + } + if err := externalLoadBalancer.UpdateConfiguration(ctx, unsafeLoadBalancerConfigTemplate); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to update DockerCluster.loadbalancer configuration") } dockerMachine.Status.LoadBalancerConfigured = true @@ -390,7 +395,7 @@ func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster * return ctrl.Result{}, nil } -func (r *DockerMachineReconciler) reconcileDelete(ctx context.Context, machine *clusterv1.Machine, dockerMachine 
*infrav1.DockerMachine, externalMachine *docker.Machine, externalLoadBalancer *docker.LoadBalancer) error { +func (r *DockerMachineReconciler) reconcileDelete(ctx context.Context, dockerCluster *infrav1.DockerCluster, machine *clusterv1.Machine, dockerMachine *infrav1.DockerMachine, externalMachine *docker.Machine, externalLoadBalancer *docker.LoadBalancer) error { // Set the ContainerProvisionedCondition reporting delete is started, and issue a patch in order to make // this visible to the users. // NB. The operation in docker is fast, so there is the chance the user will not notice the status change; @@ -411,7 +416,11 @@ func (r *DockerMachineReconciler) reconcileDelete(ctx context.Context, machine * // if the deleted machine is a control-plane node, remove it from the load balancer configuration; if util.IsControlPlaneMachine(machine) { - if err := externalLoadBalancer.UpdateConfiguration(ctx); err != nil { + unsafeLoadBalancerConfigTemplate, err := r.getUnsafeLoadBalancerConfigTemplate(ctx, dockerCluster) + if err != nil { + return errors.Wrap(err, "failed to retrieve HAProxy configuration from CustomHAProxyConfigTemplateRef") + } + if err := externalLoadBalancer.UpdateConfiguration(ctx, unsafeLoadBalancerConfigTemplate); err != nil { return errors.Wrap(err, "failed to update DockerCluster.loadbalancer configuration") } } @@ -510,6 +519,25 @@ func (r *DockerMachineReconciler) getBootstrapData(ctx context.Context, machine return base64.StdEncoding.EncodeToString(value), bootstrapv1.Format(format), nil } +func (r *DockerMachineReconciler) getUnsafeLoadBalancerConfigTemplate(ctx context.Context, dockerCluster *infrav1.DockerCluster) (string, error) { + if dockerCluster.Spec.LoadBalancer.CustomHAProxyConfigTemplateRef == nil { + return "", nil + } + cm := &corev1.ConfigMap{} + key := types.NamespacedName{ + Name: dockerCluster.Spec.LoadBalancer.CustomHAProxyConfigTemplateRef.Name, + Namespace: dockerCluster.Namespace, + } + if err := r.Get(ctx, key, cm); err != nil 
{ + return "", errors.Wrapf(err, "failed to retrieve custom HAProxy configuration ConfigMap %s", key) + } + template, ok := cm.Data["value"] + if !ok { + return "", fmt.Errorf("expected key \"value\" to exist in ConfigMap %s", key) + } + return template, nil +} + // setMachineAddress gets the address from the container corresponding to a docker node and sets it on the Machine object. func setMachineAddress(ctx context.Context, dockerMachine *infrav1.DockerMachine, externalMachine *docker.Machine) error { machineAddresses, err := externalMachine.Address(ctx) diff --git a/test/infrastructure/docker/internal/docker/loadbalancer.go b/test/infrastructure/docker/internal/docker/loadbalancer.go index 2126165b1c1b..b590b1b6c621 100644 --- a/test/infrastructure/docker/internal/docker/loadbalancer.go +++ b/test/infrastructure/docker/internal/docker/loadbalancer.go @@ -19,7 +19,7 @@ package docker import ( "context" "fmt" - "net" + "strconv" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" @@ -38,12 +38,13 @@ type lbCreator interface { // LoadBalancer manages the load balancer for a specific docker cluster. type LoadBalancer struct { - name string - image string - container *types.Node - ipFamily clusterv1.ClusterIPFamily - lbCreator lbCreator - controlPlanePort int + name string + image string + container *types.Node + ipFamily clusterv1.ClusterIPFamily + lbCreator lbCreator + backendControlPlanePort string + frontendControlPlanePort string } // NewLoadBalancer returns a new helper for managing a docker loadbalancer with a given name. 
@@ -72,12 +73,13 @@ func NewLoadBalancer(ctx context.Context, cluster *clusterv1.Cluster, dockerClus image := getLoadBalancerImage(dockerCluster) return &LoadBalancer{ - name: cluster.Name, - image: image, - container: container, - ipFamily: ipFamily, - lbCreator: &Manager{}, - controlPlanePort: dockerCluster.Spec.ControlPlaneEndpoint.Port, + name: cluster.Name, + image: image, + container: container, + ipFamily: ipFamily, + lbCreator: &Manager{}, + frontendControlPlanePort: strconv.Itoa(dockerCluster.Spec.ControlPlaneEndpoint.Port), + backendControlPlanePort: "6443", }, nil } @@ -137,7 +139,7 @@ func (s *LoadBalancer) Create(ctx context.Context) error { } // UpdateConfiguration updates the external load balancer configuration with new control plane nodes. -func (s *LoadBalancer) UpdateConfiguration(ctx context.Context) error { +func (s *LoadBalancer) UpdateConfiguration(ctx context.Context, unsafeLoadBalancerConfig string) error { log := ctrl.LoggerFrom(ctx) if s.container == nil { @@ -161,17 +163,25 @@ func (s *LoadBalancer) UpdateConfiguration(ctx context.Context) error { return errors.Wrapf(err, "failed to get IP for container %s", n.String()) } if s.ipFamily == clusterv1.IPv6IPFamily { - backendServers[n.String()] = net.JoinHostPort(controlPlaneIPv6, "6443") + backendServers[n.String()] = controlPlaneIPv6 } else { - backendServers[n.String()] = net.JoinHostPort(controlPlaneIPv4, "6443") + backendServers[n.String()] = controlPlaneIPv4 } } + loadBalancerConfigTemplate := loadbalancer.DefaultTemplate + if unsafeLoadBalancerConfig != "" { + loadBalancerConfigTemplate = unsafeLoadBalancerConfig + } + loadBalancerConfig, err := loadbalancer.Config(&loadbalancer.ConfigData{ - ControlPlanePort: s.controlPlanePort, - BackendServers: backendServers, - IPv6: s.ipFamily == clusterv1.IPv6IPFamily, - }) + FrontendControlPlanePort: s.frontendControlPlanePort, + BackendControlPlanePort: s.backendControlPlanePort, + BackendServers: backendServers, + IPv6: s.ipFamily == 
clusterv1.IPv6IPFamily, + }, + loadBalancerConfigTemplate, + ) if err != nil { return errors.WithStack(err) } diff --git a/test/infrastructure/docker/internal/loadbalancer/config.go b/test/infrastructure/docker/internal/loadbalancer/config.go index 8d2f70a02df1..0e31f2a0277e 100644 --- a/test/infrastructure/docker/internal/loadbalancer/config.go +++ b/test/infrastructure/docker/internal/loadbalancer/config.go @@ -18,6 +18,7 @@ package loadbalancer import ( "bytes" + "net" "text/template" "sigs.k8s.io/kind/pkg/errors" @@ -25,13 +26,14 @@ import ( // ConfigData is supplied to the loadbalancer config template. type ConfigData struct { - ControlPlanePort int - BackendServers map[string]string - IPv6 bool + FrontendControlPlanePort string + BackendControlPlanePort string + BackendServers map[string]string + IPv6 bool } -// ConfigTemplate is the loadbalancer config template. -const ConfigTemplate = `# generated by kind +// DefaultTemplate is the loadbalancer config template. +const DefaultTemplate = `# generated by kind global log /dev/log local0 log /dev/log local1 notice @@ -55,9 +57,9 @@ defaults default-server init-addr none frontend control-plane - bind *:{{ .ControlPlanePort }} + bind *:{{ .FrontendControlPlanePort }} {{ if .IPv6 -}} - bind :::{{ .ControlPlanePort }}; + bind :::{{ .FrontendControlPlanePort }}; {{- end }} default_backend kube-apiservers @@ -65,13 +67,15 @@ backend kube-apiservers option httpchk GET /healthz # TODO: we should be verifying (!) {{range $server, $address := .BackendServers}} - server {{ $server }} {{ $address }} check check-ssl verify none resolvers docker resolve-prefer {{ if $.IPv6 -}} ipv6 {{- else -}} ipv4 {{- end }} + server {{ $server }} {{ JoinHostPort $address $.BackendControlPlanePort }} check check-ssl verify none resolvers docker resolve-prefer {{ if $.IPv6 -}} ipv6 {{- else -}} ipv4 {{- end }} {{- end}} ` // Config generates the loadbalancer config from the ConfigTemplate and ConfigData. 
-func Config(data *ConfigData) (config string, err error) { - t, err := template.New("loadbalancer-config").Parse(ConfigTemplate) +func Config(data *ConfigData, configTemplate string) (config string, err error) { + t, err := template.New("loadbalancer-config").Funcs(template.FuncMap{ + "JoinHostPort": net.JoinHostPort, + }).Parse(configTemplate) if err != nil { return "", errors.Wrap(err, "failed to parse config template") } diff --git a/test/infrastructure/docker/internal/loadbalancer/config_test.go b/test/infrastructure/docker/internal/loadbalancer/config_test.go new file mode 100644 index 000000000000..460b6d9150b1 --- /dev/null +++ b/test/infrastructure/docker/internal/loadbalancer/config_test.go @@ -0,0 +1,192 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loadbalancer + +import ( + "testing" + + . 
"github.com/onsi/gomega" +) + +func TestConfig(t *testing.T) { + g := NewWithT(t) + + testCases := []struct { + name string + data *ConfigData + configTemplate string + expectedConfig string + }{ + { + name: "should return default HA proxy config", + data: &ConfigData{ + BackendControlPlanePort: "6443", + FrontendControlPlanePort: "7777", + BackendServers: map[string]string{ + "control-plane-0": "1.1.1.1", + }, + }, + configTemplate: DefaultTemplate, + expectedConfig: `# generated by kind +global + log /dev/log local0 + log /dev/log local1 notice + daemon + # limit memory usage to approximately 18 MB + # (see https://github.com/kubernetes-sigs/kind/pull/3115) + maxconn 100000 + +resolvers docker + nameserver dns 127.0.0.11:53 + +defaults + log global + mode tcp + option dontlognull + # TODO: tune these + timeout connect 5000 + timeout client 50000 + timeout server 50000 + # allow to boot despite dns don't resolve backends + default-server init-addr none + +frontend control-plane + bind *:7777 + + default_backend kube-apiservers + +backend kube-apiservers + option httpchk GET /healthz + # TODO: we should be verifying (!) 
+ + server control-plane-0 1.1.1.1:6443 check check-ssl verify none resolvers docker resolve-prefer ipv4 +`, + }, + { + name: "should return a custom HA config", + data: &ConfigData{ + FrontendControlPlanePort: "7777", + BackendControlPlanePort: "6443", + BackendServers: map[string]string{ + "control-plane-0": "1.1.1.1", + }, + }, + configTemplate: `# generated by kind +global + log /dev/log local0 + log /dev/log local1 notice + daemon + # limit memory usage to approximately 18 MB + # (see https://github.com/kubernetes-sigs/kind/pull/3115) + maxconn 100000 + +resolvers docker + nameserver dns 127.0.0.11:53 + +defaults + log global + mode tcp + option dontlognull + # TODO: tune these + timeout connect 5000 + timeout client 50000 + timeout server 50000 + # allow to boot despite dns don't resolve backends + default-server init-addr none + +frontend control-plane + bind *:{{ .FrontendControlPlanePort }} + {{ if .IPv6 -}} + bind :::{{ .FrontendControlPlanePort }}; + {{- end }} + default_backend kube-apiservers + +backend kube-apiservers + option httpchk GET /healthz + # TODO: we should be verifying (!) 
+ {{range $server, $address := .BackendServers}} + server {{ $server }} {{ JoinHostPort $address $.BackendControlPlanePort }} check check-ssl verify none resolvers docker resolve-prefer {{ if $.IPv6 -}} ipv6 {{- else -}} ipv4 {{- end }} + {{- end}} + +frontend rke2-join + bind *:9345 + {{ if .IPv6 -}} + bind :::9345; + {{- end }} + default_backend rke2-servers +backend rke2-servers + option httpchk GET /v1-rke2/readyz + http-check expect status 403 + {{range $server, $address := .BackendServers}} + server {{ $server }} {{ $address }}:9345 check check-ssl verify none resolvers docker resolve-prefer {{ if $.IPv6 -}} ipv6 {{- else -}} ipv4 {{- end }} + {{- end}} +`, + expectedConfig: `# generated by kind +global + log /dev/log local0 + log /dev/log local1 notice + daemon + # limit memory usage to approximately 18 MB + # (see https://github.com/kubernetes-sigs/kind/pull/3115) + maxconn 100000 + +resolvers docker + nameserver dns 127.0.0.11:53 + +defaults + log global + mode tcp + option dontlognull + # TODO: tune these + timeout connect 5000 + timeout client 50000 + timeout server 50000 + # allow to boot despite dns don't resolve backends + default-server init-addr none + +frontend control-plane + bind *:7777 + + default_backend kube-apiservers + +backend kube-apiservers + option httpchk GET /healthz + # TODO: we should be verifying (!) + + server control-plane-0 1.1.1.1:6443 check check-ssl verify none resolvers docker resolve-prefer ipv4 + +frontend rke2-join + bind *:9345 + + default_backend rke2-servers +backend rke2-servers + option httpchk GET /v1-rke2/readyz + http-check expect status 403 + + server control-plane-0 1.1.1.1:9345 check check-ssl verify none resolvers docker resolve-prefer ipv4 +`, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + config, err := Config(tc.data, tc.configTemplate) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(config).To(Equal(tc.expectedConfig)) + }) + } +}