diff --git a/README.md b/README.md
index 11e9405c08..74795a5138 100644
--- a/README.md
+++ b/README.md
@@ -46,7 +46,7 @@ Note that the external-provisioner does not scale with more replicas. Only one e
 ### Command line options
-#### Recommended optional arguments"
+#### Recommended optional arguments
 * `--csi-address <path to csi socket>`: This is the path to the CSI driver socket inside the pod that the external-provisioner container will use to issue CSI operations (`/run/csi/socket` is used by default).
 * `--enable-leader-election`: Enables leader election. This is mandatory when there are multiple replicas of the same external-provisioner running for one CSI driver. Only one of them may be active (=leader). A new leader will be re-elected when current leader dies or becomes unresponsive for ~15 seconds.
@@ -64,6 +64,8 @@ Note that the external-provisioner does not scale with more replicas. Only one e
 #### Other recognized arguments
 * `--feature-gates <gates>`: A set of comma separated `<name>=<value>` pairs that describe feature gates for alpha/experimental features. See [list of features](#feature-status) or `--help` output for list of recognized features. Example: `--feature-gates Topology=true` to enable Topology feature that's disabled by default.
+* `--strict-topology`: This controls what topology information is passed to `CreateVolumeRequest.AccessibilityRequirements` in the case of delayed binding. See [the table below](#topology-support) for an explanation of how this option changes the result. This option has no effect if either the `Topology` feature is disabled or the `Immediate` volume binding mode is used.
+
 * `--kubeconfig <path>`: Path to Kubernetes client configuration that the external-provisioner uses to connect to Kubernetes API server. When omitted, default token provided by Kubernetes will be used. This option is useful only when the external-provisioner does not run as a Kubernetes pod, e.g. for debugging. Either this or `--master` needs to be set if the external-provisioner is being run out of cluster.
 * `--master <url>`: Master URL to build a client config from. When omitted, default token provided by Kubernetes will be used. This option is useful only when the external-provisioner does not run as a Kubernetes pod, e.g. for debugging. Either this or `--kubeconfig` needs to be set if the external-provisioner is being run out of cluster.
@@ -83,6 +85,17 @@ Note that the external-provisioner does not scale with more replicas. Only one e
 * `--leader-election-type`: This option was used to choose which leader election resource type to use. Currently, the option defaults to `endpoints`, but will be removed in the future to only support `Lease` based leader election.
+### Topology support
+When the `Topology` feature is enabled and the driver specifies `VOLUME_ACCESSIBILITY_CONSTRAINTS` in its plugin capabilities, the external-provisioner prepares `CreateVolumeRequest.AccessibilityRequirements` while calling `Controller.CreateVolume`. The driver has to consider these topology constraints while creating the volume.
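To make the table that follows concrete: under delayed binding with `--strict-topology`, both `Requisite` and `Preferred` collapse to the single topology of the node chosen by the scheduler. The sketch below expresses that row with the CSI spec's Go bindings; the zone key and value are invented for illustration and the snippet is not part of this patch.

```go
// Sketch only: the delayed-binding + strict-topology row of the table below,
// expressed with the CSI Go bindings. The topology key and value are hypothetical.
package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

func main() {
	selected := &csi.Topology{
		Segments: map[string]string{"topology.example.com/zone": "zone-1"},
	}
	// With --strict-topology only the selected node's topology is passed,
	// and Preferred mirrors Requisite.
	req := &csi.TopologyRequirement{
		Requisite: []*csi.Topology{selected},
		Preferred: []*csi.Topology{selected},
	}
	fmt.Println(req)
}
```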
+The table below shows how these `AccessibilityRequirements` are prepared:
+
+[Delayed binding](https://kubernetes.io/docs/concepts/storage/storage-classes/#volume-binding-mode) | Strict topology | [Allowed topologies](https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies) | [Resulting accessibility requirements](https://github.com/container-storage-interface/spec/blob/master/spec.md#createvolume)
+:---: |:---:|:---:|:---|
+Yes | Yes | Irrelevant | `Requisite` = `Preferred` = Selected node topology
+Yes | No | No | `Requisite` = Aggregated cluster topology<br>`Preferred` = `Requisite` with selected node topology as first element
+Yes | No | Yes | `Requisite` = Allowed topologies<br>`Preferred` = `Requisite` with selected node topology as first element
+No | Irrelevant | No | `Requisite` = Aggregated cluster topology<br>`Preferred` = `Requisite` with randomly selected node topology as first element
+No | Irrelevant | Yes | `Requisite` = Allowed topologies<br>`Preferred` = `Requisite` with randomly selected node topology as first element
+
 ### CSI error and timeout handling
 The external-provisioner invokes all gRPC calls to CSI driver with timeout provided by `--timeout` command line argument (15 seconds by default).
diff --git a/cmd/csi-provisioner/csi-provisioner.go b/cmd/csi-provisioner/csi-provisioner.go
index 73fa6006ec..4f62c85fce 100644
--- a/cmd/csi-provisioner/csi-provisioner.go
+++ b/cmd/csi-provisioner/csi-provisioner.go
@@ -62,6 +62,7 @@ var (
 	enableLeaderElection = flag.Bool("enable-leader-election", false, "Enables leader election. If leader election is enabled, additional RBAC rules are required. Please refer to the Kubernetes CSI documentation for instructions on setting up these RBAC rules.")
 	leaderElectionType   = flag.String("leader-election-type", "endpoints", "the type of leader election, options are 'endpoints' (default) or 'leases' (strongly recommended). The 'endpoints' option is deprecated in favor of 'leases'.")
+	strictTopology       = flag.Bool("strict-topology", false, "Passes only selected node topology to CreateVolume Request, unlike default behavior of passing aggregated cluster topologies that match with topology keys of the selected node.")
 	featureGates        map[string]bool
 	provisionController *controller.ProvisionController
@@ -178,7 +179,7 @@ func main() {
 	// Create the provisioner: it implements the Provisioner interface expected by
 	// the controller
-	csiProvisioner := ctrl.NewCSIProvisioner(clientset, *operationTimeout, identity, *volumeNamePrefix, *volumeNameUUIDLength, grpcClient, snapClient, provisionerName, pluginCapabilities, controllerCapabilities, supportsMigrationFromInTreePluginName)
+	csiProvisioner := ctrl.NewCSIProvisioner(clientset, *operationTimeout, identity, *volumeNamePrefix, *volumeNameUUIDLength, grpcClient, snapClient, provisionerName, pluginCapabilities, controllerCapabilities, supportsMigrationFromInTreePluginName, *strictTopology)
 	provisionController = controller.NewProvisionController(
 		clientset,
 		provisionerName,
diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go
index 88ae3ffecf..22f6c41d3b 100644
--- a/pkg/controller/controller.go
+++ b/pkg/controller/controller.go
@@ -161,6 +161,7 @@ type csiProvisioner struct {
 	pluginCapabilities                    connection.PluginCapabilitySet
 	controllerCapabilities                connection.ControllerCapabilitySet
 	supportsMigrationFromInTreePluginName string
+	strictTopology                        bool
 }
 var _ controller.Provisioner = &csiProvisioner{}
@@ -216,7 +217,8 @@ func NewCSIProvisioner(client kubernetes.Interface,
 	driverName string,
 	pluginCapabilities connection.PluginCapabilitySet,
 	controllerCapabilities connection.ControllerCapabilitySet,
-	supportsMigrationFromInTreePluginName string) controller.Provisioner {
+	supportsMigrationFromInTreePluginName string,
+	strictTopology bool) controller.Provisioner {
 	csiClient := csi.NewControllerClient(grpcClient)
 	provisioner := &csiProvisioner{
@@ -232,6 +234,7 @@ func NewCSIProvisioner(client kubernetes.Interface,
 		pluginCapabilities:                    pluginCapabilities,
 		controllerCapabilities:                controllerCapabilities,
 		supportsMigrationFromInTreePluginName: supportsMigrationFromInTreePluginName,
+		strictTopology:                        strictTopology,
 	}
 	return provisioner
 }
@@ -432,7 +435,8 @@ func (p *csiProvisioner) Provision(options controller.ProvisionOptions) (*v1.Per
 		p.driverName,
 		options.PVC.Name,
 		options.StorageClass.AllowedTopologies,
-		options.SelectedNode)
+		options.SelectedNode,
+		p.strictTopology)
 	if err != nil {
 		return nil, fmt.Errorf("error generating accessibility requirements: %v", err)
 	}
diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go
index 9b26a3b1f3..83197a04de 100644
--- a/pkg/controller/controller_test.go
+++ b/pkg/controller/controller_test.go
@@ -392,7 +392,7 @@ func TestCreateDriverReturnsInvalidCapacityDuringProvision(t *testing.T) {
 	defer driver.Stop()
 	pluginCaps, controllerCaps := provisionCapabilities()
-	csiProvisioner := NewCSIProvisioner(nil, 5*time.Second, "test-provisioner", "test", 5, csiConn.conn, nil, driverName, pluginCaps, controllerCaps, "")
+	csiProvisioner := NewCSIProvisioner(nil, 5*time.Second, "test-provisioner", "test", 5, csiConn.conn, nil, driverName, pluginCaps, controllerCaps, "", false)
 	// Requested PVC with requestedBytes storage
 	deletePolicy := v1.PersistentVolumeReclaimDelete
@@ -1287,7 +1287,7 @@ func runProvisionTest(t *testing.T, k string, tc provisioningTestcase, requested
 	}
 	pluginCaps, controllerCaps := provisionCapabilities()
-	csiProvisioner := NewCSIProvisioner(clientSet, 5*time.Second, "test-provisioner", "test", 5, csiConn.conn, nil, driverName, pluginCaps, controllerCaps, "")
+	csiProvisioner := NewCSIProvisioner(clientSet, 5*time.Second, "test-provisioner", "test", 5, csiConn.conn, nil, driverName, pluginCaps, controllerCaps, "", false)
 	out := &csi.CreateVolumeResponse{
 		Volume: &csi.Volume{
@@ -1650,7 +1650,7 @@ func TestProvisionFromSnapshot(t *testing.T) {
 	})
 	pluginCaps, controllerCaps := provisionFromSnapshotCapabilities()
-	csiProvisioner := NewCSIProvisioner(clientSet, 5*time.Second, "test-provisioner", "test", 5, csiConn.conn, client, driverName, pluginCaps, controllerCaps, "")
+	csiProvisioner := NewCSIProvisioner(clientSet, 5*time.Second, "test-provisioner", "test", 5, csiConn.conn, client, driverName, pluginCaps, controllerCaps, "", false)
 	out := &csi.CreateVolumeResponse{
 		Volume: &csi.Volume{
@@ -1801,7 +1801,7 @@ func TestProvisionWithTopologyEnabled(t *testing.T) {
 	}
 	clientSet := fakeclientset.NewSimpleClientset(nodes, nodeInfos)
-	csiProvisioner := NewCSIProvisioner(clientSet, 5*time.Second, "test-provisioner", "test", 5, csiConn.conn, nil, driverName, pluginCaps, controllerCaps, "")
+	csiProvisioner := NewCSIProvisioner(clientSet, 5*time.Second, "test-provisioner", "test", 5, csiConn.conn, nil, driverName, pluginCaps, controllerCaps, "", false)
 	pv, err := csiProvisioner.Provision(controller.ProvisionOptions{
 		StorageClass: &storagev1.StorageClass{},
@@ -1855,7 +1855,7 @@ func TestProvisionWithTopologyDisabled(t *testing.T) {
 	clientSet := fakeclientset.NewSimpleClientset()
 	pluginCaps, controllerCaps := provisionWithTopologyCapabilities()
-	csiProvisioner := NewCSIProvisioner(clientSet, 5*time.Second, "test-provisioner", "test", 5, csiConn.conn, nil, driverName, pluginCaps, controllerCaps, "")
+	csiProvisioner := NewCSIProvisioner(clientSet, 5*time.Second, "test-provisioner", "test", 5, csiConn.conn, nil, driverName, pluginCaps, controllerCaps, "", false)
 	out := &csi.CreateVolumeResponse{
 		Volume: &csi.Volume{
@@ -1902,7 +1902,7 @@ func TestProvisionWithMountOptions(t *testing.T) {
 	clientSet := fakeclientset.NewSimpleClientset()
 	pluginCaps, controllerCaps := provisionCapabilities()
-	csiProvisioner := NewCSIProvisioner(clientSet, 5*time.Second, "test-provisioner", "test", 5, csiConn.conn, nil, driverName, pluginCaps, controllerCaps, "")
+	csiProvisioner := NewCSIProvisioner(clientSet, 5*time.Second, "test-provisioner", "test", 5, csiConn.conn, nil, driverName, pluginCaps, controllerCaps, "", false)
 	out := &csi.CreateVolumeResponse{
 		Volume: &csi.Volume{
@@ -2076,7 +2076,7 @@ func runDeleteTest(t *testing.T, k string, tc deleteTestcase) {
 	}
 	pluginCaps, controllerCaps := provisionCapabilities()
-	csiProvisioner := NewCSIProvisioner(clientSet, 5*time.Second, "test-provisioner", "test", 5, csiConn.conn, nil, driverName, pluginCaps, controllerCaps, "")
+	csiProvisioner := NewCSIProvisioner(clientSet, 5*time.Second, "test-provisioner", "test", 5, csiConn.conn, nil, driverName, pluginCaps, controllerCaps, "", false)
 	err = csiProvisioner.Delete(tc.persistentVolume)
 	if tc.expectErr && err == nil {
diff --git a/pkg/controller/topology.go b/pkg/controller/topology.go
index cf55b09749..b6f59e170d 100644
--- a/pkg/controller/topology.go
+++ b/pkg/controller/topology.go
@@ -139,12 +139,15 @@ func GenerateAccessibilityRequirements(
 	driverName string,
 	pvcName string,
 	allowedTopologies []v1.TopologySelectorTerm,
-	selectedNode *v1.Node) (*csi.TopologyRequirement, error) {
+	selectedNode *v1.Node,
+	strictTopology bool) (*csi.TopologyRequirement, error) {
 	requirement := &csi.TopologyRequirement{}
 	var (
-		selectedCSINode *storage.CSINode
-		err             error
+		selectedCSINode  *storage.CSINode
+		selectedTopology topologyTerm
+		requisiteTerms   []topologyTerm
+		err              error
 	)
 	// 1. Get CSINode for the selected node
@@ -158,20 +161,57 @@ func GenerateAccessibilityRequirements(
 			// This should only happen if the Node is on a pre-1.14 version
 			return nil, nil
 		}
+		topologyKeys := getTopologyKeys(selectedCSINode, driverName)
+		if len(topologyKeys) == 0 {
+			// The scheduler selected a node with no topology information.
+			// This can happen if:
+			//
+			// * the node driver is not deployed on all nodes.
+			// * the node driver is being restarted and has not re-registered yet. This should be
+			//   temporary and a retry should eventually succeed.
+			//
+			// Returning an error in provisioning will cause the scheduler to retry and potentially
+			// (but not guaranteed) pick a different node.
+			return nil, fmt.Errorf("no topology key found on CSINode %s", selectedCSINode.Name)
+		}
+		var isMissingKey bool
+		selectedTopology, isMissingKey = getTopologyFromNode(selectedNode, topologyKeys)
+		if isMissingKey {
+			return nil, fmt.Errorf("topology labels from selected node %v does not match topology keys from CSINode %v", selectedNode.Labels, topologyKeys)
+		}
+
+		if strictTopology {
+			// Make sure that selected node topology is in allowed topologies list
+			if len(allowedTopologies) != 0 {
+				allowedTopologiesFlatten := flatten(allowedTopologies)
+				found := false
+				for _, t := range allowedTopologiesFlatten {
+					if t.equal(selectedTopology) {
+						found = true
+						break
+					}
+				}
+				if !found {
+					return nil, fmt.Errorf("selected node %q topology %v is not in allowed topologies: %v", selectedNode.Name, selectedTopology, allowedTopologiesFlatten)
+				}
+			}
+			// Only pass topology of selected node.
+			requisiteTerms = append(requisiteTerms, selectedTopology)
+		}
 	}
 	// 2. Generate CSI Requisite Terms
-	var requisiteTerms []topologyTerm
-	if len(allowedTopologies) == 0 {
-		// Aggregate existing topologies in nodes across the entire cluster.
-		var err error
-		requisiteTerms, err = aggregateTopologies(kubeClient, driverName, selectedCSINode)
-		if err != nil {
-			return nil, err
+	if len(requisiteTerms) == 0 {
+		if len(allowedTopologies) != 0 {
+			// Distribute out one of the OR layers in allowedTopologies
+			requisiteTerms = flatten(allowedTopologies)
+		} else {
+			// Aggregate existing topologies in nodes across the entire cluster.
+			requisiteTerms, err = aggregateTopologies(kubeClient, driverName, selectedCSINode)
+			if err != nil {
+				return nil, err
+			}
 		}
-	} else {
-		// Distribute out one of the OR layers in allowedTopologies
-		requisiteTerms = flatten(allowedTopologies)
 	}
 	// It might be possible to reach here if:
@@ -202,20 +242,19 @@ func GenerateAccessibilityRequirements(
 		preferredTerms = sortAndShift(requisiteTerms, nil, i)
 	} else {
 		// Delayed binding, use topology from that node to populate preferredTerms
-		topologyKeys := getTopologyKeys(selectedCSINode, driverName)
-		selectedTopology, isMissingKey := getTopologyFromNode(selectedNode, topologyKeys)
-		if isMissingKey {
-			return nil, fmt.Errorf("topology labels from selected node %v does not match topology keys from CSINode %v", selectedNode.Labels, topologyKeys)
-		}
-
-		preferredTerms = sortAndShift(requisiteTerms, selectedTopology, 0)
-		if preferredTerms == nil {
-			// Topology from selected node is not in requisite. This case should never be hit:
-			// - If AllowedTopologies is specified, the scheduler should choose a node satisfying the
-			//   constraint.
-			// - Otherwise, the aggregated topology is guaranteed to contain topology information from the
-			//   selected node.
-			return nil, fmt.Errorf("topology %v from selected node %q is not in requisite: %v", selectedTopology, selectedNode.Name, requisiteTerms)
+		if strictTopology {
+			// In case of strict topology, preferred = requisite
+			preferredTerms = requisiteTerms
+		} else {
+			preferredTerms = sortAndShift(requisiteTerms, selectedTopology, 0)
+			if preferredTerms == nil {
+				// Topology from selected node is not in requisite. This case should never be hit:
+				// - If AllowedTopologies is specified, the scheduler should choose a node satisfying the
+				//   constraint.
+				// - Otherwise, the aggregated topology is guaranteed to contain topology information from the
+				//   selected node.
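+				// - With strict topology, Preferred is copied directly from Requisite above, so this
+				//   check is not reached at all.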
+ return nil, fmt.Errorf("topology %v from selected node %q is not in requisite: %v", selectedTopology, selectedNode.Name, requisiteTerms) + } } } requirement.Preferred = toCSITopology(preferredTerms) diff --git a/pkg/controller/topology_test.go b/pkg/controller/topology_test.go index 79a63eec67..cd7cbad384 100644 --- a/pkg/controller/topology_test.go +++ b/pkg/controller/topology_test.go @@ -391,31 +391,37 @@ func TestStatefulSetSpreading(t *testing.T) { kubeClient := fakeclientset.NewSimpleClientset(nodes, nodeInfos) for name, tc := range testcases { - t.Logf("test: %s", name) + for _, strictTopology := range []bool{false, true} { + if strictTopology { + name += " with strict topology" + } + t.Logf("test: %s", name) - requirements, err := GenerateAccessibilityRequirements( - kubeClient, - testDriverName, - tc.pvcName, - tc.allowedTopologies, - nil, - ) + requirements, err := GenerateAccessibilityRequirements( + kubeClient, + testDriverName, + tc.pvcName, + tc.allowedTopologies, + nil, + strictTopology, + ) - if err != nil { - t.Errorf("unexpected error found: %v", err) - continue - } + if err != nil { + t.Errorf("unexpected error found: %v", err) + continue + } - if requirements == nil { - t.Errorf("expected preferred to be %v but requirements is nil", tc.expectedPreferred) - continue - } - if requirements.Preferred == nil { - t.Errorf("expected preferred to be %v but requirements.Preferred is nil", tc.expectedPreferred) - continue - } - if !helper.Semantic.DeepEqual(requirements.Preferred, tc.expectedPreferred) { - t.Errorf("expected preferred requisite %v; got: %v", tc.expectedPreferred, requirements.Preferred) + if requirements == nil { + t.Errorf("expected preferred to be %v but requirements is nil", tc.expectedPreferred) + continue + } + if requirements.Preferred == nil { + t.Errorf("expected preferred to be %v but requirements.Preferred is nil", tc.expectedPreferred) + continue + } + if !helper.Semantic.DeepEqual(requirements.Preferred, tc.expectedPreferred) { + t.Errorf("expected preferred requisite %v; got: %v", tc.expectedPreferred, requirements.Preferred) + } } } } @@ -779,24 +785,31 @@ func TestAllowedTopologies(t *testing.T) { } for name, tc := range testcases { - t.Logf("test: %s", name) - requirements, err := GenerateAccessibilityRequirements( - nil, /* kubeClient */ - "test-driver", /* driverName */ - "testpvc", - tc.allowedTopologies, - nil /* selectedNode */) + for _, strictTopology := range []bool{false, true} { + if strictTopology { + name += " with strict topology" + } + t.Logf("test: %s", name) + requirements, err := GenerateAccessibilityRequirements( + nil, /* kubeClient */ + "test-driver", /* driverName */ + "testpvc", + tc.allowedTopologies, + nil, /* selectedNode */ + strictTopology, + ) - if err != nil { - t.Errorf("expected no error but got: %v", err) - continue - } - if requirements == nil { - t.Errorf("expected requirements not to be nil") - continue - } - if !requisiteEqual(requirements.Requisite, tc.expectedRequisite) { - t.Errorf("expected requisite %v; got: %v", tc.expectedRequisite, requirements.Requisite) + if err != nil { + t.Errorf("expected no error but got: %v", err) + continue + } + if requirements == nil { + t.Errorf("expected requirements not to be nil") + continue + } + if !requisiteEqual(requirements.Requisite, tc.expectedRequisite) { + t.Errorf("expected requisite %v; got: %v", tc.expectedRequisite, requirements.Requisite) + } } } } @@ -805,12 +818,13 @@ func TestTopologyAggregation(t *testing.T) { // Note: all test cases below include 
topology from another driver, in addition to the driver // specified in the test case. testcases := map[string]struct { - nodeLabels []map[string]string - topologyKeys []map[string][]string - hasSelectedNode bool // if set, the first map in nodeLabels is for the selected node. - preBetaNode bool // use a node before 1.14 - expectedRequisite []*csi.Topology - expectError bool + nodeLabels []map[string]string + topologyKeys []map[string][]string + hasSelectedNode bool // if set, the first map in nodeLabels is for the selected node. + preBetaNode bool // use a node before 1.14 + expectedRequisite []*csi.Topology + expectedStrictRequisite []*csi.Topology + expectError bool }{ "same keys and values across cluster": { nodeLabels: []map[string]string{ @@ -875,6 +889,9 @@ func TestTopologyAggregation(t *testing.T) { {Segments: map[string]string{"com.example.csi/zone": "zone1"}}, {Segments: map[string]string{"com.example.csi/zone": "zone2"}}, }, + expectedStrictRequisite: []*csi.Topology{ + {Segments: map[string]string{"com.example.csi/zone": "zone1"}}, + }, }, //"different keys across cluster": { // nodeLabels: []map[string]string{ @@ -1041,61 +1058,72 @@ func TestTopologyAggregation(t *testing.T) { } for name, tc := range testcases { - t.Logf("test: %s", name) + for _, strictTopology := range []bool{false, true} { + if strictTopology { + name += " with strict topology" + } + t.Logf("test: %s", name) - nodeVersion := k8sTopologyBetaVersion.String() - if tc.preBetaNode { - nodeVersion = preBetaNodeVersion - } - nodes := buildNodes(tc.nodeLabels, nodeVersion) - nodeInfos := buildNodeInfos(tc.topologyKeys) + nodeVersion := k8sTopologyBetaVersion.String() + if tc.preBetaNode { + nodeVersion = preBetaNodeVersion + } + nodes := buildNodes(tc.nodeLabels, nodeVersion) + nodeInfos := buildNodeInfos(tc.topologyKeys) - kubeClient := fakeclientset.NewSimpleClientset(nodes, nodeInfos) - var selectedNode *v1.Node - if tc.hasSelectedNode { - selectedNode = &nodes.Items[0] - } - requirements, err := GenerateAccessibilityRequirements( - kubeClient, - testDriverName, - "testpvc", - nil, /* allowedTopologies */ - selectedNode, - ) + kubeClient := fakeclientset.NewSimpleClientset(nodes, nodeInfos) + var selectedNode *v1.Node + if tc.hasSelectedNode { + selectedNode = &nodes.Items[0] + } + requirements, err := GenerateAccessibilityRequirements( + kubeClient, + testDriverName, + "testpvc", + nil, /* allowedTopologies */ + selectedNode, + strictTopology, + ) - if tc.expectError { - if err == nil { - t.Error("expected error but got none") + if tc.expectError { + if err == nil { + t.Error("expected error but got none") + } + continue } - continue - } - if !tc.expectError && err != nil { - t.Errorf("expected no error but got: %v", err) - continue - } - if requirements == nil { - if tc.expectedRequisite != nil { - t.Errorf("expected requisite to be %v but requirements is nil", tc.expectedRequisite) + if err != nil { + t.Errorf("expected no error but got: %v", err) + continue + } + expectedRequisite := tc.expectedRequisite + if strictTopology && tc.expectedStrictRequisite != nil { + expectedRequisite = tc.expectedStrictRequisite + } + if requirements == nil { + if expectedRequisite != nil { + t.Errorf("expected requisite to be %v but requirements is nil", expectedRequisite) + } + continue + } + if expectedRequisite == nil { + t.Errorf("expected requirements to be nil but got requisite: %v", requirements.Requisite) + continue + } + if !requisiteEqual(requirements.Requisite, expectedRequisite) { + t.Errorf("expected requisite 
%v; got: %v", tc.expectedRequisite, requirements.Requisite) } - continue - } - if requirements != nil && tc.expectedRequisite == nil { - t.Errorf("expected requirements to be nil but got requisite: %v", requirements.Requisite) - continue - } - if !requisiteEqual(requirements.Requisite, tc.expectedRequisite) { - t.Errorf("expected requisite %v; got: %v", tc.expectedRequisite, requirements.Requisite) } } } func TestPreferredTopologies(t *testing.T) { testcases := map[string]struct { - allowedTopologies []v1.TopologySelectorTerm - nodeLabels []map[string]string // first node is selected node - topologyKeys []map[string][]string // first entry is from the selected node - expectedPreferred []*csi.Topology - expectError bool + allowedTopologies []v1.TopologySelectorTerm + nodeLabels []map[string]string // first node is selected node + topologyKeys []map[string][]string // first entry is from the selected node + expectedPreferred []*csi.Topology + expectedStrictPreferred []*csi.Topology + expectError bool }{ "allowedTopologies specified": { allowedTopologies: []v1.TopologySelectorTerm{ @@ -1148,6 +1176,14 @@ func TestPreferredTopologies(t *testing.T) { }, }, }, + expectedStrictPreferred: []*csi.Topology{ + { + Segments: map[string]string{ + "com.example.csi/rack": "rackA", + "com.example.csi/zone": "zone2", + }, + }, + }, }, "allowedTopologies specified: no CSINode": { allowedTopologies: []v1.TopologySelectorTerm{ @@ -1230,6 +1266,14 @@ func TestPreferredTopologies(t *testing.T) { }, }, }, + expectedStrictPreferred: []*csi.Topology{ + { + Segments: map[string]string{ + "com.example.csi/rack": "rackA", + "com.example.csi/zone": "zone2", + }, + }, + }, }, "topology aggregation with no topology info": { nodeLabels: []map[string]string{{}, {}, {}}, @@ -1264,44 +1308,54 @@ func TestPreferredTopologies(t *testing.T) { } for name, tc := range testcases { - t.Logf("test: %s", name) + for _, strictTopology := range []bool{false, true} { + if strictTopology { + name += " with strict topology" + } + t.Logf("test: %s", name) - nodes := buildNodes(tc.nodeLabels, k8sTopologyBetaVersion.String()) - nodeInfos := buildNodeInfos(tc.topologyKeys) + nodes := buildNodes(tc.nodeLabels, k8sTopologyBetaVersion.String()) + nodeInfos := buildNodeInfos(tc.topologyKeys) - kubeClient := fakeclientset.NewSimpleClientset(nodes, nodeInfos) - selectedNode := &nodes.Items[0] + kubeClient := fakeclientset.NewSimpleClientset(nodes, nodeInfos) + selectedNode := &nodes.Items[0] - requirements, err := GenerateAccessibilityRequirements( - kubeClient, - testDriverName, - "testpvc", - tc.allowedTopologies, - selectedNode, - ) + requirements, err := GenerateAccessibilityRequirements( + kubeClient, + testDriverName, + "testpvc", + tc.allowedTopologies, + selectedNode, + strictTopology, + ) - if tc.expectError { - if err == nil { - t.Error("expected error but got none") + if tc.expectError { + if err == nil { + t.Error("expected error but got none") + } + continue } - continue - } - if !tc.expectError && err != nil { - t.Errorf("expected no error but got: %v", err) - continue - } - if requirements == nil { - if tc.expectedPreferred != nil { - t.Errorf("expected preferred to be %v but requirements is nil", tc.expectedPreferred) + if err != nil { + t.Errorf("expected no error but got: %v", err) + continue + } + expectedPreferred := tc.expectedPreferred + if strictTopology && tc.expectedStrictPreferred != nil { + expectedPreferred = tc.expectedStrictPreferred + } + if requirements == nil { + if expectedPreferred != nil { + t.Errorf("expected 
preferred to be %v but requirements is nil", expectedPreferred) + } + continue + } + if expectedPreferred == nil { + t.Errorf("expected requirements to be nil but got preferred: %v", requirements.Preferred) + continue + } + if !helper.Semantic.DeepEqual(requirements.Preferred, expectedPreferred) { + t.Errorf("expected requisite %v; got: %v", tc.expectedPreferred, requirements.Preferred) } - continue - } - if requirements != nil && tc.expectedPreferred == nil { - t.Errorf("expected requirements to be nil but got preferred: %v", requirements.Preferred) - continue - } - if !helper.Semantic.DeepEqual(requirements.Preferred, tc.expectedPreferred) { - t.Errorf("expected requisite %v; got: %v", tc.expectedPreferred, requirements.Preferred) } } }
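As a rough, standalone sketch of the behavior the new flag toggles under delayed binding (a simplified re-implementation, not the code from `pkg/controller/topology.go`): the default mode keeps the full requisite list and moves the selected node's topology to the front of `Preferred`, while strict mode reduces both lists to that single term. The ordering of the remaining preferred entries is simplified here, and the key/values are made up for the example.

```go
// Simplified illustration of strict vs. default topology handling for delayed binding.
package main

import (
	"fmt"
	"reflect"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// buildRequirement mimics the README table: in strict mode only the selected
// node's topology is passed; otherwise Preferred starts with the selected
// topology followed by the remaining requisite terms.
func buildRequirement(requisite []*csi.Topology, selected *csi.Topology, strict bool) *csi.TopologyRequirement {
	if strict {
		return &csi.TopologyRequirement{
			Requisite: []*csi.Topology{selected},
			Preferred: []*csi.Topology{selected},
		}
	}
	preferred := []*csi.Topology{selected}
	for _, t := range requisite {
		if !reflect.DeepEqual(t.Segments, selected.Segments) {
			preferred = append(preferred, t)
		}
	}
	return &csi.TopologyRequirement{Requisite: requisite, Preferred: preferred}
}

func main() {
	zone := func(z string) *csi.Topology {
		return &csi.Topology{Segments: map[string]string{"example.com/zone": z}}
	}
	cluster := []*csi.Topology{zone("zone-1"), zone("zone-2"), zone("zone-3")}
	selected := zone("zone-2")

	fmt.Println(buildRequirement(cluster, selected, false)) // Preferred starts with zone-2
	fmt.Println(buildRequirement(cluster, selected, true))  // both lists contain only zone-2
}
```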