csi: allow to delete subvolume with retain-snapshot feature
This commit passes the --retain-snapshots flag when deleting a stale
subvolume.
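
A minimal usage sketch (names taken from the CI workflow and purely illustrative; the subvolumegroup argument defaults to "csi" when omitted):

```bash
# Delete a stale subvolume; the group argument is optional and defaults to "csi".
kubectl rook-ceph subvolume delete myfs test-subvol group-a
kubectl rook-ceph subvolume delete myfs test-subvol-1

# Internally the plugin runs the following inside the operator pod:
#   ceph fs subvolume rm myfs test-subvol group-a --retain-snapshots
```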

Signed-off-by: yati1998 <ypadia@redhat.com>
yati1998 committed Feb 12, 2024
1 parent 0690ecb commit a0359c0
Showing 4 changed files with 84 additions and 71 deletions.
8 changes: 6 additions & 2 deletions .github/workflows/go-test.yaml
@@ -79,9 +79,11 @@ jobs:
run: |
set -ex
kubectl rook-ceph ceph fs subvolume create myfs test-subvol group-a
kubectl rook-ceph ceph fs subvolume create myfs test-subvol-1 group-a
kubectl rook-ceph subvolume ls
kubectl rook-ceph subvolume ls --stale
kubectl rook-ceph subvolume delete test-subvol myfs group-a
kubectl rook-ceph subvolume delete myfs test-subvol group-a
kubectl rook-ceph subvolume delete myfs test-subvol-1
- name: Get mon endpoints
run: |
@@ -234,10 +236,12 @@ jobs:
- name: Subvolume command
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph fs subvolume create myfs test-subvol-1 group-a
kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph fs subvolume create myfs test-subvol group-a
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls --stale
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete test-subvol myfs group-a
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs test-subvol group-a
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs test-subvol-1
- name: Get mon endpoints
run: |
20 changes: 11 additions & 9 deletions cmd/commands/subvolume.go
@@ -40,17 +40,19 @@ var listCmd = &cobra.Command{
}

var deleteCmd = &cobra.Command{
Use: "delete",
Short: "Deletes a stale subvolume.",
DisableFlagParsing: true,
Args: cobra.ExactArgs(3),
Example: "kubectl rook-ceph delete <subvolumes> <filesystem> <subvolumegroup>",
Use: "delete",
Short: "Deletes a stale subvolume.",
Args: cobra.RangeArgs(2, 3),
Example: "kubectl rook-ceph subvolume delete <filesystem> <subvolume> [subvolumegroup]",
Run: func(cmd *cobra.Command, args []string) {
ctx := cmd.Context()
subList := args[0]
fs := args[1]
svg := args[2]
subvolume.Delete(ctx, clientSets, operatorNamespace, cephClusterNamespace, subList, fs, svg)
fs := args[0]
subvol := args[1]
svg := "csi"
if len(args) > 2 {
svg = args[2]
}
subvolume.Delete(ctx, clientSets, operatorNamespace, cephClusterNamespace, fs, subvol, svg)
},
}

27 changes: 9 additions & 18 deletions docs/subvolume.md
@@ -9,12 +9,11 @@ and delete them without impacting other resources and attached volumes.
The subvolume command supports the following subcommands:
* `ls` : [ls](#ls) lists all the subvolumes
* `--stale`: lists only stale subvolumes
* `delete <subvolumes> <filesystem> <subvolumegroup>`:
[delete](#delete) stale subvolumes as per user's input.
It will list and delete only the stale subvolumes to prevent any loss of data.
* subvolumes: comma-separated list of subvolumes of same filesystem and subvolumegroup.
* filesystem: filesystem name to which the subvolumes belong.
* subvolumegroup: subvolumegroup name to which the subvolumes belong.
* `delete <filesystem> <subvolume> [subvolumegroup]`:
[delete](#delete) a stale subvolume.
* subvolume: subvolume name.
* filesystem: filesystem name to which the subvolume belongs.
* subvolumegroup: subvolumegroup name to which the subvolume belongs (default is "csi").
## ls

```bash
@@ -23,8 +22,8 @@ kubectl rook-ceph subvolume ls
# Filesystem Subvolume SubvolumeGroup State
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110004 csi in-use
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110005 csi in-use
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110006 csi in-use
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110007 csi stale
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110007 csi stale-with-snapshot

```

@@ -33,23 +32,15 @@ kubectl rook-ceph subvolume ls --stale

# Filesystem Subvolume SubvolumeGroup state
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110004 csi stale
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110005 csi stale
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110005 csi stale-with-snapshot

```

## delete

```bash
kubectl rook-ceph subvolume delete csi-vol-427774b4-340b-11ed-8d66-0242ac110004 ocs-storagecluster csi
kubectl rook-ceph subvolume delete ocs-storagecluster csi-vol-427774b4-340b-11ed-8d66-0242ac110004

# Info: subvolume csi-vol-427774b4-340b-11ed-8d66-0242ac110004 deleted

```

```bash
kubectl rook-ceph subvolume delete csi-vol-427774b4-340b-11ed-8d66-0242ac110004,csi-vol-427774b4-340b-11ed-8d66-0242ac110005 ocs-storagecluster csi

# Info: subvolume csi-vol-427774b4-340b-11ed-8d66-0242ac110004 deleted
# Info: subvolume csi-vol-427774b4-340b-11ed-8d66-0242ac110004 deleted
# Info: subvolume "csi-vol-427774b4-340b-11ed-8d66-0242ac110004" deleted

```
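
The subvolumegroup argument can also be omitted, in which case it defaults to "csi". A sketch with illustrative names and output; because the deletion passes --retain-snapshots, a subvolume listed as stale-with-snapshot can be removed without discarding its snapshots:

```bash
kubectl rook-ceph subvolume delete ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110005

# Info: subvolume "csi-vol-427774b4-340b-11ed-8d66-0242ac110005" deleted
```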
100 changes: 58 additions & 42 deletions pkg/filesystem/subvolume.go
@@ -20,7 +20,6 @@ import (
"context"
"encoding/json"
"fmt"
"strings"

"github.com/rook/kubectl-rook-ceph/pkg/exec"
"github.com/rook/kubectl-rook-ceph/pkg/k8sutil"
@@ -34,10 +33,17 @@ type fsStruct struct {
}

type subVolumeInfo struct {
svg string
fs string
svg string
fs string
state string
}

const (
inUse = "in-use"
stale = "stale"
staleWithSnapshot = "stale-with-snapshot"
)

func List(ctx context.Context, clientsets *k8sutil.Clientsets, operatorNamespace, clusterNamespace string, includeStaleOnly bool) {

subvolumeNames := getK8sRefSubvolume(ctx, clientsets)
@@ -52,7 +58,7 @@ func getK8sRefSubvolume(ctx context.Context, clientsets *k8sutil.Clientsets) map
}
subvolumeNames := make(map[string]subVolumeInfo)
for _, pv := range pvList.Items {
if pv.Spec.CSI != nil && pv.Spec.CSI.VolumeAttributes["subvolumeName"] != "" {
if pv.Spec.CSI != nil {
subvolumeNames[pv.Spec.CSI.VolumeAttributes["subvolumeName"]] = subVolumeInfo{}
}
}
@@ -90,28 +96,61 @@ func listCephFSSubvolumes(ctx context.Context, clientsets *k8sutil.Clientsets, o
}
// append the subvolume which doesn't have any snapshot attached to it.
for _, sv := range subvol {
state := getSubvolumeState(ctx, clientsets, operatorNamespace, clusterNamespace, fs.Name, sv.Name, svg.Name)

// Assume the volume is stale unless proven otherwise
stale := true
stalevol := true
// lookup for subvolume in list of the PV references
_, ok := subvolumeNames[sv.Name]
if ok || checkSnapshot(ctx, clientsets, operatorNamespace, clusterNamespace, fs.Name, sv.Name, svg.Name) {
// The volume is not stale if a PV was found, or it has a snapshot
stale = false
if ok {
// The volume is not stale if a PV was found
stalevol = false
}
status := "stale"
if !stale {
status := stale
if !stalevol {
if includeStaleOnly {
continue
}
status = "in-use"
status = inUse
} else {
// check the state of the stale subvolume
// if it is snapshot-retained then skip listing it.
if state == "snapshot-retained" {
status = state
continue
}
// check if the stale subvolume has snapshots.
if checkSnapshot(ctx, clientsets, operatorNamespace, clusterNamespace, fs.Name, sv.Name, svg.Name) {
status = staleWithSnapshot
}

}
subvolumeNames[sv.Name] = subVolumeInfo{fs.Name, svg.Name}
subvolumeNames[sv.Name] = subVolumeInfo{fs.Name, svg.Name, state}
fmt.Println(fs.Name, sv.Name, svg.Name, status)
}
}
}
}

// getSubvolumeState returns the state of the subvolume
func getSubvolumeState(ctx context.Context, clientsets *k8sutil.Clientsets, operatorNamespace, clusterNamespace, fsName, SubVol, SubvolumeGroup string) string {
subVolumeInfo, errvol := exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "subvolume", "info", fsName, SubVol, SubvolumeGroup}, operatorNamespace, clusterNamespace, true)
if errvol != nil {
logging.Error(errvol, "failed to get info for subvolume %q", SubVol)
return ""
}
var info map[string]interface{}
err := json.Unmarshal([]byte(subVolumeInfo), &info)
if err != nil {
logging.Fatal(fmt.Errorf("failed to unmarshal: %q", err))
}
state, ok := info["state"].(string)
if !ok {
logging.Fatal(fmt.Errorf("failed to get the state of subvolume: %q", SubVol))
}
return state
}

// gets list of filesystem
func getFileSystem(ctx context.Context, clientsets *k8sutil.Clientsets, operatorNamespace, clusterNamespace string) ([]fsStruct, error) {
fsList, err := exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "ls", "--format", "json"}, operatorNamespace, clusterNamespace, true)
@@ -166,36 +205,13 @@ func unMarshaljson(list string) []fsStruct {
return unmarshal
}

func Delete(ctx context.Context, clientsets *k8sutil.Clientsets, OperatorNamespace, CephClusterNamespace, subList, fs, svg string) {
subvollist := strings.Split(subList, ",")
func Delete(ctx context.Context, clientsets *k8sutil.Clientsets, OperatorNamespace, CephClusterNamespace, fs, subvol, svg string) {
k8sSubvolume := getK8sRefSubvolume(ctx, clientsets)
for _, subvolume := range subvollist {
check := checkStaleSubvolume(ctx, clientsets, OperatorNamespace, CephClusterNamespace, fs, subvolume, svg, k8sSubvolume)
if check {
_, err := exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "subvolume", "rm", fs, subvolume, svg}, OperatorNamespace, CephClusterNamespace, true)
if err != nil {
logging.Error(err, "failed to delete stale subvolume %q", subvolume)
continue
}
logging.Info("subvolume %q deleted", subvolume)
} else {
logging.Info("subvolume %q is not stale", subvolume)
}
}
}

// checkStaleSubvolume checks if there are any stale subvolume to be deleted
func checkStaleSubvolume(ctx context.Context, clientsets *k8sutil.Clientsets, OperatorNamespace, CephClusterNamespace, fs, subvolume, svg string, k8sSubvolume map[string]subVolumeInfo) bool {
_, ok := k8sSubvolume[subvolume]
if !ok {
snapshot := checkSnapshot(ctx, clientsets, OperatorNamespace, CephClusterNamespace, fs, subvolume, svg)
if snapshot {
logging.Error(fmt.Errorf("subvolume %s has snapshots", subvolume))
return false
} else {
return true
}
_, check := k8sSubvolume[subvol]
if !check {
_, err := exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "subvolume", "rm", fs, subvol, svg, "--retain-snapshots"}, OperatorNamespace, CephClusterNamespace, true)
if err != nil {
logging.Error(err, "failed to delete stale subvolume %q", subvol)
return
}
logging.Info("subvolume %q deleted", subvol)
} else {
logging.Info("subvolume %q is not stale", subvol)
}
logging.Error(fmt.Errorf("Subvolume %s is referenced by a PV", subvolume))
return false
}
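
For reference, a rough manual equivalent of what the updated Delete path runs inside the operator pod (a sketch assuming direct access to the ceph CLI; filesystem, subvolume, and group names are illustrative):

```bash
# Inspect the subvolume's state (e.g. snapshot-retained) before acting on it.
ceph fs subvolume info myfs test-subvol group-a

# Remove the stale subvolume while retaining any snapshots it still has.
ceph fs subvolume rm myfs test-subvol group-a --retain-snapshots
```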
