Skip to content
This repository has been archived by the owner on Mar 28, 2020. It is now read-only.

e2e: remove etcd operator's backup/restore logic from e2e test #1627

Merged
merged 2 commits into from
Nov 7, 2017
70 changes: 0 additions & 70 deletions test/e2e/cluster_status_test.go
@@ -15,12 +15,10 @@
package e2e

import (
"fmt"
"os"
"testing"
"time"

api "github.com/coreos/etcd-operator/pkg/apis/etcd/v1beta2"
"github.com/coreos/etcd-operator/pkg/util/retryutil"
"github.com/coreos/etcd-operator/test/e2e/e2eutil"
"github.com/coreos/etcd-operator/test/e2e/framework"
@@ -65,71 +63,3 @@ func TestReadyMembersStatus(t *testing.T) {
t.Fatalf("failed to get size of ReadyMembers to reach %d : %v", size, err)
}
}

func TestBackupStatus(t *testing.T) {
if os.Getenv(envParallelTest) == envParallelTestTrue {
t.Parallel()
}
f := framework.Global

bp := e2eutil.NewPVBackupPolicy(true, f.StorageClassName)
testEtcd, err := e2eutil.CreateCluster(t, f.CRClient, f.Namespace, e2eutil.ClusterWithBackup(e2eutil.NewCluster("test-etcd-", 1), bp))
if err != nil {
t.Fatal(err)
}
defer func() {
var storageCheckerOptions *e2eutil.StorageCheckerOptions
switch testEtcd.Spec.Backup.StorageType {
case api.BackupStorageTypePersistentVolume, api.BackupStorageTypeDefault:
storageCheckerOptions = &e2eutil.StorageCheckerOptions{}
case api.BackupStorageTypeS3:
storageCheckerOptions = &e2eutil.StorageCheckerOptions{
S3Cli: f.S3Cli,
S3Bucket: f.S3Bucket,
}
}

err := e2eutil.DeleteClusterAndBackup(t, f.CRClient, f.KubeClient, testEtcd, *storageCheckerOptions)
if err != nil {
t.Fatal(err)
}
}()

_, err = e2eutil.WaitUntilSizeReached(t, f.CRClient, 1, 6, testEtcd)
if err != nil {
t.Fatalf("failed to create 1 members etcd cluster: %v", err)
}
err = e2eutil.WaitBackupPodUp(t, f.KubeClient, f.Namespace, testEtcd.Name, 6)
if err != nil {
t.Fatalf("failed to create backup pod: %v", err)
}
err = e2eutil.MakeBackup(f.KubeClient, f.Namespace, testEtcd.Name)
if err != nil {
t.Fatalf("fail to make backup: %v", err)
}

err = retryutil.Retry(5*time.Second, 6, func() (done bool, err error) {
c, err := f.CRClient.EtcdV1beta2().EtcdClusters(f.Namespace).Get(testEtcd.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("faied to get cluster spec: %v", err)
}
bs := c.Status.BackupServiceStatus
if bs == nil {
e2eutil.LogfWithTimestamp(t, "backup status is nil")
return false, nil
}
// We expect it will make one backup eventually.
if bs.Backups < 1 {
e2eutil.LogfWithTimestamp(t, "backup number is %v", bs.Backups)
return false, nil
}
if bs.BackupSize == 0 {
return false, fmt.Errorf("backupsize = 0, want > 0")
}
return true, nil
})

if err != nil {
t.Error(err)
}
}
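Note: the deleted TestBackupStatus wrapped a retryutil.Retry poll around the cluster's Status.BackupServiceStatus. For reference, here is a minimal sketch of the same polling idiom with the backup-specific assertions dropped. It assumes the imports already used in this file and the framework.Global wiring shown above; the Status.Phase field and api.ClusterPhaseRunning constant are assumptions for illustration, not something this PR adds.

```go
// pollClusterRunning is a hypothetical helper (not part of this PR) that keeps
// the retryutil.Retry polling idiom from the deleted test, minus the
// BackupServiceStatus assertions.
func pollClusterRunning(t *testing.T, f *framework.Framework, clusterName string) error {
	return retryutil.Retry(5*time.Second, 6, func() (done bool, err error) {
		c, err := f.CRClient.EtcdV1beta2().EtcdClusters(f.Namespace).Get(clusterName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// Assumed status field/constant; substitute whatever condition the test needs.
		if c.Status.Phase != api.ClusterPhaseRunning {
			e2eutil.LogfWithTimestamp(t, "cluster phase: %v", c.Status.Phase)
			return false, nil
		}
		return true, nil
	})
}
```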
43 changes: 0 additions & 43 deletions test/e2e/e2esh/self_hosted_test.go
@@ -16,7 +16,6 @@ package e2esh

import (
"fmt"
"os"
"sync"
"testing"
"time"
@@ -35,7 +34,6 @@ import (
func TestSelfHosted(t *testing.T) {
t.Run("create self hosted cluster from scratch", testCreateSelfHostedCluster)
t.Run("migrate boot member to self hosted cluster", testCreateSelfHostedClusterWithBootMember)
t.Run("backup for self hosted cluster", testSelfHostedClusterWithBackup)
t.Run("TLS for self hosted cluster", func(t *testing.T) { e2eslow.TLSTestCommon(t, true) })
cleanupSelfHostedHostpath()
}
@@ -124,47 +122,6 @@ func startEtcd(f *framework.Framework) (*v1.Pod, error) {
return k8sutil.CreateAndWaitPod(f.KubeClient, f.Namespace, p, 30*time.Second)
}

func testSelfHostedClusterWithBackup(t *testing.T) {
if os.Getenv("AWS_TEST_ENABLED") != "true" {
t.Skip("skipping test since AWS_TEST_ENABLED is not set.")
}

f := framework.Global

cl := e2eutil.NewCluster("test-cluster-", 3)
cl = e2eutil.ClusterWithBackup(cl, e2eutil.NewS3BackupPolicy(true))
cl = e2eutil.ClusterWithSelfHosted(cl, &api.SelfHostedPolicy{})

testEtcd, err := e2eutil.CreateCluster(t, f.CRClient, f.Namespace, cl)
if err != nil {
t.Fatal(err)
}
defer func() {
storageCheckerOptions := e2eutil.StorageCheckerOptions{
S3Cli: f.S3Cli,
S3Bucket: f.S3Bucket,
}
err := e2eutil.DeleteClusterAndBackup(t, f.CRClient, f.KubeClient, testEtcd, storageCheckerOptions)
if err != nil {
t.Fatal(err)
}
}()

_, err = e2eutil.WaitUntilSizeReached(t, f.CRClient, 3, 6, testEtcd)
if err != nil {
t.Fatalf("failed to create 3 members etcd cluster: %v", err)
}
fmt.Println("reached to 3 members cluster")
err = e2eutil.WaitBackupPodUp(t, f.KubeClient, f.Namespace, testEtcd.Name, 6)
if err != nil {
t.Fatalf("failed to create backup pod: %v", err)
}
err = e2eutil.MakeBackup(f.KubeClient, f.Namespace, testEtcd.Name)
if err != nil {
t.Fatalf("fail to make a latest backup: %v", err)
}
}

func cleanupSelfHostedHostpath() {
f := framework.Global
nodes, err := f.KubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
14 changes: 0 additions & 14 deletions test/e2e/e2eutil/crd_util.go
@@ -15,7 +15,6 @@
package e2eutil

import (
"fmt"
"testing"
"time"

@@ -81,16 +80,3 @@ func DeleteCluster(t *testing.T, crClient versioned.Interface, kubeClient kubern
}
return waitResourcesDeleted(t, kubeClient, cl)
}

func DeleteClusterAndBackup(t *testing.T, crClient versioned.Interface, kubecli kubernetes.Interface, cl *api.EtcdCluster, checkerOpt StorageCheckerOptions) error {
err := DeleteCluster(t, crClient, kubecli, cl)
if err != nil {
return err
}
t.Logf("waiting backup deleted of cluster (%v)", cl.Name)
err = WaitBackupDeleted(kubecli, cl, checkerOpt)
if err != nil {
return fmt.Errorf("fail to wait backup deleted: %v", err)
}
return nil
}
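Note: with DeleteClusterAndBackup removed, test cleanup goes through the retained DeleteCluster helper. A minimal caller-side sketch, assuming the same framework.Global wiring used by the e2e tests above:

```go
// Hypothetical cleanup once no backup storage needs to be checked:
// DeleteCluster (kept above) deletes the cluster and waits for its
// resources to be removed.
defer func() {
	if err := e2eutil.DeleteCluster(t, f.CRClient, f.KubeClient, testEtcd); err != nil {
		t.Fatal(err)
	}
}()
```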
40 changes: 0 additions & 40 deletions test/e2e/e2eutil/spec_util.go
@@ -37,21 +37,6 @@ func NewCluster(genName string, size int) *api.EtcdCluster {
}
}

func NewS3BackupPolicy(cleanup bool) *api.BackupPolicy {
return &api.BackupPolicy{
BackupIntervalInSecond: 60 * 60,
MaxBackups: 5,
StorageType: api.BackupStorageTypeS3,
StorageSource: api.StorageSource{
S3: &api.S3Source{
S3Bucket: os.Getenv("TEST_S3_BUCKET"),
AWSSecret: os.Getenv("TEST_AWS_SECRET"),
},
},
AutoDelete: cleanup,
}
}

// NewS3Backup creates a EtcdBackup object using clusterName.
func NewS3Backup(clusterName string) *api.EtcdBackup {
return &api.EtcdBackup{
@@ -103,31 +88,6 @@ func NewEtcdRestore(restoreName, version string, size int, restoreSource api.Res
}
}

func NewPVBackupPolicy(cleanup bool, storageClass string) *api.BackupPolicy {
return &api.BackupPolicy{
BackupIntervalInSecond: 60 * 60,
MaxBackups: 5,
StorageType: api.BackupStorageTypePersistentVolume,
StorageSource: api.StorageSource{
PV: &api.PVSource{
VolumeSizeInMB: 512,
StorageClass: storageClass,
},
},
AutoDelete: cleanup,
}
}

func ClusterWithBackup(cl *api.EtcdCluster, backupPolicy *api.BackupPolicy) *api.EtcdCluster {
cl.Spec.Backup = backupPolicy
return cl
}

func ClusterWithRestore(cl *api.EtcdCluster, restorePolicy *api.RestorePolicy) *api.EtcdCluster {
cl.Spec.Restore = restorePolicy
return cl
}

func ClusterWithVersion(cl *api.EtcdCluster, version string) *api.EtcdCluster {
cl.Spec.Version = version
return cl
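Note: with NewPVBackupPolicy, ClusterWithBackup, and ClusterWithRestore removed, e2e cluster specs are composed only from the remaining builders; backup/restore coverage presumably moves to the EtcdBackup/EtcdRestore helpers (NewS3Backup, NewEtcdRestore) that stay in this file. A minimal sketch of composing a spec with what remains (the version string is illustrative only):

```go
// Hypothetical spec construction after this PR: chain the remaining builders
// instead of attaching a backup policy to the cluster spec.
cl := e2eutil.NewCluster("test-etcd-", 3)
cl = e2eutil.ClusterWithVersion(cl, "3.2.13") // version value chosen only for illustration
```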
30 changes: 0 additions & 30 deletions test/e2e/e2eutil/util.go
@@ -16,19 +16,14 @@ package e2eutil

import (
"bytes"
"context"
"fmt"
"net/http"
"testing"
"time"

"github.com/coreos/etcd-operator/client/experimentalclient"
"github.com/coreos/etcd-operator/pkg/util/constants"
"github.com/coreos/etcd-operator/pkg/util/k8sutil"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
)

@@ -53,31 +48,6 @@ func KillMembers(kubecli kubernetes.Interface, namespace string, names ...string
return nil
}

func MakeBackup(kubecli kubernetes.Interface, ns, clusterName string) error {
ls := labels.SelectorFromSet(k8sutil.BackupSidecarLabels(clusterName))
podList, err := kubecli.CoreV1().Pods(ns).List(metav1.ListOptions{
LabelSelector: ls.String(),
})
if err != nil {
return err
}
if len(podList.Items) < 1 {
return fmt.Errorf("no backup pod found")
}

// TODO: We are assuming Kubernetes pod network is accessible from test machine.
addr := fmt.Sprintf("%s:%d", podList.Items[0].Status.PodIP, constants.DefaultBackupPodHTTPPort)
bc := experimentalclient.NewBackupWithAddr(&http.Client{}, "http", addr)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

err = bc.Request(ctx)
if err != nil {
return fmt.Errorf("backup pod (%s): %v", podList.Items[0].Name, err)
}
return nil
}

func LogfWithTimestamp(t *testing.T, format string, args ...interface{}) {
t.Log(time.Now(), fmt.Sprintf(format, args...))
}
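Note: the deleted MakeBackup located the backup sidecar pod by label selector and sent it an HTTP backup request over the pod network. The label-selector lookup itself is a general idiom still used elsewhere in the suite; a minimal sketch follows, with illustrative selector values standing in for k8sutil.BackupSidecarLabels:

```go
// findPodsByLabel is a hypothetical helper showing the label-selector lookup
// idiom the deleted MakeBackup used; the selector keys/values are illustrative.
func findPodsByLabel(kubecli kubernetes.Interface, ns, clusterName string) (*v1.PodList, error) {
	ls := labels.SelectorFromSet(map[string]string{
		"app":          "backup", // illustrative; the real helper used k8sutil.BackupSidecarLabels
		"etcd_cluster": clusterName,
	})
	podList, err := kubecli.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: ls.String()})
	if err != nil {
		return nil, err
	}
	if len(podList.Items) < 1 {
		return nil, fmt.Errorf("no pod found for selector %q", ls.String())
	}
	return podList, nil
}
```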
88 changes: 0 additions & 88 deletions test/e2e/e2eutil/wait_util.go
@@ -17,19 +17,16 @@ package e2eutil
import (
"bytes"
"fmt"
"path"
"strings"
"testing"
"time"

api "github.com/coreos/etcd-operator/pkg/apis/etcd/v1beta2"
backups3 "github.com/coreos/etcd-operator/pkg/backup/s3"
"github.com/coreos/etcd-operator/pkg/generated/clientset/versioned"
"github.com/coreos/etcd-operator/pkg/util/k8sutil"
"github.com/coreos/etcd-operator/pkg/util/retryutil"

"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
@@ -182,25 +179,6 @@ func presentIn(a string, list []string) bool {
return false
}

func WaitBackupPodUp(t *testing.T, kubecli kubernetes.Interface, ns, clusterName string, retries int) error {
ls := labels.SelectorFromSet(k8sutil.BackupSidecarLabels(clusterName))
return retryutil.Retry(retryInterval, retries, func() (done bool, err error) {
podList, err := kubecli.CoreV1().Pods(ns).List(metav1.ListOptions{
LabelSelector: ls.String(),
})
if err != nil {
return false, err
}
for _, p := range podList.Items {
LogfWithTimestamp(t, "backup pod (%s) phase: %v", p.Name, p.Status.Phase)
if p.Status.Phase == v1.PodRunning {
return true, nil
}
}
return false, nil
})
}

func waitResourcesDeleted(t *testing.T, kubeClient kubernetes.Interface, cl *api.EtcdCluster) error {
undeletedPods, err := WaitPodsDeleted(kubeClient, cl.Namespace, 3, k8sutil.ClusterListOpt(cl.Name))
if err != nil {
@@ -236,72 +214,6 @@ func waitResourcesDeleted(t *testing.T, kubeClient kubernetes.Interface, cl *api
return nil
}

func WaitBackupDeleted(kubeClient kubernetes.Interface, cl *api.EtcdCluster, checkerOpt StorageCheckerOptions) error {
retries := 3
if checkerOpt.DeletedFromAPI {
// Currently waiting deployment to be gone from API takes a lot of time.
// TODO: revisit this when we use "background propagate" deletion policy.
retries = 30
}
err := retryutil.Retry(retryInterval, retries, func() (bool, error) {
d, err := kubeClient.AppsV1beta1().Deployments(cl.Namespace).Get(k8sutil.BackupSidecarName(cl.Name), metav1.GetOptions{})
// If we don't need to wait deployment to be completely gone, we can say it is deleted
// as long as DeletionTimestamp is not nil. Otherwise, we need to wait it is gone by checking not found error.
if (!checkerOpt.DeletedFromAPI && d.DeletionTimestamp != nil) || apierrors.IsNotFound(err) {
return true, nil
}
if err == nil {
return false, nil
}
return false, err
})
if err != nil {
return fmt.Errorf("failed to wait backup Deployment deleted: %v", err)
}

_, err = WaitPodsDeleted(kubeClient, cl.Namespace, 2,
metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(map[string]string{
"app": k8sutil.BackupPodSelectorAppField,
"etcd_cluster": cl.Name,
}).String(),
})
if err != nil {
return fmt.Errorf("failed to wait backup pod terminated: %v", err)
}
// The rest is to track backup storage, e.g. PV or S3 "dir" deleted.
// If AutoDelete=false, we don't delete them and thus don't check them.
if !cl.Spec.Backup.AutoDelete {
return nil
}
err = retryutil.Retry(retryInterval, 3, func() (done bool, err error) {
switch cl.Spec.Backup.StorageType {
case api.BackupStorageTypePersistentVolume, api.BackupStorageTypeDefault:
pl, err := kubeClient.CoreV1().PersistentVolumeClaims(cl.Namespace).List(k8sutil.ClusterListOpt(cl.Name))
if err != nil {
return false, err
}
if len(pl.Items) > 0 {
return false, nil
}
case api.BackupStorageTypeS3:
s3cli := backups3.NewFromClient(checkerOpt.S3Bucket, path.Join(cl.Namespace, cl.Name), checkerOpt.S3Cli)
keys, err := s3cli.List()
if err != nil {
return false, err
}
if len(keys) > 0 {
return false, nil
}
}
return true, nil
})
if err != nil {
return fmt.Errorf("failed to wait storage (%s) to be deleted: %v", cl.Spec.Backup.StorageType, err)
}
return nil
}

func WaitPodsWithImageDeleted(kubecli kubernetes.Interface, namespace, image string, retries int, lo metav1.ListOptions) ([]*v1.Pod, error) {
return waitPodsDeleted(kubecli, namespace, retries, lo, func(p *v1.Pod) bool {
for _, c := range p.Spec.Containers {
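Note: the deleted WaitBackupPodUp and WaitBackupDeleted were the last backup-specific wait helpers; the retained helpers follow the same Retry-until-condition shape. A minimal sketch of the wait-for-deletion variant, assuming this file's retryInterval and existing imports (the helper name and its use on a Deployment are illustrative):

```go
// waitDeploymentGone is a hypothetical helper: poll until the named Deployment
// no longer exists, mirroring the Retry + IsNotFound pattern of the deleted
// WaitBackupDeleted.
func waitDeploymentGone(kubeClient kubernetes.Interface, ns, name string, retries int) error {
	return retryutil.Retry(retryInterval, retries, func() (bool, error) {
		_, err := kubeClient.AppsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // gone
		}
		if err != nil {
			return false, err // unexpected error
		}
		return false, nil // still present, keep polling
	})
}
```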