Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add leader election feature to agones-controller #3025

Merged
merged 1 commit into from
Mar 29, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion cmd/allocator/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -369,7 +369,7 @@ func newServiceHandler(kubeClient kubernetes.Interface, agonesClient versioned.I
totalRemoteAllocationTimeout,
allocationBatchWaitTime)

ctx := signals.NewSigKillContext()
ctx, _ := signals.NewSigKillContext()
h := serviceHandler{
allocationCallback: func(gsa *allocationv1.GameServerAllocation) (k8sruntime.Object, error) {
return allocator.Allocate(ctx, gsa)
Expand Down
86 changes: 74 additions & 12 deletions cmd/controller/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ import (
"agones.dev/agones/pkg/util/runtime"
"agones.dev/agones/pkg/util/signals"
"agones.dev/agones/pkg/util/webhooks"
"github.com/google/uuid"
"github.com/heptiolabs/healthcheck"
"github.com/pkg/errors"
prom "github.com/prometheus/client_golang/prometheus"
Expand All @@ -50,9 +51,12 @@ import (
corev1 "k8s.io/api/core/v1"
extclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
)

const (
Expand Down Expand Up @@ -80,6 +84,7 @@ const (
kubeconfigFlag = "kubeconfig"
allocationBatchWaitTime = "allocation-batch-wait-time"
defaultResync = 30 * time.Second
leaderElectionFlag = "leader-election"
)

var (
Expand All @@ -106,7 +111,7 @@ func setupLogging(logDir string, logSizeLimitMB int) {

// main starts the operator for the gameserver CRD
func main() {
ctx := signals.NewSigKillContext()
ctx, cancel := signals.NewSigKillContext()
ctlConf := parseEnvFlags()

if ctlConf.LogDir != "" {
Expand Down Expand Up @@ -234,19 +239,21 @@ func main() {
rs = append(rs, gasController)
}

kubeInformerFactory.Start(ctx.Done())
agonesInformerFactory.Start(ctx.Done())
whenLeader(ctx, cancel, logger, ctlConf.LeaderElection, kubeClient, func(ctx context.Context) {
kubeInformerFactory.Start(ctx.Done())
agonesInformerFactory.Start(ctx.Done())

for _, r := range rs {
go func(rr runner) {
if runErr := rr.Run(ctx, ctlConf.NumWorkers); runErr != nil {
logger.WithError(runErr).Fatalf("could not start runner: %T", rr)
}
}(r)
}
for _, r := range rs {
go func(rr runner) {
if runErr := rr.Run(ctx, ctlConf.NumWorkers); runErr != nil {
logger.WithError(runErr).Fatalf("could not start runner: %T", rr)
}
}(r)
}

<-ctx.Done()
logger.Info("Shut down agones controllers")
<-ctx.Done()
logger.Info("Shut down agones controllers")
})
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nice, exactly what I was hoping to see.

}

func parseEnvFlags() config {
Expand All @@ -269,6 +276,7 @@ func parseEnvFlags() config {
viper.SetDefault(enableStackdriverMetricsFlag, false)
viper.SetDefault(stackdriverLabels, "")
viper.SetDefault(allocationBatchWaitTime, 500*time.Millisecond)
viper.SetDefault(leaderElectionFlag, false)

viper.SetDefault(projectIDFlag, "")
viper.SetDefault(numWorkersFlag, 64)
Expand Down Expand Up @@ -301,6 +309,7 @@ func parseEnvFlags() config {
pflag.Int32(logSizeLimitMBFlag, 1000, "Log file size limit in MB")
pflag.String(logLevelFlag, viper.GetString(logLevelFlag), "Agones Log level")
pflag.Duration(allocationBatchWaitTime, viper.GetDuration(allocationBatchWaitTime), "Flag to configure the waiting period between allocations batches")
pflag.Bool(leaderElectionFlag, viper.GetBool(leaderElectionFlag), "Flag to enable/disable leader election for controller pod")
chiayi marked this conversation as resolved.
Show resolved Hide resolved
cloudproduct.BindFlags()
runtime.FeaturesBindFlags()
pflag.Parse()
Expand Down Expand Up @@ -329,6 +338,7 @@ func parseEnvFlags() config {
runtime.Must(viper.BindEnv(logDirFlag))
runtime.Must(viper.BindEnv(logSizeLimitMBFlag))
runtime.Must(viper.BindEnv(allocationBatchWaitTime))
runtime.Must(viper.BindEnv(leaderElectionFlag))
chiayi marked this conversation as resolved.
Show resolved Hide resolved
runtime.Must(viper.BindPFlags(pflag.CommandLine))
runtime.Must(cloudproduct.BindEnv())
runtime.Must(runtime.FeaturesBindEnv())
Expand Down Expand Up @@ -379,6 +389,7 @@ func parseEnvFlags() config {
LogSizeLimitMB: int(viper.GetInt32(logSizeLimitMBFlag)),
StackdriverLabels: viper.GetString(stackdriverLabels),
AllocationBatchWaitTime: viper.GetDuration(allocationBatchWaitTime),
LeaderElection: viper.GetBool(leaderElectionFlag),
}
}

Expand Down Expand Up @@ -407,6 +418,7 @@ type config struct {
LogLevel string
LogSizeLimitMB int
AllocationBatchWaitTime time.Duration
LeaderElection bool
}

// validate ensures the ctlConfig data is valid.
Expand All @@ -433,6 +445,56 @@ type httpServer struct {
http.ServeMux
}

func whenLeader(ctx context.Context, cancel context.CancelFunc, logger *logrus.Entry, doLeaderElection bool, kubeClient *kubernetes.Clientset, start func(_ context.Context)) {
if !doLeaderElection {
start(ctx)
chiayi marked this conversation as resolved.
Show resolved Hide resolved
return
}

id := uuid.New().String()

lock := &resourcelock.LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Name: "agones-controller-lock",
Namespace: "agones-system",
},
Client: kubeClient.CoordinationV1(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: id,
},
}

logger.WithField("id", id).Info("Leader Election ID")

leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
Lock: lock,
// IMPORTANT: you MUST ensure that any code you have that
// is protected by the lease must terminate **before**
// you call cancel. Otherwise, you could have a background
// loop still running and another process could
// get elected before your background loop finished, violating
// the stated goal of the lease.
ReleaseOnCancel: true,
LeaseDuration: 15 * time.Second,
RenewDeadline: 10 * time.Second,
RetryPeriod: 2 * time.Second,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: start,
OnStoppedLeading: func() {
logger.WithField("id", id).Info("Leader Lost")
cancel()
os.Exit(0)
},
OnNewLeader: func(identity string) {
if identity == id {
return
}
logger.WithField("id", id).Info("New Leader Elected")
},
},
})
}

func (h *httpServer) Run(_ context.Context, _ int) error {
logger.Info("Starting http server...")
srv := &http.Server{
Expand Down
2 changes: 1 addition & 1 deletion cmd/ping/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ func main() {
logger.WithField("version", pkg.Version).WithField("featureGates", runtime.EncodeFeatures()).
WithField("ctlConf", ctlConf).Info("Starting ping...")

ctx := signals.NewSigKillContext()
ctx, _ := signals.NewSigKillContext()

udpSrv := serveUDP(ctx, ctlConf)
defer udpSrv.close()
Expand Down
2 changes: 1 addition & 1 deletion cmd/sdk-server/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ func main() {
time.Sleep(time.Duration(ctlConf.Delay) * time.Second)
}

ctx := signals.NewSigKillContext()
ctx, _ := signals.NewSigKillContext()

grpcServer := grpc.NewServer()
// don't graceful stop, because if we get a SIGKILL signal
Expand Down
7 changes: 4 additions & 3 deletions pkg/util/signals/signals.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,10 @@ import (
)

// NewSigKillContext returns a Context that is cancelled when SIGINT or SIGTERM is received (SIGKILL cannot be trapped), together with the CancelFunc that releases the signal registration
func NewSigKillContext() context.Context {
ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
return ctx
func NewSigKillContext() (context.Context, context.CancelFunc) {
	// signal.NotifyContext already returns exactly the (ctx, cancel) pair we
	// need; no intermediate variables required. The CancelFunc unregisters
	// the signal handlers and cancels the context.
	return signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
}

// NewSigTermHandler creates a channel to listen to SIGTERM and runs the handle function
Expand Down
11 changes: 11 additions & 0 deletions vendor/k8s.io/client-go/tools/leaderelection/OWNERS

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

69 changes: 69 additions & 0 deletions vendor/k8s.io/client-go/tools/leaderelection/healthzadaptor.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading