Merge branch 'master' into fix_no_apps_generation
blakepettersson authored Mar 10, 2024
2 parents ff37256 + 542890f commit 997f5c7
Showing 108 changed files with 3,587 additions and 1,059 deletions.
4 changes: 1 addition & 3 deletions .github/workflows/image-reuse.yaml
@@ -74,9 +74,7 @@ jobs:
go-version: ${{ inputs.go-version }}

- name: Install cosign
-uses: sigstore/cosign-installer@1fc5bd396d372bee37d608f955b336615edf79c8 # v3.2.0
-with:
-cosign-release: 'v2.2.1'
+uses: sigstore/cosign-installer@e1523de7571e31dbe865fd2e80c5c7c23ae71eb4 # v3.4.0

- uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0
- uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
2 changes: 1 addition & 1 deletion .github/workflows/image.yaml
@@ -86,7 +86,7 @@ jobs:
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
# Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
-uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.7.0
+uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.9.0
with:
image: ghcr.io/argoproj/argo-cd/argocd
digest: ${{ needs.build-and-publish.outputs.image-digest }}
8 changes: 8 additions & 0 deletions .github/workflows/release.yaml
@@ -87,6 +87,14 @@ jobs:
echo "KUBECTL_VERSION=$(go list -m k8s.io/client-go | head -n 1 | rev | cut -d' ' -f1 | rev)" >> $GITHUB_ENV
echo "GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)" >> $GITHUB_ENV
+- name: Free Disk Space (Ubuntu)
+uses: jlumbroso/free-disk-space@4d9e71b726748f254fe64fa44d273194bd18ec91
+with:
+large-packages: false
+docker-images: false
+swap-storage: false
+tool-cache: false

- name: Run GoReleaser
uses: goreleaser/goreleaser-action@7ec5c2b0c6cdda6e8bbb49444bc797dd33d74dd8 # v5.0.0
id: run-goreleaser
2 changes: 1 addition & 1 deletion Dockerfile
@@ -28,7 +28,7 @@ WORKDIR /tmp
COPY hack/install.sh hack/tool-versions.sh ./
COPY hack/installers installers

-RUN ./install.sh helm-linux && \
+RUN ./install.sh helm && \
INSTALL_PATH=/usr/local/bin ./install.sh kustomize

####################################################################################################
5 changes: 2 additions & 3 deletions Makefile
@@ -528,8 +528,7 @@ serve-docs-local:

.PHONY: serve-docs
serve-docs:
-docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}/site:/site -w /site --entrypoint "" ${MKDOCS_DOCKER_IMAGE} python3 -m http.server --bind 0.0.0.0 8000
-
+docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install -r docs/requirements.txt; mkdocs serve -a $$(ip route get 1 | awk '\''{print $$7}'\''):8000'

# Verify that kubectl can connect to your K8s cluster from Docker
.PHONY: verify-kube-connect
@@ -552,7 +551,7 @@ install-tools-local: install-test-tools-local install-codegen-tools-local instal
.PHONY: install-test-tools-local
install-test-tools-local:
./hack/install.sh kustomize
-./hack/install.sh helm-linux
+./hack/install.sh helm
./hack/install.sh gotestsum

# Installs all tools required for running codegen (Linux packages)
2 changes: 2 additions & 0 deletions USERS.md
@@ -20,6 +20,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Allianz Direct](https://www.allianzdirect.de/)
1. [Amadeus IT Group](https://amadeus.com/)
1. [Ambassador Labs](https://www.getambassador.io/)
+1. [Ancestry](https://www.ancestry.com/)
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
1. [Ant Group](https://www.antgroup.com/)
1. [AppDirect](https://www.appdirect.com)
@@ -127,6 +128,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Hiya](https://hiya.com)
1. [Honestbank](https://honestbank.com)
1. [Hostinger](https://www.hostinger.com)
+1. [IABAI](https://www.iab.ai)
1. [IBM](https://www.ibm.com/)
1. [Ibotta](https://home.ibotta.com)
1. [IITS-Consulting](https://iits-consulting.de)
2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
-2.9.0
+2.11.0
4 changes: 4 additions & 0 deletions assets/swagger.json
@@ -6424,6 +6424,10 @@
"type": "string"
}
},
+"labelWithoutSelector": {
+"type": "boolean",
+"title": "LabelWithoutSelector specifies whether to apply common labels to resource selectors or not"
+},
"namePrefix": {
"type": "string",
"title": "NamePrefix is a prefix appended to resources for Kustomize apps"
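The new `labelWithoutSelector` property sits alongside the existing Kustomize options in the API schema. As a rough illustration of how a boolean swagger property like this surfaces in Go, here is a minimal, self-contained sketch; the struct and field names are assumptions for illustration, not code taken from this commit:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// KustomizeOptions is a hypothetical stand-in for the generated type behind
// the swagger definition above; only the fields relevant to this hunk are shown.
type KustomizeOptions struct {
	// LabelWithoutSelector specifies whether to apply common labels to
	// resource selectors or not.
	LabelWithoutSelector bool `json:"labelWithoutSelector,omitempty"`
	// NamePrefix is a prefix appended to resources for Kustomize apps.
	NamePrefix string `json:"namePrefix,omitempty"`
}

func main() {
	// Decode a fragment shaped like the swagger definition.
	raw := []byte(`{"labelWithoutSelector": true, "namePrefix": "staging-"}`)
	var opts KustomizeOptions
	if err := json.Unmarshal(raw, &opts); err != nil {
		panic(err)
	}
	fmt.Printf("labelWithoutSelector=%v namePrefix=%q\n", opts.LabelWithoutSelector, opts.NamePrefix)
}
```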
@@ -220,7 +220,7 @@ func NewCommand() *cobra.Command {
command.Flags().StringVar(&shardingAlgorithm, "sharding-method", env.StringFromEnv(common.EnvControllerShardingAlgorithm, common.DefaultShardingAlgorithm), "Enables choice of sharding method. Supported sharding methods are : [legacy, round-robin] ")
// global queue rate limit config
command.Flags().Int64Var(&workqueueRateLimit.BucketSize, "wq-bucket-size", env.ParseInt64FromEnv("WORKQUEUE_BUCKET_SIZE", 500, 1, math.MaxInt64), "Set Workqueue Rate Limiter Bucket Size, default 500")
-command.Flags().Int64Var(&workqueueRateLimit.BucketQPS, "wq-bucket-qps", env.ParseInt64FromEnv("WORKQUEUE_BUCKET_QPS", 50, 1, math.MaxInt64), "Set Workqueue Rate Limiter Bucket QPS, default 50")
+command.Flags().Float64Var(&workqueueRateLimit.BucketQPS, "wq-bucket-qps", env.ParseFloat64FromEnv("WORKQUEUE_BUCKET_QPS", math.MaxFloat64, 1, math.MaxFloat64), "Set Workqueue Rate Limiter Bucket QPS, default set to MaxFloat64 which disables the bucket limiter")
// individual item rate limit config
// when WORKQUEUE_FAILURE_COOLDOWN is 0 per item rate limiting is disabled(default)
command.Flags().DurationVar(&workqueueRateLimit.FailureCoolDown, "wq-cooldown-ns", time.Duration(env.ParseInt64FromEnv("WORKQUEUE_FAILURE_COOLDOWN_NS", 0, 0, (24*time.Hour).Nanoseconds())), "Set Workqueue Per Item Rate Limiter Cooldown duration in ns, default 0(per item rate limiter disabled)")
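The `--wq-bucket-qps` flag switches from an int64 to a float64 so its default can be `math.MaxFloat64`, which effectively disables the global bucket limiter, while `--wq-bucket-size` still sets the burst capacity. Below is a minimal sketch of the token-bucket behaviour these two flags describe, built directly on `golang.org/x/time/rate` rather than the controller's actual workqueue wiring; the variable names are illustrative:

```go
package main

import (
	"fmt"
	"math"

	"golang.org/x/time/rate"
)

func main() {
	bucketSize := 500      // --wq-bucket-size: burst capacity of the bucket
	qps := math.MaxFloat64 // --wq-bucket-qps default: refill rate so high it never throttles

	limiter := rate.NewLimiter(rate.Limit(qps), bucketSize)

	// With an effectively infinite refill rate, Allow() keeps admitting items,
	// so the global bucket limiter imposes no throttling.
	admitted := 0
	for i := 0; i < 10000; i++ {
		if limiter.Allow() {
			admitted++
		}
	}
	fmt.Printf("admitted %d of 10000 items\n", admitted)

	// A finite QPS (e.g. 50) would make Allow() start returning false once the
	// initial burst of bucketSize tokens is spent, which is the throttled case.
}
```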
20 changes: 6 additions & 14 deletions cmd/argocd/commands/admin/cluster.go
@@ -86,8 +86,12 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
if err != nil {
return nil, err
}
+appItems, err := appClient.ArgoprojV1alpha1().Applications(namespace).List(ctx, v1.ListOptions{})
+if err != nil {
+return nil, err
+}
clusterShardingCache := sharding.NewClusterSharding(argoDB, shard, replicas, shardingAlgorithm)
-clusterShardingCache.Init(clustersList)
+clusterShardingCache.Init(clustersList, appItems)
clusterShards := clusterShardingCache.GetDistribution()

var cache *appstatecache.Cache
@@ -113,10 +117,6 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
}
}

-appItems, err := appClient.ArgoprojV1alpha1().Applications(namespace).List(ctx, v1.ListOptions{})
-if err != nil {
-return nil, err
-}
apps := appItems.Items
for i, app := range apps {
err := argo.ValidateDestination(ctx, &app.Spec.Destination, argoDB)
@@ -129,12 +129,6 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie

batchSize := 10
batchesCount := int(math.Ceil(float64(len(clusters)) / float64(batchSize)))
-clusterSharding := &sharding.ClusterSharding{
-Shard: shard,
-Replicas: replicas,
-Shards: make(map[string]int),
-Clusters: make(map[string]*v1alpha1.Cluster),
-}
for batchNum := 0; batchNum < batchesCount; batchNum++ {
batchStart := batchSize * batchNum
batchEnd := batchSize * (batchNum + 1)
@@ -146,9 +140,7 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
clusterShard := 0
cluster := batch[i]
if replicas > 0 {
-distributionFunction := sharding.GetDistributionFunction(clusterSharding.GetClusterAccessor(), common.DefaultShardingAlgorithm, replicas)
-distributionFunction(&cluster)
-clusterShard := clusterShards[cluster.Server]
+clusterShard = clusterShards[cluster.Server]
cluster.Shard = pointer.Int64(int64(clusterShard))
log.Infof("Cluster with uid: %s will be processed by shard %d", cluster.ID, clusterShard)
}