Merge branch 'master' into feat_controller_app_info_metrics
Signed-off-by: Thomas Decaux <[email protected]>
ebuildy authored Jan 26, 2024
2 parents 19c8572 + 4e084ac commit 2449b16
Showing 152 changed files with 4,609 additions and 1,260 deletions.
7 changes: 4 additions & 3 deletions CODEOWNERS
@@ -2,9 +2,10 @@
** @argoproj/argocd-approvers

# Docs
-/docs/** @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
-/USERS.md @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
-/mkdocs.yml @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
+/docs/** @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
+/USERS.md @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
+/README.md @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
+/mkdocs.yml @argoproj/argocd-approvers @argoproj/argocd-approvers-docs

# CI
/.github/** @argoproj/argocd-approvers @argoproj/argocd-approvers-ci
2 changes: 1 addition & 1 deletion Dockerfile
@@ -51,7 +51,7 @@ RUN groupadd -g $ARGOCD_USER_ID argocd && \
apt-get update && \
apt-get dist-upgrade -y && \
apt-get install -y \
-git git-lfs tini gpg tzdata && \
+git git-lfs tini gpg tzdata connect-proxy && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

31 changes: 13 additions & 18 deletions Makefile
@@ -49,7 +49,7 @@ ARGOCD_E2E_DEX_PORT?=5556
ARGOCD_E2E_YARN_HOST?=localhost
ARGOCD_E2E_DISABLE_AUTH?=

-ARGOCD_E2E_TEST_TIMEOUT?=60m
+ARGOCD_E2E_TEST_TIMEOUT?=90m

ARGOCD_IN_CI?=false
ARGOCD_TEST_E2E?=true
@@ -175,29 +175,21 @@ endif
.PHONY: all
all: cli image

-# We have some legacy requirements for being checked out within $GOPATH.
-# The ensure-gopath target can be used as dependency to ensure we are running
-# within these boundaries.
-.PHONY: ensure-gopath
-ensure-gopath:
-ifneq ("$(PWD)","$(LEGACY_PATH)")
-@echo "Due to legacy requirements for codegen, repository needs to be checked out within \$$GOPATH"
-@echo "Location of this repo should be '$(LEGACY_PATH)' but is '$(PWD)'"
-@exit 1
-endif

.PHONY: gogen
-gogen: ensure-gopath
+gogen:
export GO111MODULE=off
go generate ./util/argo/...

.PHONY: protogen
-protogen: ensure-gopath mod-vendor-local
+protogen: mod-vendor-local protogen-fast
+
+.PHONY: protogen-fast
+protogen-fast:
export GO111MODULE=off
./hack/generate-proto.sh

.PHONY: openapigen
-openapigen: ensure-gopath
+openapigen:
export GO111MODULE=off
./hack/update-openapi.sh

@@ -212,19 +204,22 @@ notification-docs:


.PHONY: clientgen
-clientgen: ensure-gopath
+clientgen:
export GO111MODULE=off
./hack/update-codegen.sh

.PHONY: clidocsgen
-clidocsgen: ensure-gopath
+clidocsgen:
go run tools/cmd-docs/main.go


.PHONY: codegen-local
-codegen-local: ensure-gopath mod-vendor-local gogen protogen clientgen openapigen clidocsgen manifests-local notification-docs notification-catalog
+codegen-local: mod-vendor-local gogen protogen clientgen openapigen clidocsgen manifests-local notification-docs notification-catalog
rm -rf vendor/

+.PHONY: codegen-local-fast
+codegen-local-fast: gogen protogen-fast clientgen openapigen clidocsgen manifests-local notification-docs notification-catalog

.PHONY: codegen
codegen: test-tools-image
$(call run-in-test-client,make codegen-local)
2 changes: 1 addition & 1 deletion Procfile
@@ -1,4 +1,4 @@
controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --server-side-diff-enabled=${ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF:-'false'}"
controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "HOSTNAME=testappcontroller-1 FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --server-side-diff-enabled=${ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF:-'false'}"
api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" = 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} docker.io/library/redis:$(grep "image: redis" manifests/base/redis/argocd-redis-deployment.yaml | cut -d':' -f3) --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
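The `HOSTNAME=testappcontroller-1` variable added to the controller line above feeds the controller's shard inference: in StatefulSet mode, the application controller derives its shard number from the trailing ordinal of its hostname (see `sharding.InferShard` further down in this commit). Here is a minimal sketch of that idea, assuming the `<name>-<ordinal>` pod-name convention; it is an illustration, not the controller's actual implementation:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// inferShard derives a shard number from the trailing "-<ordinal>" of
// the hostname, mirroring how StatefulSet pod names end in an ordinal.
func inferShard() (int, error) {
	hostname, err := os.Hostname()
	if err != nil {
		return -1, err
	}
	parts := strings.Split(hostname, "-")
	shard, err := strconv.Atoi(parts[len(parts)-1])
	if err != nil {
		return -1, fmt.Errorf("hostname %q does not end in a shard ordinal: %w", hostname, err)
	}
	return shard, nil
}

func main() {
	// With HOSTNAME=testappcontroller-1, this style of parsing yields shard 1.
	shard, err := inferShard()
	if err != nil {
		panic(err)
	}
	fmt.Println("shard:", shard)
}
```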
2 changes: 2 additions & 0 deletions README.md
@@ -13,6 +13,7 @@
**Social:**
[![Twitter Follow](https://img.shields.io/twitter/follow/argoproj?style=social)](https://twitter.com/argoproj)
[![Slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack)
+[![LinkedIn](https://img.shields.io/badge/LinkedIn-argoproj-blue.svg?logo=linkedin)](https://www.linkedin.com/company/argoproj/)

# Argo CD - Declarative Continuous Delivery for Kubernetes

@@ -85,4 +86,5 @@ Participation in the Argo CD project is governed by the [CNCF Code of Conduct](h
1. [Getting Started with ArgoCD for GitOps Deployments](https://youtu.be/AvLuplh1skA)
1. [Using Argo CD & Datree for Stable Kubernetes CI/CD Deployments](https://youtu.be/17894DTru2Y)
1. [How to create Argo CD Applications Automatically using ApplicationSet? "Automation of GitOps"](https://amralaayassen.medium.com/how-to-create-argocd-applications-automatically-using-applicationset-automation-of-the-gitops-59455eaf4f72)
+1. [Progressive Delivery with Service Mesh – Argo Rollouts with Istio](https://www.cncf.io/blog/2022/12/16/progressive-delivery-with-service-mesh-argo-rollouts-with-istio/)

2 changes: 2 additions & 0 deletions USERS.md
@@ -40,6 +40,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Boozt](https://www.booztgroup.com/)
1. [Boticario](https://www.boticario.com.br/)
1. [Bulder Bank](https://bulderbank.no)
+1. [CAM](https://cam-inc.co.jp)
1. [Camptocamp](https://camptocamp.com)
1. [Candis](https://www.candis.io)
1. [Capital One](https://www.capitalone.com)
@@ -219,6 +220,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Pigment](https://www.gopigment.com/)
1. [Pipefy](https://www.pipefy.com/)
1. [Pismo](https://pismo.io/)
+1. [PITS Globale Datenrettungsdienste](https://www.pitsdatenrettung.de/)
1. [Platform9 Systems](https://platform9.com/)
1. [Polarpoint.io](https://polarpoint.io)
1. [PostFinance](https://github.com/postfinance)
4 changes: 4 additions & 0 deletions assets/swagger.json
@@ -5664,6 +5664,10 @@
"type": "string",
"title": "ClusterName contains AWS cluster name"
},
"profile": {
"description": "Profile contains optional role ARN. If set then AWS IAM Authenticator uses the profile to perform cluster operations instead of the default AWS credential provider chain.",
"type": "string"
},
"roleARN": {
"description": "RoleARN contains optional role ARN. If set then AWS IAM Authenticator assume a role to perform cluster operations instead of the default AWS credential provider chain.",
"type": "string"
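The new `profile` field sits alongside `roleARN` in the cluster's AWS auth configuration. A small sketch of consuming such a document; the struct below is declared locally for illustration and is not the generated API type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// awsAuthConfig mirrors the swagger definition above, locally and only
// for illustration.
type awsAuthConfig struct {
	ClusterName string `json:"clusterName,omitempty"`
	RoleARN     string `json:"roleARN,omitempty"`
	Profile     string `json:"profile,omitempty"` // new in this commit
}

func main() {
	raw := []byte(`{"clusterName":"my-eks","roleARN":"arn:aws:iam::123456789012:role/argocd","profile":"staging"}`)
	var cfg awsAuthConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // prints the parsed config, including Profile:staging
}
```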
cmd/argocd-application-controller/commands/argocd_application_controller.go
@@ -50,6 +50,7 @@ func NewCommand() *cobra.Command {
clientConfig clientcmd.ClientConfig
appResyncPeriod int64
appHardResyncPeriod int64
+appResyncJitter int64
repoErrorGracePeriod int64
repoServerAddress string
repoServerTimeoutSeconds int
@@ -147,7 +148,7 @@ func NewCommand() *cobra.Command {
appController.InvalidateProjectsCache()
}))
kubectl := kubeutil.NewKubectl()
-clusterFilter := getClusterFilter(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
+clusterSharding := getClusterSharding(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
appController, err = controller.NewApplicationController(
namespace,
settingsMgr,
@@ -158,6 +159,7 @@
kubectl,
resyncDuration,
hardResyncDuration,
+time.Duration(appResyncJitter)*time.Second,
time.Duration(selfHealTimeoutSeconds)*time.Second,
time.Duration(repoErrorGracePeriod)*time.Second,
metricsPort,
@@ -166,7 +168,7 @@
metricsAplicationRevisions,
kubectlParallelismLimit,
persistResourceHealth,
-clusterFilter,
+clusterSharding,
applicationNamespaces,
&workqueueRateLimit,
serverSideDiff,
@@ -196,6 +198,7 @@ func NewCommand() *cobra.Command {
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().Int64Var(&appResyncPeriod, "app-resync", int64(env.ParseDurationFromEnv("ARGOCD_RECONCILIATION_TIMEOUT", defaultAppResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Time period in seconds for application resync.")
command.Flags().Int64Var(&appHardResyncPeriod, "app-hard-resync", int64(env.ParseDurationFromEnv("ARGOCD_HARD_RECONCILIATION_TIMEOUT", defaultAppHardResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Time period in seconds for application hard resync.")
command.Flags().Int64Var(&appResyncJitter, "app-resync-jitter", int64(env.ParseDurationFromEnv("ARGOCD_RECONCILIATION_JITTER", 0*time.Second, 0, math.MaxInt64).Seconds()), "Maximum time period in seconds to add as a delay jitter for application resync.")
command.Flags().Int64Var(&repoErrorGracePeriod, "repo-error-grace-period-seconds", int64(env.ParseDurationFromEnv("ARGOCD_REPO_ERROR_GRACE_PERIOD_SECONDS", defaultAppResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Grace period in seconds for ignoring consecutive errors while communicating with repo server.")
command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address.")
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.")
@@ -238,11 +241,10 @@ func NewCommand() *cobra.Command {
return &command
}

-func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) sharding.ClusterFilterFunction {
-var replicas int
-shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
+func getClusterSharding(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) sharding.ClusterShardingCache {
+var replicasCount int
+// StatefulSet mode and Deployment mode use different default values for shard number.
+defaultShardNumberValue := 0
applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
appControllerDeployment, err := kubeClient.AppsV1().Deployments(settingsMgr.GetNamespace()).Get(context.Background(), applicationControllerName, metav1.GetOptions{})

@@ -252,22 +254,21 @@ func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.Se
}

if enableDynamicClusterDistribution && appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
-replicas = int(*appControllerDeployment.Spec.Replicas)
+replicasCount = int(*appControllerDeployment.Spec.Replicas)
+defaultShardNumberValue = -1
} else {
-replicas = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
+replicasCount = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
}

-var clusterFilter func(cluster *v1alpha1.Cluster) bool
-if replicas > 1 {
+shardNumber := env.ParseNumFromEnv(common.EnvControllerShard, defaultShardNumberValue, -math.MaxInt32, math.MaxInt32)
+if replicasCount > 1 {
// check for shard mapping using configmap if application-controller is a deployment
// else use existing logic to infer shard from pod name if application-controller is a statefulset
if enableDynamicClusterDistribution && appControllerDeployment != nil {

var err error
// retry 3 times if we find a conflict while updating shard mapping configMap.
// If we still see conflicts after the retries, wait for next iteration of heartbeat process.
for i := 0; i <= common.AppControllerHeartbeatUpdateRetryCount; i++ {
-shard, err = sharding.GetOrUpdateShardFromConfigMap(kubeClient, settingsMgr, replicas, shard)
+shardNumber, err = sharding.GetOrUpdateShardFromConfigMap(kubeClient, settingsMgr, replicasCount, shardNumber)
if !kubeerrors.IsConflict(err) {
err = fmt.Errorf("unable to get shard due to error updating the sharding config map: %s", err)
break
@@ -276,19 +277,19 @@ func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.Se
}
errors.CheckError(err)
} else {
if shard < 0 {
if shardNumber < 0 {
var err error
-shard, err = sharding.InferShard()
+shardNumber, err = sharding.InferShard()
errors.CheckError(err)
}
if shardNumber > replicasCount {
log.Warnf("Calculated shard number %d is greated than the number of replicas count. Defaulting to 0", shardNumber)
shardNumber = 0
}
}
log.Infof("Processing clusters from shard %d", shard)
db := db.NewDB(settingsMgr.GetNamespace(), settingsMgr, kubeClient)
log.Infof("Using filter function: %s", shardingAlgorithm)
distributionFunction := sharding.GetDistributionFunction(db, shardingAlgorithm)
clusterFilter = sharding.GetClusterFilter(db, distributionFunction, shard)
} else {
log.Info("Processing all cluster shards")
}
-return clusterFilter
+db := db.NewDB(settingsMgr.GetNamespace(), settingsMgr, kubeClient)
+return sharding.NewClusterSharding(db, shardNumber, replicasCount, shardingAlgorithm)
}
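The upshot of the refactor above is that the command no longer builds a `clusterFilter` closure; it returns a `sharding.ClusterShardingCache` that owns the shard number, replica count, and distribution algorithm. As a toy illustration of the kind of assignment a distribution function performs — hash a stable cluster identifier onto `replicasCount` shards — consider this sketch (the identifiers are assumptions; the real algorithms live in `controller/sharding`):

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// shardOf hashes a stable cluster identifier and reduces it modulo the
// replica count, the classic way to spread clusters across shards.
func shardOf(clusterID string, replicasCount int) int {
	if replicasCount <= 1 {
		return 0
	}
	h := fnv.New32a()
	h.Write([]byte(clusterID))
	return int(h.Sum32()) % replicasCount
}

func main() {
	for _, c := range []string{"in-cluster", "prod-eu", "prod-us"} {
		fmt.Printf("%s -> shard %d of 3\n", c, shardOf(c, 3))
	}
}
```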
16 changes: 10 additions & 6 deletions cmd/argocd-k8s-auth/commands/aws.go
@@ -37,13 +37,14 @@ func newAWSCommand() *cobra.Command {
var (
clusterName string
roleARN string
+profile string
)
var command = &cobra.Command{
Use: "aws",
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()

-presignedURLString, err := getSignedRequestWithRetry(ctx, time.Minute, 5*time.Second, clusterName, roleARN, getSignedRequest)
+presignedURLString, err := getSignedRequestWithRetry(ctx, time.Minute, 5*time.Second, clusterName, roleARN, profile, getSignedRequest)
errors.CheckError(err)
token := v1Prefix + base64.RawURLEncoding.EncodeToString([]byte(presignedURLString))
// Set token expiration to 1 minute before the presigned URL expires for some cushion
@@ -53,16 +54,17 @@
}
command.Flags().StringVar(&clusterName, "cluster-name", "", "AWS Cluster name")
command.Flags().StringVar(&roleARN, "role-arn", "", "AWS Role ARN")
command.Flags().StringVar(&profile, "profile", "", "AWS Profile")
return command
}

-type getSignedRequestFunc func(clusterName, roleARN string) (string, error)
+type getSignedRequestFunc func(clusterName, roleARN string, profile string) (string, error)

-func getSignedRequestWithRetry(ctx context.Context, timeout, interval time.Duration, clusterName, roleARN string, fn getSignedRequestFunc) (string, error) {
+func getSignedRequestWithRetry(ctx context.Context, timeout, interval time.Duration, clusterName, roleARN string, profile string, fn getSignedRequestFunc) (string, error) {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
for {
-signed, err := fn(clusterName, roleARN)
+signed, err := fn(clusterName, roleARN, profile)
if err == nil {
return signed, nil
}
@@ -74,8 +76,10 @@ func getSignedRequestWithRetry(ctx context.Context, timeout, interval time.Durat
}
}

-func getSignedRequest(clusterName, roleARN string) (string, error) {
-sess, err := session.NewSession()
+func getSignedRequest(clusterName, roleARN string, profile string) (string, error) {
+sess, err := session.NewSessionWithOptions(session.Options{
+Profile: profile,
+})
if err != nil {
return "", fmt.Errorf("error creating new AWS session: %s", err)
}
Expand Down
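Passing a `Profile` to `session.NewSessionWithOptions` makes the AWS SDK resolve credentials from that named profile in the shared config/credentials files instead of the default provider chain, which is what the new `--profile` flag plumbs through. A self-contained sketch; the profile name `staging` and the `SharedConfigEnable` option are assumptions for illustration:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Resolve credentials from the "staging" profile in ~/.aws/config
	// and ~/.aws/credentials rather than the default provider chain.
	sess, err := session.NewSessionWithOptions(session.Options{
		Profile:           "staging",
		SharedConfigState: session.SharedConfigEnable,
	})
	if err != nil {
		panic(err)
	}
	creds, err := sess.Config.Credentials.Get()
	if err != nil {
		panic(err)
	}
	fmt.Println("credentials provider:", creds.ProviderName)
}
```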
8 changes: 4 additions & 4 deletions cmd/argocd-k8s-auth/commands/aws_test.go
@@ -22,7 +22,7 @@ func TestGetSignedRequestWithRetry(t *testing.T) {
}

// when
-signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", mock.getSignedRequestMock)
+signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", "", mock.getSignedRequestMock)

// then
assert.NoError(t, err)
@@ -41,7 +41,7 @@
}

// when
-signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", mock.getSignedRequestMock)
+signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", "", mock.getSignedRequestMock)

// then
assert.NoError(t, err)
@@ -57,7 +57,7 @@
}

// when
-signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", mock.getSignedRequestMock)
+signed, err := getSignedRequestWithRetry(ctx, time.Second, time.Millisecond, "cluster-name", "", "", mock.getSignedRequestMock)

// then
assert.Error(t, err)
@@ -70,7 +70,7 @@ type signedRequestMock struct {
returnFunc func(m *signedRequestMock) (string, error)
}

-func (m *signedRequestMock) getSignedRequestMock(clusterName, roleARN string) (string, error) {
+func (m *signedRequestMock) getSignedRequestMock(clusterName, roleARN string, profile string) (string, error) {
m.getSignedRequestCalls++
return m.returnFunc(m)
}