From 3caa54b065dcea9dc6c2fc36bbfb793505513504 Mon Sep 17 00:00:00 2001 From: Daniel Grimes Date: Fri, 15 Oct 2021 12:27:05 +0000 Subject: [PATCH 1/9] add .devcontainer to .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index c23cbfed9c..a9ca864199 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ helm/test-values.yaml *.swp golangci-lint atlantis +.devcontainer \ No newline at end of file From ed418eaae962b835616ab1fdd17fccd2e04d1b51 Mon Sep 17 00:00:00 2001 From: Daniel Grimes Date: Fri, 15 Oct 2021 12:41:39 +0000 Subject: [PATCH 2/9] allow AzureDevOps host to be specified in the user_config --- server/server.go | 10 ++++++++-- server/user_config.go | 1 + 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/server/server.go b/server/server.go index 5519ed18a8..01a0a26101 100644 --- a/server/server.go +++ b/server/server.go @@ -216,8 +216,14 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { } if userConfig.AzureDevopsUser != "" { supportedVCSHosts = append(supportedVCSHosts, models.AzureDevops) - var err error - azuredevopsClient, err = vcs.NewAzureDevopsClient("dev.azure.com", userConfig.AzureDevopsUser, userConfig.AzureDevopsToken) + + azureDevOpsHost := userConfig.AzureDevOpsHost + if userConfig.AzureDevOpsHost=="" { + azureDevOpsHost ="dev.azure.com" + } + + var err error + azuredevopsClient, err = vcs.NewAzureDevopsClient(azureDevOpsHost, userConfig.AzureDevopsUser, userConfig.AzureDevopsToken) if err != nil { return nil, err } diff --git a/server/user_config.go b/server/user_config.go index 4875ad7671..53bd1d1e05 100644 --- a/server/user_config.go +++ b/server/user_config.go @@ -17,6 +17,7 @@ type UserConfig struct { AzureDevopsUser string `mapstructure:"azuredevops-user"` AzureDevopsWebhookPassword string `mapstructure:"azuredevops-webhook-password"` AzureDevopsWebhookUser string `mapstructure:"azuredevops-webhook-user"` + AzureDevOpsHost string 
`mapstructure:"azuredevops-host"` BitbucketBaseURL string `mapstructure:"bitbucket-base-url"` BitbucketToken string `mapstructure:"bitbucket-token"` BitbucketUser string `mapstructure:"bitbucket-user"` From a0653c0459c2b4430638a5ad992536f46eeda422 Mon Sep 17 00:00:00 2001 From: Daniel Grimes Date: Fri, 15 Oct 2021 12:54:39 +0000 Subject: [PATCH 3/9] fix spacing issues --- server/server.go | 1574 ++++++++++++++++++++--------------------- server/user_config.go | 2 +- 2 files changed, 788 insertions(+), 788 deletions(-) diff --git a/server/server.go b/server/server.go index 01a0a26101..b134694d54 100644 --- a/server/server.go +++ b/server/server.go @@ -16,823 +16,823 @@ package server import ( - "context" - "encoding/json" - "flag" - "fmt" - "log" - "net/http" - "net/url" - "os" - "os/signal" - "path/filepath" - "sort" - "strings" - "syscall" - "time" - - "github.com/mitchellh/go-homedir" - "github.com/runatlantis/atlantis/server/core/db" - "github.com/runatlantis/atlantis/server/events/yaml/valid" - - assetfs "github.com/elazarl/go-bindata-assetfs" - "github.com/gorilla/mux" - "github.com/pkg/errors" - "github.com/runatlantis/atlantis/server/controllers" - events_controllers "github.com/runatlantis/atlantis/server/controllers/events" - "github.com/runatlantis/atlantis/server/controllers/templates" - "github.com/runatlantis/atlantis/server/core/locking" - "github.com/runatlantis/atlantis/server/core/runtime" - "github.com/runatlantis/atlantis/server/core/runtime/policy" - "github.com/runatlantis/atlantis/server/core/terraform" - "github.com/runatlantis/atlantis/server/events" - "github.com/runatlantis/atlantis/server/events/models" - "github.com/runatlantis/atlantis/server/events/vcs" - "github.com/runatlantis/atlantis/server/events/vcs/bitbucketcloud" - "github.com/runatlantis/atlantis/server/events/vcs/bitbucketserver" - "github.com/runatlantis/atlantis/server/events/webhooks" - "github.com/runatlantis/atlantis/server/events/yaml" - 
"github.com/runatlantis/atlantis/server/logging" - "github.com/runatlantis/atlantis/server/static" - "github.com/urfave/cli" - "github.com/urfave/negroni" + "context" + "encoding/json" + "flag" + "fmt" + "log" + "net/http" + "net/url" + "os" + "os/signal" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/mitchellh/go-homedir" + "github.com/runatlantis/atlantis/server/core/db" + "github.com/runatlantis/atlantis/server/events/yaml/valid" + + assetfs "github.com/elazarl/go-bindata-assetfs" + "github.com/gorilla/mux" + "github.com/pkg/errors" + "github.com/runatlantis/atlantis/server/controllers" + events_controllers "github.com/runatlantis/atlantis/server/controllers/events" + "github.com/runatlantis/atlantis/server/controllers/templates" + "github.com/runatlantis/atlantis/server/core/locking" + "github.com/runatlantis/atlantis/server/core/runtime" + "github.com/runatlantis/atlantis/server/core/runtime/policy" + "github.com/runatlantis/atlantis/server/core/terraform" + "github.com/runatlantis/atlantis/server/events" + "github.com/runatlantis/atlantis/server/events/models" + "github.com/runatlantis/atlantis/server/events/vcs" + "github.com/runatlantis/atlantis/server/events/vcs/bitbucketcloud" + "github.com/runatlantis/atlantis/server/events/vcs/bitbucketserver" + "github.com/runatlantis/atlantis/server/events/webhooks" + "github.com/runatlantis/atlantis/server/events/yaml" + "github.com/runatlantis/atlantis/server/logging" + "github.com/runatlantis/atlantis/server/static" + "github.com/urfave/cli" + "github.com/urfave/negroni" ) const ( - // LockViewRouteName is the named route in mux.Router for the lock view. - // The route can be retrieved by this name, ex: - // mux.Router.Get(LockViewRouteName) - LockViewRouteName = "lock-detail" - // LockViewRouteIDQueryParam is the query parameter needed to construct the lock view - // route. 
ex: - // mux.Router.Get(LockViewRouteName).URL(LockViewRouteIDQueryParam, "my id") - LockViewRouteIDQueryParam = "id" - - // binDirName is the name of the directory inside our data dir where - // we download binaries. - BinDirName = "bin" - - // terraformPluginCacheDir is the name of the dir inside our data dir - // where we tell terraform to cache plugins and modules. - TerraformPluginCacheDirName = "plugin-cache" + // LockViewRouteName is the named route in mux.Router for the lock view. + // The route can be retrieved by this name, ex: + // mux.Router.Get(LockViewRouteName) + LockViewRouteName = "lock-detail" + // LockViewRouteIDQueryParam is the query parameter needed to construct the lock view + // route. ex: + // mux.Router.Get(LockViewRouteName).URL(LockViewRouteIDQueryParam, "my id") + LockViewRouteIDQueryParam = "id" + + // binDirName is the name of the directory inside our data dir where + // we download binaries. + BinDirName = "bin" + + // terraformPluginCacheDir is the name of the dir inside our data dir + // where we tell terraform to cache plugins and modules. + TerraformPluginCacheDirName = "plugin-cache" ) // Server runs the Atlantis web server. 
type Server struct { - AtlantisVersion string - AtlantisURL *url.URL - Router *mux.Router - Port int - PreWorkflowHooksCommandRunner *events.DefaultPreWorkflowHooksCommandRunner - CommandRunner *events.DefaultCommandRunner - Logger logging.SimpleLogging - Locker locking.Locker - ApplyLocker locking.ApplyLocker - VCSEventsController *events_controllers.VCSEventsController - GithubAppController *controllers.GithubAppController - LocksController *controllers.LocksController - StatusController *controllers.StatusController - IndexTemplate templates.TemplateWriter - LockDetailTemplate templates.TemplateWriter - SSLCertFile string - SSLKeyFile string - Drainer *events.Drainer + AtlantisVersion string + AtlantisURL *url.URL + Router *mux.Router + Port int + PreWorkflowHooksCommandRunner *events.DefaultPreWorkflowHooksCommandRunner + CommandRunner *events.DefaultCommandRunner + Logger logging.SimpleLogging + Locker locking.Locker + ApplyLocker locking.ApplyLocker + VCSEventsController *events_controllers.VCSEventsController + GithubAppController *controllers.GithubAppController + LocksController *controllers.LocksController + StatusController *controllers.StatusController + IndexTemplate templates.TemplateWriter + LockDetailTemplate templates.TemplateWriter + SSLCertFile string + SSLKeyFile string + Drainer *events.Drainer } // Config holds config for server that isn't passed in by the user. type Config struct { - AllowForkPRsFlag string - AtlantisURLFlag string - AtlantisVersion string - DefaultTFVersionFlag string - RepoConfigJSONFlag string - SilenceForkPRErrorsFlag string + AllowForkPRsFlag string + AtlantisURLFlag string + AtlantisVersion string + DefaultTFVersionFlag string + RepoConfigJSONFlag string + SilenceForkPRErrorsFlag string } // WebhookConfig is nested within UserConfig. It's used to configure webhooks. type WebhookConfig struct { - // Event is the type of event we should send this webhook for, ex. apply. 
- Event string `mapstructure:"event"` - // WorkspaceRegex is a regex that is used to match against the workspace - // that is being modified for this event. If the regex matches, we'll - // send the webhook, ex. "production.*". - WorkspaceRegex string `mapstructure:"workspace-regex"` - // Kind is the type of webhook we should send, ex. slack. - Kind string `mapstructure:"kind"` - // Channel is the channel to send this webhook to. It only applies to - // slack webhooks. Should be without '#'. - Channel string `mapstructure:"channel"` + // Event is the type of event we should send this webhook for, ex. apply. + Event string `mapstructure:"event"` + // WorkspaceRegex is a regex that is used to match against the workspace + // that is being modified for this event. If the regex matches, we'll + // send the webhook, ex. "production.*". + WorkspaceRegex string `mapstructure:"workspace-regex"` + // Kind is the type of webhook we should send, ex. slack. + Kind string `mapstructure:"kind"` + // Channel is the channel to send this webhook to. It only applies to + // slack webhooks. Should be without '#'. + Channel string `mapstructure:"channel"` } // NewServer returns a new server. If there are issues starting the server or // its dependencies an error will be returned. This is like the main() function // for the server CLI command because it injects all the dependencies. 
func NewServer(userConfig UserConfig, config Config) (*Server, error) { - logger, err := logging.NewStructuredLoggerFromLevel(userConfig.ToLogLevel()) - - if err != nil { - return nil, err - } - - var supportedVCSHosts []models.VCSHostType - var githubClient *vcs.GithubClient - var githubAppEnabled bool - var githubCredentials vcs.GithubCredentials - var gitlabClient *vcs.GitlabClient - var bitbucketCloudClient *bitbucketcloud.Client - var bitbucketServerClient *bitbucketserver.Client - var azuredevopsClient *vcs.AzureDevopsClient - - policyChecksEnabled := false - if userConfig.EnablePolicyChecksFlag { - logger.Info("Policy Checks are enabled") - policyChecksEnabled = true - } - - if userConfig.GithubUser != "" || userConfig.GithubAppID != 0 { - supportedVCSHosts = append(supportedVCSHosts, models.Github) - if userConfig.GithubUser != "" { - githubCredentials = &vcs.GithubUserCredentials{ - User: userConfig.GithubUser, - Token: userConfig.GithubToken, - } - } else if userConfig.GithubAppID != 0 && userConfig.GithubAppKeyFile != "" { - privateKey, err := os.ReadFile(userConfig.GithubAppKeyFile) - if err != nil { - return nil, err - } - githubCredentials = &vcs.GithubAppCredentials{ - AppID: userConfig.GithubAppID, - Key: privateKey, - Hostname: userConfig.GithubHostname, - AppSlug: userConfig.GithubAppSlug, - } - githubAppEnabled = true - } else if userConfig.GithubAppID != 0 && userConfig.GithubAppKey != "" { - githubCredentials = &vcs.GithubAppCredentials{ - AppID: userConfig.GithubAppID, - Key: []byte(userConfig.GithubAppKey), - Hostname: userConfig.GithubHostname, - AppSlug: userConfig.GithubAppSlug, - } - githubAppEnabled = true - } - - var err error - githubClient, err = vcs.NewGithubClient(userConfig.GithubHostname, githubCredentials, logger) - if err != nil { - return nil, err - } - } - if userConfig.GitlabUser != "" { - supportedVCSHosts = append(supportedVCSHosts, models.Gitlab) - var err error - gitlabClient, err = 
vcs.NewGitlabClient(userConfig.GitlabHostname, userConfig.GitlabToken, logger) - if err != nil { - return nil, err - } - } - if userConfig.BitbucketUser != "" { - if userConfig.BitbucketBaseURL == bitbucketcloud.BaseURL { - supportedVCSHosts = append(supportedVCSHosts, models.BitbucketCloud) - bitbucketCloudClient = bitbucketcloud.NewClient( - http.DefaultClient, - userConfig.BitbucketUser, - userConfig.BitbucketToken, - userConfig.AtlantisURL) - } else { - supportedVCSHosts = append(supportedVCSHosts, models.BitbucketServer) - var err error - bitbucketServerClient, err = bitbucketserver.NewClient( - http.DefaultClient, - userConfig.BitbucketUser, - userConfig.BitbucketToken, - userConfig.BitbucketBaseURL, - userConfig.AtlantisURL) - if err != nil { - return nil, errors.Wrapf(err, "setting up Bitbucket Server client") - } - } - } - if userConfig.AzureDevopsUser != "" { - supportedVCSHosts = append(supportedVCSHosts, models.AzureDevops) - - azureDevOpsHost := userConfig.AzureDevOpsHost - if userConfig.AzureDevOpsHost=="" { - azureDevOpsHost ="dev.azure.com" - } - - var err error - azuredevopsClient, err = vcs.NewAzureDevopsClient(azureDevOpsHost, userConfig.AzureDevopsUser, userConfig.AzureDevopsToken) - if err != nil { - return nil, err - } - } - - if userConfig.WriteGitCreds { - home, err := homedir.Dir() - if err != nil { - return nil, errors.Wrap(err, "getting home dir to write ~/.git-credentials file") - } - if userConfig.GithubUser != "" { - if err := events.WriteGitCreds(userConfig.GithubUser, userConfig.GithubToken, userConfig.GithubHostname, home, logger, false); err != nil { - return nil, err - } - } - if userConfig.GitlabUser != "" { - if err := events.WriteGitCreds(userConfig.GitlabUser, userConfig.GitlabToken, userConfig.GitlabHostname, home, logger, false); err != nil { - return nil, err - } - } - if userConfig.BitbucketUser != "" { - // The default BitbucketBaseURL is https://api.bitbucket.org which can't actually be used for git - // so we override 
it here only if it's that to be bitbucket.org - bitbucketBaseURL := userConfig.BitbucketBaseURL - if bitbucketBaseURL == "https://api.bitbucket.org" { - bitbucketBaseURL = "bitbucket.org" - } - if err := events.WriteGitCreds(userConfig.BitbucketUser, userConfig.BitbucketToken, bitbucketBaseURL, home, logger, false); err != nil { - return nil, err - } - } - if userConfig.AzureDevopsUser != "" { - if err := events.WriteGitCreds(userConfig.AzureDevopsUser, userConfig.AzureDevopsToken, "dev.azure.com", home, logger, false); err != nil { - return nil, err - } - } - } - - var webhooksConfig []webhooks.Config - for _, c := range userConfig.Webhooks { - config := webhooks.Config{ - Channel: c.Channel, - Event: c.Event, - Kind: c.Kind, - WorkspaceRegex: c.WorkspaceRegex, - } - webhooksConfig = append(webhooksConfig, config) - } - webhooksManager, err := webhooks.NewMultiWebhookSender(webhooksConfig, webhooks.NewSlackClient(userConfig.SlackToken)) - if err != nil { - return nil, errors.Wrap(err, "initializing webhooks") - } - vcsClient := vcs.NewClientProxy(githubClient, gitlabClient, bitbucketCloudClient, bitbucketServerClient, azuredevopsClient) - commitStatusUpdater := &events.DefaultCommitStatusUpdater{Client: vcsClient, StatusName: userConfig.VCSStatusName} - - binDir, err := mkSubDir(userConfig.DataDir, BinDirName) - - if err != nil { - return nil, err - } - - cacheDir, err := mkSubDir(userConfig.DataDir, TerraformPluginCacheDirName) - - if err != nil { - return nil, err - } - - terraformClient, err := terraform.NewClient( - logger, - binDir, - cacheDir, - userConfig.TFEToken, - userConfig.TFEHostname, - userConfig.DefaultTFVersion, - config.DefaultTFVersionFlag, - userConfig.TFDownloadURL, - &terraform.DefaultDownloader{}, - true) - // The flag.Lookup call is to detect if we're running in a unit test. If we - // are, then we don't error out because we don't have/want terraform - // installed on our CI system where the unit tests run. 
- if err != nil && flag.Lookup("test.v") == nil { - return nil, errors.Wrap(err, "initializing terraform") - } - markdownRenderer := &events.MarkdownRenderer{ - GitlabSupportsCommonMark: gitlabClient.SupportsCommonMark(), - DisableApplyAll: userConfig.DisableApplyAll, - DisableMarkdownFolding: userConfig.DisableMarkdownFolding, - DisableApply: userConfig.DisableApply, - DisableRepoLocking: userConfig.DisableRepoLocking, - EnableDiffMarkdownFormat: userConfig.EnableDiffMarkdownFormat, - } - - boltdb, err := db.New(userConfig.DataDir) - if err != nil { - return nil, err - } - var lockingClient locking.Locker - var applyLockingClient locking.ApplyLocker - if userConfig.DisableRepoLocking { - lockingClient = locking.NewNoOpLocker() - } else { - lockingClient = locking.NewClient(boltdb) - } - applyLockingClient = locking.NewApplyClient(boltdb, userConfig.DisableApply) - workingDirLocker := events.NewDefaultWorkingDirLocker() - - var workingDir events.WorkingDir = &events.FileWorkspace{ - DataDir: userConfig.DataDir, - CheckoutMerge: userConfig.CheckoutStrategy == "merge", - } - // provide fresh tokens before clone from the GitHub Apps integration, proxy workingDir - if githubAppEnabled { - if !userConfig.WriteGitCreds { - return nil, errors.New("Github App requires --write-git-creds to support cloning") - } - workingDir = &events.GithubAppWorkingDir{ - WorkingDir: workingDir, - Credentials: githubCredentials, - GithubHostname: userConfig.GithubHostname, - } - } - - projectLocker := &events.DefaultProjectLocker{ - Locker: lockingClient, - VCSClient: vcsClient, - } - deleteLockCommand := &events.DefaultDeleteLockCommand{ - Locker: lockingClient, - Logger: logger, - WorkingDir: workingDir, - WorkingDirLocker: workingDirLocker, - DB: boltdb, - } - - parsedURL, err := ParseAtlantisURL(userConfig.AtlantisURL) - if err != nil { - return nil, errors.Wrapf(err, - "parsing --%s flag %q", config.AtlantisURLFlag, userConfig.AtlantisURL) - } - validator := &yaml.ParserValidator{} - 
- globalCfg := valid.NewGlobalCfgFromArgs( - valid.GlobalCfgArgs{ - AllowRepoCfg: userConfig.AllowRepoConfig, - MergeableReq: userConfig.RequireMergeable, - ApprovedReq: userConfig.RequireApproval, - UnDivergedReq: userConfig.RequireUnDiverged, - PolicyCheckEnabled: userConfig.EnablePolicyChecksFlag, - }) - if userConfig.RepoConfig != "" { - globalCfg, err = validator.ParseGlobalCfg(userConfig.RepoConfig, globalCfg) - if err != nil { - return nil, errors.Wrapf(err, "parsing %s file", userConfig.RepoConfig) - } - } else if userConfig.RepoConfigJSON != "" { - globalCfg, err = validator.ParseGlobalCfgJSON(userConfig.RepoConfigJSON, globalCfg) - if err != nil { - return nil, errors.Wrapf(err, "parsing --%s", config.RepoConfigJSONFlag) - } - } - - underlyingRouter := mux.NewRouter() - router := &Router{ - AtlantisURL: parsedURL, - LockViewRouteIDQueryParam: LockViewRouteIDQueryParam, - LockViewRouteName: LockViewRouteName, - Underlying: underlyingRouter, - } - pullClosedExecutor := &events.PullClosedExecutor{ - VCSClient: vcsClient, - Locker: lockingClient, - WorkingDir: workingDir, - Logger: logger, - DB: boltdb, - } - eventParser := &events.EventParser{ - GithubUser: userConfig.GithubUser, - GithubToken: userConfig.GithubToken, - GitlabUser: userConfig.GitlabUser, - GitlabToken: userConfig.GitlabToken, - AllowDraftPRs: userConfig.PlanDrafts, - BitbucketUser: userConfig.BitbucketUser, - BitbucketToken: userConfig.BitbucketToken, - BitbucketServerURL: userConfig.BitbucketBaseURL, - AzureDevopsUser: userConfig.AzureDevopsUser, - AzureDevopsToken: userConfig.AzureDevopsToken, - } - commentParser := &events.CommentParser{ - GithubUser: userConfig.GithubUser, - GitlabUser: userConfig.GitlabUser, - BitbucketUser: userConfig.BitbucketUser, - AzureDevopsUser: userConfig.AzureDevopsUser, - ApplyDisabled: userConfig.DisableApply, - } - defaultTfVersion := terraformClient.DefaultVersion() - pendingPlanFinder := &events.DefaultPendingPlanFinder{} - runStepRunner := 
&runtime.RunStepRunner{ - TerraformExecutor: terraformClient, - DefaultTFVersion: defaultTfVersion, - TerraformBinDir: terraformClient.TerraformBinDir(), - } - drainer := &events.Drainer{} - statusController := &controllers.StatusController{ - Logger: logger, - Drainer: drainer, - } - preWorkflowHooksCommandRunner := &events.DefaultPreWorkflowHooksCommandRunner{ - VCSClient: vcsClient, - GlobalCfg: globalCfg, - WorkingDirLocker: workingDirLocker, - WorkingDir: workingDir, - PreWorkflowHookRunner: runtime.DefaultPreWorkflowHookRunner{}, - } - projectCommandBuilder := events.NewProjectCommandBuilder( - policyChecksEnabled, - validator, - &events.DefaultProjectFinder{}, - vcsClient, - workingDir, - workingDirLocker, - globalCfg, - pendingPlanFinder, - commentParser, - userConfig.SkipCloneNoChanges, - userConfig.EnableRegExpCmd, - userConfig.AutoplanFileList, - ) - - showStepRunner, err := runtime.NewShowStepRunner(terraformClient, defaultTfVersion) - - if err != nil { - return nil, errors.Wrap(err, "initializing show step runner") - } - - policyCheckRunner, err := runtime.NewPolicyCheckStepRunner( - defaultTfVersion, - policy.NewConfTestExecutorWorkflow(logger, binDir, &terraform.DefaultDownloader{}), - ) - - if err != nil { - return nil, errors.Wrap(err, "initializing policy check runner") - } - - applyRequirementHandler := &events.AggregateApplyRequirements{ - PullApprovedChecker: vcsClient, - WorkingDir: workingDir, - } - - projectCommandRunner := &events.DefaultProjectCommandRunner{ - Locker: projectLocker, - LockURLGenerator: router, - InitStepRunner: &runtime.InitStepRunner{ - TerraformExecutor: terraformClient, - DefaultTFVersion: defaultTfVersion, - }, - PlanStepRunner: &runtime.PlanStepRunner{ - TerraformExecutor: terraformClient, - DefaultTFVersion: defaultTfVersion, - CommitStatusUpdater: commitStatusUpdater, - AsyncTFExec: terraformClient, - }, - ShowStepRunner: showStepRunner, - PolicyCheckStepRunner: policyCheckRunner, - ApplyStepRunner: 
&runtime.ApplyStepRunner{ - TerraformExecutor: terraformClient, - CommitStatusUpdater: commitStatusUpdater, - AsyncTFExec: terraformClient, - }, - RunStepRunner: runStepRunner, - EnvStepRunner: &runtime.EnvStepRunner{ - RunStepRunner: runStepRunner, - }, - VersionStepRunner: &runtime.VersionStepRunner{ - TerraformExecutor: terraformClient, - DefaultTFVersion: defaultTfVersion, - }, - WorkingDir: workingDir, - Webhooks: webhooksManager, - WorkingDirLocker: workingDirLocker, - AggregateApplyRequirements: applyRequirementHandler, - } - - dbUpdater := &events.DBUpdater{ - DB: boltdb, - } - - pullUpdater := &events.PullUpdater{ - HidePrevPlanComments: userConfig.HidePrevPlanComments, - VCSClient: vcsClient, - MarkdownRenderer: markdownRenderer, - } - - autoMerger := &events.AutoMerger{ - VCSClient: vcsClient, - GlobalAutomerge: userConfig.Automerge, - } - - policyCheckCommandRunner := events.NewPolicyCheckCommandRunner( - dbUpdater, - pullUpdater, - commitStatusUpdater, - projectCommandRunner, - userConfig.ParallelPoolSize, - userConfig.SilenceVCSStatusNoProjects, - ) - - planCommandRunner := events.NewPlanCommandRunner( - userConfig.SilenceVCSStatusNoPlans, - userConfig.SilenceVCSStatusNoProjects, - vcsClient, - pendingPlanFinder, - workingDir, - commitStatusUpdater, - projectCommandBuilder, - projectCommandRunner, - dbUpdater, - pullUpdater, - policyCheckCommandRunner, - autoMerger, - userConfig.ParallelPoolSize, - userConfig.SilenceNoProjects, - boltdb, - ) - - applyCommandRunner := events.NewApplyCommandRunner( - vcsClient, - userConfig.DisableApplyAll, - applyLockingClient, - commitStatusUpdater, - projectCommandBuilder, - projectCommandRunner, - autoMerger, - pullUpdater, - dbUpdater, - boltdb, - userConfig.ParallelPoolSize, - userConfig.SilenceNoProjects, - userConfig.SilenceVCSStatusNoProjects, - ) - - approvePoliciesCommandRunner := events.NewApprovePoliciesCommandRunner( - commitStatusUpdater, - projectCommandBuilder, - projectCommandRunner, - pullUpdater, - 
dbUpdater, - userConfig.SilenceNoProjects, - userConfig.SilenceVCSStatusNoPlans, - ) - - unlockCommandRunner := events.NewUnlockCommandRunner( - deleteLockCommand, - vcsClient, - userConfig.SilenceNoProjects, - ) - - versionCommandRunner := events.NewVersionCommandRunner( - pullUpdater, - projectCommandBuilder, - projectCommandRunner, - userConfig.ParallelPoolSize, - userConfig.SilenceNoProjects, - ) - - commentCommandRunnerByCmd := map[models.CommandName]events.CommentCommandRunner{ - models.PlanCommand: planCommandRunner, - models.ApplyCommand: applyCommandRunner, - models.ApprovePoliciesCommand: approvePoliciesCommandRunner, - models.UnlockCommand: unlockCommandRunner, - models.VersionCommand: versionCommandRunner, - } - - commandRunner := &events.DefaultCommandRunner{ - VCSClient: vcsClient, - GithubPullGetter: githubClient, - GitlabMergeRequestGetter: gitlabClient, - AzureDevopsPullGetter: azuredevopsClient, - CommentCommandRunnerByCmd: commentCommandRunnerByCmd, - EventParser: eventParser, - Logger: logger, - GlobalCfg: globalCfg, - AllowForkPRs: userConfig.AllowForkPRs, - AllowForkPRsFlag: config.AllowForkPRsFlag, - SilenceForkPRErrors: userConfig.SilenceForkPRErrors, - SilenceForkPRErrorsFlag: config.SilenceForkPRErrorsFlag, - DisableAutoplan: userConfig.DisableAutoplan, - Drainer: drainer, - PreWorkflowHooksCommandRunner: preWorkflowHooksCommandRunner, - PullStatusFetcher: boltdb, - } - repoAllowlist, err := events.NewRepoAllowlistChecker(userConfig.RepoAllowlist) - if err != nil { - return nil, err - } - locksController := &controllers.LocksController{ - AtlantisVersion: config.AtlantisVersion, - AtlantisURL: parsedURL, - Locker: lockingClient, - ApplyLocker: applyLockingClient, - Logger: logger, - VCSClient: vcsClient, - LockDetailTemplate: templates.LockTemplate, - WorkingDir: workingDir, - WorkingDirLocker: workingDirLocker, - DB: boltdb, - DeleteLockCommand: deleteLockCommand, - } - eventsController := &events_controllers.VCSEventsController{ - 
CommandRunner: commandRunner, - PullCleaner: pullClosedExecutor, - Parser: eventParser, - CommentParser: commentParser, - Logger: logger, - ApplyDisabled: userConfig.DisableApply, - GithubWebhookSecret: []byte(userConfig.GithubWebhookSecret), - GithubRequestValidator: &events_controllers.DefaultGithubRequestValidator{}, - GitlabRequestParserValidator: &events_controllers.DefaultGitlabRequestParserValidator{}, - GitlabWebhookSecret: []byte(userConfig.GitlabWebhookSecret), - RepoAllowlistChecker: repoAllowlist, - SilenceAllowlistErrors: userConfig.SilenceAllowlistErrors, - SupportedVCSHosts: supportedVCSHosts, - VCSClient: vcsClient, - BitbucketWebhookSecret: []byte(userConfig.BitbucketWebhookSecret), - AzureDevopsWebhookBasicUser: []byte(userConfig.AzureDevopsWebhookUser), - AzureDevopsWebhookBasicPassword: []byte(userConfig.AzureDevopsWebhookPassword), - AzureDevopsRequestValidator: &events_controllers.DefaultAzureDevopsRequestValidator{}, - } - githubAppController := &controllers.GithubAppController{ - AtlantisURL: parsedURL, - Logger: logger, - GithubSetupComplete: githubAppEnabled, - GithubHostname: userConfig.GithubHostname, - GithubOrg: userConfig.GithubOrg, - } - - return &Server{ - AtlantisVersion: config.AtlantisVersion, - AtlantisURL: parsedURL, - Router: underlyingRouter, - Port: userConfig.Port, - PreWorkflowHooksCommandRunner: preWorkflowHooksCommandRunner, - CommandRunner: commandRunner, - Logger: logger, - Locker: lockingClient, - ApplyLocker: applyLockingClient, - VCSEventsController: eventsController, - GithubAppController: githubAppController, - LocksController: locksController, - StatusController: statusController, - IndexTemplate: templates.IndexTemplate, - LockDetailTemplate: templates.LockTemplate, - SSLKeyFile: userConfig.SSLKeyFile, - SSLCertFile: userConfig.SSLCertFile, - Drainer: drainer, - }, nil + logger, err := logging.NewStructuredLoggerFromLevel(userConfig.ToLogLevel()) + + if err != nil { + return nil, err + } + + var 
supportedVCSHosts []models.VCSHostType + var githubClient *vcs.GithubClient + var githubAppEnabled bool + var githubCredentials vcs.GithubCredentials + var gitlabClient *vcs.GitlabClient + var bitbucketCloudClient *bitbucketcloud.Client + var bitbucketServerClient *bitbucketserver.Client + var azuredevopsClient *vcs.AzureDevopsClient + + policyChecksEnabled := false + if userConfig.EnablePolicyChecksFlag { + logger.Info("Policy Checks are enabled") + policyChecksEnabled = true + } + + if userConfig.GithubUser != "" || userConfig.GithubAppID != 0 { + supportedVCSHosts = append(supportedVCSHosts, models.Github) + if userConfig.GithubUser != "" { + githubCredentials = &vcs.GithubUserCredentials{ + User: userConfig.GithubUser, + Token: userConfig.GithubToken, + } + } else if userConfig.GithubAppID != 0 && userConfig.GithubAppKeyFile != "" { + privateKey, err := os.ReadFile(userConfig.GithubAppKeyFile) + if err != nil { + return nil, err + } + githubCredentials = &vcs.GithubAppCredentials{ + AppID: userConfig.GithubAppID, + Key: privateKey, + Hostname: userConfig.GithubHostname, + AppSlug: userConfig.GithubAppSlug, + } + githubAppEnabled = true + } else if userConfig.GithubAppID != 0 && userConfig.GithubAppKey != "" { + githubCredentials = &vcs.GithubAppCredentials{ + AppID: userConfig.GithubAppID, + Key: []byte(userConfig.GithubAppKey), + Hostname: userConfig.GithubHostname, + AppSlug: userConfig.GithubAppSlug, + } + githubAppEnabled = true + } + + var err error + githubClient, err = vcs.NewGithubClient(userConfig.GithubHostname, githubCredentials, logger) + if err != nil { + return nil, err + } + } + if userConfig.GitlabUser != "" { + supportedVCSHosts = append(supportedVCSHosts, models.Gitlab) + var err error + gitlabClient, err = vcs.NewGitlabClient(userConfig.GitlabHostname, userConfig.GitlabToken, logger) + if err != nil { + return nil, err + } + } + if userConfig.BitbucketUser != "" { + if userConfig.BitbucketBaseURL == bitbucketcloud.BaseURL { + 
supportedVCSHosts = append(supportedVCSHosts, models.BitbucketCloud) + bitbucketCloudClient = bitbucketcloud.NewClient( + http.DefaultClient, + userConfig.BitbucketUser, + userConfig.BitbucketToken, + userConfig.AtlantisURL) + } else { + supportedVCSHosts = append(supportedVCSHosts, models.BitbucketServer) + var err error + bitbucketServerClient, err = bitbucketserver.NewClient( + http.DefaultClient, + userConfig.BitbucketUser, + userConfig.BitbucketToken, + userConfig.BitbucketBaseURL, + userConfig.AtlantisURL) + if err != nil { + return nil, errors.Wrapf(err, "setting up Bitbucket Server client") + } + } + } + if userConfig.AzureDevopsUser != "" { + supportedVCSHosts = append(supportedVCSHosts, models.AzureDevops) + + azureDevOpsHost := userConfig.AzureDevOpsHost + if userConfig.AzureDevOpsHost == "" { + azureDevOpsHost = "dev.azure.com" + } + + var err error + azuredevopsClient, err = vcs.NewAzureDevopsClient(azureDevOpsHost, userConfig.AzureDevopsUser, userConfig.AzureDevopsToken) + if err != nil { + return nil, err + } + } + + if userConfig.WriteGitCreds { + home, err := homedir.Dir() + if err != nil { + return nil, errors.Wrap(err, "getting home dir to write ~/.git-credentials file") + } + if userConfig.GithubUser != "" { + if err := events.WriteGitCreds(userConfig.GithubUser, userConfig.GithubToken, userConfig.GithubHostname, home, logger, false); err != nil { + return nil, err + } + } + if userConfig.GitlabUser != "" { + if err := events.WriteGitCreds(userConfig.GitlabUser, userConfig.GitlabToken, userConfig.GitlabHostname, home, logger, false); err != nil { + return nil, err + } + } + if userConfig.BitbucketUser != "" { + // The default BitbucketBaseURL is https://api.bitbucket.org which can't actually be used for git + // so we override it here only if it's that to be bitbucket.org + bitbucketBaseURL := userConfig.BitbucketBaseURL + if bitbucketBaseURL == "https://api.bitbucket.org" { + bitbucketBaseURL = "bitbucket.org" + } + if err := 
events.WriteGitCreds(userConfig.BitbucketUser, userConfig.BitbucketToken, bitbucketBaseURL, home, logger, false); err != nil { + return nil, err + } + } + if userConfig.AzureDevopsUser != "" { + if err := events.WriteGitCreds(userConfig.AzureDevopsUser, userConfig.AzureDevopsToken, "dev.azure.com", home, logger, false); err != nil { + return nil, err + } + } + } + + var webhooksConfig []webhooks.Config + for _, c := range userConfig.Webhooks { + config := webhooks.Config{ + Channel: c.Channel, + Event: c.Event, + Kind: c.Kind, + WorkspaceRegex: c.WorkspaceRegex, + } + webhooksConfig = append(webhooksConfig, config) + } + webhooksManager, err := webhooks.NewMultiWebhookSender(webhooksConfig, webhooks.NewSlackClient(userConfig.SlackToken)) + if err != nil { + return nil, errors.Wrap(err, "initializing webhooks") + } + vcsClient := vcs.NewClientProxy(githubClient, gitlabClient, bitbucketCloudClient, bitbucketServerClient, azuredevopsClient) + commitStatusUpdater := &events.DefaultCommitStatusUpdater{Client: vcsClient, StatusName: userConfig.VCSStatusName} + + binDir, err := mkSubDir(userConfig.DataDir, BinDirName) + + if err != nil { + return nil, err + } + + cacheDir, err := mkSubDir(userConfig.DataDir, TerraformPluginCacheDirName) + + if err != nil { + return nil, err + } + + terraformClient, err := terraform.NewClient( + logger, + binDir, + cacheDir, + userConfig.TFEToken, + userConfig.TFEHostname, + userConfig.DefaultTFVersion, + config.DefaultTFVersionFlag, + userConfig.TFDownloadURL, + &terraform.DefaultDownloader{}, + true) + // The flag.Lookup call is to detect if we're running in a unit test. If we + // are, then we don't error out because we don't have/want terraform + // installed on our CI system where the unit tests run. 
+ if err != nil && flag.Lookup("test.v") == nil { + return nil, errors.Wrap(err, "initializing terraform") + } + markdownRenderer := &events.MarkdownRenderer{ + GitlabSupportsCommonMark: gitlabClient.SupportsCommonMark(), + DisableApplyAll: userConfig.DisableApplyAll, + DisableMarkdownFolding: userConfig.DisableMarkdownFolding, + DisableApply: userConfig.DisableApply, + DisableRepoLocking: userConfig.DisableRepoLocking, + EnableDiffMarkdownFormat: userConfig.EnableDiffMarkdownFormat, + } + + boltdb, err := db.New(userConfig.DataDir) + if err != nil { + return nil, err + } + var lockingClient locking.Locker + var applyLockingClient locking.ApplyLocker + if userConfig.DisableRepoLocking { + lockingClient = locking.NewNoOpLocker() + } else { + lockingClient = locking.NewClient(boltdb) + } + applyLockingClient = locking.NewApplyClient(boltdb, userConfig.DisableApply) + workingDirLocker := events.NewDefaultWorkingDirLocker() + + var workingDir events.WorkingDir = &events.FileWorkspace{ + DataDir: userConfig.DataDir, + CheckoutMerge: userConfig.CheckoutStrategy == "merge", + } + // provide fresh tokens before clone from the GitHub Apps integration, proxy workingDir + if githubAppEnabled { + if !userConfig.WriteGitCreds { + return nil, errors.New("Github App requires --write-git-creds to support cloning") + } + workingDir = &events.GithubAppWorkingDir{ + WorkingDir: workingDir, + Credentials: githubCredentials, + GithubHostname: userConfig.GithubHostname, + } + } + + projectLocker := &events.DefaultProjectLocker{ + Locker: lockingClient, + VCSClient: vcsClient, + } + deleteLockCommand := &events.DefaultDeleteLockCommand{ + Locker: lockingClient, + Logger: logger, + WorkingDir: workingDir, + WorkingDirLocker: workingDirLocker, + DB: boltdb, + } + + parsedURL, err := ParseAtlantisURL(userConfig.AtlantisURL) + if err != nil { + return nil, errors.Wrapf(err, + "parsing --%s flag %q", config.AtlantisURLFlag, userConfig.AtlantisURL) + } + validator := &yaml.ParserValidator{} + 
+ globalCfg := valid.NewGlobalCfgFromArgs( + valid.GlobalCfgArgs{ + AllowRepoCfg: userConfig.AllowRepoConfig, + MergeableReq: userConfig.RequireMergeable, + ApprovedReq: userConfig.RequireApproval, + UnDivergedReq: userConfig.RequireUnDiverged, + PolicyCheckEnabled: userConfig.EnablePolicyChecksFlag, + }) + if userConfig.RepoConfig != "" { + globalCfg, err = validator.ParseGlobalCfg(userConfig.RepoConfig, globalCfg) + if err != nil { + return nil, errors.Wrapf(err, "parsing %s file", userConfig.RepoConfig) + } + } else if userConfig.RepoConfigJSON != "" { + globalCfg, err = validator.ParseGlobalCfgJSON(userConfig.RepoConfigJSON, globalCfg) + if err != nil { + return nil, errors.Wrapf(err, "parsing --%s", config.RepoConfigJSONFlag) + } + } + + underlyingRouter := mux.NewRouter() + router := &Router{ + AtlantisURL: parsedURL, + LockViewRouteIDQueryParam: LockViewRouteIDQueryParam, + LockViewRouteName: LockViewRouteName, + Underlying: underlyingRouter, + } + pullClosedExecutor := &events.PullClosedExecutor{ + VCSClient: vcsClient, + Locker: lockingClient, + WorkingDir: workingDir, + Logger: logger, + DB: boltdb, + } + eventParser := &events.EventParser{ + GithubUser: userConfig.GithubUser, + GithubToken: userConfig.GithubToken, + GitlabUser: userConfig.GitlabUser, + GitlabToken: userConfig.GitlabToken, + AllowDraftPRs: userConfig.PlanDrafts, + BitbucketUser: userConfig.BitbucketUser, + BitbucketToken: userConfig.BitbucketToken, + BitbucketServerURL: userConfig.BitbucketBaseURL, + AzureDevopsUser: userConfig.AzureDevopsUser, + AzureDevopsToken: userConfig.AzureDevopsToken, + } + commentParser := &events.CommentParser{ + GithubUser: userConfig.GithubUser, + GitlabUser: userConfig.GitlabUser, + BitbucketUser: userConfig.BitbucketUser, + AzureDevopsUser: userConfig.AzureDevopsUser, + ApplyDisabled: userConfig.DisableApply, + } + defaultTfVersion := terraformClient.DefaultVersion() + pendingPlanFinder := &events.DefaultPendingPlanFinder{} + runStepRunner := 
&runtime.RunStepRunner{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTfVersion, + TerraformBinDir: terraformClient.TerraformBinDir(), + } + drainer := &events.Drainer{} + statusController := &controllers.StatusController{ + Logger: logger, + Drainer: drainer, + } + preWorkflowHooksCommandRunner := &events.DefaultPreWorkflowHooksCommandRunner{ + VCSClient: vcsClient, + GlobalCfg: globalCfg, + WorkingDirLocker: workingDirLocker, + WorkingDir: workingDir, + PreWorkflowHookRunner: runtime.DefaultPreWorkflowHookRunner{}, + } + projectCommandBuilder := events.NewProjectCommandBuilder( + policyChecksEnabled, + validator, + &events.DefaultProjectFinder{}, + vcsClient, + workingDir, + workingDirLocker, + globalCfg, + pendingPlanFinder, + commentParser, + userConfig.SkipCloneNoChanges, + userConfig.EnableRegExpCmd, + userConfig.AutoplanFileList, + ) + + showStepRunner, err := runtime.NewShowStepRunner(terraformClient, defaultTfVersion) + + if err != nil { + return nil, errors.Wrap(err, "initializing show step runner") + } + + policyCheckRunner, err := runtime.NewPolicyCheckStepRunner( + defaultTfVersion, + policy.NewConfTestExecutorWorkflow(logger, binDir, &terraform.DefaultDownloader{}), + ) + + if err != nil { + return nil, errors.Wrap(err, "initializing policy check runner") + } + + applyRequirementHandler := &events.AggregateApplyRequirements{ + PullApprovedChecker: vcsClient, + WorkingDir: workingDir, + } + + projectCommandRunner := &events.DefaultProjectCommandRunner{ + Locker: projectLocker, + LockURLGenerator: router, + InitStepRunner: &runtime.InitStepRunner{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTfVersion, + }, + PlanStepRunner: &runtime.PlanStepRunner{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTfVersion, + CommitStatusUpdater: commitStatusUpdater, + AsyncTFExec: terraformClient, + }, + ShowStepRunner: showStepRunner, + PolicyCheckStepRunner: policyCheckRunner, + ApplyStepRunner: 
&runtime.ApplyStepRunner{ + TerraformExecutor: terraformClient, + CommitStatusUpdater: commitStatusUpdater, + AsyncTFExec: terraformClient, + }, + RunStepRunner: runStepRunner, + EnvStepRunner: &runtime.EnvStepRunner{ + RunStepRunner: runStepRunner, + }, + VersionStepRunner: &runtime.VersionStepRunner{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTfVersion, + }, + WorkingDir: workingDir, + Webhooks: webhooksManager, + WorkingDirLocker: workingDirLocker, + AggregateApplyRequirements: applyRequirementHandler, + } + + dbUpdater := &events.DBUpdater{ + DB: boltdb, + } + + pullUpdater := &events.PullUpdater{ + HidePrevPlanComments: userConfig.HidePrevPlanComments, + VCSClient: vcsClient, + MarkdownRenderer: markdownRenderer, + } + + autoMerger := &events.AutoMerger{ + VCSClient: vcsClient, + GlobalAutomerge: userConfig.Automerge, + } + + policyCheckCommandRunner := events.NewPolicyCheckCommandRunner( + dbUpdater, + pullUpdater, + commitStatusUpdater, + projectCommandRunner, + userConfig.ParallelPoolSize, + userConfig.SilenceVCSStatusNoProjects, + ) + + planCommandRunner := events.NewPlanCommandRunner( + userConfig.SilenceVCSStatusNoPlans, + userConfig.SilenceVCSStatusNoProjects, + vcsClient, + pendingPlanFinder, + workingDir, + commitStatusUpdater, + projectCommandBuilder, + projectCommandRunner, + dbUpdater, + pullUpdater, + policyCheckCommandRunner, + autoMerger, + userConfig.ParallelPoolSize, + userConfig.SilenceNoProjects, + boltdb, + ) + + applyCommandRunner := events.NewApplyCommandRunner( + vcsClient, + userConfig.DisableApplyAll, + applyLockingClient, + commitStatusUpdater, + projectCommandBuilder, + projectCommandRunner, + autoMerger, + pullUpdater, + dbUpdater, + boltdb, + userConfig.ParallelPoolSize, + userConfig.SilenceNoProjects, + userConfig.SilenceVCSStatusNoProjects, + ) + + approvePoliciesCommandRunner := events.NewApprovePoliciesCommandRunner( + commitStatusUpdater, + projectCommandBuilder, + projectCommandRunner, + pullUpdater, + 
dbUpdater, + userConfig.SilenceNoProjects, + userConfig.SilenceVCSStatusNoPlans, + ) + + unlockCommandRunner := events.NewUnlockCommandRunner( + deleteLockCommand, + vcsClient, + userConfig.SilenceNoProjects, + ) + + versionCommandRunner := events.NewVersionCommandRunner( + pullUpdater, + projectCommandBuilder, + projectCommandRunner, + userConfig.ParallelPoolSize, + userConfig.SilenceNoProjects, + ) + + commentCommandRunnerByCmd := map[models.CommandName]events.CommentCommandRunner{ + models.PlanCommand: planCommandRunner, + models.ApplyCommand: applyCommandRunner, + models.ApprovePoliciesCommand: approvePoliciesCommandRunner, + models.UnlockCommand: unlockCommandRunner, + models.VersionCommand: versionCommandRunner, + } + + commandRunner := &events.DefaultCommandRunner{ + VCSClient: vcsClient, + GithubPullGetter: githubClient, + GitlabMergeRequestGetter: gitlabClient, + AzureDevopsPullGetter: azuredevopsClient, + CommentCommandRunnerByCmd: commentCommandRunnerByCmd, + EventParser: eventParser, + Logger: logger, + GlobalCfg: globalCfg, + AllowForkPRs: userConfig.AllowForkPRs, + AllowForkPRsFlag: config.AllowForkPRsFlag, + SilenceForkPRErrors: userConfig.SilenceForkPRErrors, + SilenceForkPRErrorsFlag: config.SilenceForkPRErrorsFlag, + DisableAutoplan: userConfig.DisableAutoplan, + Drainer: drainer, + PreWorkflowHooksCommandRunner: preWorkflowHooksCommandRunner, + PullStatusFetcher: boltdb, + } + repoAllowlist, err := events.NewRepoAllowlistChecker(userConfig.RepoAllowlist) + if err != nil { + return nil, err + } + locksController := &controllers.LocksController{ + AtlantisVersion: config.AtlantisVersion, + AtlantisURL: parsedURL, + Locker: lockingClient, + ApplyLocker: applyLockingClient, + Logger: logger, + VCSClient: vcsClient, + LockDetailTemplate: templates.LockTemplate, + WorkingDir: workingDir, + WorkingDirLocker: workingDirLocker, + DB: boltdb, + DeleteLockCommand: deleteLockCommand, + } + eventsController := &events_controllers.VCSEventsController{ + 
CommandRunner: commandRunner, + PullCleaner: pullClosedExecutor, + Parser: eventParser, + CommentParser: commentParser, + Logger: logger, + ApplyDisabled: userConfig.DisableApply, + GithubWebhookSecret: []byte(userConfig.GithubWebhookSecret), + GithubRequestValidator: &events_controllers.DefaultGithubRequestValidator{}, + GitlabRequestParserValidator: &events_controllers.DefaultGitlabRequestParserValidator{}, + GitlabWebhookSecret: []byte(userConfig.GitlabWebhookSecret), + RepoAllowlistChecker: repoAllowlist, + SilenceAllowlistErrors: userConfig.SilenceAllowlistErrors, + SupportedVCSHosts: supportedVCSHosts, + VCSClient: vcsClient, + BitbucketWebhookSecret: []byte(userConfig.BitbucketWebhookSecret), + AzureDevopsWebhookBasicUser: []byte(userConfig.AzureDevopsWebhookUser), + AzureDevopsWebhookBasicPassword: []byte(userConfig.AzureDevopsWebhookPassword), + AzureDevopsRequestValidator: &events_controllers.DefaultAzureDevopsRequestValidator{}, + } + githubAppController := &controllers.GithubAppController{ + AtlantisURL: parsedURL, + Logger: logger, + GithubSetupComplete: githubAppEnabled, + GithubHostname: userConfig.GithubHostname, + GithubOrg: userConfig.GithubOrg, + } + + return &Server{ + AtlantisVersion: config.AtlantisVersion, + AtlantisURL: parsedURL, + Router: underlyingRouter, + Port: userConfig.Port, + PreWorkflowHooksCommandRunner: preWorkflowHooksCommandRunner, + CommandRunner: commandRunner, + Logger: logger, + Locker: lockingClient, + ApplyLocker: applyLockingClient, + VCSEventsController: eventsController, + GithubAppController: githubAppController, + LocksController: locksController, + StatusController: statusController, + IndexTemplate: templates.IndexTemplate, + LockDetailTemplate: templates.LockTemplate, + SSLKeyFile: userConfig.SSLKeyFile, + SSLCertFile: userConfig.SSLCertFile, + Drainer: drainer, + }, nil } // Start creates the routes and starts serving traffic. 
func (s *Server) Start() error { - s.Router.HandleFunc("/", s.Index).Methods("GET").MatcherFunc(func(r *http.Request, rm *mux.RouteMatch) bool { - return r.URL.Path == "/" || r.URL.Path == "/index.html" - }) - s.Router.HandleFunc("/healthz", s.Healthz).Methods("GET") - s.Router.HandleFunc("/status", s.StatusController.Get).Methods("GET") - s.Router.PathPrefix("/static/").Handler(http.FileServer(&assetfs.AssetFS{Asset: static.Asset, AssetDir: static.AssetDir, AssetInfo: static.AssetInfo})) - s.Router.HandleFunc("/events", s.VCSEventsController.Post).Methods("POST") - s.Router.HandleFunc("/github-app/exchange-code", s.GithubAppController.ExchangeCode).Methods("GET") - s.Router.HandleFunc("/github-app/setup", s.GithubAppController.New).Methods("GET") - s.Router.HandleFunc("/apply/lock", s.LocksController.LockApply).Methods("POST").Queries() - s.Router.HandleFunc("/apply/unlock", s.LocksController.UnlockApply).Methods("DELETE").Queries() - s.Router.HandleFunc("/locks", s.LocksController.DeleteLock).Methods("DELETE").Queries("id", "{id:.*}") - s.Router.HandleFunc("/lock", s.LocksController.GetLock).Methods("GET"). - Queries(LockViewRouteIDQueryParam, fmt.Sprintf("{%s}", LockViewRouteIDQueryParam)).Name(LockViewRouteName) - n := negroni.New(&negroni.Recovery{ - Logger: log.New(os.Stdout, "", log.LstdFlags), - PrintStack: false, - StackAll: false, - StackSize: 1024 * 8, - }, NewRequestLogger(s.Logger)) - n.UseHandler(s.Router) - - defer s.Logger.Flush() - - // Ensure server gracefully drains connections when stopped. - stop := make(chan os.Signal, 1) - // Stop on SIGINTs and SIGTERMs. 
- signal.Notify(stop, os.Interrupt, syscall.SIGTERM) - - server := &http.Server{Addr: fmt.Sprintf(":%d", s.Port), Handler: n} - go func() { - s.Logger.Info("Atlantis started - listening on port %v", s.Port) - - var err error - if s.SSLCertFile != "" && s.SSLKeyFile != "" { - err = server.ListenAndServeTLS(s.SSLCertFile, s.SSLKeyFile) - } else { - err = server.ListenAndServe() - } - - if err != nil && err != http.ErrServerClosed { - s.Logger.Err(err.Error()) - } - }() - <-stop - - s.Logger.Warn("Received interrupt. Waiting for in-progress operations to complete") - s.waitForDrain() - ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) // nolint: vet - if err := server.Shutdown(ctx); err != nil { - return cli.NewExitError(fmt.Sprintf("while shutting down: %s", err), 1) - } - return nil + s.Router.HandleFunc("/", s.Index).Methods("GET").MatcherFunc(func(r *http.Request, rm *mux.RouteMatch) bool { + return r.URL.Path == "/" || r.URL.Path == "/index.html" + }) + s.Router.HandleFunc("/healthz", s.Healthz).Methods("GET") + s.Router.HandleFunc("/status", s.StatusController.Get).Methods("GET") + s.Router.PathPrefix("/static/").Handler(http.FileServer(&assetfs.AssetFS{Asset: static.Asset, AssetDir: static.AssetDir, AssetInfo: static.AssetInfo})) + s.Router.HandleFunc("/events", s.VCSEventsController.Post).Methods("POST") + s.Router.HandleFunc("/github-app/exchange-code", s.GithubAppController.ExchangeCode).Methods("GET") + s.Router.HandleFunc("/github-app/setup", s.GithubAppController.New).Methods("GET") + s.Router.HandleFunc("/apply/lock", s.LocksController.LockApply).Methods("POST").Queries() + s.Router.HandleFunc("/apply/unlock", s.LocksController.UnlockApply).Methods("DELETE").Queries() + s.Router.HandleFunc("/locks", s.LocksController.DeleteLock).Methods("DELETE").Queries("id", "{id:.*}") + s.Router.HandleFunc("/lock", s.LocksController.GetLock).Methods("GET"). 
+ Queries(LockViewRouteIDQueryParam, fmt.Sprintf("{%s}", LockViewRouteIDQueryParam)).Name(LockViewRouteName) + n := negroni.New(&negroni.Recovery{ + Logger: log.New(os.Stdout, "", log.LstdFlags), + PrintStack: false, + StackAll: false, + StackSize: 1024 * 8, + }, NewRequestLogger(s.Logger)) + n.UseHandler(s.Router) + + defer s.Logger.Flush() + + // Ensure server gracefully drains connections when stopped. + stop := make(chan os.Signal, 1) + // Stop on SIGINTs and SIGTERMs. + signal.Notify(stop, os.Interrupt, syscall.SIGTERM) + + server := &http.Server{Addr: fmt.Sprintf(":%d", s.Port), Handler: n} + go func() { + s.Logger.Info("Atlantis started - listening on port %v", s.Port) + + var err error + if s.SSLCertFile != "" && s.SSLKeyFile != "" { + err = server.ListenAndServeTLS(s.SSLCertFile, s.SSLKeyFile) + } else { + err = server.ListenAndServe() + } + + if err != nil && err != http.ErrServerClosed { + s.Logger.Err(err.Error()) + } + }() + <-stop + + s.Logger.Warn("Received interrupt. Waiting for in-progress operations to complete") + s.waitForDrain() + ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) // nolint: vet + if err := server.Shutdown(ctx); err != nil { + return cli.NewExitError(fmt.Sprintf("while shutting down: %s", err), 1) + } + return nil } // waitForDrain blocks until draining is complete. 
func (s *Server) waitForDrain() { - drainComplete := make(chan bool, 1) - go func() { - s.Drainer.ShutdownBlocking() - drainComplete <- true - }() - ticker := time.NewTicker(5 * time.Second) - for { - select { - case <-drainComplete: - s.Logger.Info("All in-progress operations complete, shutting down") - return - case <-ticker.C: - s.Logger.Info("Waiting for in-progress operations to complete, current in-progress ops: %d", s.Drainer.GetStatus().InProgressOps) - } - } + drainComplete := make(chan bool, 1) + go func() { + s.Drainer.ShutdownBlocking() + drainComplete <- true + }() + ticker := time.NewTicker(5 * time.Second) + for { + select { + case <-drainComplete: + s.Logger.Info("All in-progress operations complete, shutting down") + return + case <-ticker.C: + s.Logger.Info("Waiting for in-progress operations to complete, current in-progress ops: %d", s.Drainer.GetStatus().InProgressOps) + } + } } // Index is the / route. func (s *Server) Index(w http.ResponseWriter, _ *http.Request) { - locks, err := s.Locker.List() - if err != nil { - w.WriteHeader(http.StatusServiceUnavailable) - fmt.Fprintf(w, "Could not retrieve locks: %s", err) - return - } - - var lockResults []templates.LockIndexData - for id, v := range locks { - lockURL, _ := s.Router.Get(LockViewRouteName).URL("id", url.QueryEscape(id)) - lockResults = append(lockResults, templates.LockIndexData{ - // NOTE: must use .String() instead of .Path because we need the - // query params as part of the lock URL. 
- LockPath: lockURL.String(), - RepoFullName: v.Project.RepoFullName, - PullNum: v.Pull.Num, - Path: v.Project.Path, - Workspace: v.Workspace, - Time: v.Time, - TimeFormatted: v.Time.Format("02-01-2006 15:04:05"), - }) - } - - applyCmdLock, err := s.ApplyLocker.CheckApplyLock() - s.Logger.Info("Apply Lock: %v", applyCmdLock) - if err != nil { - w.WriteHeader(http.StatusServiceUnavailable) - fmt.Fprintf(w, "Could not retrieve global apply lock: %s", err) - return - } - - applyLockData := templates.ApplyLockData{ - Time: applyCmdLock.Time, - Locked: applyCmdLock.Locked, - TimeFormatted: applyCmdLock.Time.Format("02-01-2006 15:04:05"), - } - //Sort by date - newest to oldest. - sort.SliceStable(lockResults, func(i, j int) bool { return lockResults[i].Time.After(lockResults[j].Time) }) - - err = s.IndexTemplate.Execute(w, templates.IndexData{ - Locks: lockResults, - ApplyLock: applyLockData, - AtlantisVersion: s.AtlantisVersion, - CleanedBasePath: s.AtlantisURL.Path, - }) - if err != nil { - s.Logger.Err(err.Error()) - } + locks, err := s.Locker.List() + if err != nil { + w.WriteHeader(http.StatusServiceUnavailable) + fmt.Fprintf(w, "Could not retrieve locks: %s", err) + return + } + + var lockResults []templates.LockIndexData + for id, v := range locks { + lockURL, _ := s.Router.Get(LockViewRouteName).URL("id", url.QueryEscape(id)) + lockResults = append(lockResults, templates.LockIndexData{ + // NOTE: must use .String() instead of .Path because we need the + // query params as part of the lock URL. 
+ LockPath: lockURL.String(), + RepoFullName: v.Project.RepoFullName, + PullNum: v.Pull.Num, + Path: v.Project.Path, + Workspace: v.Workspace, + Time: v.Time, + TimeFormatted: v.Time.Format("02-01-2006 15:04:05"), + }) + } + + applyCmdLock, err := s.ApplyLocker.CheckApplyLock() + s.Logger.Info("Apply Lock: %v", applyCmdLock) + if err != nil { + w.WriteHeader(http.StatusServiceUnavailable) + fmt.Fprintf(w, "Could not retrieve global apply lock: %s", err) + return + } + + applyLockData := templates.ApplyLockData{ + Time: applyCmdLock.Time, + Locked: applyCmdLock.Locked, + TimeFormatted: applyCmdLock.Time.Format("02-01-2006 15:04:05"), + } + //Sort by date - newest to oldest. + sort.SliceStable(lockResults, func(i, j int) bool { return lockResults[i].Time.After(lockResults[j].Time) }) + + err = s.IndexTemplate.Execute(w, templates.IndexData{ + Locks: lockResults, + ApplyLock: applyLockData, + AtlantisVersion: s.AtlantisVersion, + CleanedBasePath: s.AtlantisURL.Path, + }) + if err != nil { + s.Logger.Err(err.Error()) + } } func mkSubDir(parentDir string, subDir string) (string, error) { - fullDir := filepath.Join(parentDir, subDir) - if err := os.MkdirAll(fullDir, 0700); err != nil { - return "", errors.Wrapf(err, "unable to create dir %q", fullDir) - } + fullDir := filepath.Join(parentDir, subDir) + if err := os.MkdirAll(fullDir, 0700); err != nil { + return "", errors.Wrapf(err, "unable to create dir %q", fullDir) + } - return fullDir, nil + return fullDir, nil } // Healthz returns the health check response. It always returns a 200 currently. 
func (s *Server) Healthz(w http.ResponseWriter, _ *http.Request) { - data, err := json.MarshalIndent(&struct { - Status string `json:"status"` - }{ - Status: "ok", - }, "", " ") - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintf(w, "Error creating status json response: %s", err) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(data) // nolint: errcheck + data, err := json.MarshalIndent(&struct { + Status string `json:"status"` + }{ + Status: "ok", + }, "", " ") + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "Error creating status json response: %s", err) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(data) // nolint: errcheck } // ParseAtlantisURL parses the user-passed atlantis URL to ensure it is valid @@ -840,15 +840,15 @@ func (s *Server) Healthz(w http.ResponseWriter, _ *http.Request) { // It removes any trailing slashes from the path so we can concatenate it // with other paths without checking. func ParseAtlantisURL(u string) (*url.URL, error) { - parsed, err := url.Parse(u) - if err != nil { - return nil, err - } - if !(parsed.Scheme == "http" || parsed.Scheme == "https") { - return nil, errors.New("http or https must be specified") - } - // We want the path to end without a trailing slash so we know how to - // use it in the rest of the program. - parsed.Path = strings.TrimSuffix(parsed.Path, "/") - return parsed, nil + parsed, err := url.Parse(u) + if err != nil { + return nil, err + } + if !(parsed.Scheme == "http" || parsed.Scheme == "https") { + return nil, errors.New("http or https must be specified") + } + // We want the path to end without a trailing slash so we know how to + // use it in the rest of the program. 
+ parsed.Path = strings.TrimSuffix(parsed.Path, "/") + return parsed, nil } diff --git a/server/user_config.go b/server/user_config.go index 53bd1d1e05..22c56f34c6 100644 --- a/server/user_config.go +++ b/server/user_config.go @@ -17,7 +17,7 @@ type UserConfig struct { AzureDevopsUser string `mapstructure:"azuredevops-user"` AzureDevopsWebhookPassword string `mapstructure:"azuredevops-webhook-password"` AzureDevopsWebhookUser string `mapstructure:"azuredevops-webhook-user"` - AzureDevOpsHost string `mapstructure:"azuredevops-host"` + AzureDevOpsHost string `mapstructure:"azuredevops-host"` BitbucketBaseURL string `mapstructure:"bitbucket-base-url"` BitbucketToken string `mapstructure:"bitbucket-token"` BitbucketUser string `mapstructure:"bitbucket-user"` From f176bd008957a34b298b342f738eb74e9b6e2189 Mon Sep 17 00:00:00 2001 From: atlantisbot Date: Fri, 15 Oct 2021 14:40:41 +0000 Subject: [PATCH 4/9] gofmt --- server/server.go | 1574 +++++++++++++++++++++++----------------------- 1 file changed, 787 insertions(+), 787 deletions(-) diff --git a/server/server.go b/server/server.go index b134694d54..4149f8bceb 100644 --- a/server/server.go +++ b/server/server.go @@ -16,823 +16,823 @@ package server import ( - "context" - "encoding/json" - "flag" - "fmt" - "log" - "net/http" - "net/url" - "os" - "os/signal" - "path/filepath" - "sort" - "strings" - "syscall" - "time" - - "github.com/mitchellh/go-homedir" - "github.com/runatlantis/atlantis/server/core/db" - "github.com/runatlantis/atlantis/server/events/yaml/valid" - - assetfs "github.com/elazarl/go-bindata-assetfs" - "github.com/gorilla/mux" - "github.com/pkg/errors" - "github.com/runatlantis/atlantis/server/controllers" - events_controllers "github.com/runatlantis/atlantis/server/controllers/events" - "github.com/runatlantis/atlantis/server/controllers/templates" - "github.com/runatlantis/atlantis/server/core/locking" - "github.com/runatlantis/atlantis/server/core/runtime" - 
"github.com/runatlantis/atlantis/server/core/runtime/policy" - "github.com/runatlantis/atlantis/server/core/terraform" - "github.com/runatlantis/atlantis/server/events" - "github.com/runatlantis/atlantis/server/events/models" - "github.com/runatlantis/atlantis/server/events/vcs" - "github.com/runatlantis/atlantis/server/events/vcs/bitbucketcloud" - "github.com/runatlantis/atlantis/server/events/vcs/bitbucketserver" - "github.com/runatlantis/atlantis/server/events/webhooks" - "github.com/runatlantis/atlantis/server/events/yaml" - "github.com/runatlantis/atlantis/server/logging" - "github.com/runatlantis/atlantis/server/static" - "github.com/urfave/cli" - "github.com/urfave/negroni" + "context" + "encoding/json" + "flag" + "fmt" + "log" + "net/http" + "net/url" + "os" + "os/signal" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/mitchellh/go-homedir" + "github.com/runatlantis/atlantis/server/core/db" + "github.com/runatlantis/atlantis/server/events/yaml/valid" + + assetfs "github.com/elazarl/go-bindata-assetfs" + "github.com/gorilla/mux" + "github.com/pkg/errors" + "github.com/runatlantis/atlantis/server/controllers" + events_controllers "github.com/runatlantis/atlantis/server/controllers/events" + "github.com/runatlantis/atlantis/server/controllers/templates" + "github.com/runatlantis/atlantis/server/core/locking" + "github.com/runatlantis/atlantis/server/core/runtime" + "github.com/runatlantis/atlantis/server/core/runtime/policy" + "github.com/runatlantis/atlantis/server/core/terraform" + "github.com/runatlantis/atlantis/server/events" + "github.com/runatlantis/atlantis/server/events/models" + "github.com/runatlantis/atlantis/server/events/vcs" + "github.com/runatlantis/atlantis/server/events/vcs/bitbucketcloud" + "github.com/runatlantis/atlantis/server/events/vcs/bitbucketserver" + "github.com/runatlantis/atlantis/server/events/webhooks" + "github.com/runatlantis/atlantis/server/events/yaml" + 
"github.com/runatlantis/atlantis/server/logging" + "github.com/runatlantis/atlantis/server/static" + "github.com/urfave/cli" + "github.com/urfave/negroni" ) const ( - // LockViewRouteName is the named route in mux.Router for the lock view. - // The route can be retrieved by this name, ex: - // mux.Router.Get(LockViewRouteName) - LockViewRouteName = "lock-detail" - // LockViewRouteIDQueryParam is the query parameter needed to construct the lock view - // route. ex: - // mux.Router.Get(LockViewRouteName).URL(LockViewRouteIDQueryParam, "my id") - LockViewRouteIDQueryParam = "id" - - // binDirName is the name of the directory inside our data dir where - // we download binaries. - BinDirName = "bin" - - // terraformPluginCacheDir is the name of the dir inside our data dir - // where we tell terraform to cache plugins and modules. - TerraformPluginCacheDirName = "plugin-cache" + // LockViewRouteName is the named route in mux.Router for the lock view. + // The route can be retrieved by this name, ex: + // mux.Router.Get(LockViewRouteName) + LockViewRouteName = "lock-detail" + // LockViewRouteIDQueryParam is the query parameter needed to construct the lock view + // route. ex: + // mux.Router.Get(LockViewRouteName).URL(LockViewRouteIDQueryParam, "my id") + LockViewRouteIDQueryParam = "id" + + // binDirName is the name of the directory inside our data dir where + // we download binaries. + BinDirName = "bin" + + // terraformPluginCacheDir is the name of the dir inside our data dir + // where we tell terraform to cache plugins and modules. + TerraformPluginCacheDirName = "plugin-cache" ) // Server runs the Atlantis web server. 
type Server struct { - AtlantisVersion string - AtlantisURL *url.URL - Router *mux.Router - Port int - PreWorkflowHooksCommandRunner *events.DefaultPreWorkflowHooksCommandRunner - CommandRunner *events.DefaultCommandRunner - Logger logging.SimpleLogging - Locker locking.Locker - ApplyLocker locking.ApplyLocker - VCSEventsController *events_controllers.VCSEventsController - GithubAppController *controllers.GithubAppController - LocksController *controllers.LocksController - StatusController *controllers.StatusController - IndexTemplate templates.TemplateWriter - LockDetailTemplate templates.TemplateWriter - SSLCertFile string - SSLKeyFile string - Drainer *events.Drainer + AtlantisVersion string + AtlantisURL *url.URL + Router *mux.Router + Port int + PreWorkflowHooksCommandRunner *events.DefaultPreWorkflowHooksCommandRunner + CommandRunner *events.DefaultCommandRunner + Logger logging.SimpleLogging + Locker locking.Locker + ApplyLocker locking.ApplyLocker + VCSEventsController *events_controllers.VCSEventsController + GithubAppController *controllers.GithubAppController + LocksController *controllers.LocksController + StatusController *controllers.StatusController + IndexTemplate templates.TemplateWriter + LockDetailTemplate templates.TemplateWriter + SSLCertFile string + SSLKeyFile string + Drainer *events.Drainer } // Config holds config for server that isn't passed in by the user. type Config struct { - AllowForkPRsFlag string - AtlantisURLFlag string - AtlantisVersion string - DefaultTFVersionFlag string - RepoConfigJSONFlag string - SilenceForkPRErrorsFlag string + AllowForkPRsFlag string + AtlantisURLFlag string + AtlantisVersion string + DefaultTFVersionFlag string + RepoConfigJSONFlag string + SilenceForkPRErrorsFlag string } // WebhookConfig is nested within UserConfig. It's used to configure webhooks. type WebhookConfig struct { - // Event is the type of event we should send this webhook for, ex. apply. 
- Event string `mapstructure:"event"` - // WorkspaceRegex is a regex that is used to match against the workspace - // that is being modified for this event. If the regex matches, we'll - // send the webhook, ex. "production.*". - WorkspaceRegex string `mapstructure:"workspace-regex"` - // Kind is the type of webhook we should send, ex. slack. - Kind string `mapstructure:"kind"` - // Channel is the channel to send this webhook to. It only applies to - // slack webhooks. Should be without '#'. - Channel string `mapstructure:"channel"` + // Event is the type of event we should send this webhook for, ex. apply. + Event string `mapstructure:"event"` + // WorkspaceRegex is a regex that is used to match against the workspace + // that is being modified for this event. If the regex matches, we'll + // send the webhook, ex. "production.*". + WorkspaceRegex string `mapstructure:"workspace-regex"` + // Kind is the type of webhook we should send, ex. slack. + Kind string `mapstructure:"kind"` + // Channel is the channel to send this webhook to. It only applies to + // slack webhooks. Should be without '#'. + Channel string `mapstructure:"channel"` } // NewServer returns a new server. If there are issues starting the server or // its dependencies an error will be returned. This is like the main() function // for the server CLI command because it injects all the dependencies. 
func NewServer(userConfig UserConfig, config Config) (*Server, error) { - logger, err := logging.NewStructuredLoggerFromLevel(userConfig.ToLogLevel()) - - if err != nil { - return nil, err - } - - var supportedVCSHosts []models.VCSHostType - var githubClient *vcs.GithubClient - var githubAppEnabled bool - var githubCredentials vcs.GithubCredentials - var gitlabClient *vcs.GitlabClient - var bitbucketCloudClient *bitbucketcloud.Client - var bitbucketServerClient *bitbucketserver.Client - var azuredevopsClient *vcs.AzureDevopsClient - - policyChecksEnabled := false - if userConfig.EnablePolicyChecksFlag { - logger.Info("Policy Checks are enabled") - policyChecksEnabled = true - } - - if userConfig.GithubUser != "" || userConfig.GithubAppID != 0 { - supportedVCSHosts = append(supportedVCSHosts, models.Github) - if userConfig.GithubUser != "" { - githubCredentials = &vcs.GithubUserCredentials{ - User: userConfig.GithubUser, - Token: userConfig.GithubToken, - } - } else if userConfig.GithubAppID != 0 && userConfig.GithubAppKeyFile != "" { - privateKey, err := os.ReadFile(userConfig.GithubAppKeyFile) - if err != nil { - return nil, err - } - githubCredentials = &vcs.GithubAppCredentials{ - AppID: userConfig.GithubAppID, - Key: privateKey, - Hostname: userConfig.GithubHostname, - AppSlug: userConfig.GithubAppSlug, - } - githubAppEnabled = true - } else if userConfig.GithubAppID != 0 && userConfig.GithubAppKey != "" { - githubCredentials = &vcs.GithubAppCredentials{ - AppID: userConfig.GithubAppID, - Key: []byte(userConfig.GithubAppKey), - Hostname: userConfig.GithubHostname, - AppSlug: userConfig.GithubAppSlug, - } - githubAppEnabled = true - } - - var err error - githubClient, err = vcs.NewGithubClient(userConfig.GithubHostname, githubCredentials, logger) - if err != nil { - return nil, err - } - } - if userConfig.GitlabUser != "" { - supportedVCSHosts = append(supportedVCSHosts, models.Gitlab) - var err error - gitlabClient, err = 
vcs.NewGitlabClient(userConfig.GitlabHostname, userConfig.GitlabToken, logger) - if err != nil { - return nil, err - } - } - if userConfig.BitbucketUser != "" { - if userConfig.BitbucketBaseURL == bitbucketcloud.BaseURL { - supportedVCSHosts = append(supportedVCSHosts, models.BitbucketCloud) - bitbucketCloudClient = bitbucketcloud.NewClient( - http.DefaultClient, - userConfig.BitbucketUser, - userConfig.BitbucketToken, - userConfig.AtlantisURL) - } else { - supportedVCSHosts = append(supportedVCSHosts, models.BitbucketServer) - var err error - bitbucketServerClient, err = bitbucketserver.NewClient( - http.DefaultClient, - userConfig.BitbucketUser, - userConfig.BitbucketToken, - userConfig.BitbucketBaseURL, - userConfig.AtlantisURL) - if err != nil { - return nil, errors.Wrapf(err, "setting up Bitbucket Server client") - } - } - } - if userConfig.AzureDevopsUser != "" { - supportedVCSHosts = append(supportedVCSHosts, models.AzureDevops) - - azureDevOpsHost := userConfig.AzureDevOpsHost - if userConfig.AzureDevOpsHost == "" { - azureDevOpsHost = "dev.azure.com" - } - - var err error - azuredevopsClient, err = vcs.NewAzureDevopsClient(azureDevOpsHost, userConfig.AzureDevopsUser, userConfig.AzureDevopsToken) - if err != nil { - return nil, err - } - } - - if userConfig.WriteGitCreds { - home, err := homedir.Dir() - if err != nil { - return nil, errors.Wrap(err, "getting home dir to write ~/.git-credentials file") - } - if userConfig.GithubUser != "" { - if err := events.WriteGitCreds(userConfig.GithubUser, userConfig.GithubToken, userConfig.GithubHostname, home, logger, false); err != nil { - return nil, err - } - } - if userConfig.GitlabUser != "" { - if err := events.WriteGitCreds(userConfig.GitlabUser, userConfig.GitlabToken, userConfig.GitlabHostname, home, logger, false); err != nil { - return nil, err - } - } - if userConfig.BitbucketUser != "" { - // The default BitbucketBaseURL is https://api.bitbucket.org which can't actually be used for git - // so we 
override it here only if it's that to be bitbucket.org - bitbucketBaseURL := userConfig.BitbucketBaseURL - if bitbucketBaseURL == "https://api.bitbucket.org" { - bitbucketBaseURL = "bitbucket.org" - } - if err := events.WriteGitCreds(userConfig.BitbucketUser, userConfig.BitbucketToken, bitbucketBaseURL, home, logger, false); err != nil { - return nil, err - } - } - if userConfig.AzureDevopsUser != "" { - if err := events.WriteGitCreds(userConfig.AzureDevopsUser, userConfig.AzureDevopsToken, "dev.azure.com", home, logger, false); err != nil { - return nil, err - } - } - } - - var webhooksConfig []webhooks.Config - for _, c := range userConfig.Webhooks { - config := webhooks.Config{ - Channel: c.Channel, - Event: c.Event, - Kind: c.Kind, - WorkspaceRegex: c.WorkspaceRegex, - } - webhooksConfig = append(webhooksConfig, config) - } - webhooksManager, err := webhooks.NewMultiWebhookSender(webhooksConfig, webhooks.NewSlackClient(userConfig.SlackToken)) - if err != nil { - return nil, errors.Wrap(err, "initializing webhooks") - } - vcsClient := vcs.NewClientProxy(githubClient, gitlabClient, bitbucketCloudClient, bitbucketServerClient, azuredevopsClient) - commitStatusUpdater := &events.DefaultCommitStatusUpdater{Client: vcsClient, StatusName: userConfig.VCSStatusName} - - binDir, err := mkSubDir(userConfig.DataDir, BinDirName) - - if err != nil { - return nil, err - } - - cacheDir, err := mkSubDir(userConfig.DataDir, TerraformPluginCacheDirName) - - if err != nil { - return nil, err - } - - terraformClient, err := terraform.NewClient( - logger, - binDir, - cacheDir, - userConfig.TFEToken, - userConfig.TFEHostname, - userConfig.DefaultTFVersion, - config.DefaultTFVersionFlag, - userConfig.TFDownloadURL, - &terraform.DefaultDownloader{}, - true) - // The flag.Lookup call is to detect if we're running in a unit test. If we - // are, then we don't error out because we don't have/want terraform - // installed on our CI system where the unit tests run. 
- if err != nil && flag.Lookup("test.v") == nil { - return nil, errors.Wrap(err, "initializing terraform") - } - markdownRenderer := &events.MarkdownRenderer{ - GitlabSupportsCommonMark: gitlabClient.SupportsCommonMark(), - DisableApplyAll: userConfig.DisableApplyAll, - DisableMarkdownFolding: userConfig.DisableMarkdownFolding, - DisableApply: userConfig.DisableApply, - DisableRepoLocking: userConfig.DisableRepoLocking, - EnableDiffMarkdownFormat: userConfig.EnableDiffMarkdownFormat, - } - - boltdb, err := db.New(userConfig.DataDir) - if err != nil { - return nil, err - } - var lockingClient locking.Locker - var applyLockingClient locking.ApplyLocker - if userConfig.DisableRepoLocking { - lockingClient = locking.NewNoOpLocker() - } else { - lockingClient = locking.NewClient(boltdb) - } - applyLockingClient = locking.NewApplyClient(boltdb, userConfig.DisableApply) - workingDirLocker := events.NewDefaultWorkingDirLocker() - - var workingDir events.WorkingDir = &events.FileWorkspace{ - DataDir: userConfig.DataDir, - CheckoutMerge: userConfig.CheckoutStrategy == "merge", - } - // provide fresh tokens before clone from the GitHub Apps integration, proxy workingDir - if githubAppEnabled { - if !userConfig.WriteGitCreds { - return nil, errors.New("Github App requires --write-git-creds to support cloning") - } - workingDir = &events.GithubAppWorkingDir{ - WorkingDir: workingDir, - Credentials: githubCredentials, - GithubHostname: userConfig.GithubHostname, - } - } - - projectLocker := &events.DefaultProjectLocker{ - Locker: lockingClient, - VCSClient: vcsClient, - } - deleteLockCommand := &events.DefaultDeleteLockCommand{ - Locker: lockingClient, - Logger: logger, - WorkingDir: workingDir, - WorkingDirLocker: workingDirLocker, - DB: boltdb, - } - - parsedURL, err := ParseAtlantisURL(userConfig.AtlantisURL) - if err != nil { - return nil, errors.Wrapf(err, - "parsing --%s flag %q", config.AtlantisURLFlag, userConfig.AtlantisURL) - } - validator := &yaml.ParserValidator{} - 
- globalCfg := valid.NewGlobalCfgFromArgs( - valid.GlobalCfgArgs{ - AllowRepoCfg: userConfig.AllowRepoConfig, - MergeableReq: userConfig.RequireMergeable, - ApprovedReq: userConfig.RequireApproval, - UnDivergedReq: userConfig.RequireUnDiverged, - PolicyCheckEnabled: userConfig.EnablePolicyChecksFlag, - }) - if userConfig.RepoConfig != "" { - globalCfg, err = validator.ParseGlobalCfg(userConfig.RepoConfig, globalCfg) - if err != nil { - return nil, errors.Wrapf(err, "parsing %s file", userConfig.RepoConfig) - } - } else if userConfig.RepoConfigJSON != "" { - globalCfg, err = validator.ParseGlobalCfgJSON(userConfig.RepoConfigJSON, globalCfg) - if err != nil { - return nil, errors.Wrapf(err, "parsing --%s", config.RepoConfigJSONFlag) - } - } - - underlyingRouter := mux.NewRouter() - router := &Router{ - AtlantisURL: parsedURL, - LockViewRouteIDQueryParam: LockViewRouteIDQueryParam, - LockViewRouteName: LockViewRouteName, - Underlying: underlyingRouter, - } - pullClosedExecutor := &events.PullClosedExecutor{ - VCSClient: vcsClient, - Locker: lockingClient, - WorkingDir: workingDir, - Logger: logger, - DB: boltdb, - } - eventParser := &events.EventParser{ - GithubUser: userConfig.GithubUser, - GithubToken: userConfig.GithubToken, - GitlabUser: userConfig.GitlabUser, - GitlabToken: userConfig.GitlabToken, - AllowDraftPRs: userConfig.PlanDrafts, - BitbucketUser: userConfig.BitbucketUser, - BitbucketToken: userConfig.BitbucketToken, - BitbucketServerURL: userConfig.BitbucketBaseURL, - AzureDevopsUser: userConfig.AzureDevopsUser, - AzureDevopsToken: userConfig.AzureDevopsToken, - } - commentParser := &events.CommentParser{ - GithubUser: userConfig.GithubUser, - GitlabUser: userConfig.GitlabUser, - BitbucketUser: userConfig.BitbucketUser, - AzureDevopsUser: userConfig.AzureDevopsUser, - ApplyDisabled: userConfig.DisableApply, - } - defaultTfVersion := terraformClient.DefaultVersion() - pendingPlanFinder := &events.DefaultPendingPlanFinder{} - runStepRunner := 
&runtime.RunStepRunner{ - TerraformExecutor: terraformClient, - DefaultTFVersion: defaultTfVersion, - TerraformBinDir: terraformClient.TerraformBinDir(), - } - drainer := &events.Drainer{} - statusController := &controllers.StatusController{ - Logger: logger, - Drainer: drainer, - } - preWorkflowHooksCommandRunner := &events.DefaultPreWorkflowHooksCommandRunner{ - VCSClient: vcsClient, - GlobalCfg: globalCfg, - WorkingDirLocker: workingDirLocker, - WorkingDir: workingDir, - PreWorkflowHookRunner: runtime.DefaultPreWorkflowHookRunner{}, - } - projectCommandBuilder := events.NewProjectCommandBuilder( - policyChecksEnabled, - validator, - &events.DefaultProjectFinder{}, - vcsClient, - workingDir, - workingDirLocker, - globalCfg, - pendingPlanFinder, - commentParser, - userConfig.SkipCloneNoChanges, - userConfig.EnableRegExpCmd, - userConfig.AutoplanFileList, - ) - - showStepRunner, err := runtime.NewShowStepRunner(terraformClient, defaultTfVersion) - - if err != nil { - return nil, errors.Wrap(err, "initializing show step runner") - } - - policyCheckRunner, err := runtime.NewPolicyCheckStepRunner( - defaultTfVersion, - policy.NewConfTestExecutorWorkflow(logger, binDir, &terraform.DefaultDownloader{}), - ) - - if err != nil { - return nil, errors.Wrap(err, "initializing policy check runner") - } - - applyRequirementHandler := &events.AggregateApplyRequirements{ - PullApprovedChecker: vcsClient, - WorkingDir: workingDir, - } - - projectCommandRunner := &events.DefaultProjectCommandRunner{ - Locker: projectLocker, - LockURLGenerator: router, - InitStepRunner: &runtime.InitStepRunner{ - TerraformExecutor: terraformClient, - DefaultTFVersion: defaultTfVersion, - }, - PlanStepRunner: &runtime.PlanStepRunner{ - TerraformExecutor: terraformClient, - DefaultTFVersion: defaultTfVersion, - CommitStatusUpdater: commitStatusUpdater, - AsyncTFExec: terraformClient, - }, - ShowStepRunner: showStepRunner, - PolicyCheckStepRunner: policyCheckRunner, - ApplyStepRunner: 
&runtime.ApplyStepRunner{ - TerraformExecutor: terraformClient, - CommitStatusUpdater: commitStatusUpdater, - AsyncTFExec: terraformClient, - }, - RunStepRunner: runStepRunner, - EnvStepRunner: &runtime.EnvStepRunner{ - RunStepRunner: runStepRunner, - }, - VersionStepRunner: &runtime.VersionStepRunner{ - TerraformExecutor: terraformClient, - DefaultTFVersion: defaultTfVersion, - }, - WorkingDir: workingDir, - Webhooks: webhooksManager, - WorkingDirLocker: workingDirLocker, - AggregateApplyRequirements: applyRequirementHandler, - } - - dbUpdater := &events.DBUpdater{ - DB: boltdb, - } - - pullUpdater := &events.PullUpdater{ - HidePrevPlanComments: userConfig.HidePrevPlanComments, - VCSClient: vcsClient, - MarkdownRenderer: markdownRenderer, - } - - autoMerger := &events.AutoMerger{ - VCSClient: vcsClient, - GlobalAutomerge: userConfig.Automerge, - } - - policyCheckCommandRunner := events.NewPolicyCheckCommandRunner( - dbUpdater, - pullUpdater, - commitStatusUpdater, - projectCommandRunner, - userConfig.ParallelPoolSize, - userConfig.SilenceVCSStatusNoProjects, - ) - - planCommandRunner := events.NewPlanCommandRunner( - userConfig.SilenceVCSStatusNoPlans, - userConfig.SilenceVCSStatusNoProjects, - vcsClient, - pendingPlanFinder, - workingDir, - commitStatusUpdater, - projectCommandBuilder, - projectCommandRunner, - dbUpdater, - pullUpdater, - policyCheckCommandRunner, - autoMerger, - userConfig.ParallelPoolSize, - userConfig.SilenceNoProjects, - boltdb, - ) - - applyCommandRunner := events.NewApplyCommandRunner( - vcsClient, - userConfig.DisableApplyAll, - applyLockingClient, - commitStatusUpdater, - projectCommandBuilder, - projectCommandRunner, - autoMerger, - pullUpdater, - dbUpdater, - boltdb, - userConfig.ParallelPoolSize, - userConfig.SilenceNoProjects, - userConfig.SilenceVCSStatusNoProjects, - ) - - approvePoliciesCommandRunner := events.NewApprovePoliciesCommandRunner( - commitStatusUpdater, - projectCommandBuilder, - projectCommandRunner, - pullUpdater, - 
dbUpdater, - userConfig.SilenceNoProjects, - userConfig.SilenceVCSStatusNoPlans, - ) - - unlockCommandRunner := events.NewUnlockCommandRunner( - deleteLockCommand, - vcsClient, - userConfig.SilenceNoProjects, - ) - - versionCommandRunner := events.NewVersionCommandRunner( - pullUpdater, - projectCommandBuilder, - projectCommandRunner, - userConfig.ParallelPoolSize, - userConfig.SilenceNoProjects, - ) - - commentCommandRunnerByCmd := map[models.CommandName]events.CommentCommandRunner{ - models.PlanCommand: planCommandRunner, - models.ApplyCommand: applyCommandRunner, - models.ApprovePoliciesCommand: approvePoliciesCommandRunner, - models.UnlockCommand: unlockCommandRunner, - models.VersionCommand: versionCommandRunner, - } - - commandRunner := &events.DefaultCommandRunner{ - VCSClient: vcsClient, - GithubPullGetter: githubClient, - GitlabMergeRequestGetter: gitlabClient, - AzureDevopsPullGetter: azuredevopsClient, - CommentCommandRunnerByCmd: commentCommandRunnerByCmd, - EventParser: eventParser, - Logger: logger, - GlobalCfg: globalCfg, - AllowForkPRs: userConfig.AllowForkPRs, - AllowForkPRsFlag: config.AllowForkPRsFlag, - SilenceForkPRErrors: userConfig.SilenceForkPRErrors, - SilenceForkPRErrorsFlag: config.SilenceForkPRErrorsFlag, - DisableAutoplan: userConfig.DisableAutoplan, - Drainer: drainer, - PreWorkflowHooksCommandRunner: preWorkflowHooksCommandRunner, - PullStatusFetcher: boltdb, - } - repoAllowlist, err := events.NewRepoAllowlistChecker(userConfig.RepoAllowlist) - if err != nil { - return nil, err - } - locksController := &controllers.LocksController{ - AtlantisVersion: config.AtlantisVersion, - AtlantisURL: parsedURL, - Locker: lockingClient, - ApplyLocker: applyLockingClient, - Logger: logger, - VCSClient: vcsClient, - LockDetailTemplate: templates.LockTemplate, - WorkingDir: workingDir, - WorkingDirLocker: workingDirLocker, - DB: boltdb, - DeleteLockCommand: deleteLockCommand, - } - eventsController := &events_controllers.VCSEventsController{ - 
CommandRunner: commandRunner, - PullCleaner: pullClosedExecutor, - Parser: eventParser, - CommentParser: commentParser, - Logger: logger, - ApplyDisabled: userConfig.DisableApply, - GithubWebhookSecret: []byte(userConfig.GithubWebhookSecret), - GithubRequestValidator: &events_controllers.DefaultGithubRequestValidator{}, - GitlabRequestParserValidator: &events_controllers.DefaultGitlabRequestParserValidator{}, - GitlabWebhookSecret: []byte(userConfig.GitlabWebhookSecret), - RepoAllowlistChecker: repoAllowlist, - SilenceAllowlistErrors: userConfig.SilenceAllowlistErrors, - SupportedVCSHosts: supportedVCSHosts, - VCSClient: vcsClient, - BitbucketWebhookSecret: []byte(userConfig.BitbucketWebhookSecret), - AzureDevopsWebhookBasicUser: []byte(userConfig.AzureDevopsWebhookUser), - AzureDevopsWebhookBasicPassword: []byte(userConfig.AzureDevopsWebhookPassword), - AzureDevopsRequestValidator: &events_controllers.DefaultAzureDevopsRequestValidator{}, - } - githubAppController := &controllers.GithubAppController{ - AtlantisURL: parsedURL, - Logger: logger, - GithubSetupComplete: githubAppEnabled, - GithubHostname: userConfig.GithubHostname, - GithubOrg: userConfig.GithubOrg, - } - - return &Server{ - AtlantisVersion: config.AtlantisVersion, - AtlantisURL: parsedURL, - Router: underlyingRouter, - Port: userConfig.Port, - PreWorkflowHooksCommandRunner: preWorkflowHooksCommandRunner, - CommandRunner: commandRunner, - Logger: logger, - Locker: lockingClient, - ApplyLocker: applyLockingClient, - VCSEventsController: eventsController, - GithubAppController: githubAppController, - LocksController: locksController, - StatusController: statusController, - IndexTemplate: templates.IndexTemplate, - LockDetailTemplate: templates.LockTemplate, - SSLKeyFile: userConfig.SSLKeyFile, - SSLCertFile: userConfig.SSLCertFile, - Drainer: drainer, - }, nil + logger, err := logging.NewStructuredLoggerFromLevel(userConfig.ToLogLevel()) + + if err != nil { + return nil, err + } + + var 
supportedVCSHosts []models.VCSHostType + var githubClient *vcs.GithubClient + var githubAppEnabled bool + var githubCredentials vcs.GithubCredentials + var gitlabClient *vcs.GitlabClient + var bitbucketCloudClient *bitbucketcloud.Client + var bitbucketServerClient *bitbucketserver.Client + var azuredevopsClient *vcs.AzureDevopsClient + + policyChecksEnabled := false + if userConfig.EnablePolicyChecksFlag { + logger.Info("Policy Checks are enabled") + policyChecksEnabled = true + } + + if userConfig.GithubUser != "" || userConfig.GithubAppID != 0 { + supportedVCSHosts = append(supportedVCSHosts, models.Github) + if userConfig.GithubUser != "" { + githubCredentials = &vcs.GithubUserCredentials{ + User: userConfig.GithubUser, + Token: userConfig.GithubToken, + } + } else if userConfig.GithubAppID != 0 && userConfig.GithubAppKeyFile != "" { + privateKey, err := os.ReadFile(userConfig.GithubAppKeyFile) + if err != nil { + return nil, err + } + githubCredentials = &vcs.GithubAppCredentials{ + AppID: userConfig.GithubAppID, + Key: privateKey, + Hostname: userConfig.GithubHostname, + AppSlug: userConfig.GithubAppSlug, + } + githubAppEnabled = true + } else if userConfig.GithubAppID != 0 && userConfig.GithubAppKey != "" { + githubCredentials = &vcs.GithubAppCredentials{ + AppID: userConfig.GithubAppID, + Key: []byte(userConfig.GithubAppKey), + Hostname: userConfig.GithubHostname, + AppSlug: userConfig.GithubAppSlug, + } + githubAppEnabled = true + } + + var err error + githubClient, err = vcs.NewGithubClient(userConfig.GithubHostname, githubCredentials, logger) + if err != nil { + return nil, err + } + } + if userConfig.GitlabUser != "" { + supportedVCSHosts = append(supportedVCSHosts, models.Gitlab) + var err error + gitlabClient, err = vcs.NewGitlabClient(userConfig.GitlabHostname, userConfig.GitlabToken, logger) + if err != nil { + return nil, err + } + } + if userConfig.BitbucketUser != "" { + if userConfig.BitbucketBaseURL == bitbucketcloud.BaseURL { + 
supportedVCSHosts = append(supportedVCSHosts, models.BitbucketCloud) + bitbucketCloudClient = bitbucketcloud.NewClient( + http.DefaultClient, + userConfig.BitbucketUser, + userConfig.BitbucketToken, + userConfig.AtlantisURL) + } else { + supportedVCSHosts = append(supportedVCSHosts, models.BitbucketServer) + var err error + bitbucketServerClient, err = bitbucketserver.NewClient( + http.DefaultClient, + userConfig.BitbucketUser, + userConfig.BitbucketToken, + userConfig.BitbucketBaseURL, + userConfig.AtlantisURL) + if err != nil { + return nil, errors.Wrapf(err, "setting up Bitbucket Server client") + } + } + } + if userConfig.AzureDevopsUser != "" { + supportedVCSHosts = append(supportedVCSHosts, models.AzureDevops) + + azureDevOpsHost := userConfig.AzureDevOpsHost + if userConfig.AzureDevOpsHost == "" { + azureDevOpsHost = "dev.azure.com" + } + + var err error + azuredevopsClient, err = vcs.NewAzureDevopsClient(azureDevOpsHost, userConfig.AzureDevopsUser, userConfig.AzureDevopsToken) + if err != nil { + return nil, err + } + } + + if userConfig.WriteGitCreds { + home, err := homedir.Dir() + if err != nil { + return nil, errors.Wrap(err, "getting home dir to write ~/.git-credentials file") + } + if userConfig.GithubUser != "" { + if err := events.WriteGitCreds(userConfig.GithubUser, userConfig.GithubToken, userConfig.GithubHostname, home, logger, false); err != nil { + return nil, err + } + } + if userConfig.GitlabUser != "" { + if err := events.WriteGitCreds(userConfig.GitlabUser, userConfig.GitlabToken, userConfig.GitlabHostname, home, logger, false); err != nil { + return nil, err + } + } + if userConfig.BitbucketUser != "" { + // The default BitbucketBaseURL is https://api.bitbucket.org which can't actually be used for git + // so we override it here only if it's that to be bitbucket.org + bitbucketBaseURL := userConfig.BitbucketBaseURL + if bitbucketBaseURL == "https://api.bitbucket.org" { + bitbucketBaseURL = "bitbucket.org" + } + if err := 
events.WriteGitCreds(userConfig.BitbucketUser, userConfig.BitbucketToken, bitbucketBaseURL, home, logger, false); err != nil { + return nil, err + } + } + if userConfig.AzureDevopsUser != "" { + if err := events.WriteGitCreds(userConfig.AzureDevopsUser, userConfig.AzureDevopsToken, "dev.azure.com", home, logger, false); err != nil { + return nil, err + } + } + } + + var webhooksConfig []webhooks.Config + for _, c := range userConfig.Webhooks { + config := webhooks.Config{ + Channel: c.Channel, + Event: c.Event, + Kind: c.Kind, + WorkspaceRegex: c.WorkspaceRegex, + } + webhooksConfig = append(webhooksConfig, config) + } + webhooksManager, err := webhooks.NewMultiWebhookSender(webhooksConfig, webhooks.NewSlackClient(userConfig.SlackToken)) + if err != nil { + return nil, errors.Wrap(err, "initializing webhooks") + } + vcsClient := vcs.NewClientProxy(githubClient, gitlabClient, bitbucketCloudClient, bitbucketServerClient, azuredevopsClient) + commitStatusUpdater := &events.DefaultCommitStatusUpdater{Client: vcsClient, StatusName: userConfig.VCSStatusName} + + binDir, err := mkSubDir(userConfig.DataDir, BinDirName) + + if err != nil { + return nil, err + } + + cacheDir, err := mkSubDir(userConfig.DataDir, TerraformPluginCacheDirName) + + if err != nil { + return nil, err + } + + terraformClient, err := terraform.NewClient( + logger, + binDir, + cacheDir, + userConfig.TFEToken, + userConfig.TFEHostname, + userConfig.DefaultTFVersion, + config.DefaultTFVersionFlag, + userConfig.TFDownloadURL, + &terraform.DefaultDownloader{}, + true) + // The flag.Lookup call is to detect if we're running in a unit test. If we + // are, then we don't error out because we don't have/want terraform + // installed on our CI system where the unit tests run. 
+ if err != nil && flag.Lookup("test.v") == nil { + return nil, errors.Wrap(err, "initializing terraform") + } + markdownRenderer := &events.MarkdownRenderer{ + GitlabSupportsCommonMark: gitlabClient.SupportsCommonMark(), + DisableApplyAll: userConfig.DisableApplyAll, + DisableMarkdownFolding: userConfig.DisableMarkdownFolding, + DisableApply: userConfig.DisableApply, + DisableRepoLocking: userConfig.DisableRepoLocking, + EnableDiffMarkdownFormat: userConfig.EnableDiffMarkdownFormat, + } + + boltdb, err := db.New(userConfig.DataDir) + if err != nil { + return nil, err + } + var lockingClient locking.Locker + var applyLockingClient locking.ApplyLocker + if userConfig.DisableRepoLocking { + lockingClient = locking.NewNoOpLocker() + } else { + lockingClient = locking.NewClient(boltdb) + } + applyLockingClient = locking.NewApplyClient(boltdb, userConfig.DisableApply) + workingDirLocker := events.NewDefaultWorkingDirLocker() + + var workingDir events.WorkingDir = &events.FileWorkspace{ + DataDir: userConfig.DataDir, + CheckoutMerge: userConfig.CheckoutStrategy == "merge", + } + // provide fresh tokens before clone from the GitHub Apps integration, proxy workingDir + if githubAppEnabled { + if !userConfig.WriteGitCreds { + return nil, errors.New("Github App requires --write-git-creds to support cloning") + } + workingDir = &events.GithubAppWorkingDir{ + WorkingDir: workingDir, + Credentials: githubCredentials, + GithubHostname: userConfig.GithubHostname, + } + } + + projectLocker := &events.DefaultProjectLocker{ + Locker: lockingClient, + VCSClient: vcsClient, + } + deleteLockCommand := &events.DefaultDeleteLockCommand{ + Locker: lockingClient, + Logger: logger, + WorkingDir: workingDir, + WorkingDirLocker: workingDirLocker, + DB: boltdb, + } + + parsedURL, err := ParseAtlantisURL(userConfig.AtlantisURL) + if err != nil { + return nil, errors.Wrapf(err, + "parsing --%s flag %q", config.AtlantisURLFlag, userConfig.AtlantisURL) + } + validator := &yaml.ParserValidator{} + 
+ globalCfg := valid.NewGlobalCfgFromArgs( + valid.GlobalCfgArgs{ + AllowRepoCfg: userConfig.AllowRepoConfig, + MergeableReq: userConfig.RequireMergeable, + ApprovedReq: userConfig.RequireApproval, + UnDivergedReq: userConfig.RequireUnDiverged, + PolicyCheckEnabled: userConfig.EnablePolicyChecksFlag, + }) + if userConfig.RepoConfig != "" { + globalCfg, err = validator.ParseGlobalCfg(userConfig.RepoConfig, globalCfg) + if err != nil { + return nil, errors.Wrapf(err, "parsing %s file", userConfig.RepoConfig) + } + } else if userConfig.RepoConfigJSON != "" { + globalCfg, err = validator.ParseGlobalCfgJSON(userConfig.RepoConfigJSON, globalCfg) + if err != nil { + return nil, errors.Wrapf(err, "parsing --%s", config.RepoConfigJSONFlag) + } + } + + underlyingRouter := mux.NewRouter() + router := &Router{ + AtlantisURL: parsedURL, + LockViewRouteIDQueryParam: LockViewRouteIDQueryParam, + LockViewRouteName: LockViewRouteName, + Underlying: underlyingRouter, + } + pullClosedExecutor := &events.PullClosedExecutor{ + VCSClient: vcsClient, + Locker: lockingClient, + WorkingDir: workingDir, + Logger: logger, + DB: boltdb, + } + eventParser := &events.EventParser{ + GithubUser: userConfig.GithubUser, + GithubToken: userConfig.GithubToken, + GitlabUser: userConfig.GitlabUser, + GitlabToken: userConfig.GitlabToken, + AllowDraftPRs: userConfig.PlanDrafts, + BitbucketUser: userConfig.BitbucketUser, + BitbucketToken: userConfig.BitbucketToken, + BitbucketServerURL: userConfig.BitbucketBaseURL, + AzureDevopsUser: userConfig.AzureDevopsUser, + AzureDevopsToken: userConfig.AzureDevopsToken, + } + commentParser := &events.CommentParser{ + GithubUser: userConfig.GithubUser, + GitlabUser: userConfig.GitlabUser, + BitbucketUser: userConfig.BitbucketUser, + AzureDevopsUser: userConfig.AzureDevopsUser, + ApplyDisabled: userConfig.DisableApply, + } + defaultTfVersion := terraformClient.DefaultVersion() + pendingPlanFinder := &events.DefaultPendingPlanFinder{} + runStepRunner := 
&runtime.RunStepRunner{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTfVersion, + TerraformBinDir: terraformClient.TerraformBinDir(), + } + drainer := &events.Drainer{} + statusController := &controllers.StatusController{ + Logger: logger, + Drainer: drainer, + } + preWorkflowHooksCommandRunner := &events.DefaultPreWorkflowHooksCommandRunner{ + VCSClient: vcsClient, + GlobalCfg: globalCfg, + WorkingDirLocker: workingDirLocker, + WorkingDir: workingDir, + PreWorkflowHookRunner: runtime.DefaultPreWorkflowHookRunner{}, + } + projectCommandBuilder := events.NewProjectCommandBuilder( + policyChecksEnabled, + validator, + &events.DefaultProjectFinder{}, + vcsClient, + workingDir, + workingDirLocker, + globalCfg, + pendingPlanFinder, + commentParser, + userConfig.SkipCloneNoChanges, + userConfig.EnableRegExpCmd, + userConfig.AutoplanFileList, + ) + + showStepRunner, err := runtime.NewShowStepRunner(terraformClient, defaultTfVersion) + + if err != nil { + return nil, errors.Wrap(err, "initializing show step runner") + } + + policyCheckRunner, err := runtime.NewPolicyCheckStepRunner( + defaultTfVersion, + policy.NewConfTestExecutorWorkflow(logger, binDir, &terraform.DefaultDownloader{}), + ) + + if err != nil { + return nil, errors.Wrap(err, "initializing policy check runner") + } + + applyRequirementHandler := &events.AggregateApplyRequirements{ + PullApprovedChecker: vcsClient, + WorkingDir: workingDir, + } + + projectCommandRunner := &events.DefaultProjectCommandRunner{ + Locker: projectLocker, + LockURLGenerator: router, + InitStepRunner: &runtime.InitStepRunner{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTfVersion, + }, + PlanStepRunner: &runtime.PlanStepRunner{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTfVersion, + CommitStatusUpdater: commitStatusUpdater, + AsyncTFExec: terraformClient, + }, + ShowStepRunner: showStepRunner, + PolicyCheckStepRunner: policyCheckRunner, + ApplyStepRunner: 
&runtime.ApplyStepRunner{ + TerraformExecutor: terraformClient, + CommitStatusUpdater: commitStatusUpdater, + AsyncTFExec: terraformClient, + }, + RunStepRunner: runStepRunner, + EnvStepRunner: &runtime.EnvStepRunner{ + RunStepRunner: runStepRunner, + }, + VersionStepRunner: &runtime.VersionStepRunner{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTfVersion, + }, + WorkingDir: workingDir, + Webhooks: webhooksManager, + WorkingDirLocker: workingDirLocker, + AggregateApplyRequirements: applyRequirementHandler, + } + + dbUpdater := &events.DBUpdater{ + DB: boltdb, + } + + pullUpdater := &events.PullUpdater{ + HidePrevPlanComments: userConfig.HidePrevPlanComments, + VCSClient: vcsClient, + MarkdownRenderer: markdownRenderer, + } + + autoMerger := &events.AutoMerger{ + VCSClient: vcsClient, + GlobalAutomerge: userConfig.Automerge, + } + + policyCheckCommandRunner := events.NewPolicyCheckCommandRunner( + dbUpdater, + pullUpdater, + commitStatusUpdater, + projectCommandRunner, + userConfig.ParallelPoolSize, + userConfig.SilenceVCSStatusNoProjects, + ) + + planCommandRunner := events.NewPlanCommandRunner( + userConfig.SilenceVCSStatusNoPlans, + userConfig.SilenceVCSStatusNoProjects, + vcsClient, + pendingPlanFinder, + workingDir, + commitStatusUpdater, + projectCommandBuilder, + projectCommandRunner, + dbUpdater, + pullUpdater, + policyCheckCommandRunner, + autoMerger, + userConfig.ParallelPoolSize, + userConfig.SilenceNoProjects, + boltdb, + ) + + applyCommandRunner := events.NewApplyCommandRunner( + vcsClient, + userConfig.DisableApplyAll, + applyLockingClient, + commitStatusUpdater, + projectCommandBuilder, + projectCommandRunner, + autoMerger, + pullUpdater, + dbUpdater, + boltdb, + userConfig.ParallelPoolSize, + userConfig.SilenceNoProjects, + userConfig.SilenceVCSStatusNoProjects, + ) + + approvePoliciesCommandRunner := events.NewApprovePoliciesCommandRunner( + commitStatusUpdater, + projectCommandBuilder, + projectCommandRunner, + pullUpdater, + 
dbUpdater, + userConfig.SilenceNoProjects, + userConfig.SilenceVCSStatusNoPlans, + ) + + unlockCommandRunner := events.NewUnlockCommandRunner( + deleteLockCommand, + vcsClient, + userConfig.SilenceNoProjects, + ) + + versionCommandRunner := events.NewVersionCommandRunner( + pullUpdater, + projectCommandBuilder, + projectCommandRunner, + userConfig.ParallelPoolSize, + userConfig.SilenceNoProjects, + ) + + commentCommandRunnerByCmd := map[models.CommandName]events.CommentCommandRunner{ + models.PlanCommand: planCommandRunner, + models.ApplyCommand: applyCommandRunner, + models.ApprovePoliciesCommand: approvePoliciesCommandRunner, + models.UnlockCommand: unlockCommandRunner, + models.VersionCommand: versionCommandRunner, + } + + commandRunner := &events.DefaultCommandRunner{ + VCSClient: vcsClient, + GithubPullGetter: githubClient, + GitlabMergeRequestGetter: gitlabClient, + AzureDevopsPullGetter: azuredevopsClient, + CommentCommandRunnerByCmd: commentCommandRunnerByCmd, + EventParser: eventParser, + Logger: logger, + GlobalCfg: globalCfg, + AllowForkPRs: userConfig.AllowForkPRs, + AllowForkPRsFlag: config.AllowForkPRsFlag, + SilenceForkPRErrors: userConfig.SilenceForkPRErrors, + SilenceForkPRErrorsFlag: config.SilenceForkPRErrorsFlag, + DisableAutoplan: userConfig.DisableAutoplan, + Drainer: drainer, + PreWorkflowHooksCommandRunner: preWorkflowHooksCommandRunner, + PullStatusFetcher: boltdb, + } + repoAllowlist, err := events.NewRepoAllowlistChecker(userConfig.RepoAllowlist) + if err != nil { + return nil, err + } + locksController := &controllers.LocksController{ + AtlantisVersion: config.AtlantisVersion, + AtlantisURL: parsedURL, + Locker: lockingClient, + ApplyLocker: applyLockingClient, + Logger: logger, + VCSClient: vcsClient, + LockDetailTemplate: templates.LockTemplate, + WorkingDir: workingDir, + WorkingDirLocker: workingDirLocker, + DB: boltdb, + DeleteLockCommand: deleteLockCommand, + } + eventsController := &events_controllers.VCSEventsController{ + 
CommandRunner: commandRunner, + PullCleaner: pullClosedExecutor, + Parser: eventParser, + CommentParser: commentParser, + Logger: logger, + ApplyDisabled: userConfig.DisableApply, + GithubWebhookSecret: []byte(userConfig.GithubWebhookSecret), + GithubRequestValidator: &events_controllers.DefaultGithubRequestValidator{}, + GitlabRequestParserValidator: &events_controllers.DefaultGitlabRequestParserValidator{}, + GitlabWebhookSecret: []byte(userConfig.GitlabWebhookSecret), + RepoAllowlistChecker: repoAllowlist, + SilenceAllowlistErrors: userConfig.SilenceAllowlistErrors, + SupportedVCSHosts: supportedVCSHosts, + VCSClient: vcsClient, + BitbucketWebhookSecret: []byte(userConfig.BitbucketWebhookSecret), + AzureDevopsWebhookBasicUser: []byte(userConfig.AzureDevopsWebhookUser), + AzureDevopsWebhookBasicPassword: []byte(userConfig.AzureDevopsWebhookPassword), + AzureDevopsRequestValidator: &events_controllers.DefaultAzureDevopsRequestValidator{}, + } + githubAppController := &controllers.GithubAppController{ + AtlantisURL: parsedURL, + Logger: logger, + GithubSetupComplete: githubAppEnabled, + GithubHostname: userConfig.GithubHostname, + GithubOrg: userConfig.GithubOrg, + } + + return &Server{ + AtlantisVersion: config.AtlantisVersion, + AtlantisURL: parsedURL, + Router: underlyingRouter, + Port: userConfig.Port, + PreWorkflowHooksCommandRunner: preWorkflowHooksCommandRunner, + CommandRunner: commandRunner, + Logger: logger, + Locker: lockingClient, + ApplyLocker: applyLockingClient, + VCSEventsController: eventsController, + GithubAppController: githubAppController, + LocksController: locksController, + StatusController: statusController, + IndexTemplate: templates.IndexTemplate, + LockDetailTemplate: templates.LockTemplate, + SSLKeyFile: userConfig.SSLKeyFile, + SSLCertFile: userConfig.SSLCertFile, + Drainer: drainer, + }, nil } // Start creates the routes and starts serving traffic. 
func (s *Server) Start() error { - s.Router.HandleFunc("/", s.Index).Methods("GET").MatcherFunc(func(r *http.Request, rm *mux.RouteMatch) bool { - return r.URL.Path == "/" || r.URL.Path == "/index.html" - }) - s.Router.HandleFunc("/healthz", s.Healthz).Methods("GET") - s.Router.HandleFunc("/status", s.StatusController.Get).Methods("GET") - s.Router.PathPrefix("/static/").Handler(http.FileServer(&assetfs.AssetFS{Asset: static.Asset, AssetDir: static.AssetDir, AssetInfo: static.AssetInfo})) - s.Router.HandleFunc("/events", s.VCSEventsController.Post).Methods("POST") - s.Router.HandleFunc("/github-app/exchange-code", s.GithubAppController.ExchangeCode).Methods("GET") - s.Router.HandleFunc("/github-app/setup", s.GithubAppController.New).Methods("GET") - s.Router.HandleFunc("/apply/lock", s.LocksController.LockApply).Methods("POST").Queries() - s.Router.HandleFunc("/apply/unlock", s.LocksController.UnlockApply).Methods("DELETE").Queries() - s.Router.HandleFunc("/locks", s.LocksController.DeleteLock).Methods("DELETE").Queries("id", "{id:.*}") - s.Router.HandleFunc("/lock", s.LocksController.GetLock).Methods("GET"). - Queries(LockViewRouteIDQueryParam, fmt.Sprintf("{%s}", LockViewRouteIDQueryParam)).Name(LockViewRouteName) - n := negroni.New(&negroni.Recovery{ - Logger: log.New(os.Stdout, "", log.LstdFlags), - PrintStack: false, - StackAll: false, - StackSize: 1024 * 8, - }, NewRequestLogger(s.Logger)) - n.UseHandler(s.Router) - - defer s.Logger.Flush() - - // Ensure server gracefully drains connections when stopped. - stop := make(chan os.Signal, 1) - // Stop on SIGINTs and SIGTERMs. 
- signal.Notify(stop, os.Interrupt, syscall.SIGTERM) - - server := &http.Server{Addr: fmt.Sprintf(":%d", s.Port), Handler: n} - go func() { - s.Logger.Info("Atlantis started - listening on port %v", s.Port) - - var err error - if s.SSLCertFile != "" && s.SSLKeyFile != "" { - err = server.ListenAndServeTLS(s.SSLCertFile, s.SSLKeyFile) - } else { - err = server.ListenAndServe() - } - - if err != nil && err != http.ErrServerClosed { - s.Logger.Err(err.Error()) - } - }() - <-stop - - s.Logger.Warn("Received interrupt. Waiting for in-progress operations to complete") - s.waitForDrain() - ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) // nolint: vet - if err := server.Shutdown(ctx); err != nil { - return cli.NewExitError(fmt.Sprintf("while shutting down: %s", err), 1) - } - return nil + s.Router.HandleFunc("/", s.Index).Methods("GET").MatcherFunc(func(r *http.Request, rm *mux.RouteMatch) bool { + return r.URL.Path == "/" || r.URL.Path == "/index.html" + }) + s.Router.HandleFunc("/healthz", s.Healthz).Methods("GET") + s.Router.HandleFunc("/status", s.StatusController.Get).Methods("GET") + s.Router.PathPrefix("/static/").Handler(http.FileServer(&assetfs.AssetFS{Asset: static.Asset, AssetDir: static.AssetDir, AssetInfo: static.AssetInfo})) + s.Router.HandleFunc("/events", s.VCSEventsController.Post).Methods("POST") + s.Router.HandleFunc("/github-app/exchange-code", s.GithubAppController.ExchangeCode).Methods("GET") + s.Router.HandleFunc("/github-app/setup", s.GithubAppController.New).Methods("GET") + s.Router.HandleFunc("/apply/lock", s.LocksController.LockApply).Methods("POST").Queries() + s.Router.HandleFunc("/apply/unlock", s.LocksController.UnlockApply).Methods("DELETE").Queries() + s.Router.HandleFunc("/locks", s.LocksController.DeleteLock).Methods("DELETE").Queries("id", "{id:.*}") + s.Router.HandleFunc("/lock", s.LocksController.GetLock).Methods("GET"). 
+ Queries(LockViewRouteIDQueryParam, fmt.Sprintf("{%s}", LockViewRouteIDQueryParam)).Name(LockViewRouteName) + n := negroni.New(&negroni.Recovery{ + Logger: log.New(os.Stdout, "", log.LstdFlags), + PrintStack: false, + StackAll: false, + StackSize: 1024 * 8, + }, NewRequestLogger(s.Logger)) + n.UseHandler(s.Router) + + defer s.Logger.Flush() + + // Ensure server gracefully drains connections when stopped. + stop := make(chan os.Signal, 1) + // Stop on SIGINTs and SIGTERMs. + signal.Notify(stop, os.Interrupt, syscall.SIGTERM) + + server := &http.Server{Addr: fmt.Sprintf(":%d", s.Port), Handler: n} + go func() { + s.Logger.Info("Atlantis started - listening on port %v", s.Port) + + var err error + if s.SSLCertFile != "" && s.SSLKeyFile != "" { + err = server.ListenAndServeTLS(s.SSLCertFile, s.SSLKeyFile) + } else { + err = server.ListenAndServe() + } + + if err != nil && err != http.ErrServerClosed { + s.Logger.Err(err.Error()) + } + }() + <-stop + + s.Logger.Warn("Received interrupt. Waiting for in-progress operations to complete") + s.waitForDrain() + ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) // nolint: vet + if err := server.Shutdown(ctx); err != nil { + return cli.NewExitError(fmt.Sprintf("while shutting down: %s", err), 1) + } + return nil } // waitForDrain blocks until draining is complete. 
func (s *Server) waitForDrain() { - drainComplete := make(chan bool, 1) - go func() { - s.Drainer.ShutdownBlocking() - drainComplete <- true - }() - ticker := time.NewTicker(5 * time.Second) - for { - select { - case <-drainComplete: - s.Logger.Info("All in-progress operations complete, shutting down") - return - case <-ticker.C: - s.Logger.Info("Waiting for in-progress operations to complete, current in-progress ops: %d", s.Drainer.GetStatus().InProgressOps) - } - } + drainComplete := make(chan bool, 1) + go func() { + s.Drainer.ShutdownBlocking() + drainComplete <- true + }() + ticker := time.NewTicker(5 * time.Second) + for { + select { + case <-drainComplete: + s.Logger.Info("All in-progress operations complete, shutting down") + return + case <-ticker.C: + s.Logger.Info("Waiting for in-progress operations to complete, current in-progress ops: %d", s.Drainer.GetStatus().InProgressOps) + } + } } // Index is the / route. func (s *Server) Index(w http.ResponseWriter, _ *http.Request) { - locks, err := s.Locker.List() - if err != nil { - w.WriteHeader(http.StatusServiceUnavailable) - fmt.Fprintf(w, "Could not retrieve locks: %s", err) - return - } - - var lockResults []templates.LockIndexData - for id, v := range locks { - lockURL, _ := s.Router.Get(LockViewRouteName).URL("id", url.QueryEscape(id)) - lockResults = append(lockResults, templates.LockIndexData{ - // NOTE: must use .String() instead of .Path because we need the - // query params as part of the lock URL. 
- LockPath: lockURL.String(), - RepoFullName: v.Project.RepoFullName, - PullNum: v.Pull.Num, - Path: v.Project.Path, - Workspace: v.Workspace, - Time: v.Time, - TimeFormatted: v.Time.Format("02-01-2006 15:04:05"), - }) - } - - applyCmdLock, err := s.ApplyLocker.CheckApplyLock() - s.Logger.Info("Apply Lock: %v", applyCmdLock) - if err != nil { - w.WriteHeader(http.StatusServiceUnavailable) - fmt.Fprintf(w, "Could not retrieve global apply lock: %s", err) - return - } - - applyLockData := templates.ApplyLockData{ - Time: applyCmdLock.Time, - Locked: applyCmdLock.Locked, - TimeFormatted: applyCmdLock.Time.Format("02-01-2006 15:04:05"), - } - //Sort by date - newest to oldest. - sort.SliceStable(lockResults, func(i, j int) bool { return lockResults[i].Time.After(lockResults[j].Time) }) - - err = s.IndexTemplate.Execute(w, templates.IndexData{ - Locks: lockResults, - ApplyLock: applyLockData, - AtlantisVersion: s.AtlantisVersion, - CleanedBasePath: s.AtlantisURL.Path, - }) - if err != nil { - s.Logger.Err(err.Error()) - } + locks, err := s.Locker.List() + if err != nil { + w.WriteHeader(http.StatusServiceUnavailable) + fmt.Fprintf(w, "Could not retrieve locks: %s", err) + return + } + + var lockResults []templates.LockIndexData + for id, v := range locks { + lockURL, _ := s.Router.Get(LockViewRouteName).URL("id", url.QueryEscape(id)) + lockResults = append(lockResults, templates.LockIndexData{ + // NOTE: must use .String() instead of .Path because we need the + // query params as part of the lock URL. 
+ LockPath: lockURL.String(), + RepoFullName: v.Project.RepoFullName, + PullNum: v.Pull.Num, + Path: v.Project.Path, + Workspace: v.Workspace, + Time: v.Time, + TimeFormatted: v.Time.Format("02-01-2006 15:04:05"), + }) + } + + applyCmdLock, err := s.ApplyLocker.CheckApplyLock() + s.Logger.Info("Apply Lock: %v", applyCmdLock) + if err != nil { + w.WriteHeader(http.StatusServiceUnavailable) + fmt.Fprintf(w, "Could not retrieve global apply lock: %s", err) + return + } + + applyLockData := templates.ApplyLockData{ + Time: applyCmdLock.Time, + Locked: applyCmdLock.Locked, + TimeFormatted: applyCmdLock.Time.Format("02-01-2006 15:04:05"), + } + //Sort by date - newest to oldest. + sort.SliceStable(lockResults, func(i, j int) bool { return lockResults[i].Time.After(lockResults[j].Time) }) + + err = s.IndexTemplate.Execute(w, templates.IndexData{ + Locks: lockResults, + ApplyLock: applyLockData, + AtlantisVersion: s.AtlantisVersion, + CleanedBasePath: s.AtlantisURL.Path, + }) + if err != nil { + s.Logger.Err(err.Error()) + } } func mkSubDir(parentDir string, subDir string) (string, error) { - fullDir := filepath.Join(parentDir, subDir) - if err := os.MkdirAll(fullDir, 0700); err != nil { - return "", errors.Wrapf(err, "unable to create dir %q", fullDir) - } + fullDir := filepath.Join(parentDir, subDir) + if err := os.MkdirAll(fullDir, 0700); err != nil { + return "", errors.Wrapf(err, "unable to create dir %q", fullDir) + } - return fullDir, nil + return fullDir, nil } // Healthz returns the health check response. It always returns a 200 currently. 
func (s *Server) Healthz(w http.ResponseWriter, _ *http.Request) { - data, err := json.MarshalIndent(&struct { - Status string `json:"status"` - }{ - Status: "ok", - }, "", " ") - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintf(w, "Error creating status json response: %s", err) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(data) // nolint: errcheck + data, err := json.MarshalIndent(&struct { + Status string `json:"status"` + }{ + Status: "ok", + }, "", " ") + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "Error creating status json response: %s", err) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(data) // nolint: errcheck } // ParseAtlantisURL parses the user-passed atlantis URL to ensure it is valid @@ -840,15 +840,15 @@ func (s *Server) Healthz(w http.ResponseWriter, _ *http.Request) { // It removes any trailing slashes from the path so we can concatenate it // with other paths without checking. func ParseAtlantisURL(u string) (*url.URL, error) { - parsed, err := url.Parse(u) - if err != nil { - return nil, err - } - if !(parsed.Scheme == "http" || parsed.Scheme == "https") { - return nil, errors.New("http or https must be specified") - } - // We want the path to end without a trailing slash so we know how to - // use it in the rest of the program. - parsed.Path = strings.TrimSuffix(parsed.Path, "/") - return parsed, nil + parsed, err := url.Parse(u) + if err != nil { + return nil, err + } + if !(parsed.Scheme == "http" || parsed.Scheme == "https") { + return nil, errors.New("http or https must be specified") + } + // We want the path to end without a trailing slash so we know how to + // use it in the rest of the program. 
+ parsed.Path = strings.TrimSuffix(parsed.Path, "/") + return parsed, nil } From e491db13b76297d4b8be1c5c8ed63136f035a0e8 Mon Sep 17 00:00:00 2001 From: atlantisbot Date: Fri, 15 Oct 2021 14:56:47 +0000 Subject: [PATCH 5/9] fixes to cmd and update to approach --- cmd/server.go | 9 +++++++++ server/server.go | 7 +------ server/user_config.go | 2 +- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/cmd/server.go b/cmd/server.go index cd869dbd0f..0699d5b825 100644 --- a/cmd/server.go +++ b/cmd/server.go @@ -41,6 +41,7 @@ const ( ADWebhookUserFlag = "azuredevops-webhook-user" ADTokenFlag = "azuredevops-token" // nolint: gosec ADUserFlag = "azuredevops-user" + ADHostnameFlag = "azuredevops-hostname" AllowForkPRsFlag = "allow-fork-prs" AllowRepoConfigFlag = "allow-repo-config" AtlantisURLFlag = "atlantis-url" @@ -106,6 +107,7 @@ const ( // NOTE: Must manually set these as defaults in the setDefaults function. DefaultADBasicUser = "" DefaultADBasicPassword = "" + DefaultADHostname = "dev.azure.com" DefaultAutoplanFileList = "**/*.tf,**/*.tfvars,**/*.tfvars.json,**/terragrunt.hcl" DefaultCheckoutStrategy = "branch" DefaultBitbucketBaseURL = bitbucketcloud.BaseURL @@ -139,6 +141,10 @@ var stringFlags = map[string]stringFlag{ description: "Azure DevOps basic HTTP authentication username for inbound webhooks.", defaultValue: "", }, + ADHostnameFlag: { + description: "Azure DevOps hostname to support cloud and self hosted instances.", + defaultValue: "dev.azure.com", + }, AtlantisURLFlag: { description: "URL that Atlantis can be reached at. Defaults to http://$(hostname):$port where $port is from --" + PortFlag + ". Supports a base path ex. 
https://example.com/basepath.", }, @@ -589,6 +595,9 @@ func (s *ServerCmd) run() error { } func (s *ServerCmd) setDefaults(c *server.UserConfig) { + if c.AzureDevOpsHostname == "" { + c.AzureDevOpsHostname = DefaultADHostname + } if c.AutoplanFileList == "" { c.AutoplanFileList = DefaultAutoplanFileList } diff --git a/server/server.go b/server/server.go index 4149f8bceb..9afab1c7f4 100644 --- a/server/server.go +++ b/server/server.go @@ -217,13 +217,8 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { if userConfig.AzureDevopsUser != "" { supportedVCSHosts = append(supportedVCSHosts, models.AzureDevops) - azureDevOpsHost := userConfig.AzureDevOpsHost - if userConfig.AzureDevOpsHost == "" { - azureDevOpsHost = "dev.azure.com" - } - var err error - azuredevopsClient, err = vcs.NewAzureDevopsClient(azureDevOpsHost, userConfig.AzureDevopsUser, userConfig.AzureDevopsToken) + azuredevopsClient, err = vcs.NewAzureDevopsClient(userConfig.AzureDevOpsHostname, userConfig.AzureDevopsUser, userConfig.AzureDevopsToken) if err != nil { return nil, err } diff --git a/server/user_config.go b/server/user_config.go index 22c56f34c6..297b692886 100644 --- a/server/user_config.go +++ b/server/user_config.go @@ -17,7 +17,7 @@ type UserConfig struct { AzureDevopsUser string `mapstructure:"azuredevops-user"` AzureDevopsWebhookPassword string `mapstructure:"azuredevops-webhook-password"` AzureDevopsWebhookUser string `mapstructure:"azuredevops-webhook-user"` - AzureDevOpsHost string `mapstructure:"azuredevops-host"` + AzureDevOpsHostname string `mapstructure:"azuredevops-hostname"` BitbucketBaseURL string `mapstructure:"bitbucket-base-url"` BitbucketToken string `mapstructure:"bitbucket-token"` BitbucketUser string `mapstructure:"bitbucket-user"` From 0db316e0161c44579867550cb3c0e90c87bd901e Mon Sep 17 00:00:00 2001 From: atlantisbot Date: Mon, 18 Oct 2021 11:20:55 +0000 Subject: [PATCH 6/9] add specific self host fixtures --- server/events/vcs/fixtures/fixtures.go 
| 168 +++++++++++++++++++++++++ 1 file changed, 168 insertions(+) diff --git a/server/events/vcs/fixtures/fixtures.go b/server/events/vcs/fixtures/fixtures.go index 75330cbe0c..8d69e4dbcd 100644 --- a/server/events/vcs/fixtures/fixtures.go +++ b/server/events/vcs/fixtures/fixtures.go @@ -227,6 +227,174 @@ var ADPullJSON = `{ "artifactId": "vstfs:///Git/PullRequestId/a7573007-bbb3-4341-b726-0c4148a07853%2f3411ebc1-d5aa-464f-9615-0b527bc66719%2f22" }` + +var ADSelfPullEvent = azuredevops.Event{ + EventType: "git.pullrequest.created", + Resource: &ADSelfPull, +} + +var ADSelfPullUpdatedEvent = azuredevops.Event{ + EventType: "git.pullrequest.updated", + Resource: &ADSelfPull, +} + +var ADSelfPullClosedEvent = azuredevops.Event{ + EventType: "git.pullrequest.merged", + Resource: &ADSelfPullCompleted, +} + +var ADSelfPull = azuredevops.GitPullRequest{ + CreatedBy: &azuredevops.IdentityRef{ + ID: azuredevops.String("d6245f20-2af8-44f4-9451-8107cb2767db"), + DisplayName: azuredevops.String("User"), + UniqueName: azuredevops.String("user@example.com"), + }, + LastMergeSourceCommit: &azuredevops.GitCommitRef{ + CommitID: azuredevops.String("b60280bc6e62e2f880f1b63c1e24987664d3bda3"), + URL: azuredevops.String("https://devops.abc.com/owner/project/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719/commits/b60280bc6e62e2f880f1b63c1e24987664d3bda3"), + }, + PullRequestID: azuredevops.Int(1), + Repository: &ADSelfRepo, + SourceRefName: azuredevops.String("refs/heads/feature/sourceBranch"), + Status: azuredevops.String("active"), + TargetRefName: azuredevops.String("refs/heads/targetBranch"), + URL: azuredevops.String("https://devops.abc.com/owner/project/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719/pullRequests/21"), +} + +var ADSelfPullCompleted = azuredevops.GitPullRequest{ + CreatedBy: &azuredevops.IdentityRef{ + ID: azuredevops.String("d6245f20-2af8-44f4-9451-8107cb2767db"), + DisplayName: azuredevops.String("User"), + UniqueName: 
azuredevops.String("user@example.com"), + }, + LastMergeSourceCommit: &azuredevops.GitCommitRef{ + CommitID: azuredevops.String("b60280bc6e62e2f880f1b63c1e24987664d3bda3"), + URL: azuredevops.String("https://devops.abc.com/owner/project/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719/commits/b60280bc6e62e2f880f1b63c1e24987664d3bda3"), + }, + PullRequestID: azuredevops.Int(1), + Repository: &ADSelfRepo, + SourceRefName: azuredevops.String("refs/heads/owner/sourceBranch"), + Status: azuredevops.String("completed"), + TargetRefName: azuredevops.String("refs/heads/targetBranch"), + URL: azuredevops.String("https://devops.abc.com/owner/project/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719/pullRequests/21"), +} + +var ADSelfRepo = azuredevops.GitRepository{ + DefaultBranch: azuredevops.String("refs/heads/master"), + Name: azuredevops.String("repo"), + ParentRepository: &azuredevops.GitRepositoryRef{ + Name: azuredevops.String("owner"), + }, + Project: &azuredevops.TeamProjectReference{ + ID: azuredevops.String("a21f5f20-4a12-aaf4-ab12-9a0927cbbb90"), + Name: azuredevops.String("project"), + State: azuredevops.String("unchanged"), + }, + WebURL: azuredevops.String("https://devops.abc.com/owner/project/_git/repo"), +} + +var ADSelfPullJSON = `{ + "repository": { + "id": "3411ebc1-d5aa-464f-9615-0b527bc66719", + "name": "repo", + "url": "https://devops.abc.com/owner/project/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719", + "webUrl": "https://devops.abc.com/owner/project/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719", + "project": { + "id": "a7573007-bbb3-4341-b726-0c4148a07853", + "name": "project", + "description": "test project created on Halloween 2016", + "url": "https://dev.azure.com/owner/_apis/projects/a7573007-bbb3-4341-b726-0c4148a07853", + "state": "wellFormed", + "revision": 7 + }, + "remoteUrl": "https://devops.abc.com/owner/project/_git/repo" + }, + "pullRequestId": 22, + "codeReviewId": 22, + 
"status": "active", + "createdBy": { + "id": "d6245f20-2af8-44f4-9451-8107cb2767db", + "displayName": "Normal Paulk", + "uniqueName": "fabrikamfiber16@hotmail.com", + "url": "https://dev.azure.com/owner/_apis/Identities/d6245f20-2af8-44f4-9451-8107cb2767db", + "imageUrl": "https://dev.azure.com/owner/_api/_common/identityImage?id=d6245f20-2af8-44f4-9451-8107cb2767db" + }, + "creationDate": "2016-11-01T16:30:31.6655471Z", + "title": "A new feature", + "description": "Adding a new feature", + "sourceRefName": "refs/heads/npaulk/my_work", + "targetRefName": "refs/heads/new_feature", + "mergeStatus": "succeeded", + "mergeId": "f5fc8381-3fb2-49fe-8a0d-27dcc2d6ef82", + "lastMergeSourceCommit": { + "commitId": "b60280bc6e62e2f880f1b63c1e24987664d3bda3", + "url": "https://dev.azure.com/owner/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719/commits/b60280bc6e62e2f880f1b63c1e24987664d3bda3" + }, + "lastMergeTargetCommit": { + "commitId": "f47bbc106853afe3c1b07a81754bce5f4b8dbf62", + "url": "https://dev.azure.com/owner/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719/commits/f47bbc106853afe3c1b07a81754bce5f4b8dbf62" + }, + "lastMergeCommit": { + "commitId": "39f52d24533cc712fc845ed9fd1b6c06b3942588", + "author": { + "name": "Normal Paulk", + "email": "fabrikamfiber16@hotmail.com", + "date": "2016-11-01T16:30:32Z" + }, + "committer": { + "name": "Normal Paulk", + "email": "fabrikamfiber16@hotmail.com", + "date": "2016-11-01T16:30:32Z" + }, + "comment": "Merge pull request 22 from npaulk/my_work into new_feature", + "url": "https://dev.azure.com/owner/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719/commits/39f52d24533cc712fc845ed9fd1b6c06b3942588" + }, + "reviewers": [ + { + "reviewerUrl": "https://dev.azure.com/owner/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719/pullRequests/22/reviewers/d6245f20-2af8-44f4-9451-8107cb2767db", + "vote": 0, + "id": "d6245f20-2af8-44f4-9451-8107cb2767db", + "displayName": "Normal Paulk", + 
"uniqueName": "fabrikamfiber16@hotmail.com", + "url": "https://dev.azure.com/owner/_apis/Identities/d6245f20-2af8-44f4-9451-8107cb2767db", + "imageUrl": "https://dev.azure.com/owner/_api/_common/identityImage?id=d6245f20-2af8-44f4-9451-8107cb2767db" + } + ], + "url": "https://dev.azure.com/owner/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719/pullRequests/22", + "_links": { + "self": { + "href": "https://dev.azure.com/owner/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719/pullRequests/22" + }, + "repository": { + "href": "https://dev.azure.com/owner/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719" + }, + "workItems": { + "href": "https://dev.azure.com/owner/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719/pullRequests/22/workitems" + }, + "sourceBranch": { + "href": "https://dev.azure.com/owner/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719/refs" + }, + "targetBranch": { + "href": "https://dev.azure.com/owner/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719/refs" + }, + "sourceCommit": { + "href": "https://dev.azure.com/owner/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719/commits/b60280bc6e62e2f880f1b63c1e24987664d3bda3" + }, + "targetCommit": { + "href": "https://dev.azure.com/owner/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719/commits/f47bbc106853afe3c1b07a81754bce5f4b8dbf62" + }, + "createdBy": { + "href": "https://dev.azure.com/owner/_apis/Identities/d6245f20-2af8-44f4-9451-8107cb2767db" + }, + "iterations": { + "href": "https://dev.azure.com/owner/_apis/git/repositories/3411ebc1-d5aa-464f-9615-0b527bc66719/pullRequests/22/iterations" + } + }, + "supportsIterations": true, + "artifactId": "vstfs:///Git/PullRequestId/a7573007-bbb3-4341-b726-0c4148a07853%2f3411ebc1-d5aa-464f-9615-0b527bc66719%2f22" +}` + + const GithubPrivateKey = `-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAuEPzOUE+kiEH1WLiMeBytTEF856j0hOVcSUSUkZxKvqczkWM 
9vo1gDyC7ZXhdH9fKh32aapba3RSsp4ke+giSmYTk2mGR538ShSDxh0OgpJmjiKP From afffa4ca8cdbb0811bc62704af6165c18aad0326 Mon Sep 17 00:00:00 2001 From: atlantisbot Date: Mon, 18 Oct 2021 11:21:11 +0000 Subject: [PATCH 7/9] add specific self host parser tests --- server/events/event_parser_test.go | 184 +++++++++++++++++++++++++++++ 1 file changed, 184 insertions(+) diff --git a/server/events/event_parser_test.go b/server/events/event_parser_test.go index c98fcd8ab5..d05cf92f6a 100644 --- a/server/events/event_parser_test.go +++ b/server/events/event_parser_test.go @@ -1214,6 +1214,7 @@ func TestParseAzureDevopsPullEvent(t *testing.T) { Equals(t, models.User{Username: "user@example.com"}, actUser) } + func TestParseAzureDevopsPullEvent_EventType(t *testing.T) { cases := []struct { action string @@ -1313,3 +1314,186 @@ func TestParseAzureDevopsPull(t *testing.T) { Equals(t, expBaseRepo, actBaseRepo) Equals(t, expBaseRepo, actHeadRepo) } + + + +func TestParseAzureDevopsSelfHostedRepo(t *testing.T) { + // this should be successful + repo := ADSelfRepo + repo.ParentRepository = nil + r, err := parser.ParseAzureDevopsRepo(&repo) + Ok(t, err) + Equals(t, models.Repo{ + Owner: "owner/project", + FullName: "owner/project/repo", + CloneURL: "https://azuredevops-user:azuredevops-token@devops.abc.com/owner/project/_git/repo", + SanitizedCloneURL: "https://azuredevops-user:@devops.abc.com/owner/project/_git/repo", + Name: "repo", + VCSHost: models.VCSHost{ + Hostname: "devops.abc.com", + Type: models.AzureDevops, + }, + }, r) + +} + + + +func TestParseAzureDevopsSelfHostedPullEvent(t *testing.T) { + _, _, _, _, _, err := parser.ParseAzureDevopsPullEvent(ADSelfPullEvent) + Ok(t, err) + + testPull := deepcopy.Copy(ADSelfPull).(azuredevops.GitPullRequest) + testPull.LastMergeSourceCommit.CommitID = nil + _, _, _, err = parser.ParseAzureDevopsPull(&testPull) + ErrEquals(t, "lastMergeSourceCommit.commitID is null", err) + + testPull = deepcopy.Copy(ADSelfPull).(azuredevops.GitPullRequest) + 
testPull.URL = nil + _, _, _, err = parser.ParseAzureDevopsPull(&testPull) + ErrEquals(t, "url is null", err) + testEvent := deepcopy.Copy(ADSelfPullEvent).(azuredevops.Event) + resource := deepcopy.Copy(testEvent.Resource).(*azuredevops.GitPullRequest) + resource.CreatedBy = nil + testEvent.Resource = resource + _, _, _, _, _, err = parser.ParseAzureDevopsPullEvent(testEvent) + ErrEquals(t, "CreatedBy is null", err) + + testEvent = deepcopy.Copy(ADSelfPullEvent).(azuredevops.Event) + resource = deepcopy.Copy(testEvent.Resource).(*azuredevops.GitPullRequest) + resource.CreatedBy.UniqueName = azuredevops.String("") + testEvent.Resource = resource + _, _, _, _, _, err = parser.ParseAzureDevopsPullEvent(testEvent) + ErrEquals(t, "CreatedBy.UniqueName is null", err) + + actPull, evType, actBaseRepo, actHeadRepo, actUser, err := parser.ParseAzureDevopsPullEvent(ADSelfPullEvent) + Ok(t, err) + expBaseRepo := models.Repo{ + Owner: "owner/project", + FullName: "owner/project/repo", + CloneURL: "https://azuredevops-user:azuredevops-token@devops.abc.com/owner/project/_git/repo", + SanitizedCloneURL: "https://azuredevops-user:@devops.abc.com/owner/project/_git/repo", + Name: "repo", + VCSHost: models.VCSHost{ + Hostname: "devops.abc.com", + Type: models.AzureDevops, + }, + } + Equals(t, expBaseRepo, actBaseRepo) + Equals(t, expBaseRepo, actHeadRepo) + Equals(t, models.PullRequest{ + URL: ADSelfPull.GetURL(), + Author: ADSelfPull.CreatedBy.GetUniqueName(), + HeadBranch: "feature/sourceBranch", + BaseBranch: "targetBranch", + HeadCommit: ADSelfPull.LastMergeSourceCommit.GetCommitID(), + Num: ADSelfPull.GetPullRequestID(), + State: models.OpenPullState, + BaseRepo: expBaseRepo, + }, actPull) + Equals(t, models.OpenedPullEvent, evType) + Equals(t, models.User{Username: "user@example.com"}, actUser) +} + + + + +func TestParseAzureDevopsSelfHostedPullEvent_EventType(t *testing.T) { + cases := []struct { + action string + exp models.PullRequestEventType + }{ + { + action: 
"git.pullrequest.updated", + exp: models.UpdatedPullEvent, + }, + { + action: "git.pullrequest.created", + exp: models.OpenedPullEvent, + }, + { + action: "git.pullrequest.updated", + exp: models.ClosedPullEvent, + }, + { + action: "anything_else", + exp: models.OtherPullEvent, + }, + } + + for _, c := range cases { + t.Run(c.action, func(t *testing.T) { + event := deepcopy.Copy(ADSelfPullEvent).(azuredevops.Event) + if c.exp == models.ClosedPullEvent { + event = deepcopy.Copy(ADSelfPullClosedEvent).(azuredevops.Event) + } + event.EventType = c.action + _, actType, _, _, _, err := parser.ParseAzureDevopsPullEvent(event) + Ok(t, err) + Equals(t, c.exp, actType) + }) + } +} + +func TestParseAzureSelfHostedDevopsPull(t *testing.T) { + testPull := deepcopy.Copy(ADSelfPull).(azuredevops.GitPullRequest) + testPull.LastMergeSourceCommit.CommitID = nil + _, _, _, err := parser.ParseAzureDevopsPull(&testPull) + ErrEquals(t, "lastMergeSourceCommit.commitID is null", err) + + testPull = deepcopy.Copy(ADSelfPull).(azuredevops.GitPullRequest) + testPull.URL = nil + _, _, _, err = parser.ParseAzureDevopsPull(&testPull) + ErrEquals(t, "url is null", err) + + testPull = deepcopy.Copy(ADSelfPull).(azuredevops.GitPullRequest) + testPull.SourceRefName = nil + _, _, _, err = parser.ParseAzureDevopsPull(&testPull) + ErrEquals(t, "sourceRefName (branch name) is null", err) + + testPull = deepcopy.Copy(ADSelfPull).(azuredevops.GitPullRequest) + testPull.TargetRefName = nil + _, _, _, err = parser.ParseAzureDevopsPull(&testPull) + ErrEquals(t, "targetRefName (branch name) is null", err) + + testPull = deepcopy.Copy(ADSelfPull).(azuredevops.GitPullRequest) + testPull.CreatedBy = nil + _, _, _, err = parser.ParseAzureDevopsPull(&testPull) + ErrEquals(t, "CreatedBy is null", err) + + testPull = deepcopy.Copy(ADSelfPull).(azuredevops.GitPullRequest) + testPull.CreatedBy.UniqueName = nil + _, _, _, err = parser.ParseAzureDevopsPull(&testPull) + ErrEquals(t, "CreatedBy.UniqueName is null", err) 
+ + testPull = deepcopy.Copy(ADSelfPull).(azuredevops.GitPullRequest) + testPull.PullRequestID = nil + _, _, _, err = parser.ParseAzureDevopsPull(&testPull) + ErrEquals(t, "pullRequestId is null", err) + + actPull, actBaseRepo, actHeadRepo, err := parser.ParseAzureDevopsPull(&ADSelfPull) + Ok(t, err) + expBaseRepo := models.Repo{ + Owner: "owner/project", + FullName: "owner/project/repo", + CloneURL: "https://azuredevops-user:azuredevops-token@devops.abc.com/owner/project/_git/repo", + SanitizedCloneURL: "https://azuredevops-user:@devops.abc.com/owner/project/_git/repo", + Name: "repo", + VCSHost: models.VCSHost{ + Hostname: "devops.abc.com", + Type: models.AzureDevops, + }, + } + Equals(t, models.PullRequest{ + URL: ADSelfPull.GetURL(), + Author: ADSelfPull.CreatedBy.GetUniqueName(), + HeadBranch: "feature/sourceBranch", + BaseBranch: "targetBranch", + HeadCommit: ADSelfPull.LastMergeSourceCommit.GetCommitID(), + Num: ADSelfPull.GetPullRequestID(), + State: models.OpenPullState, + BaseRepo: expBaseRepo, + }, actPull) + Equals(t, expBaseRepo, actBaseRepo) + Equals(t, expBaseRepo, actHeadRepo) +} From 0dd4237eb24baa53b58e45f741c21ffbaf6c950f Mon Sep 17 00:00:00 2001 From: atlantisbot Date: Mon, 18 Oct 2021 11:21:28 +0000 Subject: [PATCH 8/9] update code to azure self hosted devops --- server/events/event_parser.go | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/server/events/event_parser.go b/server/events/event_parser.go index d427a4202a..a90137082a 100644 --- a/server/events/event_parser.go +++ b/server/events/event_parser.go @@ -786,6 +786,7 @@ func (e *EventParser) ParseAzureDevopsPull(pull *azuredevops.GitPullRequest) (pu err = errors.New("url is null") return } + headBranch := pull.GetSourceRefName() if headBranch == "" { err = errors.New("sourceRefName (branch name) is null") @@ -851,19 +852,23 @@ func (e *EventParser) ParseAzureDevopsRepo(adRepo *azuredevops.GitRepository) (m teamProject := adRepo.GetProject() 
parent := adRepo.GetParentRepository() owner := "" + + + uri, err := url.Parse(adRepo.GetWebURL()) + if err != nil { + return models.Repo{}, err + } + if parent != nil { owner = parent.GetName() } else { - uri, err := url.Parse(adRepo.GetWebURL()) - if err != nil { - return models.Repo{}, err - } + if strings.Contains(uri.Host, "visualstudio.com") { owner = strings.Split(uri.Host, ".")[0] } else if strings.Contains(uri.Host, "dev.azure.com") { owner = strings.Split(uri.Path, "/")[1] } else { - owner = "" + owner = strings.Split(uri.Path, "/")[1] // to support owner for self hosted } } @@ -872,7 +877,14 @@ func (e *EventParser) ParseAzureDevopsRepo(adRepo *azuredevops.GitRepository) (m // https://docs.microsoft.com/en-us/azure/devops/release-notes/2018/sep-10-azure-devops-launch#switch-existing-organizations-to-use-the-new-domain-name-url project := teamProject.GetName() repo := adRepo.GetName() - cloneURL := fmt.Sprintf("https://dev.azure.com/%s/%s/_git/%s", owner, project, repo) + + host := uri.Host + if host== "" { + host="dev.azure.com" + } + + cloneURL := fmt.Sprintf("https://%s/%s/%s/_git/%s",host, owner, project, repo) + fmt.Println("%", cloneURL ) fullName := fmt.Sprintf("%s/%s/%s", owner, project, repo) return models.NewRepo(models.AzureDevops, fullName, cloneURL, e.AzureDevopsUser, e.AzureDevopsToken) } From 1a48f59fbd058aef2e48bb4b491f4be390df8bfb Mon Sep 17 00:00:00 2001 From: atlantisbot Date: Mon, 18 Oct 2021 11:27:52 +0000 Subject: [PATCH 9/9] go fmt --- server/events/event_parser.go | 11 +++++------ server/events/event_parser_test.go | 8 -------- server/events/vcs/fixtures/fixtures.go | 2 -- 3 files changed, 5 insertions(+), 16 deletions(-) diff --git a/server/events/event_parser.go b/server/events/event_parser.go index a90137082a..380ec3cd3f 100644 --- a/server/events/event_parser.go +++ b/server/events/event_parser.go @@ -786,7 +786,7 @@ func (e *EventParser) ParseAzureDevopsPull(pull *azuredevops.GitPullRequest) (pu err = errors.New("url is null") 
return } - + headBranch := pull.GetSourceRefName() if headBranch == "" { err = errors.New("sourceRefName (branch name) is null") @@ -853,7 +853,6 @@ func (e *EventParser) ParseAzureDevopsRepo(adRepo *azuredevops.GitRepository) (m parent := adRepo.GetParentRepository() owner := "" - uri, err := url.Parse(adRepo.GetWebURL()) if err != nil { return models.Repo{}, err @@ -879,12 +878,12 @@ func (e *EventParser) ParseAzureDevopsRepo(adRepo *azuredevops.GitRepository) (m repo := adRepo.GetName() host := uri.Host - if host== "" { - host="dev.azure.com" + if host == "" { + host = "dev.azure.com" } - cloneURL := fmt.Sprintf("https://%s/%s/%s/_git/%s",host, owner, project, repo) - fmt.Println("%", cloneURL ) + cloneURL := fmt.Sprintf("https://%s/%s/%s/_git/%s", host, owner, project, repo) + fmt.Println("%", cloneURL) fullName := fmt.Sprintf("%s/%s/%s", owner, project, repo) return models.NewRepo(models.AzureDevops, fullName, cloneURL, e.AzureDevopsUser, e.AzureDevopsToken) } diff --git a/server/events/event_parser_test.go b/server/events/event_parser_test.go index d05cf92f6a..47a93e5bac 100644 --- a/server/events/event_parser_test.go +++ b/server/events/event_parser_test.go @@ -1214,7 +1214,6 @@ func TestParseAzureDevopsPullEvent(t *testing.T) { Equals(t, models.User{Username: "user@example.com"}, actUser) } - func TestParseAzureDevopsPullEvent_EventType(t *testing.T) { cases := []struct { action string @@ -1315,8 +1314,6 @@ func TestParseAzureDevopsPull(t *testing.T) { Equals(t, expBaseRepo, actHeadRepo) } - - func TestParseAzureDevopsSelfHostedRepo(t *testing.T) { // this should be successful repo := ADSelfRepo @@ -1337,8 +1334,6 @@ func TestParseAzureDevopsSelfHostedRepo(t *testing.T) { } - - func TestParseAzureDevopsSelfHostedPullEvent(t *testing.T) { _, _, _, _, _, err := parser.ParseAzureDevopsPullEvent(ADSelfPullEvent) Ok(t, err) @@ -1395,9 +1390,6 @@ func TestParseAzureDevopsSelfHostedPullEvent(t *testing.T) { Equals(t, models.User{Username: "user@example.com"}, 
actUser) } - - - func TestParseAzureDevopsSelfHostedPullEvent_EventType(t *testing.T) { cases := []struct { action string diff --git a/server/events/vcs/fixtures/fixtures.go b/server/events/vcs/fixtures/fixtures.go index 8d69e4dbcd..5d02277f06 100644 --- a/server/events/vcs/fixtures/fixtures.go +++ b/server/events/vcs/fixtures/fixtures.go @@ -227,7 +227,6 @@ var ADPullJSON = `{ "artifactId": "vstfs:///Git/PullRequestId/a7573007-bbb3-4341-b726-0c4148a07853%2f3411ebc1-d5aa-464f-9615-0b527bc66719%2f22" }` - var ADSelfPullEvent = azuredevops.Event{ EventType: "git.pullrequest.created", Resource: &ADSelfPull, @@ -394,7 +393,6 @@ var ADSelfPullJSON = `{ "artifactId": "vstfs:///Git/PullRequestId/a7573007-bbb3-4341-b726-0c4148a07853%2f3411ebc1-d5aa-464f-9615-0b527bc66719%2f22" }` - const GithubPrivateKey = `-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAuEPzOUE+kiEH1WLiMeBytTEF856j0hOVcSUSUkZxKvqczkWM 9vo1gDyC7ZXhdH9fKh32aapba3RSsp4ke+giSmYTk2mGR538ShSDxh0OgpJmjiKP