diff --git a/Dockerfile b/Dockerfile
index a9a60940e1..0814b60f01 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -19,6 +19,22 @@ RUN AVAILABLE_TERRAFORM_VERSIONS="0.8.8 0.9.11 0.10.8 0.11.14 0.12.30 0.13.6 ${D
done && \
ln -s /usr/local/bin/tf/versions/${DEFAULT_TERRAFORM_VERSION}/terraform /usr/local/bin/terraform
+ENV DEFAULT_CONFTEST_VERSION=0.21.0
+
+RUN AVAILABLE_CONFTEST_VERSIONS="${DEFAULT_CONFTEST_VERSION}" && \
+ for VERSION in ${AVAILABLE_CONFTEST_VERSIONS}; do \
+ curl -LOs https://github.com/open-policy-agent/conftest/releases/download/v${VERSION}/conftest_${VERSION}_Linux_x86_64.tar.gz && \
+ curl -LOs https://github.com/open-policy-agent/conftest/releases/download/v${VERSION}/checksums.txt && \
+ sed -n "/conftest_${VERSION}_Linux_x86_64.tar.gz/p" checksums.txt | sha256sum -c && \
+ mkdir -p /usr/local/bin/cft/versions/${VERSION} && \
+ tar -C /usr/local/bin/cft/versions/${VERSION} -xzf conftest_${VERSION}_Linux_x86_64.tar.gz && \
+ ln -s /usr/local/bin/cft/versions/${VERSION}/conftest /usr/local/bin/conftest${VERSION} && \
+ rm conftest_${VERSION}_Linux_x86_64.tar.gz && \
+ rm checksums.txt; \
+ done
+
+RUN ln -s /usr/local/bin/cft/versions/${DEFAULT_CONFTEST_VERSION}/conftest /usr/local/bin/conftest
+
# copy binary
COPY atlantis /usr/local/bin/atlantis
diff --git a/Dockerfile.dev b/Dockerfile.dev
index d2100faecb..acbf3e1d75 100644
--- a/Dockerfile.dev
+++ b/Dockerfile.dev
@@ -1,3 +1,6 @@
FROM runatlantis/atlantis:latest
COPY atlantis /usr/local/bin/atlantis
+# TODO: remove this once we get this in the base image
+ENV DEFAULT_CONFTEST_VERSION=0.21.0
+
WORKDIR /atlantis/src
diff --git a/cmd/server.go b/cmd/server.go
index 9193eda326..8550e985c6 100644
--- a/cmd/server.go
+++ b/cmd/server.go
@@ -57,6 +57,7 @@ const (
DisableAutoplanFlag = "disable-autoplan"
DisableMarkdownFoldingFlag = "disable-markdown-folding"
DisableRepoLockingFlag = "disable-repo-locking"
+ EnablePolicyChecksFlag = "enable-policy-checks"
GHHostnameFlag = "gh-hostname"
GHTokenFlag = "gh-token"
GHUserFlag = "gh-user"
@@ -293,7 +294,10 @@ var boolFlags = map[string]boolFlag{
defaultValue: false,
},
DisableRepoLockingFlag: {
- description: "Disable atlantis locking repos",
+ description: "Disable atlantis locking repos",
+ },
+ EnablePolicyChecksFlag: {
+ description: "Enable atlantis to run user defined policy checks. This is explicitly disabled for TFE/TFC backends since plan files are inaccessible.",
defaultValue: false,
},
AllowDraftPRs: {
diff --git a/cmd/server_test.go b/cmd/server_test.go
index 01ab5466c0..27c846eb29 100644
--- a/cmd/server_test.go
+++ b/cmd/server_test.go
@@ -100,6 +100,7 @@ var testFlags = map[string]interface{}{
VCSStatusName: "my-status",
WriteGitCredsFlag: true,
DisableAutoplanFlag: true,
+ EnablePolicyChecksFlag: false,
}
func TestExecute_Defaults(t *testing.T) {
diff --git a/go.mod b/go.mod
index 3b236d2852..59c96b7f86 100644
--- a/go.mod
+++ b/go.mod
@@ -28,6 +28,8 @@ require (
github.com/hashicorp/terraform-config-inspect v0.0.0-20200806211835-c481b8bfa41e
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/leodido/go-urn v1.2.0 // indirect
+ github.com/lusis/go-slackbot v0.0.0-20180109053408-401027ccfef5 // indirect
+ github.com/lusis/slack-test v0.0.0-20190426140909-c40012f20018 // indirect
github.com/mcdafydd/go-azuredevops v0.12.0
github.com/microcosm-cc/bluemonday v1.0.1
github.com/mitchellh/colorstring v0.0.0-20150917214807-8631ce90f286
diff --git a/go.sum b/go.sum
index 1a39ea1ce8..25ff9b5248 100644
--- a/go.sum
+++ b/go.sum
@@ -138,7 +138,6 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGa
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graph-gophers/graphql-go v0.0.0-20200309224638-dae41bde9ef9/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
@@ -223,6 +222,10 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/lusis/go-slackbot v0.0.0-20180109053408-401027ccfef5 h1:AsEBgzv3DhuYHI/GiQh2HxvTP71HCCE9E/tzGUzGdtU=
+github.com/lusis/go-slackbot v0.0.0-20180109053408-401027ccfef5/go.mod h1:c2mYKRyMb1BPkO5St0c/ps62L4S0W2NAkaTXj9qEI+0=
+github.com/lusis/slack-test v0.0.0-20190426140909-c40012f20018 h1:MNApn+Z+fIT4NPZopPfCc1obT6aY3SVM6DOctz1A9ZU=
+github.com/lusis/slack-test v0.0.0-20190426140909-c40012f20018/go.mod h1:sFlOUpQL1YcjhFVXhg1CG8ZASEs/Mf1oVb6H75JL/zg=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
@@ -263,8 +266,6 @@ github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwd
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nlopes/slack v0.4.0 h1:OVnHm7lv5gGT5gkcHsZAyw++oHVFihbjWbL3UceUpiA=
github.com/nlopes/slack v0.4.0/go.mod h1:jVI4BBK3lSktibKahxBF74txcK2vyvkza1z/+rRnVAM=
-github.com/nlopes/slack v0.6.0 h1:jt0jxVQGhssx1Ib7naAOZEZcGdtIhTzkP0nopK0AsRA=
-github.com/nlopes/slack v0.6.0/go.mod h1:JzQ9m3PMAqcpeCam7UaHSuBuupz7CmpjehYMayT6YOk=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.9.0 h1:SZjF721BByVj8QH636/8S2DnX4n0Re3SteMmw3N+tzc=
@@ -277,7 +278,9 @@ github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/petergtz/pegomock v2.9.0+incompatible h1:BKfb5XfkJfehe5T+O1xD4Zm26Sb9dnRj7tHxLYwUPiI=
github.com/petergtz/pegomock v2.9.0+incompatible/go.mod h1:nuBLWZpVyv/fLo56qTwt/AUau7jgouO1h7bEvZCq82o=
+github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -462,6 +465,7 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -498,6 +502,7 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
diff --git a/server/events/apply_command_runner.go b/server/events/apply_command_runner.go
new file mode 100644
index 0000000000..aa5a4a652b
--- /dev/null
+++ b/server/events/apply_command_runner.go
@@ -0,0 +1,186 @@
+package events
+
+import (
+ "github.com/runatlantis/atlantis/server/events/db"
+ "github.com/runatlantis/atlantis/server/events/models"
+ "github.com/runatlantis/atlantis/server/events/vcs"
+)
+
+func NewApplyCommandRunner(
+ vcsClient vcs.Client,
+ disableApplyAll bool,
+ disableApply bool,
+ commitStatusUpdater CommitStatusUpdater,
+ prjCommandBuilder ProjectApplyCommandBuilder,
+ prjCmdRunner ProjectApplyCommandRunner,
+ autoMerger *AutoMerger,
+ pullUpdater *PullUpdater,
+ dbUpdater *DBUpdater,
+ db *db.BoltDB,
+ parallelPoolSize int,
+) *ApplyCommandRunner {
+ return &ApplyCommandRunner{
+ vcsClient: vcsClient,
+ DisableApplyAll: disableApplyAll,
+ DisableApply: disableApply,
+ commitStatusUpdater: commitStatusUpdater,
+ prjCmdBuilder: prjCommandBuilder,
+ prjCmdRunner: prjCmdRunner,
+ autoMerger: autoMerger,
+ pullUpdater: pullUpdater,
+ dbUpdater: dbUpdater,
+ DB: db,
+ parallelPoolSize: parallelPoolSize,
+ }
+}
+
+type ApplyCommandRunner struct {
+ DisableApplyAll bool
+ DisableApply bool
+ DB *db.BoltDB
+ vcsClient vcs.Client
+ commitStatusUpdater CommitStatusUpdater
+ prjCmdBuilder ProjectApplyCommandBuilder
+ prjCmdRunner ProjectApplyCommandRunner
+ autoMerger *AutoMerger
+ pullUpdater *PullUpdater
+ dbUpdater *DBUpdater
+ parallelPoolSize int
+}
+
+func (a *ApplyCommandRunner) Run(ctx *CommandContext, cmd *CommentCommand) {
+ var err error
+ baseRepo := ctx.Pull.BaseRepo
+ pull := ctx.Pull
+
+ if a.DisableApply {
+ ctx.Log.Info("ignoring apply command since apply disabled globally")
+ if err := a.vcsClient.CreateComment(baseRepo, pull.Num, applyDisabledComment, models.ApplyCommand.String()); err != nil {
+ ctx.Log.Err("unable to comment on pull request: %s", err)
+ }
+
+ return
+ }
+
+ if a.DisableApplyAll && !cmd.IsForSpecificProject() {
+ ctx.Log.Info("ignoring apply command without flags since apply all is disabled")
+ if err := a.vcsClient.CreateComment(baseRepo, pull.Num, applyAllDisabledComment, models.ApplyCommand.String()); err != nil {
+ ctx.Log.Err("unable to comment on pull request: %s", err)
+ }
+
+ return
+ }
+
+ // Get the mergeable status before we set any build statuses of our own.
+ // We do this here because when we set a "Pending" status, if users have
+ // required the Atlantis status checks to pass, then we've now changed
+ // the mergeability status of the pull request.
+ ctx.PullMergeable, err = a.vcsClient.PullIsMergeable(baseRepo, pull)
+ if err != nil {
+ // On error we continue the request with mergeable assumed false.
+ // We want to continue because not all apply's will need this status,
+ // only if they rely on the mergeability requirement.
+ ctx.PullMergeable = false
+ ctx.Log.Warn("unable to get mergeable status: %s. Continuing with mergeable assumed false", err)
+ }
+
+ // TODO: This needs to be revisited and new PullMergeable like conditions should
+ // be added to check against it.
+ if a.anyFailedPolicyChecks(pull) {
+ ctx.PullMergeable = false
+ ctx.Log.Warn("when using policy checks all policies have to be approved or pass. Continuing with mergeable assumed false")
+ }
+
+ ctx.Log.Info("pull request mergeable status: %t", ctx.PullMergeable)
+
+ if err = a.commitStatusUpdater.UpdateCombined(baseRepo, pull, models.PendingCommitStatus, cmd.CommandName()); err != nil {
+ ctx.Log.Warn("unable to update commit status: %s", err)
+ }
+
+ var projectCmds []models.ProjectCommandContext
+ projectCmds, err = a.prjCmdBuilder.BuildApplyCommands(ctx, cmd)
+
+ if err != nil {
+ if statusErr := a.commitStatusUpdater.UpdateCombined(ctx.Pull.BaseRepo, ctx.Pull, models.FailedCommitStatus, cmd.CommandName()); statusErr != nil {
+ ctx.Log.Warn("unable to update commit status: %s", statusErr)
+ }
+ a.pullUpdater.updatePull(ctx, cmd, CommandResult{Error: err})
+ return
+ }
+
+ // Only run commands in parallel if enabled
+ var result CommandResult
+ if a.isParallelEnabled(projectCmds) {
+ ctx.Log.Info("Running applies in parallel")
+ result = runProjectCmdsParallel(projectCmds, a.prjCmdRunner.Apply, a.parallelPoolSize)
+ } else {
+ result = runProjectCmds(projectCmds, a.prjCmdRunner.Apply)
+ }
+
+ a.pullUpdater.updatePull(
+ ctx,
+ cmd,
+ result)
+
+ pullStatus, err := a.dbUpdater.updateDB(ctx, pull, result.ProjectResults)
+ if err != nil {
+ ctx.Log.Err("writing results: %s", err)
+ return
+ }
+
+ a.updateCommitStatus(ctx, pullStatus)
+
+ if a.autoMerger.automergeEnabled(projectCmds) {
+ a.autoMerger.automerge(ctx, pullStatus)
+ }
+}
+
+func (a *ApplyCommandRunner) isParallelEnabled(projectCmds []models.ProjectCommandContext) bool {
+ return len(projectCmds) > 0 && projectCmds[0].ParallelApplyEnabled
+}
+
+func (a *ApplyCommandRunner) updateCommitStatus(ctx *CommandContext, pullStatus models.PullStatus) {
+ var numSuccess int
+ var numErrored int
+ status := models.SuccessCommitStatus
+
+ numSuccess = pullStatus.StatusCount(models.AppliedPlanStatus)
+ numErrored = pullStatus.StatusCount(models.ErroredApplyStatus)
+
+ if numErrored > 0 {
+ status = models.FailedCommitStatus
+ } else if numSuccess < len(pullStatus.Projects) {
+ // If there are plans that haven't been applied yet, we'll use a pending
+ // status.
+ status = models.PendingCommitStatus
+ }
+
+ if err := a.commitStatusUpdater.UpdateCombinedCount(
+ ctx.Pull.BaseRepo,
+ ctx.Pull,
+ status,
+ models.ApplyCommand,
+ numSuccess,
+ len(pullStatus.Projects),
+ ); err != nil {
+ ctx.Log.Warn("unable to update commit status: %s", err)
+ }
+}
+
+func (a *ApplyCommandRunner) anyFailedPolicyChecks(pull models.PullRequest) bool {
+ policyCheckPullStatus, _ := a.DB.GetPullStatus(pull)
+ if policyCheckPullStatus != nil && policyCheckPullStatus.StatusCount(models.ErroredPolicyCheckStatus) > 0 {
+ return true
+ }
+
+ return false
+
+}
+
+// applyAllDisabledComment is posted when apply all commands (i.e. "atlantis apply")
+// are disabled and an apply all command is issued.
+var applyAllDisabledComment = "**Error:** Running `atlantis apply` without flags is disabled." +
+	" You must specify which project to apply via the `-d <dir>`, `-w <workspace>` or `-p <project>` flags."
+
+// applyDisabledComment is posted when apply commands are disabled globally and an apply command is issued.
+var applyDisabledComment = "**Error:** Running `atlantis apply` is disabled."
diff --git a/server/events/approve_policies_command_runner.go b/server/events/approve_policies_command_runner.go
new file mode 100644
index 0000000000..d1d9b55788
--- /dev/null
+++ b/server/events/approve_policies_command_runner.go
@@ -0,0 +1,101 @@
+package events
+
+import (
+ "fmt"
+
+ "github.com/runatlantis/atlantis/server/events/models"
+)
+
+func NewApprovePoliciesCommandRunner(
+ commitStatusUpdater CommitStatusUpdater,
+ prjCommandBuilder ProjectApprovePoliciesCommandBuilder,
+ prjCommandRunner ProjectApprovePoliciesCommandRunner,
+ pullUpdater *PullUpdater,
+ dbUpdater *DBUpdater,
+) *ApprovePoliciesCommandRunner {
+ return &ApprovePoliciesCommandRunner{
+ commitStatusUpdater: commitStatusUpdater,
+ prjCmdBuilder: prjCommandBuilder,
+ prjCmdRunner: prjCommandRunner,
+ pullUpdater: pullUpdater,
+ dbUpdater: dbUpdater,
+ }
+}
+
+type ApprovePoliciesCommandRunner struct {
+ commitStatusUpdater CommitStatusUpdater
+ pullUpdater *PullUpdater
+ dbUpdater *DBUpdater
+ prjCmdBuilder ProjectApprovePoliciesCommandBuilder
+ prjCmdRunner ProjectApprovePoliciesCommandRunner
+}
+
+func (a *ApprovePoliciesCommandRunner) Run(ctx *CommandContext, cmd *CommentCommand) {
+ baseRepo := ctx.Pull.BaseRepo
+ pull := ctx.Pull
+
+ if err := a.commitStatusUpdater.UpdateCombined(baseRepo, pull, models.PendingCommitStatus, models.PolicyCheckCommand); err != nil {
+ ctx.Log.Warn("unable to update commit status: %s", err)
+ }
+
+ projectCmds, err := a.prjCmdBuilder.BuildApprovePoliciesCommands(ctx, cmd)
+ if err != nil {
+ if statusErr := a.commitStatusUpdater.UpdateCombined(ctx.Pull.BaseRepo, ctx.Pull, models.FailedCommitStatus, models.PolicyCheckCommand); statusErr != nil {
+ ctx.Log.Warn("unable to update commit status: %s", statusErr)
+ }
+ a.pullUpdater.updatePull(ctx, cmd, CommandResult{Error: err})
+ return
+ }
+
+ result := a.buildApprovePolicyCommandResults(ctx, projectCmds)
+
+ a.pullUpdater.updatePull(
+ ctx,
+ cmd,
+ result,
+ )
+
+ pullStatus, err := a.dbUpdater.updateDB(ctx, pull, result.ProjectResults)
+ if err != nil {
+ ctx.Log.Err("writing results: %s", err)
+ return
+ }
+
+ a.updateCommitStatus(ctx, pullStatus)
+}
+
+func (a *ApprovePoliciesCommandRunner) buildApprovePolicyCommandResults(ctx *CommandContext, prjCmds []models.ProjectCommandContext) (result CommandResult) {
+ // Check if vcs user is in the owner list of the PolicySets. All projects
+ // share the same Owners list at this time so no reason to iterate over each
+ // project.
+ if len(prjCmds) > 0 && !prjCmds[0].PolicySets.IsOwner(ctx.User.Username) {
+ result.Error = fmt.Errorf("contact policy owners to approve failing policies")
+ return
+ }
+
+ var prjResults []models.ProjectResult
+
+ for _, prjCmd := range prjCmds {
+ prjResult := a.prjCmdRunner.ApprovePolicies(prjCmd)
+ prjResults = append(prjResults, prjResult)
+ }
+ result.ProjectResults = prjResults
+ return
+}
+
+func (a *ApprovePoliciesCommandRunner) updateCommitStatus(ctx *CommandContext, pullStatus models.PullStatus) {
+ var numSuccess int
+ var numErrored int
+ status := models.SuccessCommitStatus
+
+ numSuccess = pullStatus.StatusCount(models.PassedPolicyCheckStatus)
+ numErrored = pullStatus.StatusCount(models.ErroredPolicyCheckStatus)
+
+ if numErrored > 0 {
+ status = models.FailedCommitStatus
+ }
+
+ if err := a.commitStatusUpdater.UpdateCombinedCount(ctx.Pull.BaseRepo, ctx.Pull, status, models.PolicyCheckCommand, numSuccess, len(pullStatus.Projects)); err != nil {
+ ctx.Log.Warn("unable to update commit status: %s", err)
+ }
+}
diff --git a/server/events/automerger.go b/server/events/automerger.go
new file mode 100644
index 0000000000..decd35b8fe
--- /dev/null
+++ b/server/events/automerger.go
@@ -0,0 +1,50 @@
+package events
+
+import (
+ "fmt"
+
+ "github.com/runatlantis/atlantis/server/events/models"
+ "github.com/runatlantis/atlantis/server/events/vcs"
+)
+
+type AutoMerger struct {
+ VCSClient vcs.Client
+ GlobalAutomerge bool
+}
+
+func (c *AutoMerger) automerge(ctx *CommandContext, pullStatus models.PullStatus) {
+ // We only automerge if all projects have been successfully applied.
+ for _, p := range pullStatus.Projects {
+ if p.Status != models.AppliedPlanStatus {
+ ctx.Log.Info("not automerging because project at dir %q, workspace %q has status %q", p.RepoRelDir, p.Workspace, p.Status.String())
+ return
+ }
+ }
+
+ // Comment that we're automerging the pull request.
+ if err := c.VCSClient.CreateComment(ctx.Pull.BaseRepo, ctx.Pull.Num, automergeComment, models.ApplyCommand.String()); err != nil {
+ ctx.Log.Err("failed to comment about automerge: %s", err)
+ // Commenting isn't required so continue.
+ }
+
+ // Make the API call to perform the merge.
+ ctx.Log.Info("automerging pull request")
+ err := c.VCSClient.MergePull(ctx.Pull)
+
+ if err != nil {
+ ctx.Log.Err("automerging failed: %s", err)
+
+ failureComment := fmt.Sprintf("Automerging failed:\n```\n%s\n```", err)
+ if commentErr := c.VCSClient.CreateComment(ctx.Pull.BaseRepo, ctx.Pull.Num, failureComment, models.ApplyCommand.String()); commentErr != nil {
+ ctx.Log.Err("failed to comment about automerge failing: %s", err)
+ }
+ }
+}
+
+// automergeEnabled returns true if automerging is enabled in this context.
+func (c *AutoMerger) automergeEnabled(projectCmds []models.ProjectCommandContext) bool {
+ // If the global automerge is set, we always automerge.
+ return c.GlobalAutomerge ||
+ // Otherwise we check if this repo is configured for automerging.
+ (len(projectCmds) > 0 && projectCmds[0].AutomergeEnabled)
+}
diff --git a/server/events/command_context.go b/server/events/command_context.go
index 72e9bd2dff..16c4abf67c 100644
--- a/server/events/command_context.go
+++ b/server/events/command_context.go
@@ -10,7 +10,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Modified hereafter by contributors to runatlantis/atlantis.
-
package events
import (
@@ -18,6 +17,17 @@ import (
"github.com/runatlantis/atlantis/server/logging"
)
+// CommandTrigger represents the how the command was triggered
+type CommandTrigger int
+
+const (
+ // Commands that are automatically triggered (ie. automatic plans)
+ Auto CommandTrigger = iota
+
+ // Commands that are triggered by comments (ie. atlantis plan)
+ Comment
+)
+
// CommandContext represents the context of a command that should be executed
// for a pull request.
type CommandContext struct {
@@ -35,4 +45,6 @@ type CommandContext struct {
// set our own build statuses which can affect mergeability if users have
// required the Atlantis status to be successful prior to merging.
PullMergeable bool
+
+ Trigger CommandTrigger
}
diff --git a/server/events/command_runner.go b/server/events/command_runner.go
index 25489122bb..e7cf6bd2f3 100644
--- a/server/events/command_runner.go
+++ b/server/events/command_runner.go
@@ -15,13 +15,10 @@ package events
import (
"fmt"
- "sync"
"github.com/google/go-github/v31/github"
"github.com/mcdafydd/go-azuredevops/azuredevops"
"github.com/pkg/errors"
- "github.com/remeh/sizedwaitgroup"
- "github.com/runatlantis/atlantis/server/events/db"
"github.com/runatlantis/atlantis/server/events/models"
"github.com/runatlantis/atlantis/server/events/vcs"
"github.com/runatlantis/atlantis/server/logging"
@@ -68,18 +65,34 @@ type GitlabMergeRequestGetter interface {
GetMergeRequest(repoFullName string, pullNum int) (*gitlab.MergeRequest, error)
}
+// CommentCommandRunner runs individual command workflows.
+type CommentCommandRunner interface {
+ Run(*CommandContext, *CommentCommand)
+}
+
+func buildCommentCommandRunner(
+ cmdRunner *DefaultCommandRunner,
+ cmdName models.CommandName,
+) CommentCommandRunner {
+ // panic here, we want to fail fast and hard since
+ // this would be an internal service configuration error.
+ runner, ok := cmdRunner.CommentCommandRunnerByCmd[cmdName]
+
+ if !ok {
+ panic(fmt.Sprintf("command runner not configured for command %s", cmdName.String()))
+ }
+
+ return runner
+}
+
// DefaultCommandRunner is the first step when processing a comment command.
type DefaultCommandRunner struct {
VCSClient vcs.Client
GithubPullGetter GithubPullGetter
AzureDevopsPullGetter AzureDevopsPullGetter
GitlabMergeRequestGetter GitlabMergeRequestGetter
- CommitStatusUpdater CommitStatusUpdater
- DisableApplyAll bool
- DisableApply bool
DisableAutoplan bool
EventParser EventParsing
- MarkdownRenderer *MarkdownRenderer
Logger logging.SimpleLogging
// AllowForkPRs controls whether we operate on pull requests from forks.
AllowForkPRs bool
@@ -90,30 +103,17 @@ type DefaultCommandRunner struct {
// this in our error message back to the user on a forked PR so they know
// how to enable this functionality.
AllowForkPRsFlag string
- // HidePrevPlanComments will hide previous plan comments to declutter PRs.
- HidePrevPlanComments bool
// SilenceForkPRErrors controls whether to comment on Fork PRs when AllowForkPRs = False
SilenceForkPRErrors bool
// SilenceForkPRErrorsFlag is the name of the flag that controls fork PR's. We use
// this in our error message back to the user on a forked PR so they know
// how to disable error comment
- SilenceForkPRErrorsFlag string
- // SilenceVCSStatusNoPlans is whether autoplan should set commit status if no plans
- // are found
- SilenceVCSStatusNoPlans bool
- ProjectCommandBuilder ProjectCommandBuilder
- ProjectCommandRunner ProjectCommandRunner
- // GlobalAutomerge is true if we should automatically merge pull requests if all
- // plans have been successfully applied. This is set via a CLI flag.
- GlobalAutomerge bool
- PendingPlanFinder PendingPlanFinder
- WorkingDir WorkingDir
- DB *db.BoltDB
- Drainer *Drainer
- DeleteLockCommand DeleteLockCommand
+ SilenceForkPRErrorsFlag string
+ CommentCommandRunnerByCmd map[models.CommandName]CommentCommandRunner
+ Drainer *Drainer
}
-// RunAutoplanCommand runs plan when a pull request is opened or updated.
+// RunAutoplanCommand runs plan and policy_checks when a pull request is opened or updated.
func (c *DefaultCommandRunner) RunAutoplanCommand(baseRepo models.Repo, headRepo models.Repo, pull models.PullRequest, user models.User) {
if opStarted := c.Drainer.StartOp(); !opStarted {
if commentErr := c.VCSClient.CreateComment(baseRepo, pull.Num, ShutdownComment, models.PlanCommand.String()); commentErr != nil {
@@ -130,6 +130,7 @@ func (c *DefaultCommandRunner) RunAutoplanCommand(baseRepo models.Repo, headRepo
Log: log,
Pull: pull,
HeadRepo: headRepo,
+ Trigger: Auto,
}
if !c.validateCtxAndComment(ctx) {
return
@@ -138,58 +139,13 @@ func (c *DefaultCommandRunner) RunAutoplanCommand(baseRepo models.Repo, headRepo
return
}
- projectCmds, err := c.ProjectCommandBuilder.BuildAutoplanCommands(ctx)
- if err != nil {
- if statusErr := c.CommitStatusUpdater.UpdateCombined(ctx.Pull.BaseRepo, ctx.Pull, models.FailedCommitStatus, models.PlanCommand); statusErr != nil {
- ctx.Log.Warn("unable to update commit status: %s", statusErr)
- }
-
- c.updatePull(ctx, AutoplanCommand{}, CommandResult{Error: err})
+ autoPlanRunner := buildCommentCommandRunner(c, models.PlanCommand)
+ if autoPlanRunner == nil {
+ ctx.Log.Err("invalid autoplan command")
return
}
- if len(projectCmds) == 0 {
- log.Info("determined there was no project to run plan in")
- if !c.SilenceVCSStatusNoPlans {
- // If there were no projects modified, we set successful commit statuses
- // with 0/0 projects planned/applied successfully because some users require
- // the Atlantis status to be passing for all pull requests.
- ctx.Log.Debug("setting VCS status to success with no projects found")
- if err := c.CommitStatusUpdater.UpdateCombinedCount(baseRepo, pull, models.SuccessCommitStatus, models.PlanCommand, 0, 0); err != nil {
- ctx.Log.Warn("unable to update commit status: %s", err)
- }
- if err := c.CommitStatusUpdater.UpdateCombinedCount(baseRepo, pull, models.SuccessCommitStatus, models.ApplyCommand, 0, 0); err != nil {
- ctx.Log.Warn("unable to update commit status: %s", err)
- }
- }
- return
- }
-
- // At this point we are sure Atlantis has work to do, so set commit status to pending
- if err := c.CommitStatusUpdater.UpdateCombined(ctx.Pull.BaseRepo, ctx.Pull, models.PendingCommitStatus, models.PlanCommand); err != nil {
- ctx.Log.Warn("unable to update commit status: %s", err)
- }
-
- // Only run commands in parallel if enabled
- var result CommandResult
- if c.parallelPlanEnabled(ctx, projectCmds) {
- ctx.Log.Info("Running plans in parallel")
- result = c.runProjectCmdsParallel(projectCmds, models.PlanCommand)
- } else {
- result = c.runProjectCmds(projectCmds, models.PlanCommand)
- }
-
- if c.automergeEnabled(ctx, projectCmds) && result.HasErrors() {
- ctx.Log.Info("deleting plans because there were errors and automerge requires all plans succeed")
- c.deletePlans(ctx)
- result.PlansDeleted = true
- }
- c.updatePull(ctx, AutoplanCommand{}, result)
- pullStatus, err := c.updateDB(ctx, ctx.Pull, result.ProjectResults)
- if err != nil {
- c.Logger.Err("writing results: %s", err)
- }
- c.updateCommitStatus(ctx, models.PlanCommand, pullStatus)
+ autoPlanRunner.Run(ctx, nil)
}
// RunCommentCommand executes the command.
@@ -209,258 +165,30 @@ func (c *DefaultCommandRunner) RunCommentCommand(baseRepo models.Repo, maybeHead
log := c.buildLogger(baseRepo.FullName, pullNum)
defer c.logPanics(baseRepo, pullNum, log)
- if c.DisableApply && cmd.Name == models.ApplyCommand {
- log.Info("ignoring apply command since apply disabled globally")
- if err := c.VCSClient.CreateComment(baseRepo, pullNum, applyDisabledComment, models.ApplyCommand.String()); err != nil {
- log.Err("unable to comment on pull request: %s", err)
- }
- return
- }
-
- if c.DisableApplyAll && cmd.Name == models.ApplyCommand && !cmd.IsForSpecificProject() {
- log.Info("ignoring apply command without flags since apply all is disabled")
- if err := c.VCSClient.CreateComment(baseRepo, pullNum, applyAllDisabledComment, models.ApplyCommand.String()); err != nil {
- log.Err("unable to comment on pull request: %s", err)
- }
- return
- }
-
- var headRepo models.Repo
- if maybeHeadRepo != nil {
- headRepo = *maybeHeadRepo
- }
-
- var err error
- var pull models.PullRequest
- switch baseRepo.VCSHost.Type {
- case models.Github:
- pull, headRepo, err = c.getGithubData(baseRepo, pullNum)
- case models.Gitlab:
- pull, err = c.getGitlabData(baseRepo, pullNum)
- case models.BitbucketCloud, models.BitbucketServer:
- if maybePull == nil {
- err = errors.New("pull request should not be nil–this is a bug")
- break
- }
- pull = *maybePull
- case models.AzureDevops:
- pull, headRepo, err = c.getAzureDevopsData(baseRepo, pullNum)
- default:
- err = errors.New("Unknown VCS type–this is a bug")
- }
+ headRepo, pull, err := c.ensureValidRepoMetadata(baseRepo, maybeHeadRepo, maybePull, user, pullNum, log)
if err != nil {
- log.Err(err.Error())
- if commentErr := c.VCSClient.CreateComment(baseRepo, pullNum, fmt.Sprintf("`Error: %s`", err), ""); commentErr != nil {
- log.Err("unable to comment: %s", commentErr)
- }
return
}
+
ctx := &CommandContext{
User: user,
Log: log,
Pull: pull,
HeadRepo: headRepo,
- }
- if !c.validateCtxAndComment(ctx) {
- return
- }
-
- if cmd.Name == models.UnlockCommand {
- vcsMessage := "All Atlantis locks for this PR have been unlocked and plans discarded"
- err := c.DeleteLockCommand.DeleteLocksByPull(baseRepo.FullName, pullNum)
- if err != nil {
- vcsMessage = "Failed to delete PR locks"
- log.Err("failed to delete locks by pull %s", err.Error())
- }
- if commentErr := c.VCSClient.CreateComment(baseRepo, pullNum, vcsMessage, models.UnlockCommand.String()); commentErr != nil {
- log.Err("unable to comment: %s", commentErr)
- }
- return
+ Trigger: Comment,
}
- if cmd.CommandName() == models.ApplyCommand {
- // Get the mergeable status before we set any build statuses of our own.
- // We do this here because when we set a "Pending" status, if users have
- // required the Atlantis status checks to pass, then we've now changed
- // the mergeability status of the pull request.
- ctx.PullMergeable, err = c.VCSClient.PullIsMergeable(baseRepo, pull)
- if err != nil {
- // On error we continue the request with mergeable assumed false.
- // We want to continue because not all apply's will need this status,
- // only if they rely on the mergeability requirement.
- ctx.PullMergeable = false
- ctx.Log.Warn("unable to get mergeable status: %s. Continuing with mergeable assumed false", err)
- }
- ctx.Log.Info("pull request mergeable status: %t", ctx.PullMergeable)
- }
-
- if err = c.CommitStatusUpdater.UpdateCombined(baseRepo, pull, models.PendingCommitStatus, cmd.CommandName()); err != nil {
- ctx.Log.Warn("unable to update commit status: %s", err)
- }
-
- var projectCmds []models.ProjectCommandContext
- switch cmd.Name {
- case models.PlanCommand:
- projectCmds, err = c.ProjectCommandBuilder.BuildPlanCommands(ctx, cmd)
- case models.ApplyCommand:
- projectCmds, err = c.ProjectCommandBuilder.BuildApplyCommands(ctx, cmd)
- default:
- ctx.Log.Err("failed to determine desired command, neither plan nor apply")
- return
- }
- if err != nil {
- if statusErr := c.CommitStatusUpdater.UpdateCombined(ctx.Pull.BaseRepo, ctx.Pull, models.FailedCommitStatus, cmd.CommandName()); statusErr != nil {
- ctx.Log.Warn("unable to update commit status: %s", statusErr)
- }
- c.updatePull(ctx, cmd, CommandResult{Error: err})
+ if !c.validateCtxAndComment(ctx) {
return
}
- // Only run commands in parallel if enabled
- var result CommandResult
- if cmd.Name == models.ApplyCommand && c.parallelApplyEnabled(ctx, projectCmds) {
- ctx.Log.Info("Running applies in parallel")
- result = c.runProjectCmdsParallel(projectCmds, cmd.Name)
- } else if cmd.Name == models.PlanCommand && c.parallelPlanEnabled(ctx, projectCmds) {
- ctx.Log.Info("Running plans in parallel")
- result = c.runProjectCmdsParallel(projectCmds, cmd.Name)
- } else {
- result = c.runProjectCmds(projectCmds, cmd.Name)
- }
-
- if cmd.Name == models.PlanCommand && c.automergeEnabled(ctx, projectCmds) && result.HasErrors() {
- ctx.Log.Info("deleting plans because there were errors and automerge requires all plans succeed")
- c.deletePlans(ctx)
- result.PlansDeleted = true
- }
-
- c.updatePull(
- ctx,
- cmd,
- result)
-
- pullStatus, err := c.updateDB(ctx, pull, result.ProjectResults)
- if err != nil {
- c.Logger.Err("writing results: %s", err)
+ cmdRunner := buildCommentCommandRunner(c, cmd.CommandName())
+ if cmdRunner == nil {
+ ctx.Log.Err("command %s is not supported", cmd.Name.String())
return
}
- c.updateCommitStatus(ctx, cmd.Name, pullStatus)
-
- if cmd.Name == models.ApplyCommand && c.automergeEnabled(ctx, projectCmds) {
- c.automerge(ctx, pullStatus)
- }
-}
-
-func (c *DefaultCommandRunner) updateCommitStatus(ctx *CommandContext, cmd models.CommandName, pullStatus models.PullStatus) {
- var numSuccess int
- var status models.CommitStatus
-
- if cmd == models.PlanCommand {
- // We consider anything that isn't a plan error as a plan success.
- // For example, if there is an apply error, that means that at least a
- // plan was generated successfully.
- numSuccess = len(pullStatus.Projects) - pullStatus.StatusCount(models.ErroredPlanStatus)
- status = models.SuccessCommitStatus
- if numSuccess != len(pullStatus.Projects) {
- status = models.FailedCommitStatus
- }
- } else {
- numSuccess = pullStatus.StatusCount(models.AppliedPlanStatus)
-
- numErrored := pullStatus.StatusCount(models.ErroredApplyStatus)
- status = models.SuccessCommitStatus
- if numErrored > 0 {
- status = models.FailedCommitStatus
- } else if numSuccess < len(pullStatus.Projects) {
- // If there are plans that haven't been applied yet, we'll use a pending
- // status.
- status = models.PendingCommitStatus
- }
- }
-
- if err := c.CommitStatusUpdater.UpdateCombinedCount(ctx.Pull.BaseRepo, ctx.Pull, status, cmd, numSuccess, len(pullStatus.Projects)); err != nil {
- ctx.Log.Warn("unable to update commit status: %s", err)
- }
-}
-
-func (c *DefaultCommandRunner) automerge(ctx *CommandContext, pullStatus models.PullStatus) {
- // We only automerge if all projects have been successfully applied.
- for _, p := range pullStatus.Projects {
- if p.Status != models.AppliedPlanStatus {
- ctx.Log.Info("not automerging because project at dir %q, workspace %q has status %q", p.RepoRelDir, p.Workspace, p.Status.String())
- return
- }
- }
-
- // Comment that we're automerging the pull request.
- if err := c.VCSClient.CreateComment(ctx.Pull.BaseRepo, ctx.Pull.Num, automergeComment, models.ApplyCommand.String()); err != nil {
- ctx.Log.Err("failed to comment about automerge: %s", err)
- // Commenting isn't required so continue.
- }
-
- // Make the API call to perform the merge.
- ctx.Log.Info("automerging pull request")
- err := c.VCSClient.MergePull(ctx.Pull)
-
- if err != nil {
- ctx.Log.Err("automerging failed: %s", err)
-
- failureComment := fmt.Sprintf("Automerging failed:\n```\n%s\n```", err)
- if commentErr := c.VCSClient.CreateComment(ctx.Pull.BaseRepo, ctx.Pull.Num, failureComment, models.ApplyCommand.String()); commentErr != nil {
- ctx.Log.Err("failed to comment about automerge failing: %s", err)
- }
- }
-}
-
-func (c *DefaultCommandRunner) runProjectCmdsParallel(cmds []models.ProjectCommandContext, cmdName models.CommandName) CommandResult {
- var results []models.ProjectResult
- mux := &sync.Mutex{}
-
- wg := sizedwaitgroup.New(c.ParallelPoolSize)
- for _, pCmd := range cmds {
- pCmd := pCmd
- var execute func()
- wg.Add()
-
- switch cmdName {
- case models.PlanCommand:
- execute = func() {
- defer wg.Done()
- res := c.ProjectCommandRunner.Plan(pCmd)
- mux.Lock()
- results = append(results, res)
- mux.Unlock()
- }
- case models.ApplyCommand:
- execute = func() {
- defer wg.Done()
- res := c.ProjectCommandRunner.Apply(pCmd)
- mux.Lock()
- results = append(results, res)
- mux.Unlock()
- }
- }
- go execute()
- }
-
- wg.Wait()
- return CommandResult{ProjectResults: results}
-}
-
-func (c *DefaultCommandRunner) runProjectCmds(cmds []models.ProjectCommandContext, cmdName models.CommandName) CommandResult {
- var results []models.ProjectResult
- for _, pCmd := range cmds {
- var res models.ProjectResult
- switch cmdName {
- case models.PlanCommand:
- res = c.ProjectCommandRunner.Plan(pCmd)
- case models.ApplyCommand:
- res = c.ProjectCommandRunner.Apply(pCmd)
- }
- results = append(results, res)
- }
- return CommandResult{ProjectResults: results}
+ cmdRunner.Run(ctx, cmd)
}
func (c *DefaultCommandRunner) getGithubData(baseRepo models.Repo, pullNum int) (models.PullRequest, models.Repo, error) {
@@ -510,6 +238,45 @@ func (c *DefaultCommandRunner) buildLogger(repoFullName string, pullNum int) *lo
return c.Logger.NewLogger(src, true, c.Logger.GetLevel())
}
+func (c *DefaultCommandRunner) ensureValidRepoMetadata(
+ baseRepo models.Repo,
+ maybeHeadRepo *models.Repo,
+ maybePull *models.PullRequest,
+ user models.User,
+ pullNum int,
+ log *logging.SimpleLogger,
+) (headRepo models.Repo, pull models.PullRequest, err error) {
+ if maybeHeadRepo != nil {
+ headRepo = *maybeHeadRepo
+ }
+
+ switch baseRepo.VCSHost.Type {
+ case models.Github:
+ pull, headRepo, err = c.getGithubData(baseRepo, pullNum)
+ case models.Gitlab:
+ pull, err = c.getGitlabData(baseRepo, pullNum)
+ case models.BitbucketCloud, models.BitbucketServer:
+ if maybePull == nil {
+ err = errors.New("pull request should not be nil–this is a bug")
+ break
+ }
+ pull = *maybePull
+ case models.AzureDevops:
+ pull, headRepo, err = c.getAzureDevopsData(baseRepo, pullNum)
+ default:
+ err = errors.New("Unknown VCS type–this is a bug")
+ }
+
+ if err != nil {
+ log.Err(err.Error())
+ if commentErr := c.VCSClient.CreateComment(baseRepo, pullNum, fmt.Sprintf("`Error: %s`", err), ""); commentErr != nil {
+ log.Err("unable to comment: %s", commentErr)
+ }
+ }
+
+ return
+}
+
func (c *DefaultCommandRunner) validateCtxAndComment(ctx *CommandContext) bool {
if !c.AllowForkPRs && ctx.HeadRepo.Owner != ctx.Pull.BaseRepo.Owner {
if c.SilenceForkPRErrors {
@@ -532,29 +299,6 @@ func (c *DefaultCommandRunner) validateCtxAndComment(ctx *CommandContext) bool {
return true
}
-func (c *DefaultCommandRunner) updatePull(ctx *CommandContext, command PullCommand, res CommandResult) {
- // Log if we got any errors or failures.
- if res.Error != nil {
- ctx.Log.Err(res.Error.Error())
- } else if res.Failure != "" {
- ctx.Log.Warn(res.Failure)
- }
-
- // HidePrevPlanComments will hide old comments left from previous plan runs to reduce
- // clutter in a pull/merge request. This will not delete the comment, since the
- // comment trail may be useful in auditing or backtracing problems.
- if c.HidePrevPlanComments {
- if err := c.VCSClient.HidePrevPlanComments(ctx.Pull.BaseRepo, ctx.Pull.Num); err != nil {
- ctx.Log.Err("unable to hide old comments: %s", err)
- }
- }
-
- comment := c.MarkdownRenderer.Render(res, command.CommandName(), ctx.Log.History.String(), command.IsVerbose(), ctx.Pull.BaseRepo.VCSHost.Type)
- if err := c.VCSClient.CreateComment(ctx.Pull.BaseRepo, ctx.Pull.Num, comment, command.CommandName().String()); err != nil {
- ctx.Log.Err("unable to comment: %s", err)
- }
-}
-
// logPanics logs and creates a comment on the pull request for panics.
func (c *DefaultCommandRunner) logPanics(baseRepo models.Repo, pullNum int, logger logging.SimpleLogging) {
if err := recover(); err != nil {
@@ -571,59 +315,6 @@ func (c *DefaultCommandRunner) logPanics(baseRepo models.Repo, pullNum int, logg
}
}
-// deletePlans deletes all plans generated in this ctx.
-func (c *DefaultCommandRunner) deletePlans(ctx *CommandContext) {
- pullDir, err := c.WorkingDir.GetPullDir(ctx.Pull.BaseRepo, ctx.Pull)
- if err != nil {
- ctx.Log.Err("getting pull dir: %s", err)
- }
- if err := c.PendingPlanFinder.DeletePlans(pullDir); err != nil {
- ctx.Log.Err("deleting pending plans: %s", err)
- }
-}
-
-func (c *DefaultCommandRunner) updateDB(ctx *CommandContext, pull models.PullRequest, results []models.ProjectResult) (models.PullStatus, error) {
- // Filter out results that errored due to the directory not existing. We
- // don't store these in the database because they would never be "apply-able"
- // and so the pull request would always have errors.
- var filtered []models.ProjectResult
- for _, r := range results {
- if _, ok := r.Error.(DirNotExistErr); ok {
- ctx.Log.Debug("ignoring error result from project at dir %q workspace %q because it is dir not exist error", r.RepoRelDir, r.Workspace)
- continue
- }
- filtered = append(filtered, r)
- }
- ctx.Log.Debug("updating DB with pull results")
- return c.DB.UpdatePullWithResults(pull, filtered)
-}
-
-// automergeEnabled returns true if automerging is enabled in this context.
-func (c *DefaultCommandRunner) automergeEnabled(ctx *CommandContext, projectCmds []models.ProjectCommandContext) bool {
- // If the global automerge is set, we always automerge.
- return c.GlobalAutomerge ||
- // Otherwise we check if this repo is configured for automerging.
- (len(projectCmds) > 0 && projectCmds[0].AutomergeEnabled)
-}
-
-// parallelApplyEnabled returns true if parallel apply is enabled in this context.
-func (c *DefaultCommandRunner) parallelApplyEnabled(ctx *CommandContext, projectCmds []models.ProjectCommandContext) bool {
- return len(projectCmds) > 0 && projectCmds[0].ParallelApplyEnabled
-}
-
-// parallelPlanEnabled returns true if parallel plan is enabled in this context.
-func (c *DefaultCommandRunner) parallelPlanEnabled(ctx *CommandContext, projectCmds []models.ProjectCommandContext) bool {
- return len(projectCmds) > 0 && projectCmds[0].ParallelPlanEnabled
-}
-
// automergeComment is the comment that gets posted when Atlantis automatically
// merges the PR.
var automergeComment = `Automatically merging because all plans have been successfully applied.`
-
-// applyAllDisabledComment is posted when apply all commands (i.e. "atlantis apply")
-// are disabled and an apply all command is issued.
-var applyAllDisabledComment = "**Error:** Running `atlantis apply` without flags is disabled." +
- " You must specify which project to apply via the `-d `, `-w ` or `-p ` flags."
-
-// applyDisabledComment is posted when apply commands are disabled globally and an apply command is issued.
-var applyDisabledComment = "**Error:** Running `atlantis apply` is disabled."
diff --git a/server/events/command_runner_internal_test.go b/server/events/command_runner_internal_test.go
index 2de083cad9..440002645a 100644
--- a/server/events/command_runner_internal_test.go
+++ b/server/events/command_runner_internal_test.go
@@ -7,7 +7,7 @@ import (
. "github.com/runatlantis/atlantis/testing"
)
-func TestUpdateCommitStatus(t *testing.T) {
+func TestApplyUpdateCommitStatus(t *testing.T) {
cases := map[string]struct {
cmd models.CommandName
pullStatus models.PullStatus
@@ -15,41 +15,6 @@ func TestUpdateCommitStatus(t *testing.T) {
expNumSuccess int
expNumTotal int
}{
- "single plan success": {
- cmd: models.PlanCommand,
- pullStatus: models.PullStatus{
- Projects: []models.ProjectStatus{
- {
- Status: models.PlannedPlanStatus,
- },
- },
- },
- expStatus: models.SuccessCommitStatus,
- expNumSuccess: 1,
- expNumTotal: 1,
- },
- "one plan error, other errors": {
- cmd: models.PlanCommand,
- pullStatus: models.PullStatus{
- Projects: []models.ProjectStatus{
- {
- Status: models.ErroredPlanStatus,
- },
- {
- Status: models.PlannedPlanStatus,
- },
- {
- Status: models.AppliedPlanStatus,
- },
- {
- Status: models.ErroredApplyStatus,
- },
- },
- },
- expStatus: models.FailedCommitStatus,
- expNumSuccess: 3,
- expNumTotal: 4,
- },
"apply, one pending": {
cmd: models.ApplyCommand,
pullStatus: models.PullStatus{
@@ -106,10 +71,72 @@ func TestUpdateCommitStatus(t *testing.T) {
for name, c := range cases {
t.Run(name, func(t *testing.T) {
csu := &MockCSU{}
- cr := &DefaultCommandRunner{
- CommitStatusUpdater: csu,
+ cr := &ApplyCommandRunner{
+ commitStatusUpdater: csu,
+ }
+ cr.updateCommitStatus(&CommandContext{}, c.pullStatus)
+ Equals(t, models.Repo{}, csu.CalledRepo)
+ Equals(t, models.PullRequest{}, csu.CalledPull)
+ Equals(t, c.expStatus, csu.CalledStatus)
+ Equals(t, c.cmd, csu.CalledCommand)
+ Equals(t, c.expNumSuccess, csu.CalledNumSuccess)
+ Equals(t, c.expNumTotal, csu.CalledNumTotal)
+ })
+ }
+}
+
+func TestPlanUpdateCommitStatus(t *testing.T) {
+ cases := map[string]struct {
+ cmd models.CommandName
+ pullStatus models.PullStatus
+ expStatus models.CommitStatus
+ expNumSuccess int
+ expNumTotal int
+ }{
+ "single plan success": {
+ cmd: models.PlanCommand,
+ pullStatus: models.PullStatus{
+ Projects: []models.ProjectStatus{
+ {
+ Status: models.PlannedPlanStatus,
+ },
+ },
+ },
+ expStatus: models.SuccessCommitStatus,
+ expNumSuccess: 1,
+ expNumTotal: 1,
+ },
+ "one plan error, other errors": {
+ cmd: models.PlanCommand,
+ pullStatus: models.PullStatus{
+ Projects: []models.ProjectStatus{
+ {
+ Status: models.ErroredPlanStatus,
+ },
+ {
+ Status: models.PlannedPlanStatus,
+ },
+ {
+ Status: models.AppliedPlanStatus,
+ },
+ {
+ Status: models.ErroredApplyStatus,
+ },
+ },
+ },
+ expStatus: models.FailedCommitStatus,
+ expNumSuccess: 3,
+ expNumTotal: 4,
+ },
+ }
+
+ for name, c := range cases {
+ t.Run(name, func(t *testing.T) {
+ csu := &MockCSU{}
+ cr := &PlanCommandRunner{
+ commitStatusUpdater: csu,
}
- cr.updateCommitStatus(&CommandContext{}, c.cmd, c.pullStatus)
+ cr.updateCommitStatus(&CommandContext{}, c.pullStatus)
Equals(t, models.Repo{}, csu.CalledRepo)
Equals(t, models.PullRequest{}, csu.CalledPull)
Equals(t, c.expStatus, csu.CalledStatus)
diff --git a/server/events/command_runner_test.go b/server/events/command_runner_test.go
index 1b8b333d05..47aba3c1d5 100644
--- a/server/events/command_runner_test.go
+++ b/server/events/command_runner_test.go
@@ -20,6 +20,7 @@ import (
"testing"
"github.com/runatlantis/atlantis/server/events/db"
+ "github.com/runatlantis/atlantis/server/events/yaml/valid"
"github.com/runatlantis/atlantis/server/logging"
"github.com/google/go-github/v31/github"
@@ -47,6 +48,19 @@ var workingDir events.WorkingDir
var pendingPlanFinder *mocks.MockPendingPlanFinder
var drainer *events.Drainer
var deleteLockCommand *mocks.MockDeleteLockCommand
+var commitUpdater *mocks.MockCommitStatusUpdater
+
+// TODO: refactor these into their own unit tests.
+// these were all split out from default command runner in an effort to improve
+// readability however the tests were kept as is.
+var dbUpdater *events.DBUpdater
+var pullUpdater *events.PullUpdater
+var autoMerger *events.AutoMerger
+var policyCheckCommandRunner *events.PolicyCheckCommandRunner
+var approvePoliciesCommandRunner *events.ApprovePoliciesCommandRunner
+var planCommandRunner *events.PlanCommandRunner
+var applyCommandRunner *events.ApplyCommandRunner
+var unlockCommandRunner *events.UnlockCommandRunner
func setup(t *testing.T) *vcsmocks.MockClient {
RegisterMockTestingT(t)
@@ -61,6 +75,7 @@ func setup(t *testing.T) *vcsmocks.MockClient {
projectCommandRunner = mocks.NewMockProjectCommandRunner()
workingDir = mocks.NewMockWorkingDir()
pendingPlanFinder = mocks.NewMockPendingPlanFinder()
+ commitUpdater = mocks.NewMockCommitStatusUpdater()
tmp, cleanup := TempDir(t)
defer cleanup()
@@ -72,25 +87,91 @@ func setup(t *testing.T) *vcsmocks.MockClient {
When(logger.GetLevel()).ThenReturn(logging.Info)
When(logger.NewLogger("runatlantis/atlantis#1", true, logging.Info)).
ThenReturn(pullLogger)
+
+ dbUpdater = &events.DBUpdater{
+ DB: defaultBoltDB,
+ }
+
+ pullUpdater = &events.PullUpdater{
+ HidePrevPlanComments: false,
+ VCSClient: vcsClient,
+ MarkdownRenderer: &events.MarkdownRenderer{},
+ }
+
+ autoMerger = &events.AutoMerger{
+ VCSClient: vcsClient,
+ GlobalAutomerge: false,
+ }
+
+ parallelPoolSize := 1
+ policyCheckCommandRunner = events.NewPolicyCheckCommandRunner(
+ dbUpdater,
+ pullUpdater,
+ commitUpdater,
+ projectCommandRunner,
+ parallelPoolSize,
+ )
+
+ planCommandRunner = events.NewPlanCommandRunner(
+ false,
+ vcsClient,
+ pendingPlanFinder,
+ workingDir,
+ commitUpdater,
+ projectCommandBuilder,
+ projectCommandRunner,
+ dbUpdater,
+ pullUpdater,
+ policyCheckCommandRunner,
+ autoMerger,
+ parallelPoolSize,
+ )
+
+ applyCommandRunner = events.NewApplyCommandRunner(
+ vcsClient,
+ false,
+ false,
+ commitUpdater,
+ projectCommandBuilder,
+ projectCommandRunner,
+ autoMerger,
+ pullUpdater,
+ dbUpdater,
+ defaultBoltDB,
+ parallelPoolSize,
+ )
+
+ approvePoliciesCommandRunner = events.NewApprovePoliciesCommandRunner(
+ commitUpdater,
+ projectCommandBuilder,
+ projectCommandRunner,
+ pullUpdater,
+ dbUpdater,
+ )
+
+ unlockCommandRunner = events.NewUnlockCommandRunner(
+ deleteLockCommand,
+ vcsClient,
+ )
+
+ commentCommandRunnerByCmd := map[models.CommandName]events.CommentCommandRunner{
+ models.PlanCommand: planCommandRunner,
+ models.ApplyCommand: applyCommandRunner,
+ models.ApprovePoliciesCommand: approvePoliciesCommandRunner,
+ models.UnlockCommand: unlockCommandRunner,
+ }
+
ch = events.DefaultCommandRunner{
- VCSClient: vcsClient,
- CommitStatusUpdater: &events.DefaultCommitStatusUpdater{vcsClient, "atlantis"},
- EventParser: eventParsing,
- MarkdownRenderer: &events.MarkdownRenderer{},
- GithubPullGetter: githubGetter,
- GitlabMergeRequestGetter: gitlabGetter,
- AzureDevopsPullGetter: azuredevopsGetter,
- Logger: logger,
- AllowForkPRs: false,
- AllowForkPRsFlag: "allow-fork-prs-flag",
- ProjectCommandBuilder: projectCommandBuilder,
- ProjectCommandRunner: projectCommandRunner,
- PendingPlanFinder: pendingPlanFinder,
- WorkingDir: workingDir,
- DisableApplyAll: false,
- DB: defaultBoltDB,
- Drainer: drainer,
- DeleteLockCommand: deleteLockCommand,
+ VCSClient: vcsClient,
+ CommentCommandRunnerByCmd: commentCommandRunnerByCmd,
+ EventParser: eventParsing,
+ GithubPullGetter: githubGetter,
+ GitlabMergeRequestGetter: gitlabGetter,
+ AzureDevopsPullGetter: azuredevopsGetter,
+ Logger: logger,
+ AllowForkPRs: false,
+ AllowForkPRsFlag: "allow-fork-prs-flag",
+ Drainer: drainer,
}
return vcsClient
}
@@ -193,8 +274,14 @@ func TestRunCommentCommand_DisableApplyAllDisabled(t *testing.T) {
t.Log("if \"atlantis apply\" is run and this is disabled atlantis should" +
" comment saying that this is not allowed")
vcsClient := setup(t)
- ch.DisableApplyAll = true
- modelPull := models.PullRequest{BaseRepo: fixtures.GithubRepo, State: models.OpenPullState}
+ applyCommandRunner.DisableApplyAll = true
+ pull := &github.PullRequest{
+ State: github.String("open"),
+ }
+ modelPull := models.PullRequest{BaseRepo: fixtures.GithubRepo, State: models.OpenPullState, Num: fixtures.Pull.Num}
+ When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil)
+ When(eventParsing.ParseGithubPull(pull)).ThenReturn(modelPull, modelPull.BaseRepo, fixtures.GithubRepo, nil)
+
ch.RunCommentCommand(fixtures.GithubRepo, nil, nil, fixtures.User, modelPull.Num, &events.CommentCommand{Name: models.ApplyCommand})
vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, modelPull.Num, "**Error:** Running `atlantis apply` without flags is disabled. You must specify which project to apply via the `-d `, `-w ` or `-p ` flags.", "apply")
}
@@ -203,8 +290,15 @@ func TestRunCommentCommand_ApplyDisabled(t *testing.T) {
t.Log("if \"atlantis apply\" is run and this is disabled globally atlantis should" +
" comment saying that this is not allowed")
vcsClient := setup(t)
- ch.DisableApply = true
- modelPull := models.PullRequest{State: models.OpenPullState}
+ applyCommandRunner.DisableApply = true
+ defer func() { applyCommandRunner.DisableApply = false }()
+ pull := &github.PullRequest{
+ State: github.String("open"),
+ }
+ modelPull := models.PullRequest{BaseRepo: fixtures.GithubRepo, State: models.OpenPullState, Num: fixtures.Pull.Num}
+ When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil)
+ When(eventParsing.ParseGithubPull(pull)).ThenReturn(modelPull, modelPull.BaseRepo, fixtures.GithubRepo, nil)
+
ch.RunCommentCommand(fixtures.GithubRepo, nil, nil, fixtures.User, modelPull.Num, &events.CommentCommand{Name: models.ApplyCommand})
vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, modelPull.Num, "**Error:** Running `atlantis apply` is disabled.", "apply")
}
@@ -217,8 +311,12 @@ func TestRunCommentCommand_DisableDisableAutoplan(t *testing.T) {
When(projectCommandBuilder.BuildAutoplanCommands(matchers.AnyPtrToEventsCommandContext())).
ThenReturn([]models.ProjectCommandContext{
- {},
- {},
+ {
+ CommandName: models.PlanCommand,
+ },
+ {
+ CommandName: models.PlanCommand,
+ },
}, nil)
ch.RunAutoplanCommand(fixtures.GithubRepo, fixtures.GithubRepo, fixtures.Pull, fixtures.User)
@@ -232,7 +330,7 @@ func TestRunCommentCommand_ClosedPull(t *testing.T) {
pull := &github.PullRequest{
State: github.String("closed"),
}
- modelPull := models.PullRequest{BaseRepo: fixtures.GithubRepo, State: models.ClosedPullState}
+ modelPull := models.PullRequest{BaseRepo: fixtures.GithubRepo, State: models.ClosedPullState, Num: fixtures.Pull.Num}
When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil)
When(eventParsing.ParseGithubPull(pull)).ThenReturn(modelPull, modelPull.BaseRepo, fixtures.GithubRepo, nil)
@@ -248,7 +346,7 @@ func TestRunUnlockCommand_VCSComment(t *testing.T) {
pull := &github.PullRequest{
State: github.String("open"),
}
- modelPull := models.PullRequest{BaseRepo: fixtures.GithubRepo, State: models.OpenPullState}
+ modelPull := models.PullRequest{BaseRepo: fixtures.GithubRepo, State: models.OpenPullState, Num: fixtures.Pull.Num}
When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil)
When(eventParsing.ParseGithubPull(pull)).ThenReturn(modelPull, modelPull.BaseRepo, fixtures.GithubRepo, nil)
@@ -266,7 +364,7 @@ func TestRunUnlockCommandFail_VCSComment(t *testing.T) {
pull := &github.PullRequest{
State: github.String("open"),
}
- modelPull := models.PullRequest{BaseRepo: fixtures.GithubRepo, State: models.OpenPullState}
+ modelPull := models.PullRequest{BaseRepo: fixtures.GithubRepo, State: models.OpenPullState, Num: fixtures.Pull.Num}
When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil)
When(eventParsing.ParseGithubPull(pull)).ThenReturn(modelPull, modelPull.BaseRepo, fixtures.GithubRepo, nil)
When(deleteLockCommand.DeleteLocksByPull(fixtures.GithubRepo.FullName, fixtures.Pull.Num)).ThenReturn(errors.New("err"))
@@ -284,14 +382,19 @@ func TestRunAutoplanCommand_DeletePlans(t *testing.T) {
defer cleanup()
boltDB, err := db.New(tmp)
Ok(t, err)
- ch.DB = boltDB
- ch.GlobalAutomerge = true
- defer func() { ch.GlobalAutomerge = false }()
+ dbUpdater.DB = boltDB
+ applyCommandRunner.DB = boltDB
+ autoMerger.GlobalAutomerge = true
+ defer func() { autoMerger.GlobalAutomerge = false }()
When(projectCommandBuilder.BuildAutoplanCommands(matchers.AnyPtrToEventsCommandContext())).
ThenReturn([]models.ProjectCommandContext{
- {},
- {},
+ {
+ CommandName: models.PlanCommand,
+ },
+ {
+ CommandName: models.PlanCommand,
+ },
}, nil)
callCount := 0
When(projectCommandRunner.Plan(matchers.AnyModelsProjectCommandContext())).Then(func(_ []Param) ReturnValues {
@@ -319,6 +422,165 @@ func TestRunAutoplanCommand_DeletePlans(t *testing.T) {
pendingPlanFinder.VerifyWasCalledOnce().DeletePlans(tmp)
}
+func TestFailedApprovalCreatesFailedStatusUpdate(t *testing.T) {
+ t.Log("if \"atlantis approve_policies\" is run by non policy owner policy check status fails.")
+ setup(t)
+ tmp, cleanup := TempDir(t)
+ defer cleanup()
+ boltDB, err := db.New(tmp)
+ Ok(t, err)
+ dbUpdater.DB = boltDB
+ applyCommandRunner.DB = boltDB
+ autoMerger.GlobalAutomerge = true
+ defer func() { autoMerger.GlobalAutomerge = false }()
+
+ pull := &github.PullRequest{
+ State: github.String("open"),
+ }
+
+ modelPull := models.PullRequest{
+ BaseRepo: fixtures.GithubRepo,
+ State: models.OpenPullState,
+ Num: fixtures.Pull.Num,
+ }
+ When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil)
+ When(eventParsing.ParseGithubPull(pull)).ThenReturn(modelPull, modelPull.BaseRepo, fixtures.GithubRepo, nil)
+
+ When(projectCommandBuilder.BuildApprovePoliciesCommands(matchers.AnyPtrToEventsCommandContext(), matchers.AnyPtrToEventsCommentCommand())).ThenReturn([]models.ProjectCommandContext{
+ {
+ CommandName: models.ApprovePoliciesCommand,
+ },
+ {
+ CommandName: models.ApprovePoliciesCommand,
+ },
+ }, nil)
+
+ When(workingDir.GetPullDir(fixtures.GithubRepo, fixtures.Pull)).ThenReturn(tmp, nil)
+
+ ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, &fixtures.Pull, fixtures.User, fixtures.Pull.Num, &events.CommentCommand{Name: models.ApprovePoliciesCommand})
+ commitUpdater.VerifyWasCalledOnce().UpdateCombinedCount(
+ matchers.AnyModelsRepo(),
+ matchers.AnyModelsPullRequest(),
+ matchers.EqModelsCommitStatus(models.SuccessCommitStatus),
+ matchers.EqModelsCommandName(models.PolicyCheckCommand),
+ EqInt(0),
+ EqInt(0),
+ )
+}
+
+func TestApprovedPoliciesUpdateFailedPolicyStatus(t *testing.T) {
+ t.Log("if \"atlantis approve_policies\" is run by policy owner all policy checks are approved.")
+ setup(t)
+ tmp, cleanup := TempDir(t)
+ defer cleanup()
+ boltDB, err := db.New(tmp)
+ Ok(t, err)
+ dbUpdater.DB = boltDB
+ applyCommandRunner.DB = boltDB
+ autoMerger.GlobalAutomerge = true
+ defer func() { autoMerger.GlobalAutomerge = false }()
+
+ pull := &github.PullRequest{
+ State: github.String("open"),
+ }
+
+ modelPull := models.PullRequest{
+ BaseRepo: fixtures.GithubRepo,
+ State: models.OpenPullState,
+ Num: fixtures.Pull.Num,
+ }
+ When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil)
+ When(eventParsing.ParseGithubPull(pull)).ThenReturn(modelPull, modelPull.BaseRepo, fixtures.GithubRepo, nil)
+
+ When(projectCommandBuilder.BuildApprovePoliciesCommands(matchers.AnyPtrToEventsCommandContext(), matchers.AnyPtrToEventsCommentCommand())).ThenReturn([]models.ProjectCommandContext{
+ {
+ CommandName: models.ApprovePoliciesCommand,
+ PolicySets: valid.PolicySets{
+ Owners: valid.PolicyOwners{
+ Users: []string{fixtures.User.Username},
+ },
+ },
+ },
+ }, nil)
+
+ When(workingDir.GetPullDir(fixtures.GithubRepo, fixtures.Pull)).ThenReturn(tmp, nil)
+ When(projectCommandRunner.ApprovePolicies(matchers.AnyModelsProjectCommandContext())).Then(func(_ []Param) ReturnValues {
+ return ReturnValues{
+ models.ProjectResult{
+ Command: models.PolicyCheckCommand,
+ PolicyCheckSuccess: &models.PolicyCheckSuccess{},
+ },
+ }
+ })
+
+ ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, &fixtures.Pull, fixtures.User, fixtures.Pull.Num, &events.CommentCommand{Name: models.ApprovePoliciesCommand})
+ commitUpdater.VerifyWasCalledOnce().UpdateCombinedCount(
+ matchers.AnyModelsRepo(),
+ matchers.AnyModelsPullRequest(),
+ matchers.EqModelsCommitStatus(models.SuccessCommitStatus),
+ matchers.EqModelsCommandName(models.PolicyCheckCommand),
+ EqInt(1),
+ EqInt(1),
+ )
+}
+
+func TestApplyMergeablityWhenPolicyCheckFails(t *testing.T) {
+ t.Log("if \"atlantis apply\" is run with failing policy check then apply is not performed")
+ setup(t)
+ tmp, cleanup := TempDir(t)
+ defer cleanup()
+ boltDB, err := db.New(tmp)
+ Ok(t, err)
+ dbUpdater.DB = boltDB
+ applyCommandRunner.DB = boltDB
+ autoMerger.GlobalAutomerge = true
+ defer func() { autoMerger.GlobalAutomerge = false }()
+
+ pull := &github.PullRequest{
+ State: github.String("open"),
+ }
+
+ modelPull := models.PullRequest{
+ BaseRepo: fixtures.GithubRepo,
+ State: models.OpenPullState,
+ Num: fixtures.Pull.Num,
+ }
+ When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil)
+ When(eventParsing.ParseGithubPull(pull)).ThenReturn(modelPull, modelPull.BaseRepo, fixtures.GithubRepo, nil)
+
+ _, _ = boltDB.UpdatePullWithResults(modelPull, []models.ProjectResult{
+ {
+ Command: models.PolicyCheckCommand,
+ Error: fmt.Errorf("failing policy"),
+ ProjectName: "default",
+ Workspace: "default",
+ RepoRelDir: ".",
+ },
+ })
+
+ When(ch.VCSClient.PullIsMergeable(fixtures.GithubRepo, modelPull)).ThenReturn(true, nil)
+
+ When(projectCommandBuilder.BuildApplyCommands(matchers.AnyPtrToEventsCommandContext(), matchers.AnyPtrToEventsCommentCommand())).Then(func(args []Param) ReturnValues {
+ ctx := args[0].(*events.CommandContext)
+ Equals(t, false, ctx.PullMergeable)
+
+ return ReturnValues{
+ []models.ProjectCommandContext{
+ {
+ CommandName: models.ApplyCommand,
+ ProjectName: "default",
+ Workspace: "default",
+ RepoRelDir: ".",
+ },
+ },
+ nil,
+ }
+ })
+
+ When(workingDir.GetPullDir(fixtures.GithubRepo, modelPull)).ThenReturn(tmp, nil)
+ ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, &modelPull, fixtures.User, fixtures.Pull.Num, &events.CommentCommand{Name: models.ApplyCommand})
+}
+
func TestApplyWithAutoMerge_VSCMerge(t *testing.T) {
t.Log("if \"atlantis apply\" is run with automerge then a VCS merge is performed")
@@ -329,8 +591,8 @@ func TestApplyWithAutoMerge_VSCMerge(t *testing.T) {
modelPull := models.PullRequest{BaseRepo: fixtures.GithubRepo, State: models.OpenPullState}
When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil)
When(eventParsing.ParseGithubPull(pull)).ThenReturn(modelPull, modelPull.BaseRepo, fixtures.GithubRepo, nil)
- ch.GlobalAutomerge = true
- defer func() { ch.GlobalAutomerge = false }()
+ autoMerger.GlobalAutomerge = true
+ defer func() { autoMerger.GlobalAutomerge = false }()
ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, nil, fixtures.User, fixtures.Pull.Num, &events.CommentCommand{Name: models.ApplyCommand})
vcsClient.VerifyWasCalledOnce().MergePull(modelPull)
@@ -340,13 +602,14 @@ func TestRunApply_DiscardedProjects(t *testing.T) {
t.Log("if \"atlantis apply\" is run with automerge and at least one project" +
" has a discarded plan, automerge should not take place")
vcsClient := setup(t)
- ch.GlobalAutomerge = true
- defer func() { ch.GlobalAutomerge = false }()
+ autoMerger.GlobalAutomerge = true
+ defer func() { autoMerger.GlobalAutomerge = false }()
tmp, cleanup := TempDir(t)
defer cleanup()
boltDB, err := db.New(tmp)
Ok(t, err)
- ch.DB = boltDB
+ dbUpdater.DB = boltDB
+ applyCommandRunner.DB = boltDB
pull := fixtures.Pull
pull.BaseRepo = fixtures.GithubRepo
_, err = boltDB.UpdatePullWithResults(pull, []models.ProjectResult{
diff --git a/server/events/comment_parser.go b/server/events/comment_parser.go
index d8751df182..b0c7f58c72 100644
--- a/server/events/comment_parser.go
+++ b/server/events/comment_parser.go
@@ -91,7 +91,7 @@ type CommentParseResult struct {
// Valid commands contain:
// - The initial "executable" name, 'run' or 'atlantis' or '@GithubUser'
// where GithubUser is the API user Atlantis is running as.
-// - Then a command, either 'plan', 'apply', or 'help'.
+// - Then a command, either 'plan', 'apply', 'approve_policies', or 'help'.
// - Then optional flags, then an optional separator '--' followed by optional
// extra flags to be appended to the terraform plan/apply command.
//
@@ -101,6 +101,7 @@ type CommentParseResult struct {
// - @GithubUser plan -w staging
// - atlantis plan -w staging -d dir --verbose
// - atlantis plan --verbose -- -key=value -key2 value2
+// - atlantis approve_policies
//
func (e *CommentParser) Parse(comment string, vcsHost models.VCSHostType) CommentParseResult {
if multiLineRegex.MatchString(comment) {
@@ -159,8 +160,8 @@ func (e *CommentParser) Parse(comment string, vcsHost models.VCSHostType) Commen
return CommentParseResult{CommentResponse: e.HelpComment(e.ApplyDisabled)}
}
- // Need to have a plan, apply or unlock at this point.
- if !e.stringInSlice(command, []string{models.PlanCommand.String(), models.ApplyCommand.String(), models.UnlockCommand.String()}) {
+	// Need to have a plan, apply, approve_policies or unlock at this point.
+ if !e.stringInSlice(command, []string{models.PlanCommand.String(), models.ApplyCommand.String(), models.UnlockCommand.String(), models.ApprovePoliciesCommand.String()}) {
return CommentParseResult{CommentResponse: fmt.Sprintf("```\nError: unknown command %q.\nRun 'atlantis --help' for usage.\n```", command)}
}
@@ -189,11 +190,15 @@ func (e *CommentParser) Parse(comment string, vcsHost models.VCSHostType) Commen
flagSet.StringVarP(&dir, dirFlagLong, dirFlagShort, "", "Apply the plan for this directory, relative to root of repo, ex. 'child/dir'.")
flagSet.StringVarP(&project, projectFlagLong, projectFlagShort, "", fmt.Sprintf("Apply the plan for this project. Refers to the name of the project configured in %s. Cannot be used at same time as workspace or dir flags.", yaml.AtlantisYAMLFilename))
flagSet.BoolVarP(&verbose, verboseFlagLong, verboseFlagShort, false, "Append Atlantis log to comment.")
+ case models.ApprovePoliciesCommand.String():
+ name = models.ApprovePoliciesCommand
+ flagSet = pflag.NewFlagSet(models.ApprovePoliciesCommand.String(), pflag.ContinueOnError)
+ flagSet.SetOutput(ioutil.Discard)
+ flagSet.BoolVarP(&verbose, verboseFlagLong, verboseFlagShort, false, "Append Atlantis log to comment.")
case models.UnlockCommand.String():
name = models.UnlockCommand
flagSet = pflag.NewFlagSet(models.UnlockCommand.String(), pflag.ContinueOnError)
flagSet.SetOutput(ioutil.Discard)
-
default:
return CommentParseResult{CommentResponse: fmt.Sprintf("Error: unknown command %q – this is a bug", command)}
}
diff --git a/server/events/comment_parser_test.go b/server/events/comment_parser_test.go
index 11b3878d0f..8d1a773fdc 100644
--- a/server/events/comment_parser_test.go
+++ b/server/events/comment_parser_test.go
@@ -130,14 +130,24 @@ func TestParse_UnusedArguments(t *testing.T) {
"arg arg2 --",
"arg arg2",
},
+ {
+ models.ApprovePoliciesCommand,
+ "arg arg2 --",
+ "arg arg2",
+ },
}
for _, c := range cases {
comment := fmt.Sprintf("atlantis %s %s", c.Command.String(), c.Args)
t.Run(comment, func(t *testing.T) {
r := commentParser.Parse(comment, models.Github)
- usage := PlanUsage
- if c.Command == models.ApplyCommand {
+ var usage string
+ switch c.Command {
+ case models.PlanCommand:
+ usage = PlanUsage
+ case models.ApplyCommand:
usage = ApplyUsage
+ case models.ApprovePoliciesCommand:
+ usage = ApprovePolicyUsage
}
Equals(t, fmt.Sprintf("```\nError: unknown argument(s) – %s.\n%s```", c.Unused, usage), r.CommentResponse)
})
@@ -194,6 +204,8 @@ func TestParse_SubcommandUsage(t *testing.T) {
"atlantis plan --help",
"atlantis apply -h",
"atlantis apply --help",
+ "atlantis approve_policies -h",
+ "atlantis approve_policies --help",
}
for _, c := range comments {
r := commentParser.Parse(c, models.Github)
@@ -538,6 +550,7 @@ func TestParse_Parsing(t *testing.T) {
"",
},
}
+
for _, test := range cases {
for _, cmdName := range []string{"plan", "apply"} {
comment := fmt.Sprintf("atlantis %s %s", cmdName, test.flags)
@@ -555,6 +568,9 @@ func TestParse_Parsing(t *testing.T) {
if cmdName == "apply" {
Assert(t, r.Command.Name == models.ApplyCommand, "did not parse comment %q as apply command", comment)
}
+ if cmdName == "approve_policies" {
+ Assert(t, r.Command.Name == models.ApprovePoliciesCommand, "did not parse comment %q as approve_policies command", comment)
+ }
})
}
}
@@ -792,6 +808,10 @@ var ApplyUsage = `Usage of apply:
--verbose Append Atlantis log to comment.
-w, --workspace string Apply the plan for this Terraform workspace.
`
+
+var ApprovePolicyUsage = `Usage of approve_policies:
+ --verbose Append Atlantis log to comment.
+`
var UnlockUsage = "`Usage of unlock:`\n\n ```cmake\n" +
`atlantis unlock
diff --git a/server/events/commit_status_updater.go b/server/events/commit_status_updater.go
index 2d8238febd..d5be3b0910 100644
--- a/server/events/commit_status_updater.go
+++ b/server/events/commit_status_updater.go
@@ -61,10 +61,17 @@ func (d *DefaultCommitStatusUpdater) UpdateCombined(repo models.Repo, pull model
func (d *DefaultCommitStatusUpdater) UpdateCombinedCount(repo models.Repo, pull models.PullRequest, status models.CommitStatus, command models.CommandName, numSuccess int, numTotal int) error {
src := fmt.Sprintf("%s/%s", d.StatusName, command.String())
- cmdVerb := "planned"
- if command == models.ApplyCommand {
+ cmdVerb := "unknown"
+
+ switch command {
+ case models.PlanCommand:
+ cmdVerb = "planned"
+ case models.PolicyCheckCommand:
+ cmdVerb = "policies checked"
+ case models.ApplyCommand:
cmdVerb = "applied"
}
+
return d.Client.UpdateStatus(repo, pull, status, src, fmt.Sprintf("%d/%d projects %s successfully.", numSuccess, numTotal, cmdVerb), "")
}
diff --git a/server/events/db_updater.go b/server/events/db_updater.go
new file mode 100644
index 0000000000..202ca54235
--- /dev/null
+++ b/server/events/db_updater.go
@@ -0,0 +1,26 @@
+package events
+
+import (
+ "github.com/runatlantis/atlantis/server/events/db"
+ "github.com/runatlantis/atlantis/server/events/models"
+)
+
+type DBUpdater struct {
+ DB *db.BoltDB
+}
+
+func (c *DBUpdater) updateDB(ctx *CommandContext, pull models.PullRequest, results []models.ProjectResult) (models.PullStatus, error) {
+ // Filter out results that errored due to the directory not existing. We
+ // don't store these in the database because they would never be "apply-able"
+ // and so the pull request would always have errors.
+ var filtered []models.ProjectResult
+ for _, r := range results {
+ if _, ok := r.Error.(DirNotExistErr); ok {
+ ctx.Log.Debug("ignoring error result from project at dir %q workspace %q because it is dir not exist error", r.RepoRelDir, r.Workspace)
+ continue
+ }
+ filtered = append(filtered, r)
+ }
+ ctx.Log.Debug("updating DB with pull results")
+ return c.DB.UpdatePullWithResults(pull, filtered)
+}
diff --git a/server/events/event_parser.go b/server/events/event_parser.go
index 90d63b0926..5a351ffcf8 100644
--- a/server/events/event_parser.go
+++ b/server/events/event_parser.go
@@ -43,11 +43,30 @@ type PullCommand interface {
IsAutoplan() bool
}
+// PolicyCheckCommand is a policy_check command that is automatically triggered
+// after a successful plan command.
+type PolicyCheckCommand struct{}
+
+// CommandName is policy_check.
+func (c PolicyCheckCommand) CommandName() models.CommandName {
+ return models.PolicyCheckCommand
+}
+
+// IsVerbose is false for policy_check commands.
+func (c PolicyCheckCommand) IsVerbose() bool {
+ return false
+}
+
+// IsAutoplan is false for policy_check commands.
+func (c PolicyCheckCommand) IsAutoplan() bool {
+ return false
+}
+
// AutoplanCommand is a plan command that is automatically triggered when a
// pull request is opened or updated.
type AutoplanCommand struct{}
-// CommandName is Plan.
+// CommandName is plan.
func (c AutoplanCommand) CommandName() models.CommandName {
return models.PlanCommand
}
diff --git a/server/events/markdown_renderer.go b/server/events/markdown_renderer.go
index fed9118616..eb05ba676e 100644
--- a/server/events/markdown_renderer.go
+++ b/server/events/markdown_renderer.go
@@ -24,8 +24,10 @@ import (
)
const (
- planCommandTitle = "Plan"
- applyCommandTitle = "Apply"
+ planCommandTitle = "Plan"
+ applyCommandTitle = "Apply"
+ policyCheckCommandTitle = "Policy Check"
+ approvePoliciesCommandTitle = "Approve Policies"
// maxUnwrappedLines is the maximum number of lines the Terraform output
// can be before we wrap it in an expandable template.
maxUnwrappedLines = 12
@@ -79,6 +81,10 @@ type planSuccessData struct {
DisableRepoLocking bool
}
+type policyCheckSuccessData struct {
+ models.PolicyCheckSuccess
+}
+
type projectResultTmplData struct {
Workspace string
RepoRelDir string
@@ -89,7 +95,7 @@ type projectResultTmplData struct {
// Render formats the data into a markdown string.
// nolint: interfacer
func (m *MarkdownRenderer) Render(res CommandResult, cmdName models.CommandName, log string, verbose bool, vcsHost models.VCSHostType) string {
- commandStr := strings.Title(cmdName.String())
+ commandStr := strings.Title(strings.Replace(cmdName.String(), "_", " ", -1))
common := commonData{
Command: commandStr,
Verbose: verbose,
@@ -111,6 +117,7 @@ func (m *MarkdownRenderer) Render(res CommandResult, cmdName models.CommandName,
func (m *MarkdownRenderer) renderProjectResults(results []models.ProjectResult, common commonData, vcsHost models.VCSHostType) string {
var resultsTmplData []projectResultTmplData
numPlanSuccesses := 0
+ numPolicyCheckSuccesses := 0
for _, result := range results {
resultData := projectResultTmplData{
@@ -145,6 +152,13 @@ func (m *MarkdownRenderer) renderProjectResults(results []models.ProjectResult,
resultData.Rendered = m.renderTemplate(planSuccessUnwrappedTmpl, planSuccessData{PlanSuccess: *result.PlanSuccess, PlanWasDeleted: common.PlansDeleted, DisableApply: common.DisableApply, DisableRepoLocking: common.DisableRepoLocking})
}
numPlanSuccesses++
+ } else if result.PolicyCheckSuccess != nil {
+ if m.shouldUseWrappedTmpl(vcsHost, result.PolicyCheckSuccess.PolicyCheckOutput) {
+ resultData.Rendered = m.renderTemplate(policyCheckSuccessWrappedTmpl, policyCheckSuccessData{PolicyCheckSuccess: *result.PolicyCheckSuccess})
+ } else {
+ resultData.Rendered = m.renderTemplate(policyCheckSuccessUnwrappedTmpl, policyCheckSuccessData{PolicyCheckSuccess: *result.PolicyCheckSuccess})
+ }
+ numPolicyCheckSuccesses++
} else if result.ApplySuccess != "" {
if m.shouldUseWrappedTmpl(vcsHost, result.ApplySuccess) {
resultData.Rendered = m.renderTemplate(applyWrappedSuccessTmpl, struct{ Output string }{result.ApplySuccess})
@@ -163,10 +177,17 @@ func (m *MarkdownRenderer) renderProjectResults(results []models.ProjectResult,
tmpl = singleProjectPlanSuccessTmpl
case len(resultsTmplData) == 1 && common.Command == planCommandTitle && numPlanSuccesses == 0:
tmpl = singleProjectPlanUnsuccessfulTmpl
+ case len(resultsTmplData) == 1 && common.Command == policyCheckCommandTitle && numPolicyCheckSuccesses > 0:
+ tmpl = singleProjectPlanSuccessTmpl
+ case len(resultsTmplData) == 1 && common.Command == policyCheckCommandTitle && numPolicyCheckSuccesses == 0:
+ tmpl = singleProjectPlanUnsuccessfulTmpl
case len(resultsTmplData) == 1 && common.Command == applyCommandTitle:
tmpl = singleProjectApplyTmpl
- case common.Command == planCommandTitle:
+ case common.Command == planCommandTitle,
+ common.Command == policyCheckCommandTitle:
tmpl = multiProjectPlanTmpl
+ case common.Command == approvePoliciesCommandTitle:
+ tmpl = approveAllProjectsTmpl
case common.Command == applyCommandTitle:
tmpl = multiProjectApplyTmpl
default:
@@ -218,6 +239,11 @@ var singleProjectPlanSuccessTmpl = template.Must(template.New("").Parse(
var singleProjectPlanUnsuccessfulTmpl = template.Must(template.New("").Parse(
"{{$result := index .Results 0}}Ran {{.Command}} for dir: `{{$result.RepoRelDir}}` workspace: `{{$result.Workspace}}`\n\n" +
"{{$result.Rendered}}\n" + logTmpl))
+var approveAllProjectsTmpl = template.Must(template.New("").Funcs(sprig.TxtFuncMap()).Parse(
+ "Approved Policies for {{ len .Results }} projects:\n\n" +
+ "{{ range $result := .Results }}" +
+ "1. {{ if $result.ProjectName }}project: `{{$result.ProjectName}}` {{ end }}dir: `{{$result.RepoRelDir}}` workspace: `{{$result.Workspace}}`\n" +
+ "{{end}}\n" + logTmpl))
var multiProjectPlanTmpl = template.Must(template.New("").Funcs(sprig.TxtFuncMap()).Parse(
"Ran {{.Command}} for {{ len .Results }} projects:\n\n" +
"{{ range $result := .Results }}" +
@@ -257,6 +283,29 @@ var planSuccessWrappedTmpl = template.Must(template.New("").Parse(
"" +
"{{ if .HasDiverged }}\n\n:warning: The branch we're merging into is ahead, it is recommended to pull new commits first.{{end}}"))
+var policyCheckSuccessUnwrappedTmpl = template.Must(template.New("").Parse(
+ "```diff\n" +
+ "{{.PolicyCheckOutput}}\n" +
+ "```\n\n" + policyCheckNextSteps +
+ "{{ if .HasDiverged }}\n\n:warning: The branch we're merging into is ahead, it is recommended to pull new commits first.{{end}}"))
+
+var policyCheckSuccessWrappedTmpl = template.Must(template.New("").Parse(
+ "Show Output
\n\n" +
+ "```diff\n" +
+ "{{.PolicyCheckOutput}}\n" +
+ "```\n\n" +
+ policyCheckNextSteps + "\n" +
+ " " +
+ "{{ if .HasDiverged }}\n\n:warning: The branch we're merging into is ahead, it is recommended to pull new commits first.{{end}}"))
+
+// policyCheckNextSteps are instructions appended after successful policy checks as to what
+// to do next.
+var policyCheckNextSteps = "* :arrow_forward: To **apply** this plan, comment:\n" +
+ " * `{{.ApplyCmd}}`\n" +
+ "* :put_litter_in_its_place: To **delete** this plan click [here]({{.LockURL}})\n" +
+ "* :repeat: To re-run policies **plan** this project again by commenting:\n" +
+ " * `{{.RePlanCmd}}`"
+
// planNextSteps are instructions appended after successful plans as to what
// to do next.
var planNextSteps = "{{ if .PlanWasDeleted }}This plan was not saved because one or more projects failed and automerge requires all plans pass.{{ else }}" +
@@ -278,7 +327,10 @@ var applyWrappedSuccessTmpl = template.Must(template.New("").Parse(
var unwrappedErrTmplText = "**{{.Command}} Error**\n" +
"```\n" +
"{{.Error}}\n" +
- "```"
+ "```" +
+ "{{ if eq .Command \"Policy Check\" }}" +
+ "\n* :heavy_check_mark: To **approve** failing policies either request an approval from approvers or address the failure by modifying the codebase.\n" +
+ "{{ end }}"
var wrappedErrTmplText = "**{{.Command}} Error**\n" +
"Show Output
\n\n" +
"```\n" +
diff --git a/server/events/markdown_renderer_test.go b/server/events/markdown_renderer_test.go
index ac4a77fd0e..c1c39358eb 100644
--- a/server/events/markdown_renderer_test.go
+++ b/server/events/markdown_renderer_test.go
@@ -44,6 +44,13 @@ func TestRenderErr(t *testing.T) {
err,
"**Plan Error**\n```\nerr\n```\n",
},
+ {
+ "policy check error",
+ models.PolicyCheckCommand,
+ err,
+ "**Policy Check Error**\n```\nerr\n```" +
+ "\n* :heavy_check_mark: To **approve** failing policies either request an approval from approvers or address the failure by modifying the codebase.\n\n",
+ },
}
r := events.MarkdownRenderer{}
@@ -83,6 +90,12 @@ func TestRenderFailure(t *testing.T) {
"failure",
"**Plan Failed**: failure\n",
},
+ {
+ "policy check failure",
+ models.PolicyCheckCommand,
+ "failure",
+ "**Policy Check Failed**: failure\n",
+ },
}
r := events.MarkdownRenderer{}
@@ -230,6 +243,42 @@ $$$
* :repeat: To **plan** this project again, comment:
* $atlantis plan -d path -w workspace$
+---
+* :fast_forward: To **apply** all unapplied plans from this pull request, comment:
+ * $atlantis apply$
+* :put_litter_in_its_place: To delete all plans and locks for the PR, comment:
+ * $atlantis unlock$
+`,
+ },
+ {
+ "single successful policy check with project name",
+ models.PolicyCheckCommand,
+ []models.ProjectResult{
+ {
+ PolicyCheckSuccess: &models.PolicyCheckSuccess{
+ PolicyCheckOutput: "2 tests, 1 passed, 0 warnings, 0 failure, 0 exceptions",
+ LockURL: "lock-url",
+ RePlanCmd: "atlantis plan -d path -w workspace",
+ ApplyCmd: "atlantis apply -d path -w workspace",
+ },
+ Workspace: "workspace",
+ RepoRelDir: "path",
+ ProjectName: "projectname",
+ },
+ },
+ models.Github,
+ `Ran Policy Check for project: $projectname$ dir: $path$ workspace: $workspace$
+
+$$$diff
+2 tests, 1 passed, 0 warnings, 0 failure, 0 exceptions
+$$$
+
+* :arrow_forward: To **apply** this plan, comment:
+ * $atlantis apply -d path -w workspace$
+* :put_litter_in_its_place: To **delete** this plan click [here](lock-url)
+* :repeat: To re-run policies **plan** this project again by commenting:
+ * $atlantis plan -d path -w workspace$
+
---
* :fast_forward: To **apply** all unapplied plans from this pull request, comment:
* $atlantis apply$
@@ -331,6 +380,68 @@ $$$
* :repeat: To **plan** this project again, comment:
* $atlantis plan -d path2 -w workspace$
+---
+* :fast_forward: To **apply** all unapplied plans from this pull request, comment:
+ * $atlantis apply$
+* :put_litter_in_its_place: To delete all plans and locks for the PR, comment:
+ * $atlantis unlock$
+`,
+ },
+ {
+ "multiple successful policy checks",
+ models.PolicyCheckCommand,
+ []models.ProjectResult{
+ {
+ Workspace: "workspace",
+ RepoRelDir: "path",
+ PolicyCheckSuccess: &models.PolicyCheckSuccess{
+ PolicyCheckOutput: "4 tests, 4 passed, 0 warnings, 0 failures, 0 exceptions",
+ LockURL: "lock-url",
+ ApplyCmd: "atlantis apply -d path -w workspace",
+ RePlanCmd: "atlantis plan -d path -w workspace",
+ },
+ },
+ {
+ Workspace: "workspace",
+ RepoRelDir: "path2",
+ ProjectName: "projectname",
+ PolicyCheckSuccess: &models.PolicyCheckSuccess{
+ PolicyCheckOutput: "4 tests, 4 passed, 0 warnings, 0 failures, 0 exceptions",
+ LockURL: "lock-url2",
+ ApplyCmd: "atlantis apply -d path2 -w workspace",
+ RePlanCmd: "atlantis plan -d path2 -w workspace",
+ },
+ },
+ },
+ models.Github,
+ `Ran Policy Check for 2 projects:
+
+1. dir: $path$ workspace: $workspace$
+1. project: $projectname$ dir: $path2$ workspace: $workspace$
+
+### 1. dir: $path$ workspace: $workspace$
+$$$diff
+4 tests, 4 passed, 0 warnings, 0 failures, 0 exceptions
+$$$
+
+* :arrow_forward: To **apply** this plan, comment:
+ * $atlantis apply -d path -w workspace$
+* :put_litter_in_its_place: To **delete** this plan click [here](lock-url)
+* :repeat: To re-run policies **plan** this project again by commenting:
+ * $atlantis plan -d path -w workspace$
+
+---
+### 2. project: $projectname$ dir: $path2$ workspace: $workspace$
+$$$diff
+4 tests, 4 passed, 0 warnings, 0 failures, 0 exceptions
+$$$
+
+* :arrow_forward: To **apply** this plan, comment:
+ * $atlantis apply -d path2 -w workspace$
+* :put_litter_in_its_place: To **delete** this plan click [here](lock-url2)
+* :repeat: To re-run policies **plan** this project again by commenting:
+ * $atlantis plan -d path2 -w workspace$
+
---
* :fast_forward: To **apply** all unapplied plans from this pull request, comment:
* $atlantis apply$
@@ -467,6 +578,70 @@ $$$
error
$$$
+---
+* :fast_forward: To **apply** all unapplied plans from this pull request, comment:
+ * $atlantis apply$
+* :put_litter_in_its_place: To delete all plans and locks for the PR, comment:
+ * $atlantis unlock$
+`,
+ },
+ {
+ "successful, failed, and errored policy check",
+ models.PolicyCheckCommand,
+ []models.ProjectResult{
+ {
+ Workspace: "workspace",
+ RepoRelDir: "path",
+ PolicyCheckSuccess: &models.PolicyCheckSuccess{
+ PolicyCheckOutput: "4 tests, 4 passed, 0 warnings, 0 failures, 0 exceptions",
+ LockURL: "lock-url",
+ ApplyCmd: "atlantis apply -d path -w workspace",
+ RePlanCmd: "atlantis plan -d path -w workspace",
+ },
+ },
+ {
+ Workspace: "workspace",
+ RepoRelDir: "path2",
+ Failure: "failure",
+ },
+ {
+ Workspace: "workspace",
+ RepoRelDir: "path3",
+ ProjectName: "projectname",
+ Error: errors.New("error"),
+ },
+ },
+ models.Github,
+ `Ran Policy Check for 3 projects:
+
+1. dir: $path$ workspace: $workspace$
+1. dir: $path2$ workspace: $workspace$
+1. project: $projectname$ dir: $path3$ workspace: $workspace$
+
+### 1. dir: $path$ workspace: $workspace$
+$$$diff
+4 tests, 4 passed, 0 warnings, 0 failures, 0 exceptions
+$$$
+
+* :arrow_forward: To **apply** this plan, comment:
+ * $atlantis apply -d path -w workspace$
+* :put_litter_in_its_place: To **delete** this plan click [here](lock-url)
+* :repeat: To re-run policies **plan** this project again by commenting:
+ * $atlantis plan -d path -w workspace$
+
+---
+### 2. dir: $path2$ workspace: $workspace$
+**Policy Check Failed**: failure
+
+---
+### 3. project: $projectname$ dir: $path3$ workspace: $workspace$
+**Policy Check Error**
+$$$
+error
+$$$
+* :heavy_check_mark: To **approve** failing policies either request an approval from approvers or address the failure by modifying the codebase.
+
+
---
* :fast_forward: To **apply** all unapplied plans from this pull request, comment:
* $atlantis apply$
diff --git a/server/events/mocks/matchers/events_commentparseresult.go b/server/events/mocks/matchers/events_commentparseresult.go
index 82e0e87f11..bcdd017764 100644
--- a/server/events/mocks/matchers/events_commentparseresult.go
+++ b/server/events/mocks/matchers/events_commentparseresult.go
@@ -2,9 +2,9 @@
package matchers
import (
- "reflect"
"github.com/petergtz/pegomock"
events "github.com/runatlantis/atlantis/server/events"
+ "reflect"
)
func AnyEventsCommentParseResult() events.CommentParseResult {
diff --git a/server/events/mocks/matchers/ptr_to_events_commandcontext.go b/server/events/mocks/matchers/ptr_to_events_commandcontext.go
index f7b214813f..896b495636 100644
--- a/server/events/mocks/matchers/ptr_to_events_commandcontext.go
+++ b/server/events/mocks/matchers/ptr_to_events_commandcontext.go
@@ -2,9 +2,9 @@
package matchers
import (
- "reflect"
"github.com/petergtz/pegomock"
events "github.com/runatlantis/atlantis/server/events"
+ "reflect"
)
func AnyPtrToEventsCommandContext() *events.CommandContext {
diff --git a/server/events/mocks/matchers/ptr_to_events_commentcommand.go b/server/events/mocks/matchers/ptr_to_events_commentcommand.go
index 83f61b2f88..a153bb3274 100644
--- a/server/events/mocks/matchers/ptr_to_events_commentcommand.go
+++ b/server/events/mocks/matchers/ptr_to_events_commentcommand.go
@@ -2,9 +2,9 @@
package matchers
import (
- "reflect"
"github.com/petergtz/pegomock"
events "github.com/runatlantis/atlantis/server/events"
+ "reflect"
)
func AnyPtrToEventsCommentCommand() *events.CommentCommand {
diff --git a/server/events/mocks/matchers/ptr_to_models_commandcontext.go b/server/events/mocks/matchers/ptr_to_models_commandcontext.go
new file mode 100644
index 0000000000..ca6abf31da
--- /dev/null
+++ b/server/events/mocks/matchers/ptr_to_models_commandcontext.go
@@ -0,0 +1,20 @@
+// Code generated by pegomock. DO NOT EDIT.
+package matchers
+
+import (
+ "github.com/petergtz/pegomock"
+ events "github.com/runatlantis/atlantis/server/events"
+ "reflect"
+)
+
+func AnyPtrToModelsCommandContext() *events.CommandContext {
+ pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(*events.CommandContext))(nil)).Elem()))
+ var nullValue *events.CommandContext
+ return nullValue
+}
+
+func EqPtrToModelsCommandContext(value *events.CommandContext) *events.CommandContext {
+ pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value})
+ var nullValue *events.CommandContext
+ return nullValue
+}
diff --git a/server/events/mocks/mock_comment_parsing.go b/server/events/mocks/mock_comment_parsing.go
index 4522ec38ea..24e4281ff4 100644
--- a/server/events/mocks/mock_comment_parsing.go
+++ b/server/events/mocks/mock_comment_parsing.go
@@ -4,11 +4,12 @@
package mocks
import (
+ "reflect"
+ "time"
+
pegomock "github.com/petergtz/pegomock"
events "github.com/runatlantis/atlantis/server/events"
models "github.com/runatlantis/atlantis/server/events/models"
- "reflect"
- "time"
)
type MockCommentParsing struct {
diff --git a/server/events/mocks/mock_project_command_builder.go b/server/events/mocks/mock_project_command_builder.go
index f32d3ac754..d4a3659738 100644
--- a/server/events/mocks/mock_project_command_builder.go
+++ b/server/events/mocks/mock_project_command_builder.go
@@ -83,6 +83,25 @@ func (mock *MockProjectCommandBuilder) BuildApplyCommands(ctx *events.CommandCon
return ret0, ret1
}
+func (mock *MockProjectCommandBuilder) BuildApprovePoliciesCommands(ctx *events.CommandContext, comment *events.CommentCommand) ([]models.ProjectCommandContext, error) {
+ if mock == nil {
+ panic("mock must not be nil. Use myMock := NewMockProjectCommandBuilder().")
+ }
+ params := []pegomock.Param{ctx, comment}
+ result := pegomock.GetGenericMockFrom(mock).Invoke("BuildApprovePoliciesCommands", params, []reflect.Type{reflect.TypeOf((*[]models.ProjectCommandContext)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()})
+ var ret0 []models.ProjectCommandContext
+ var ret1 error
+ if len(result) != 0 {
+ if result[0] != nil {
+ ret0 = result[0].([]models.ProjectCommandContext)
+ }
+ if result[1] != nil {
+ ret1 = result[1].(error)
+ }
+ }
+ return ret0, ret1
+}
+
func (mock *MockProjectCommandBuilder) VerifyWasCalledOnce() *VerifierMockProjectCommandBuilder {
return &VerifierMockProjectCommandBuilder{
mock: mock,
@@ -208,3 +227,34 @@ func (c *MockProjectCommandBuilder_BuildApplyCommands_OngoingVerification) GetAl
}
return
}
+
+func (verifier *VerifierMockProjectCommandBuilder) BuildApprovePoliciesCommands(ctx *events.CommandContext, comment *events.CommentCommand) *MockProjectCommandBuilder_BuildApprovePoliciesCommands_OngoingVerification {
+ params := []pegomock.Param{ctx, comment}
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "BuildApprovePoliciesCommands", params, verifier.timeout)
+ return &MockProjectCommandBuilder_BuildApprovePoliciesCommands_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations}
+}
+
+type MockProjectCommandBuilder_BuildApprovePoliciesCommands_OngoingVerification struct {
+ mock *MockProjectCommandBuilder
+ methodInvocations []pegomock.MethodInvocation
+}
+
+func (c *MockProjectCommandBuilder_BuildApprovePoliciesCommands_OngoingVerification) GetCapturedArguments() (*events.CommandContext, *events.CommentCommand) {
+ ctx, comment := c.GetAllCapturedArguments()
+ return ctx[len(ctx)-1], comment[len(comment)-1]
+}
+
+func (c *MockProjectCommandBuilder_BuildApprovePoliciesCommands_OngoingVerification) GetAllCapturedArguments() (_param0 []*events.CommandContext, _param1 []*events.CommentCommand) {
+ params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations)
+ if len(params) > 0 {
+ _param0 = make([]*events.CommandContext, len(c.methodInvocations))
+ for u, param := range params[0] {
+ _param0[u] = param.(*events.CommandContext)
+ }
+ _param1 = make([]*events.CommentCommand, len(c.methodInvocations))
+ for u, param := range params[1] {
+ _param1[u] = param.(*events.CommentCommand)
+ }
+ }
+ return
+}
diff --git a/server/events/mocks/mock_project_command_runner.go b/server/events/mocks/mock_project_command_runner.go
index 817ce187d6..e0b8ff7704 100644
--- a/server/events/mocks/mock_project_command_runner.go
+++ b/server/events/mocks/mock_project_command_runner.go
@@ -55,6 +55,36 @@ func (mock *MockProjectCommandRunner) Apply(ctx models.ProjectCommandContext) mo
return ret0
}
+func (mock *MockProjectCommandRunner) PolicyCheck(ctx models.ProjectCommandContext) models.ProjectResult {
+ if mock == nil {
+ panic("mock must not be nil. Use myMock := NewMockProjectCommandRunner().")
+ }
+ params := []pegomock.Param{ctx}
+ result := pegomock.GetGenericMockFrom(mock).Invoke("PolicyCheck", params, []reflect.Type{reflect.TypeOf((*models.ProjectResult)(nil)).Elem()})
+ var ret0 models.ProjectResult
+ if len(result) != 0 {
+ if result[0] != nil {
+ ret0 = result[0].(models.ProjectResult)
+ }
+ }
+ return ret0
+}
+
+func (mock *MockProjectCommandRunner) ApprovePolicies(ctx models.ProjectCommandContext) models.ProjectResult {
+ if mock == nil {
+ panic("mock must not be nil. Use myMock := NewMockProjectCommandRunner().")
+ }
+ params := []pegomock.Param{ctx}
+ result := pegomock.GetGenericMockFrom(mock).Invoke("ApprovePolicies", params, []reflect.Type{reflect.TypeOf((*models.ProjectResult)(nil)).Elem()})
+ var ret0 models.ProjectResult
+ if len(result) != 0 {
+ if result[0] != nil {
+ ret0 = result[0].(models.ProjectResult)
+ }
+ }
+ return ret0
+}
+
func (mock *MockProjectCommandRunner) VerifyWasCalledOnce() *VerifierMockProjectCommandRunner {
return &VerifierMockProjectCommandRunner{
mock: mock,
@@ -145,3 +175,57 @@ func (c *MockProjectCommandRunner_Apply_OngoingVerification) GetAllCapturedArgum
}
return
}
+
+func (verifier *VerifierMockProjectCommandRunner) PolicyCheck(ctx models.ProjectCommandContext) *MockProjectCommandRunner_PolicyCheck_OngoingVerification {
+ params := []pegomock.Param{ctx}
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "PolicyCheck", params, verifier.timeout)
+ return &MockProjectCommandRunner_PolicyCheck_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations}
+}
+
+type MockProjectCommandRunner_PolicyCheck_OngoingVerification struct {
+ mock *MockProjectCommandRunner
+ methodInvocations []pegomock.MethodInvocation
+}
+
+func (c *MockProjectCommandRunner_PolicyCheck_OngoingVerification) GetCapturedArguments() models.ProjectCommandContext {
+ ctx := c.GetAllCapturedArguments()
+ return ctx[len(ctx)-1]
+}
+
+func (c *MockProjectCommandRunner_PolicyCheck_OngoingVerification) GetAllCapturedArguments() (_param0 []models.ProjectCommandContext) {
+ params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations)
+ if len(params) > 0 {
+ _param0 = make([]models.ProjectCommandContext, len(c.methodInvocations))
+ for u, param := range params[0] {
+ _param0[u] = param.(models.ProjectCommandContext)
+ }
+ }
+ return
+}
+
+func (verifier *VerifierMockProjectCommandRunner) ApprovePolicies(ctx models.ProjectCommandContext) *MockProjectCommandRunner_ApprovePolicies_OngoingVerification {
+ params := []pegomock.Param{ctx}
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "ApprovePolicies", params, verifier.timeout)
+ return &MockProjectCommandRunner_ApprovePolicies_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations}
+}
+
+type MockProjectCommandRunner_ApprovePolicies_OngoingVerification struct {
+ mock *MockProjectCommandRunner
+ methodInvocations []pegomock.MethodInvocation
+}
+
+func (c *MockProjectCommandRunner_ApprovePolicies_OngoingVerification) GetCapturedArguments() models.ProjectCommandContext {
+ ctx := c.GetAllCapturedArguments()
+ return ctx[len(ctx)-1]
+}
+
+func (c *MockProjectCommandRunner_ApprovePolicies_OngoingVerification) GetAllCapturedArguments() (_param0 []models.ProjectCommandContext) {
+ params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations)
+ if len(params) > 0 {
+ _param0 = make([]models.ProjectCommandContext, len(c.methodInvocations))
+ for u, param := range params[0] {
+ _param0[u] = param.(models.ProjectCommandContext)
+ }
+ }
+ return
+}
diff --git a/server/events/models/models.go b/server/events/models/models.go
index f54d086692..39052ef3de 100644
--- a/server/events/models/models.go
+++ b/server/events/models/models.go
@@ -30,6 +30,10 @@ import (
"github.com/runatlantis/atlantis/server/events/yaml/valid"
)
+const (
+ planfileSlashReplace = "::"
+)
+
// Repo is a VCS repository.
type Repo struct {
// FullName is the owner and repo name separated
@@ -300,6 +304,7 @@ func (h VCSHostType) String() string {
// ProjectCommandContext defines the context for a plan or apply stage that will
// be executed for a project.
type ProjectCommandContext struct {
+ CommandName CommandName
// ApplyCmd is the command that users should run to apply this plan. If
// this is an apply then this will be empty.
ApplyCmd string
@@ -313,6 +318,8 @@ type ProjectCommandContext struct {
ParallelApplyEnabled bool
// ParallelPlanEnabled is true if parallel plan is enabled for this project.
ParallelPlanEnabled bool
+ // ParallelPolicyCheckEnabled is true if parallel policy_check is enabled for this project.
+ ParallelPolicyCheckEnabled bool
// AutoplanEnabled is true if autoplanning is enabled for this project.
AutoplanEnabled bool
// BaseRepo is the repository that the pull request will be merged into.
@@ -357,6 +364,18 @@ type ProjectCommandContext struct {
// Workspace is the Terraform workspace this project is in. It will always
// be set.
Workspace string
+ // PolicySets represent the policies that are run on the plan as part of the
+ // policy check stage
+ PolicySets valid.PolicySets
+}
+
+// GetShowResultFileName returns the filename (not the path) to store the tf show result
+func (p ProjectCommandContext) GetShowResultFileName() string {
+ if p.ProjectName == "" {
+ return fmt.Sprintf("%s.json", p.Workspace)
+ }
+ projName := strings.Replace(p.ProjectName, "/", planfileSlashReplace, -1)
+ return fmt.Sprintf("%s-%s.json", projName, p.Workspace)
}
// SplitRepoFullName splits a repo full name up into its owner and repo
@@ -374,16 +393,17 @@ func SplitRepoFullName(repoFullName string) (owner string, repo string) {
return repoFullName[:lastSlashIdx], repoFullName[lastSlashIdx+1:]
}
-// ProjectResult is the result of executing a plan/apply for a specific project.
+// ProjectResult is the result of executing a plan/policy_check/apply for a specific project.
type ProjectResult struct {
- Command CommandName
- RepoRelDir string
- Workspace string
- Error error
- Failure string
- PlanSuccess *PlanSuccess
- ApplySuccess string
- ProjectName string
+ Command CommandName
+ RepoRelDir string
+ Workspace string
+ Error error
+ Failure string
+ PlanSuccess *PlanSuccess
+ PolicyCheckSuccess *PolicyCheckSuccess
+ ApplySuccess string
+ ProjectName string
}
// CommitStatus returns the vcs commit status of this project result.
@@ -408,7 +428,13 @@ func (p ProjectResult) PlanStatus() ProjectPlanStatus {
return ErroredPlanStatus
}
return PlannedPlanStatus
-
+ case PolicyCheckCommand, ApprovePoliciesCommand:
+ if p.Error != nil {
+ return ErroredPolicyCheckStatus
+ } else if p.Failure != "" {
+ return ErroredPolicyCheckStatus
+ }
+ return PassedPolicyCheckStatus
case ApplyCommand:
if p.Error != nil {
return ErroredApplyStatus
@@ -423,7 +449,7 @@ func (p ProjectResult) PlanStatus() ProjectPlanStatus {
// IsSuccessful returns true if this project result had no errors.
func (p ProjectResult) IsSuccessful() bool {
- return p.PlanSuccess != nil || p.ApplySuccess != ""
+ return p.PlanSuccess != nil || p.PolicyCheckSuccess != nil || p.ApplySuccess != ""
}
// PlanSuccess is the result of a successful plan.
@@ -442,6 +468,22 @@ type PlanSuccess struct {
HasDiverged bool
}
+// PolicyCheckSuccess is the result of a successful policy check run.
+type PolicyCheckSuccess struct {
+ // PolicyCheckOutput is the output from policy check binary(conftest|opa)
+ PolicyCheckOutput string
+ // LockURL is the full URL to the lock held by this policy check.
+ LockURL string
+ // RePlanCmd is the command that users should run to re-plan this project.
+ RePlanCmd string
+ // ApplyCmd is the command that users should run to apply this plan.
+ ApplyCmd string
+ // HasDiverged is true if we're using the checkout merge strategy and the
+ // branch we're merging into has been updated since we cloned and merged
+ // it.
+ HasDiverged bool
+}
+
// PullStatus is the current status of a pull request that is in progress.
type PullStatus struct {
// Projects are the projects that have been modified in this pull request.
@@ -490,6 +532,12 @@ const (
// DiscardedPlanStatus means that there was an unapplied plan that was
// discarded due to a project being unlocked
DiscardedPlanStatus
+ // ErroredPolicyCheckStatus means that the policy check for this
+ // project's plan errored or failed
+ ErroredPolicyCheckStatus
+ // PassedPolicyCheckStatus means that all policy checks passed for
+ // this project's plan
+ PassedPolicyCheckStatus
)
// String returns a string representation of the status.
@@ -505,6 +553,10 @@ func (p ProjectPlanStatus) String() string {
return "applied"
case DiscardedPlanStatus:
return "plan_discarded"
+ case ErroredPolicyCheckStatus:
+ return "policy_check_errored"
+ case PassedPolicyCheckStatus:
+ return "policy_check_passed"
default:
panic("missing String() impl for ProjectPlanStatus")
}
@@ -520,6 +572,12 @@ const (
PlanCommand
// UnlockCommand is a command to discard previous plans as well as the atlantis locks.
UnlockCommand
+ // PolicyCheckCommand is a command to run conftest test.
+ PolicyCheckCommand
+ // ApprovePoliciesCommand is a command to approve policies with owner check
+ ApprovePoliciesCommand
+ // AutoplanCommand is a command to run terraform plan on PR open/update if autoplan is enabled
+ AutoplanCommand
// Adding more? Don't forget to update String() below
)
@@ -528,10 +586,14 @@ func (c CommandName) String() string {
switch c {
case ApplyCommand:
return "apply"
- case PlanCommand:
+ case PlanCommand, AutoplanCommand:
return "plan"
case UnlockCommand:
return "unlock"
+ case PolicyCheckCommand:
+ return "policy_check"
+ case ApprovePoliciesCommand:
+ return "approve_policies"
}
return ""
}
diff --git a/server/events/models/models_test.go b/server/events/models/models_test.go
index 3aee9ba89e..37e41f1529 100644
--- a/server/events/models/models_test.go
+++ b/server/events/models/models_test.go
@@ -367,6 +367,12 @@ func TestProjectResult_IsSuccessful(t *testing.T) {
},
true,
},
+ "policy_check success": {
+ models.ProjectResult{
+ PolicyCheckSuccess: &models.PolicyCheckSuccess{},
+ },
+ true,
+ },
"apply success": {
models.ProjectResult{
ApplySuccess: "success",
@@ -441,6 +447,34 @@ func TestProjectResult_PlanStatus(t *testing.T) {
},
expStatus: models.AppliedPlanStatus,
},
+ {
+ p: models.ProjectResult{
+ Command: models.PolicyCheckCommand,
+ PolicyCheckSuccess: &models.PolicyCheckSuccess{},
+ },
+ expStatus: models.PassedPolicyCheckStatus,
+ },
+ {
+ p: models.ProjectResult{
+ Command: models.PolicyCheckCommand,
+ Failure: "failure",
+ },
+ expStatus: models.ErroredPolicyCheckStatus,
+ },
+ {
+ p: models.ProjectResult{
+ Command: models.ApprovePoliciesCommand,
+ PolicyCheckSuccess: &models.PolicyCheckSuccess{},
+ },
+ expStatus: models.PassedPolicyCheckStatus,
+ },
+ {
+ p: models.ProjectResult{
+ Command: models.ApprovePoliciesCommand,
+ Failure: "failure",
+ },
+ expStatus: models.ErroredPolicyCheckStatus,
+ },
}
for _, c := range cases {
@@ -468,6 +502,12 @@ func TestPullStatus_StatusCount(t *testing.T) {
{
Status: models.DiscardedPlanStatus,
},
+ {
+ Status: models.ErroredPolicyCheckStatus,
+ },
+ {
+ Status: models.PassedPolicyCheckStatus,
+ },
},
}
@@ -476,6 +516,8 @@ func TestPullStatus_StatusCount(t *testing.T) {
Equals(t, 1, ps.StatusCount(models.ErroredApplyStatus))
Equals(t, 0, ps.StatusCount(models.ErroredPlanStatus))
Equals(t, 1, ps.StatusCount(models.DiscardedPlanStatus))
+ Equals(t, 1, ps.StatusCount(models.ErroredPolicyCheckStatus))
+ Equals(t, 1, ps.StatusCount(models.PassedPolicyCheckStatus))
}
func TestApplyCommand_String(t *testing.T) {
@@ -490,6 +532,12 @@ func TestPlanCommand_String(t *testing.T) {
Equals(t, "plan", uc.String())
}
+func TestPolicyCheckCommand_String(t *testing.T) {
+ uc := models.PolicyCheckCommand
+
+ Equals(t, "policy_check", uc.String())
+}
+
func TestUnlockCommand_String(t *testing.T) {
uc := models.UnlockCommand
diff --git a/server/events/plan_command_runner.go b/server/events/plan_command_runner.go
new file mode 100644
index 0000000000..ae7c3d167e
--- /dev/null
+++ b/server/events/plan_command_runner.go
@@ -0,0 +1,253 @@
+package events
+
+import (
+ "github.com/runatlantis/atlantis/server/events/models"
+ "github.com/runatlantis/atlantis/server/events/vcs"
+)
+
+func NewPlanCommandRunner(
+ silenceVCSStatusNoPlans bool,
+ vcsClient vcs.Client,
+ pendingPlanFinder PendingPlanFinder,
+ workingDir WorkingDir,
+ commitStatusUpdater CommitStatusUpdater,
+ projectCommandBuilder ProjectPlanCommandBuilder,
+ projectCommandRunner ProjectPlanCommandRunner,
+ dbUpdater *DBUpdater,
+ pullUpdater *PullUpdater,
+ policyCheckCommandRunner *PolicyCheckCommandRunner,
+ autoMerger *AutoMerger,
+ parallelPoolSize int,
+) *PlanCommandRunner {
+ return &PlanCommandRunner{
+ silenceVCSStatusNoPlans: silenceVCSStatusNoPlans,
+ vcsClient: vcsClient,
+ pendingPlanFinder: pendingPlanFinder,
+ workingDir: workingDir,
+ commitStatusUpdater: commitStatusUpdater,
+ prjCmdBuilder: projectCommandBuilder,
+ prjCmdRunner: projectCommandRunner,
+ dbUpdater: dbUpdater,
+ pullUpdater: pullUpdater,
+ policyCheckCommandRunner: policyCheckCommandRunner,
+ autoMerger: autoMerger,
+ parallelPoolSize: parallelPoolSize,
+ }
+}
+
+type PlanCommandRunner struct {
+ vcsClient vcs.Client
+ // SilenceVCSStatusNoPlans is whether autoplan should set commit status if no plans
+ // are found
+ silenceVCSStatusNoPlans bool
+ commitStatusUpdater CommitStatusUpdater
+ pendingPlanFinder PendingPlanFinder
+ workingDir WorkingDir
+ prjCmdBuilder ProjectPlanCommandBuilder
+ prjCmdRunner ProjectPlanCommandRunner
+ dbUpdater *DBUpdater
+ pullUpdater *PullUpdater
+ policyCheckCommandRunner *PolicyCheckCommandRunner
+ autoMerger *AutoMerger
+ parallelPoolSize int
+}
+
+func (p *PlanCommandRunner) runAutoplan(ctx *CommandContext) {
+ baseRepo := ctx.Pull.BaseRepo
+ pull := ctx.Pull
+
+ projectCmds, err := p.prjCmdBuilder.BuildAutoplanCommands(ctx)
+ if err != nil {
+ if statusErr := p.commitStatusUpdater.UpdateCombined(baseRepo, pull, models.FailedCommitStatus, models.PlanCommand); statusErr != nil {
+ ctx.Log.Warn("unable to update commit status: %s", statusErr)
+ }
+ p.pullUpdater.updatePull(ctx, AutoplanCommand{}, CommandResult{Error: err})
+ return
+ }
+
+ projectCmds, policyCheckCmds := p.partitionProjectCmds(ctx, projectCmds)
+
+ if len(projectCmds) == 0 {
+ ctx.Log.Info("determined there was no project to run plan in")
+ if !p.silenceVCSStatusNoPlans {
+ // If there were no projects modified, we set successful commit statuses
+ // with 0/0 projects planned/policy_checked/applied successfully because some users require
+ // the Atlantis status to be passing for all pull requests.
+ ctx.Log.Debug("setting VCS status to success with no projects found")
+ if err := p.commitStatusUpdater.UpdateCombinedCount(baseRepo, pull, models.SuccessCommitStatus, models.PlanCommand, 0, 0); err != nil {
+ ctx.Log.Warn("unable to update commit status: %s", err)
+ }
+ if err := p.commitStatusUpdater.UpdateCombinedCount(baseRepo, pull, models.SuccessCommitStatus, models.PolicyCheckCommand, 0, 0); err != nil {
+ ctx.Log.Warn("unable to update commit status: %s", err)
+ }
+ if err := p.commitStatusUpdater.UpdateCombinedCount(baseRepo, pull, models.SuccessCommitStatus, models.ApplyCommand, 0, 0); err != nil {
+ ctx.Log.Warn("unable to update commit status: %s", err)
+ }
+ }
+ return
+ }
+
+ // At this point we are sure Atlantis has work to do, so set commit status to pending
+ if err := p.commitStatusUpdater.UpdateCombined(ctx.Pull.BaseRepo, ctx.Pull, models.PendingCommitStatus, models.PlanCommand); err != nil {
+ ctx.Log.Warn("unable to update commit status: %s", err)
+ }
+
+ // Only run commands in parallel if enabled
+ var result CommandResult
+ if p.isParallelEnabled(projectCmds) {
+ ctx.Log.Info("Running plans in parallel")
+ result = runProjectCmdsParallel(projectCmds, p.prjCmdRunner.Plan, p.parallelPoolSize)
+ } else {
+ result = runProjectCmds(projectCmds, p.prjCmdRunner.Plan)
+ }
+
+ if p.autoMerger.automergeEnabled(projectCmds) && result.HasErrors() {
+ ctx.Log.Info("deleting plans because there were errors and automerge requires all plans succeed")
+ p.deletePlans(ctx)
+ result.PlansDeleted = true
+ }
+
+ p.pullUpdater.updatePull(ctx, AutoplanCommand{}, result)
+
+ pullStatus, err := p.dbUpdater.updateDB(ctx, ctx.Pull, result.ProjectResults)
+ if err != nil {
+ ctx.Log.Err("writing results: %s", err)
+ }
+
+ p.updateCommitStatus(ctx, pullStatus)
+
+ // Check if there are any planned projects and if there are any errors or if plans are being deleted
+ if len(policyCheckCmds) > 0 &&
+ !(result.HasErrors() || result.PlansDeleted) {
+ // Run policy_check command
+ ctx.Log.Info("Running policy_checks for all plans")
+ p.policyCheckCommandRunner.Run(ctx, policyCheckCmds)
+ }
+}
+
+func (p *PlanCommandRunner) run(ctx *CommandContext, cmd *CommentCommand) {
+ var err error
+ baseRepo := ctx.Pull.BaseRepo
+ pull := ctx.Pull
+
+ if err = p.commitStatusUpdater.UpdateCombined(baseRepo, pull, models.PendingCommitStatus, models.PlanCommand); err != nil {
+ ctx.Log.Warn("unable to update commit status: %s", err)
+ }
+
+ projectCmds, err := p.prjCmdBuilder.BuildPlanCommands(ctx, cmd)
+ if err != nil {
+ if statusErr := p.commitStatusUpdater.UpdateCombined(ctx.Pull.BaseRepo, ctx.Pull, models.FailedCommitStatus, models.PlanCommand); statusErr != nil {
+ ctx.Log.Warn("unable to update commit status: %s", statusErr)
+ }
+ p.pullUpdater.updatePull(ctx, cmd, CommandResult{Error: err})
+ return
+ }
+
+ projectCmds, policyCheckCmds := p.partitionProjectCmds(ctx, projectCmds)
+
+ // Only run commands in parallel if enabled
+ var result CommandResult
+ if p.isParallelEnabled(projectCmds) {
+ ctx.Log.Info("Running plans in parallel")
+ result = runProjectCmdsParallel(projectCmds, p.prjCmdRunner.Plan, p.parallelPoolSize)
+ } else {
+ result = runProjectCmds(projectCmds, p.prjCmdRunner.Plan)
+ }
+
+ if p.autoMerger.automergeEnabled(projectCmds) && result.HasErrors() {
+ ctx.Log.Info("deleting plans because there were errors and automerge requires all plans succeed")
+ p.deletePlans(ctx)
+ result.PlansDeleted = true
+ }
+
+ p.pullUpdater.updatePull(
+ ctx,
+ cmd,
+ result)
+
+ pullStatus, err := p.dbUpdater.updateDB(ctx, pull, result.ProjectResults)
+ if err != nil {
+ ctx.Log.Err("writing results: %s", err)
+ return
+ }
+
+ p.updateCommitStatus(ctx, pullStatus)
+
+ // Runs policy checks step after all plans are successful.
+ // This step does not approve any policies that require approval.
+ if len(result.ProjectResults) > 0 &&
+ !(result.HasErrors() || result.PlansDeleted) {
+ ctx.Log.Info("Running policy check for %s", cmd.String())
+ p.policyCheckCommandRunner.Run(ctx, policyCheckCmds)
+ }
+}
+
+func (p *PlanCommandRunner) Run(ctx *CommandContext, cmd *CommentCommand) {
+ if ctx.Trigger == Auto {
+ p.runAutoplan(ctx)
+ } else {
+ p.run(ctx, cmd)
+ }
+}
+
+func (p *PlanCommandRunner) updateCommitStatus(ctx *CommandContext, pullStatus models.PullStatus) {
+ var numSuccess int
+ var numErrored int
+ status := models.SuccessCommitStatus
+
+ numErrored = pullStatus.StatusCount(models.ErroredPlanStatus)
+ // We consider anything that isn't a plan error as a plan success.
+ // For example, if there is an apply error, that means that at least a
+ // plan was generated successfully.
+ numSuccess = len(pullStatus.Projects) - numErrored
+
+ if numErrored > 0 {
+ status = models.FailedCommitStatus
+ }
+
+ if err := p.commitStatusUpdater.UpdateCombinedCount(
+ ctx.Pull.BaseRepo,
+ ctx.Pull,
+ status,
+ models.PlanCommand,
+ numSuccess,
+ len(pullStatus.Projects),
+ ); err != nil {
+ ctx.Log.Warn("unable to update commit status: %s", err)
+ }
+}
+
+// deletePlans deletes all plans generated in this ctx.
+func (p *PlanCommandRunner) deletePlans(ctx *CommandContext) {
+ pullDir, err := p.workingDir.GetPullDir(ctx.Pull.BaseRepo, ctx.Pull)
+ if err != nil {
+ ctx.Log.Err("getting pull dir: %s", err)
+ }
+ if err := p.pendingPlanFinder.DeletePlans(pullDir); err != nil {
+ ctx.Log.Err("deleting pending plans: %s", err)
+ }
+}
+
+func (p *PlanCommandRunner) partitionProjectCmds(
+ ctx *CommandContext,
+ cmds []models.ProjectCommandContext,
+) (
+ projectCmds []models.ProjectCommandContext,
+ policyCheckCmds []models.ProjectCommandContext,
+) {
+ for _, cmd := range cmds {
+ switch cmd.CommandName {
+ case models.PlanCommand:
+ projectCmds = append(projectCmds, cmd)
+ case models.PolicyCheckCommand:
+ policyCheckCmds = append(policyCheckCmds, cmd)
+ default:
+ ctx.Log.Err("%s is not supported", cmd.CommandName)
+ }
+ }
+ return
+}
+
+func (p *PlanCommandRunner) isParallelEnabled(projectCmds []models.ProjectCommandContext) bool {
+ return len(projectCmds) > 0 && projectCmds[0].ParallelPlanEnabled
+}
diff --git a/server/events/policy_check_command_runner.go b/server/events/policy_check_command_runner.go
new file mode 100644
index 0000000000..cb51a62511
--- /dev/null
+++ b/server/events/policy_check_command_runner.go
@@ -0,0 +1,76 @@
+package events
+
+import "github.com/runatlantis/atlantis/server/events/models"
+
+func NewPolicyCheckCommandRunner(
+ dbUpdater *DBUpdater,
+ pullUpdater *PullUpdater,
+ commitStatusUpdater CommitStatusUpdater,
+ projectCommandRunner ProjectPolicyCheckCommandRunner,
+ parallelPoolSize int,
+) *PolicyCheckCommandRunner {
+ return &PolicyCheckCommandRunner{
+ dbUpdater: dbUpdater,
+ pullUpdater: pullUpdater,
+ commitStatusUpdater: commitStatusUpdater,
+ prjCmdRunner: projectCommandRunner,
+ parallelPoolSize: parallelPoolSize,
+ }
+}
+
+type PolicyCheckCommandRunner struct {
+ dbUpdater *DBUpdater
+ pullUpdater *PullUpdater
+ commitStatusUpdater CommitStatusUpdater
+ prjCmdRunner ProjectPolicyCheckCommandRunner
+ parallelPoolSize int
+}
+
+func (p *PolicyCheckCommandRunner) Run(ctx *CommandContext, cmds []models.ProjectCommandContext) {
+ if len(cmds) == 0 {
+ return
+ }
+
+ // Set the policy_check commit status to pending while the checks run
+ if err := p.commitStatusUpdater.UpdateCombined(ctx.Pull.BaseRepo, ctx.Pull, models.PendingCommitStatus, models.PolicyCheckCommand); err != nil {
+ ctx.Log.Warn("unable to update commit status: %s", err)
+ }
+
+ var result CommandResult
+ if p.isParallelEnabled(cmds) {
+ ctx.Log.Info("Running policy_checks in parallel")
+ result = runProjectCmdsParallel(cmds, p.prjCmdRunner.PolicyCheck, p.parallelPoolSize)
+ } else {
+ result = runProjectCmds(cmds, p.prjCmdRunner.PolicyCheck)
+ }
+
+ p.pullUpdater.updatePull(ctx, PolicyCheckCommand{}, result)
+
+ pullStatus, err := p.dbUpdater.updateDB(ctx, ctx.Pull, result.ProjectResults)
+ if err != nil {
+ ctx.Log.Err("writing results: %s", err)
+ }
+
+ p.updateCommitStatus(ctx, pullStatus)
+}
+
+func (p *PolicyCheckCommandRunner) updateCommitStatus(ctx *CommandContext, pullStatus models.PullStatus) {
+ var numSuccess int
+ var numErrored int
+ status := models.SuccessCommitStatus
+
+ numSuccess = pullStatus.StatusCount(models.PassedPolicyCheckStatus)
+ numErrored = pullStatus.StatusCount(models.ErroredPolicyCheckStatus)
+
+ if numErrored > 0 {
+ status = models.FailedCommitStatus
+ }
+
+ if err := p.commitStatusUpdater.UpdateCombinedCount(ctx.Pull.BaseRepo, ctx.Pull, status, models.PolicyCheckCommand, numSuccess, len(pullStatus.Projects)); err != nil {
+ ctx.Log.Warn("unable to update commit status: %s", err)
+ }
+}
+
+func (p *PolicyCheckCommandRunner) isParallelEnabled(cmds []models.ProjectCommandContext) bool {
+ return len(cmds) > 0 && cmds[0].ParallelPolicyCheckEnabled
+}
diff --git a/server/events/project_command_builder.go b/server/events/project_command_builder.go
index 9341ce8acc..54ff76c962 100644
--- a/server/events/project_command_builder.go
+++ b/server/events/project_command_builder.go
@@ -3,14 +3,9 @@ package events
import (
"fmt"
"os"
- "path/filepath"
- "regexp"
- "strings"
"github.com/runatlantis/atlantis/server/events/yaml/valid"
- "github.com/hashicorp/go-version"
- "github.com/hashicorp/terraform-config-inspect/tfconfig"
"github.com/pkg/errors"
"github.com/runatlantis/atlantis/server/events/models"
"github.com/runatlantis/atlantis/server/events/vcs"
@@ -32,10 +27,37 @@ const (
DefaultParallelPlanEnabled = false
)
-//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_project_command_builder.go ProjectCommandBuilder
+func NewProjectCommandBuilder(
+ policyChecksSupported bool,
+ parserValidator *yaml.ParserValidator,
+ projectFinder ProjectFinder,
+ vcsClient vcs.Client,
+ workingDir WorkingDir,
+ workingDirLocker WorkingDirLocker,
+ globalCfg valid.GlobalCfg,
+ pendingPlanFinder *DefaultPendingPlanFinder,
+ commentBuilder CommentBuilder,
+ skipCloneNoChanges bool,
+) *DefaultProjectCommandBuilder {
+ projectCommandBuilder := &DefaultProjectCommandBuilder{
+ ParserValidator: parserValidator,
+ ProjectFinder: projectFinder,
+ VCSClient: vcsClient,
+ WorkingDir: workingDir,
+ WorkingDirLocker: workingDirLocker,
+ GlobalCfg: globalCfg,
+ PendingPlanFinder: pendingPlanFinder,
+ SkipCloneNoChanges: skipCloneNoChanges,
+ ProjectCommandContextBuilder: NewProjectCommandContextBulder(
+ policyChecksSupported,
+ commentBuilder,
+ ),
+ }
+
+ return projectCommandBuilder
+}
-// ProjectCommandBuilder builds commands that run on individual projects.
-type ProjectCommandBuilder interface {
+type ProjectPlanCommandBuilder interface {
// BuildAutoplanCommands builds project commands that will run plan on
// the projects determined to be modified.
BuildAutoplanCommands(ctx *CommandContext) ([]models.ProjectCommandContext, error)
@@ -43,25 +65,42 @@ type ProjectCommandBuilder interface {
// comment doesn't specify one project then there may be multiple commands
// to be run.
BuildPlanCommands(ctx *CommandContext, comment *CommentCommand) ([]models.ProjectCommandContext, error)
- // BuildApplyCommands builds project apply commands for ctx and comment. If
+}
+
+type ProjectApplyCommandBuilder interface {
+ // BuildApplyCommands builds project Apply commands for this ctx and comment. If
// comment doesn't specify one project then there may be multiple commands
// to be run.
BuildApplyCommands(ctx *CommandContext, comment *CommentCommand) ([]models.ProjectCommandContext, error)
}
+type ProjectApprovePoliciesCommandBuilder interface {
+ // BuildApprovePoliciesCommands builds project PolicyCheck commands for this ctx and comment.
+ BuildApprovePoliciesCommands(ctx *CommandContext, comment *CommentCommand) ([]models.ProjectCommandContext, error)
+}
+
+//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_project_command_builder.go ProjectCommandBuilder
+
+// ProjectCommandBuilder builds commands that run on individual projects.
+type ProjectCommandBuilder interface {
+ ProjectPlanCommandBuilder
+ ProjectApplyCommandBuilder
+ ProjectApprovePoliciesCommandBuilder
+}
+
// DefaultProjectCommandBuilder implements ProjectCommandBuilder.
// This class combines the data from the comment and any atlantis.yaml file or
// Atlantis server config and then generates a set of contexts.
type DefaultProjectCommandBuilder struct {
- ParserValidator *yaml.ParserValidator
- ProjectFinder ProjectFinder
- VCSClient vcs.Client
- WorkingDir WorkingDir
- WorkingDirLocker WorkingDirLocker
- GlobalCfg valid.GlobalCfg
- PendingPlanFinder *DefaultPendingPlanFinder
- CommentBuilder CommentBuilder
- SkipCloneNoChanges bool
+ ParserValidator *yaml.ParserValidator
+ ProjectFinder ProjectFinder
+ VCSClient vcs.Client
+ WorkingDir WorkingDir
+ WorkingDirLocker WorkingDirLocker
+ GlobalCfg valid.GlobalCfg
+ PendingPlanFinder *DefaultPendingPlanFinder
+ ProjectCommandContextBuilder ProjectCommandContextBuilder
+ SkipCloneNoChanges bool
}
// See ProjectCommandBuilder.BuildAutoplanCommands.
@@ -87,16 +126,20 @@ func (p *DefaultProjectCommandBuilder) BuildPlanCommands(ctx *CommandContext, cm
return p.buildPlanAllCommands(ctx, cmd.Flags, cmd.Verbose)
}
pcc, err := p.buildProjectPlanCommand(ctx, cmd)
- return []models.ProjectCommandContext{pcc}, err
+ return pcc, err
}
// See ProjectCommandBuilder.BuildApplyCommands.
func (p *DefaultProjectCommandBuilder) BuildApplyCommands(ctx *CommandContext, cmd *CommentCommand) ([]models.ProjectCommandContext, error) {
if !cmd.IsForSpecificProject() {
- return p.buildApplyAllCommands(ctx, cmd)
+ return p.buildAllProjectCommands(ctx, cmd)
}
pac, err := p.buildProjectApplyCommand(ctx, cmd)
- return []models.ProjectCommandContext{pac}, err
+ return pac, err
+}
+
+func (p *DefaultProjectCommandBuilder) BuildApprovePoliciesCommands(ctx *CommandContext, cmd *CommentCommand) ([]models.ProjectCommandContext, error) {
+ return p.buildAllProjectCommands(ctx, cmd)
}
// buildPlanAllCommands builds plan contexts for all projects we determine were
@@ -159,6 +202,7 @@ func (p *DefaultProjectCommandBuilder) buildPlanAllCommands(ctx *CommandContext,
}
var projCtxs []models.ProjectCommandContext
+
if hasRepoCfg {
// If there's a repo cfg then we'll use it to figure out which projects
// should be planed.
@@ -172,10 +216,23 @@ func (p *DefaultProjectCommandBuilder) buildPlanAllCommands(ctx *CommandContext,
return nil, err
}
ctx.Log.Info("%d projects are to be planned based on their when_modified config", len(matchingProjects))
+
for _, mp := range matchingProjects {
ctx.Log.Debug("determining config for project at dir: %q workspace: %q", mp.Dir, mp.Workspace)
mergedCfg := p.GlobalCfg.MergeProjectCfg(ctx.Log, ctx.Pull.BaseRepo.ID(), mp, repoCfg)
- projCtxs = append(projCtxs, p.buildCtx(ctx, models.PlanCommand, mergedCfg, commentFlags, repoCfg.Automerge, repoCfg.ParallelApply, repoCfg.ParallelPlan, verbose, repoDir))
+
+ projCtxs = append(projCtxs,
+ p.ProjectCommandContextBuilder.BuildProjectContext(
+ ctx,
+ models.PlanCommand,
+ mergedCfg,
+ commentFlags,
+ repoDir,
+ repoCfg.Automerge,
+ repoCfg.ParallelApply,
+ repoCfg.ParallelPlan,
+ verbose,
+ )...)
}
} else {
// If there is no config file, then we'll plan each project that
@@ -186,7 +243,19 @@ func (p *DefaultProjectCommandBuilder) buildPlanAllCommands(ctx *CommandContext,
for _, mp := range modifiedProjects {
ctx.Log.Debug("determining config for project at dir: %q", mp.Path)
pCfg := p.GlobalCfg.DefaultProjCfg(ctx.Log, ctx.Pull.BaseRepo.ID(), mp.Path, DefaultWorkspace)
- projCtxs = append(projCtxs, p.buildCtx(ctx, models.PlanCommand, pCfg, commentFlags, DefaultAutomergeEnabled, DefaultParallelApplyEnabled, DefaultParallelPlanEnabled, verbose, repoDir))
+
+ projCtxs = append(projCtxs,
+ p.ProjectCommandContextBuilder.BuildProjectContext(
+ ctx,
+ models.PlanCommand,
+ pCfg,
+ commentFlags,
+ repoDir,
+ DefaultAutomergeEnabled,
+ DefaultParallelApplyEnabled,
+ DefaultParallelPlanEnabled,
+ verbose,
+ )...)
}
}
@@ -195,13 +264,13 @@ func (p *DefaultProjectCommandBuilder) buildPlanAllCommands(ctx *CommandContext,
// buildProjectPlanCommand builds a plan context for a single project.
// cmd must be for only one project.
-func (p *DefaultProjectCommandBuilder) buildProjectPlanCommand(ctx *CommandContext, cmd *CommentCommand) (models.ProjectCommandContext, error) {
+func (p *DefaultProjectCommandBuilder) buildProjectPlanCommand(ctx *CommandContext, cmd *CommentCommand) ([]models.ProjectCommandContext, error) {
workspace := DefaultWorkspace
if cmd.Workspace != "" {
workspace = cmd.Workspace
}
- var pcc models.ProjectCommandContext
+ var pcc []models.ProjectCommandContext
ctx.Log.Debug("building plan command")
unlockFn, err := p.WorkingDirLocker.TryLock(ctx.Pull.BaseRepo.FullName, ctx.Pull.Num, workspace)
if err != nil {
@@ -220,14 +289,69 @@ func (p *DefaultProjectCommandBuilder) buildProjectPlanCommand(ctx *CommandConte
repoRelDir = cmd.RepoRelDir
}
- return p.buildProjectCommandCtx(ctx, models.PlanCommand, cmd.ProjectName, cmd.Flags, repoDir, repoRelDir, workspace, cmd.Verbose)
+ return p.buildProjectCommandCtx(
+ ctx,
+ models.PlanCommand,
+ cmd.ProjectName,
+ cmd.Flags,
+ repoDir,
+ repoRelDir,
+ workspace,
+ cmd.Verbose,
+ )
+}
+
+// getCfg returns the atlantis.yaml config (if it exists) for this project. If
+// there is no config, then projectCfg and repoCfg will be nil.
+func (p *DefaultProjectCommandBuilder) getCfg(ctx *CommandContext, projectName string, dir string, workspace string, repoDir string) (projectCfg *valid.Project, repoCfg *valid.RepoCfg, err error) {
+ hasConfigFile, err := p.ParserValidator.HasRepoCfg(repoDir)
+ if err != nil {
+ err = errors.Wrapf(err, "looking for %s file in %q", yaml.AtlantisYAMLFilename, repoDir)
+ return
+ }
+ if !hasConfigFile {
+ if projectName != "" {
+ err = fmt.Errorf("cannot specify a project name unless an %s file exists to configure projects", yaml.AtlantisYAMLFilename)
+ return
+ }
+ return
+ }
+
+ var repoConfig valid.RepoCfg
+ repoConfig, err = p.ParserValidator.ParseRepoCfg(repoDir, p.GlobalCfg, ctx.Pull.BaseRepo.ID())
+ if err != nil {
+ return
+ }
+ repoCfg = &repoConfig
+
+ // If they've specified a project by name we look it up. Otherwise we
+ // use the dir and workspace.
+ if projectName != "" {
+ projectCfg = repoCfg.FindProjectByName(projectName)
+ if projectCfg == nil {
+ err = fmt.Errorf("no project with name %q is defined in %s", projectName, yaml.AtlantisYAMLFilename)
+ return
+ }
+ return
+ }
+
+ projCfgs := repoCfg.FindProjectsByDirWorkspace(dir, workspace)
+ if len(projCfgs) == 0 {
+ return
+ }
+ if len(projCfgs) > 1 {
+ err = fmt.Errorf("must specify project name: more than one project defined in %s matched dir: %q workspace: %q", yaml.AtlantisYAMLFilename, dir, workspace)
+ return
+ }
+ projectCfg = &projCfgs[0]
+ return
}
-// buildApplyAllCommands builds apply contexts for every project that has
+// buildAllProjectCommands builds contexts for a command for every project that has
// pending plans in this ctx.
-func (p *DefaultProjectCommandBuilder) buildApplyAllCommands(ctx *CommandContext, commentCmd *CommentCommand) ([]models.ProjectCommandContext, error) {
+func (p *DefaultProjectCommandBuilder) buildAllProjectCommands(ctx *CommandContext, commentCmd *CommentCommand) ([]models.ProjectCommandContext, error) {
// Lock all dirs in this pull request (instead of a single dir) because we
- // don't know how many dirs we'll need to apply in.
+ // don't know how many dirs we'll need to run the command in.
unlockFn, err := p.WorkingDirLocker.TryLockPull(ctx.Pull.BaseRepo.FullName, ctx.Pull.Num)
if err != nil {
return nil, err
@@ -246,24 +370,24 @@ func (p *DefaultProjectCommandBuilder) buildApplyAllCommands(ctx *CommandContext
var cmds []models.ProjectCommandContext
for _, plan := range plans {
- cmd, err := p.buildProjectCommandCtx(ctx, models.ApplyCommand, plan.ProjectName, commentCmd.Flags, plan.RepoDir, plan.RepoRelDir, plan.Workspace, commentCmd.Verbose)
+ commentCmds, err := p.buildProjectCommandCtx(ctx, commentCmd.CommandName(), plan.ProjectName, commentCmd.Flags, plan.RepoDir, plan.RepoRelDir, plan.Workspace, commentCmd.Verbose)
if err != nil {
return nil, errors.Wrapf(err, "building command for dir %q", plan.RepoRelDir)
}
- cmds = append(cmds, cmd)
+ cmds = append(cmds, commentCmds...)
}
return cmds, nil
}
// buildProjectApplyCommand builds an apply command for the single project
// identified by cmd.
-func (p *DefaultProjectCommandBuilder) buildProjectApplyCommand(ctx *CommandContext, cmd *CommentCommand) (models.ProjectCommandContext, error) {
+func (p *DefaultProjectCommandBuilder) buildProjectApplyCommand(ctx *CommandContext, cmd *CommentCommand) ([]models.ProjectCommandContext, error) {
workspace := DefaultWorkspace
if cmd.Workspace != "" {
workspace = cmd.Workspace
}
- var projCtx models.ProjectCommandContext
+ var projCtx []models.ProjectCommandContext
unlockFn, err := p.WorkingDirLocker.TryLock(ctx.Pull.BaseRepo.FullName, ctx.Pull.Num, workspace)
if err != nil {
return projCtx, err
@@ -282,24 +406,32 @@ func (p *DefaultProjectCommandBuilder) buildProjectApplyCommand(ctx *CommandCont
repoRelDir = cmd.RepoRelDir
}
- return p.buildProjectCommandCtx(ctx, models.ApplyCommand, cmd.ProjectName, cmd.Flags, repoDir, repoRelDir, workspace, cmd.Verbose)
+ return p.buildProjectCommandCtx(
+ ctx,
+ models.ApplyCommand,
+ cmd.ProjectName,
+ cmd.Flags,
+ repoDir,
+ repoRelDir,
+ workspace,
+ cmd.Verbose,
+ )
}
// buildProjectCommandCtx builds a context for a single project identified
// by the parameters.
-func (p *DefaultProjectCommandBuilder) buildProjectCommandCtx(
- ctx *CommandContext,
+func (p *DefaultProjectCommandBuilder) buildProjectCommandCtx(ctx *CommandContext,
cmd models.CommandName,
projectName string,
commentFlags []string,
repoDir string,
repoRelDir string,
workspace string,
- verbose bool) (models.ProjectCommandContext, error) {
+ verbose bool) ([]models.ProjectCommandContext, error) {
projCfgPtr, repoCfgPtr, err := p.getCfg(ctx, projectName, repoRelDir, workspace, repoDir)
if err != nil {
- return models.ProjectCommandContext{}, err
+ return []models.ProjectCommandContext{}, err
}
var projCfg valid.MergedProjectCfg
@@ -315,7 +447,7 @@ func (p *DefaultProjectCommandBuilder) buildProjectCommandCtx(
}
if err := p.validateWorkspaceAllowed(repoCfgPtr, repoRelDir, workspace); err != nil {
- return models.ProjectCommandContext{}, err
+ return []models.ProjectCommandContext{}, err
}
automerge := DefaultAutomergeEnabled
@@ -326,53 +458,18 @@ func (p *DefaultProjectCommandBuilder) buildProjectCommandCtx(
parallelApply = repoCfgPtr.ParallelApply
parallelPlan = repoCfgPtr.ParallelPlan
}
- return p.buildCtx(ctx, cmd, projCfg, commentFlags, automerge, parallelApply, parallelPlan, verbose, repoDir), nil
-}
-// getCfg returns the atlantis.yaml config (if it exists) for this project. If
-// there is no config, then projectCfg and repoCfg will be nil.
-func (p *DefaultProjectCommandBuilder) getCfg(ctx *CommandContext, projectName string, dir string, workspace string, repoDir string) (projectCfg *valid.Project, repoCfg *valid.RepoCfg, err error) {
- hasConfigFile, err := p.ParserValidator.HasRepoCfg(repoDir)
- if err != nil {
- err = errors.Wrapf(err, "looking for %s file in %q", yaml.AtlantisYAMLFilename, repoDir)
- return
- }
- if !hasConfigFile {
- if projectName != "" {
- err = fmt.Errorf("cannot specify a project name unless an %s file exists to configure projects", yaml.AtlantisYAMLFilename)
- return
- }
- return
- }
-
- var repoConfig valid.RepoCfg
- repoConfig, err = p.ParserValidator.ParseRepoCfg(repoDir, p.GlobalCfg, ctx.Pull.BaseRepo.ID())
- if err != nil {
- return
- }
- repoCfg = &repoConfig
-
- // If they've specified a project by name we look it up. Otherwise we
- // use the dir and workspace.
- if projectName != "" {
- projectCfg = repoCfg.FindProjectByName(projectName)
- if projectCfg == nil {
- err = fmt.Errorf("no project with name %q is defined in %s", projectName, yaml.AtlantisYAMLFilename)
- return
- }
- return
- }
-
- projCfgs := repoCfg.FindProjectsByDirWorkspace(dir, workspace)
- if len(projCfgs) == 0 {
- return
- }
- if len(projCfgs) > 1 {
- err = fmt.Errorf("must specify project name: more than one project defined in %s matched dir: %q workspace: %q", yaml.AtlantisYAMLFilename, dir, workspace)
- return
- }
- projectCfg = &projCfgs[0]
- return
+ return p.ProjectCommandContextBuilder.BuildProjectContext(
+ ctx,
+ cmd,
+ projCfg,
+ commentFlags,
+ repoDir,
+ automerge,
+ parallelApply,
+ parallelPlan,
+ verbose,
+ ), nil
}
// validateWorkspaceAllowed returns an error if repoCfg defines projects in
@@ -385,124 +482,5 @@ func (p *DefaultProjectCommandBuilder) validateWorkspaceAllowed(repoCfg *valid.R
return nil
}
- projects := repoCfg.FindProjectsByDir(repoRelDir)
-
- // If that directory doesn't have any projects configured then we don't
- // enforce workspace names.
- if len(projects) == 0 {
- return nil
- }
-
- var configuredSpaces []string
- for _, p := range projects {
- if p.Workspace == workspace {
- return nil
- }
- configuredSpaces = append(configuredSpaces, p.Workspace)
- }
-
- return fmt.Errorf(
- "running commands in workspace %q is not allowed because this"+
- " directory is only configured for the following workspaces: %s",
- workspace,
- strings.Join(configuredSpaces, ", "),
- )
-}
-
-// buildCtx is a helper method that handles constructing the ProjectCommandContext.
-func (p *DefaultProjectCommandBuilder) buildCtx(ctx *CommandContext,
- cmd models.CommandName,
- projCfg valid.MergedProjectCfg,
- commentArgs []string,
- automergeEnabled bool,
- parallelApplyEnabled bool,
- parallelPlanEnabled bool,
- verbose bool,
- absRepoDir string) models.ProjectCommandContext {
-
- var steps []valid.Step
- switch cmd {
- case models.PlanCommand:
- steps = projCfg.Workflow.Plan.Steps
- case models.ApplyCommand:
- steps = projCfg.Workflow.Apply.Steps
- }
-
- // If TerraformVersion not defined in config file look for a
- // terraform.require_version block.
- if projCfg.TerraformVersion == nil {
- projCfg.TerraformVersion = p.getTfVersion(ctx, filepath.Join(absRepoDir, projCfg.RepoRelDir))
- }
-
- return models.ProjectCommandContext{
- ApplyCmd: p.CommentBuilder.BuildApplyComment(projCfg.RepoRelDir, projCfg.Workspace, projCfg.Name),
- BaseRepo: ctx.Pull.BaseRepo,
- EscapedCommentArgs: p.escapeArgs(commentArgs),
- AutomergeEnabled: automergeEnabled,
- ParallelApplyEnabled: parallelApplyEnabled,
- ParallelPlanEnabled: parallelPlanEnabled,
- AutoplanEnabled: projCfg.AutoplanEnabled,
- Steps: steps,
- HeadRepo: ctx.HeadRepo,
- Log: ctx.Log,
- PullMergeable: ctx.PullMergeable,
- Pull: ctx.Pull,
- ProjectName: projCfg.Name,
- ApplyRequirements: projCfg.ApplyRequirements,
- RePlanCmd: p.CommentBuilder.BuildPlanComment(projCfg.RepoRelDir, projCfg.Workspace, projCfg.Name, commentArgs),
- RepoRelDir: projCfg.RepoRelDir,
- RepoConfigVersion: projCfg.RepoCfgVersion,
- TerraformVersion: projCfg.TerraformVersion,
- User: ctx.User,
- Verbose: verbose,
- Workspace: projCfg.Workspace,
- }
-}
-
-func (p *DefaultProjectCommandBuilder) escapeArgs(args []string) []string {
- var escaped []string
- for _, arg := range args {
- var escapedArg string
- for i := range arg {
- escapedArg += "\\" + string(arg[i])
- }
- escaped = append(escaped, escapedArg)
- }
- return escaped
-}
-
-// Extracts required_version from Terraform configuration.
-// Returns nil if unable to determine version from configuration.
-func (p *DefaultProjectCommandBuilder) getTfVersion(ctx *CommandContext, absProjDir string) *version.Version {
- module, diags := tfconfig.LoadModule(absProjDir)
- if diags.HasErrors() {
- ctx.Log.Err("trying to detect required version: %s", diags.Error())
- for _, d := range diags {
- ctx.Log.Debug("%s in %s:%d", d.Detail, d.Pos.Filename, d.Pos.Line)
- }
- return nil
- }
-
- if len(module.RequiredCore) != 1 {
- ctx.Log.Info("cannot determine which version to use from terraform configuration, detected %d possibilities.", len(module.RequiredCore))
- return nil
- }
- requiredVersionSetting := module.RequiredCore[0]
-
- // We allow `= x.y.z`, `=x.y.z` or `x.y.z` where `x`, `y` and `z` are integers.
- re := regexp.MustCompile(`^=?\s*([^\s]+)\s*$`)
- matched := re.FindStringSubmatch(requiredVersionSetting)
- if len(matched) == 0 {
- ctx.Log.Debug("did not specify exact version in terraform configuration, found %q", requiredVersionSetting)
- return nil
- }
- ctx.Log.Debug("found required_version setting of %q", requiredVersionSetting)
- version, err := version.NewVersion(matched[1])
- if err != nil {
- ctx.Log.Debug(err.Error())
- return nil
- }
-
- ctx.Log.Info("detected module requires version: %q", version.String())
- return version
+ return repoCfg.ValidateWorkspaceAllowed(repoRelDir, workspace)
}
diff --git a/server/events/project_command_builder_internal_test.go b/server/events/project_command_builder_internal_test.go
index 2ccbec4517..f137f35591 100644
--- a/server/events/project_command_builder_internal_test.go
+++ b/server/events/project_command_builder_internal_test.go
@@ -17,6 +17,10 @@ import (
// Test different permutations of global and repo config.
func TestBuildProjectCmdCtx(t *testing.T) {
+ emptyPolicySets := valid.PolicySets{
+ Version: nil,
+ PolicySets: []valid.PolicySet{},
+ }
baseRepo := models.Repo{
FullName: "owner/repo",
VCSHost: models.VCSHost{
@@ -68,6 +72,7 @@ workflows:
User: models.User{},
Verbose: true,
Workspace: "myworkspace",
+ PolicySets: emptyPolicySets,
},
expPlanSteps: []string{"init", "plan"},
expApplySteps: []string{"apply"},
@@ -119,6 +124,7 @@ projects:
User: models.User{},
Verbose: true,
Workspace: "myworkspace",
+ PolicySets: emptyPolicySets,
},
expPlanSteps: []string{"init", "plan"},
expApplySteps: []string{"apply"},
@@ -170,6 +176,7 @@ projects:
User: models.User{},
Verbose: true,
Workspace: "myworkspace",
+ PolicySets: emptyPolicySets,
},
expPlanSteps: []string{"init", "plan"},
expApplySteps: []string{"apply"},
@@ -229,6 +236,7 @@ projects:
User: models.User{},
Verbose: true,
Workspace: "myworkspace",
+ PolicySets: emptyPolicySets,
},
expPlanSteps: []string{"plan"},
expApplySteps: []string{},
@@ -375,6 +383,7 @@ workflows:
User: models.User{},
Verbose: true,
Workspace: "myworkspace",
+ PolicySets: emptyPolicySets,
},
expPlanSteps: []string{"plan"},
expApplySteps: []string{"apply"},
@@ -430,6 +439,7 @@ projects:
User: models.User{},
Verbose: true,
Workspace: "myworkspace",
+ PolicySets: emptyPolicySets,
},
expPlanSteps: []string{"plan"},
expApplySteps: []string{"apply"},
@@ -488,6 +498,7 @@ workflows:
User: models.User{},
Verbose: true,
Workspace: "myworkspace",
+ PolicySets: emptyPolicySets,
},
expPlanSteps: []string{},
expApplySteps: []string{},
@@ -529,6 +540,7 @@ projects:
User: models.User{},
Verbose: true,
Workspace: "myworkspace",
+ PolicySets: emptyPolicySets,
},
expPlanSteps: []string{"plan"},
expApplySteps: []string{"apply"},
@@ -565,22 +577,23 @@ projects:
Ok(t, ioutil.WriteFile(filepath.Join(tmp, "atlantis.yaml"), []byte(c.repoCfg), 0600))
}
- builder := &DefaultProjectCommandBuilder{
- WorkingDirLocker: NewDefaultWorkingDirLocker(),
- WorkingDir: workingDir,
- ParserValidator: parser,
- VCSClient: vcsClient,
- ProjectFinder: &DefaultProjectFinder{},
- PendingPlanFinder: &DefaultPendingPlanFinder{},
- CommentBuilder: &CommentParser{},
- GlobalCfg: globalCfg,
- SkipCloneNoChanges: false,
- }
+ builder := NewProjectCommandBuilder(
+ false,
+ parser,
+ &DefaultProjectFinder{},
+ vcsClient,
+ workingDir,
+ NewDefaultWorkingDirLocker(),
+ globalCfg,
+ &DefaultPendingPlanFinder{},
+ &CommentParser{},
+ false,
+ )
// We run a test for each type of command.
for _, cmd := range []models.CommandName{models.PlanCommand, models.ApplyCommand} {
t.Run(cmd.String(), func(t *testing.T) {
- ctx, err := builder.buildProjectCommandCtx(&CommandContext{
+ ctxs, err := builder.buildProjectCommandCtx(&CommandContext{
Pull: models.PullRequest{
BaseRepo: baseRepo,
},
@@ -591,6 +604,7 @@ projects:
ErrEquals(t, c.expErr, err)
return
}
+ ctx := ctxs[0]
Ok(t, err)
@@ -609,8 +623,10 @@ projects:
})
}
+ c.expCtx.CommandName = cmd
// Init fields we couldn't in our cases map.
c.expCtx.Steps = expSteps
+ ctx.PolicySets = emptyPolicySets
Equals(t, c.expCtx, ctx)
// Equals() doesn't compare TF version properly so have to
@@ -624,6 +640,203 @@ projects:
}
}
+func TestBuildProjectCmdCtx_WithPolicyCheckEnabled(t *testing.T) {
+ emptyPolicySets := valid.PolicySets{
+ Version: nil,
+ PolicySets: []valid.PolicySet{},
+ }
+ baseRepo := models.Repo{
+ FullName: "owner/repo",
+ VCSHost: models.VCSHost{
+ Hostname: "github.com",
+ },
+ }
+ pull := models.PullRequest{
+ BaseRepo: baseRepo,
+ }
+ cases := map[string]struct {
+ globalCfg string
+ repoCfg string
+ expErr string
+ expCtx models.ProjectCommandContext
+ expPolicyCheckSteps []string
+ }{
+ // Test that if we've set global defaults and no project config
+ // that the global defaults are used.
+ "global defaults": {
+ globalCfg: `
+repos:
+- id: /.*/
+`,
+ repoCfg: "",
+ expCtx: models.ProjectCommandContext{
+ ApplyCmd: "atlantis apply -d project1 -w myworkspace",
+ BaseRepo: baseRepo,
+ EscapedCommentArgs: []string{`\f\l\a\g`},
+ AutomergeEnabled: false,
+ AutoplanEnabled: true,
+ HeadRepo: models.Repo{},
+ Log: nil,
+ PullMergeable: true,
+ Pull: pull,
+ ProjectName: "",
+ ApplyRequirements: []string{},
+ RePlanCmd: "atlantis plan -d project1 -w myworkspace -- flag",
+ RepoRelDir: "project1",
+ User: models.User{},
+ Verbose: true,
+ Workspace: "myworkspace",
+ PolicySets: emptyPolicySets,
+ },
+ expPolicyCheckSteps: []string{"show", "policy_check"},
+ },
+
+ // If the repos are allowed to set everything then their config should
+ // come through.
+ "full repo permissions": {
+ globalCfg: `
+repos:
+- id: /.*/
+ workflow: default
+ apply_requirements: [approved]
+ allowed_overrides: [apply_requirements, workflow]
+ allow_custom_workflows: true
+workflows:
+ default:
+ policy_check:
+ steps: []
+`,
+ repoCfg: `
+version: 3
+automerge: true
+projects:
+- dir: project1
+ workspace: myworkspace
+ autoplan:
+ enabled: true
+ when_modified: [../modules/**/*.tf]
+ terraform_version: v10.0
+ apply_requirements: []
+ workflow: custom
+workflows:
+ custom:
+ policy_check:
+ steps:
+ - policy_check
+`,
+ expCtx: models.ProjectCommandContext{
+ ApplyCmd: "atlantis apply -d project1 -w myworkspace",
+ BaseRepo: baseRepo,
+ EscapedCommentArgs: []string{`\f\l\a\g`},
+ AutomergeEnabled: true,
+ AutoplanEnabled: true,
+ HeadRepo: models.Repo{},
+ Log: nil,
+ PullMergeable: true,
+ Pull: pull,
+ ProjectName: "",
+ ApplyRequirements: []string{},
+ RepoConfigVersion: 3,
+ RePlanCmd: "atlantis plan -d project1 -w myworkspace -- flag",
+ RepoRelDir: "project1",
+ TerraformVersion: mustVersion("10.0"),
+ User: models.User{},
+ Verbose: true,
+ Workspace: "myworkspace",
+ PolicySets: emptyPolicySets,
+ },
+ expPolicyCheckSteps: []string{"policy_check"},
+ },
+ }
+
+ for name, c := range cases {
+ t.Run(name, func(t *testing.T) {
+ tmp, cleanup := DirStructure(t, map[string]interface{}{
+ "project1": map[string]interface{}{
+ "main.tf": nil,
+ },
+ "modules": map[string]interface{}{
+ "module": map[string]interface{}{
+ "main.tf": nil,
+ },
+ },
+ })
+ defer cleanup()
+
+ workingDir := NewMockWorkingDir()
+ When(workingDir.Clone(matchers.AnyPtrToLoggingSimpleLogger(), matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest(), AnyString())).ThenReturn(tmp, false, nil)
+ vcsClient := vcsmocks.NewMockClient()
+ When(vcsClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn([]string{"modules/module/main.tf"}, nil)
+
+ // Write and parse the global config file.
+ globalCfgPath := filepath.Join(tmp, "global.yaml")
+ Ok(t, ioutil.WriteFile(globalCfgPath, []byte(c.globalCfg), 0600))
+ parser := &yaml.ParserValidator{}
+ globalCfg, err := parser.ParseGlobalCfg(globalCfgPath, valid.NewGlobalCfg(false, false, false))
+ Ok(t, err)
+
+ if c.repoCfg != "" {
+ Ok(t, ioutil.WriteFile(filepath.Join(tmp, "atlantis.yaml"), []byte(c.repoCfg), 0600))
+ }
+
+ builder := NewProjectCommandBuilder(
+ true,
+ parser,
+ &DefaultProjectFinder{},
+ vcsClient,
+ workingDir,
+ NewDefaultWorkingDirLocker(),
+ globalCfg,
+ &DefaultPendingPlanFinder{},
+ &CommentParser{},
+ false,
+ )
+
+ cmd := models.PolicyCheckCommand
+ t.Run(cmd.String(), func(t *testing.T) {
+ ctxs, err := builder.buildProjectCommandCtx(&CommandContext{
+ Pull: models.PullRequest{
+ BaseRepo: baseRepo,
+ },
+ PullMergeable: true,
+ }, models.PlanCommand, "", []string{"flag"}, tmp, "project1", "myworkspace", true)
+
+ if c.expErr != "" {
+ ErrEquals(t, c.expErr, err)
+ return
+ }
+
+ ctx := ctxs[1]
+
+ Ok(t, err)
+
+ // Construct expected steps.
+ var stepNames []string
+ var expSteps []valid.Step
+
+ stepNames = c.expPolicyCheckSteps
+ for _, stepName := range stepNames {
+ expSteps = append(expSteps, valid.Step{
+ StepName: stepName,
+ })
+ }
+
+ c.expCtx.CommandName = cmd
+ // Init fields we couldn't in our cases map.
+ c.expCtx.Steps = expSteps
+ ctx.PolicySets = emptyPolicySets
+
+ Equals(t, c.expCtx, ctx)
+ // Equals() doesn't compare TF version properly so have to
+ // use .String().
+ if c.expCtx.TerraformVersion != nil {
+ Equals(t, c.expCtx.TerraformVersion.String(), ctx.TerraformVersion.String())
+ }
+ })
+ })
+ }
+}
+
func mustVersion(v string) *version.Version {
vers, err := version.NewVersion(v)
if err != nil {
diff --git a/server/events/project_command_builder_test.go b/server/events/project_command_builder_test.go
index 5cafdc8dbe..cd191a06fe 100644
--- a/server/events/project_command_builder_test.go
+++ b/server/events/project_command_builder_test.go
@@ -134,17 +134,18 @@ projects:
Ok(t, err)
}
- builder := &events.DefaultProjectCommandBuilder{
- WorkingDirLocker: events.NewDefaultWorkingDirLocker(),
- WorkingDir: workingDir,
- ParserValidator: &yaml.ParserValidator{},
- VCSClient: vcsClient,
- ProjectFinder: &events.DefaultProjectFinder{},
- PendingPlanFinder: &events.DefaultPendingPlanFinder{},
- CommentBuilder: &events.CommentParser{},
- GlobalCfg: valid.NewGlobalCfg(false, false, false),
- SkipCloneNoChanges: false,
- }
+ builder := events.NewProjectCommandBuilder(
+ false,
+ &yaml.ParserValidator{},
+ &events.DefaultProjectFinder{},
+ vcsClient,
+ workingDir,
+ events.NewDefaultWorkingDirLocker(),
+ valid.NewGlobalCfg(false, false, false),
+ &events.DefaultPendingPlanFinder{},
+ &events.CommentParser{},
+ false,
+ )
ctxs, err := builder.BuildAutoplanCommands(&events.CommandContext{
PullMergeable: true,
@@ -358,16 +359,18 @@ projects:
Ok(t, err)
}
- builder := &events.DefaultProjectCommandBuilder{
- WorkingDirLocker: events.NewDefaultWorkingDirLocker(),
- WorkingDir: workingDir,
- ParserValidator: &yaml.ParserValidator{},
- VCSClient: vcsClient,
- ProjectFinder: &events.DefaultProjectFinder{},
- CommentBuilder: &events.CommentParser{},
- GlobalCfg: valid.NewGlobalCfg(true, false, false),
- SkipCloneNoChanges: false,
- }
+ builder := events.NewProjectCommandBuilder(
+ false,
+ &yaml.ParserValidator{},
+ &events.DefaultProjectFinder{},
+ vcsClient,
+ workingDir,
+ events.NewDefaultWorkingDirLocker(),
+ valid.NewGlobalCfg(true, false, false),
+ &events.DefaultPendingPlanFinder{},
+ &events.CommentParser{},
+ false,
+ )
var actCtxs []models.ProjectCommandContext
var err error
@@ -492,16 +495,18 @@ projects:
Ok(t, err)
}
- builder := &events.DefaultProjectCommandBuilder{
- WorkingDirLocker: events.NewDefaultWorkingDirLocker(),
- WorkingDir: workingDir,
- ParserValidator: &yaml.ParserValidator{},
- VCSClient: vcsClient,
- ProjectFinder: &events.DefaultProjectFinder{},
- CommentBuilder: &events.CommentParser{},
- GlobalCfg: valid.NewGlobalCfg(true, false, false),
- SkipCloneNoChanges: false,
- }
+ builder := events.NewProjectCommandBuilder(
+ false,
+ &yaml.ParserValidator{},
+ &events.DefaultProjectFinder{},
+ vcsClient,
+ workingDir,
+ events.NewDefaultWorkingDirLocker(),
+ valid.NewGlobalCfg(true, false, false),
+ &events.DefaultPendingPlanFinder{},
+ &events.CommentParser{},
+ false,
+ )
ctxs, err := builder.BuildPlanCommands(
&events.CommandContext{},
@@ -564,17 +569,18 @@ func TestDefaultProjectCommandBuilder_BuildMultiApply(t *testing.T) {
matchers.AnyModelsPullRequest())).
ThenReturn(tmpDir, nil)
- builder := &events.DefaultProjectCommandBuilder{
- WorkingDirLocker: events.NewDefaultWorkingDirLocker(),
- WorkingDir: workingDir,
- ParserValidator: &yaml.ParserValidator{},
- VCSClient: nil,
- ProjectFinder: &events.DefaultProjectFinder{},
- PendingPlanFinder: &events.DefaultPendingPlanFinder{},
- CommentBuilder: &events.CommentParser{},
- GlobalCfg: valid.NewGlobalCfg(false, false, false),
- SkipCloneNoChanges: false,
- }
+ builder := events.NewProjectCommandBuilder(
+ false,
+ &yaml.ParserValidator{},
+ &events.DefaultProjectFinder{},
+ nil,
+ workingDir,
+ events.NewDefaultWorkingDirLocker(),
+ valid.NewGlobalCfg(false, false, false),
+ &events.DefaultPendingPlanFinder{},
+ &events.CommentParser{},
+ false,
+ )
ctxs, err := builder.BuildApplyCommands(
&events.CommandContext{},
@@ -632,16 +638,18 @@ projects:
matchers.AnyModelsPullRequest(),
AnyString())).ThenReturn(repoDir, nil)
- builder := &events.DefaultProjectCommandBuilder{
- WorkingDirLocker: events.NewDefaultWorkingDirLocker(),
- WorkingDir: workingDir,
- ParserValidator: &yaml.ParserValidator{},
- VCSClient: nil,
- ProjectFinder: &events.DefaultProjectFinder{},
- CommentBuilder: &events.CommentParser{},
- GlobalCfg: valid.NewGlobalCfg(true, false, false),
- SkipCloneNoChanges: false,
- }
+ builder := events.NewProjectCommandBuilder(
+ false,
+ &yaml.ParserValidator{},
+ &events.DefaultProjectFinder{},
+ nil,
+ workingDir,
+ events.NewDefaultWorkingDirLocker(),
+ valid.NewGlobalCfg(true, false, false),
+ &events.DefaultPendingPlanFinder{},
+ &events.CommentParser{},
+ false,
+ )
ctx := &events.CommandContext{
HeadRepo: models.Repo{},
@@ -694,16 +702,18 @@ func TestDefaultProjectCommandBuilder_EscapeArgs(t *testing.T) {
vcsClient := vcsmocks.NewMockClient()
When(vcsClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn([]string{"main.tf"}, nil)
- builder := &events.DefaultProjectCommandBuilder{
- WorkingDirLocker: events.NewDefaultWorkingDirLocker(),
- WorkingDir: workingDir,
- ParserValidator: &yaml.ParserValidator{},
- VCSClient: vcsClient,
- ProjectFinder: &events.DefaultProjectFinder{},
- CommentBuilder: &events.CommentParser{},
- GlobalCfg: valid.NewGlobalCfg(true, false, false),
- SkipCloneNoChanges: false,
- }
+ builder := events.NewProjectCommandBuilder(
+ false,
+ &yaml.ParserValidator{},
+ &events.DefaultProjectFinder{},
+ vcsClient,
+ workingDir,
+ events.NewDefaultWorkingDirLocker(),
+ valid.NewGlobalCfg(true, false, false),
+ &events.DefaultPendingPlanFinder{},
+ &events.CommentParser{},
+ false,
+ )
var actCtxs []models.ProjectCommandContext
var err error
@@ -858,16 +868,18 @@ projects:
matchers.AnyModelsPullRequest(),
AnyString())).ThenReturn(tmpDir, nil)
- builder := &events.DefaultProjectCommandBuilder{
- WorkingDirLocker: events.NewDefaultWorkingDirLocker(),
- WorkingDir: workingDir,
- VCSClient: vcsClient,
- ParserValidator: &yaml.ParserValidator{},
- ProjectFinder: &events.DefaultProjectFinder{},
- CommentBuilder: &events.CommentParser{},
- GlobalCfg: valid.NewGlobalCfg(true, false, false),
- SkipCloneNoChanges: false,
- }
+ builder := events.NewProjectCommandBuilder(
+ false,
+ &yaml.ParserValidator{},
+ &events.DefaultProjectFinder{},
+ vcsClient,
+ workingDir,
+ events.NewDefaultWorkingDirLocker(),
+ valid.NewGlobalCfg(true, false, false),
+ &events.DefaultPendingPlanFinder{},
+ &events.CommentParser{},
+ false,
+ )
actCtxs, err := builder.BuildPlanCommands(
&events.CommandContext{},
@@ -906,16 +918,18 @@ projects:
When(vcsClient.DownloadRepoConfigFile(matchers.AnyModelsPullRequest())).ThenReturn(true, []byte(atlantisYAML), nil)
workingDir := mocks.NewMockWorkingDir()
- builder := &events.DefaultProjectCommandBuilder{
- WorkingDirLocker: events.NewDefaultWorkingDirLocker(),
- WorkingDir: workingDir,
- ParserValidator: &yaml.ParserValidator{},
- VCSClient: vcsClient,
- ProjectFinder: &events.DefaultProjectFinder{},
- CommentBuilder: &events.CommentParser{},
- GlobalCfg: valid.NewGlobalCfg(true, false, false),
- SkipCloneNoChanges: true,
- }
+ builder := events.NewProjectCommandBuilder(
+ false,
+ &yaml.ParserValidator{},
+ &events.DefaultProjectFinder{},
+ vcsClient,
+ workingDir,
+ events.NewDefaultWorkingDirLocker(),
+ valid.NewGlobalCfg(true, false, false),
+ &events.DefaultPendingPlanFinder{},
+ &events.CommentParser{},
+ true,
+ )
var actCtxs []models.ProjectCommandContext
var err error
@@ -930,3 +944,43 @@ projects:
Equals(t, 0, len(actCtxs))
workingDir.VerifyWasCalled(Never()).Clone(matchers.AnyPtrToLoggingSimpleLogger(), matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest(), AnyString())
}
+
+func TestDefaultProjectCommandBuilder_WithPolicyCheckEnabled_BuildAutoplanCommand(t *testing.T) {
+ RegisterMockTestingT(t)
+ tmpDir, cleanup := DirStructure(t, map[string]interface{}{
+ "main.tf": nil,
+ })
+ defer cleanup()
+
+ workingDir := mocks.NewMockWorkingDir()
+ When(workingDir.Clone(matchers.AnyPtrToLoggingSimpleLogger(), matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest(), AnyString())).ThenReturn(tmpDir, false, nil)
+ vcsClient := vcsmocks.NewMockClient()
+ When(vcsClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn([]string{"main.tf"}, nil)
+ globalCfg := valid.NewGlobalCfg(false, false, false)
+
+ builder := events.NewProjectCommandBuilder(
+ true,
+ &yaml.ParserValidator{},
+ &events.DefaultProjectFinder{},
+ vcsClient,
+ workingDir,
+ events.NewDefaultWorkingDirLocker(),
+ globalCfg,
+ &events.DefaultPendingPlanFinder{},
+ &events.CommentParser{},
+ false,
+ )
+
+ ctxs, err := builder.BuildAutoplanCommands(&events.CommandContext{
+ PullMergeable: true,
+ })
+
+ Ok(t, err)
+ Equals(t, 2, len(ctxs))
+ planCtx := ctxs[0]
+ policyCheckCtx := ctxs[1]
+ Equals(t, models.PlanCommand, planCtx.CommandName)
+ Equals(t, globalCfg.Workflows["default"].Plan.Steps, planCtx.Steps)
+ Equals(t, models.PolicyCheckCommand, policyCheckCtx.CommandName)
+ Equals(t, globalCfg.Workflows["default"].PolicyCheck.Steps, policyCheckCtx.Steps)
+}
diff --git a/server/events/project_command_context_builder.go b/server/events/project_command_context_builder.go
new file mode 100644
index 0000000000..40919264cd
--- /dev/null
+++ b/server/events/project_command_context_builder.go
@@ -0,0 +1,220 @@
+package events
+
+import (
+ "path/filepath"
+ "regexp"
+
+ "github.com/hashicorp/go-version"
+ "github.com/hashicorp/terraform-config-inspect/tfconfig"
+ "github.com/runatlantis/atlantis/server/events/models"
+ "github.com/runatlantis/atlantis/server/events/yaml/valid"
+)
+
+func NewProjectCommandContextBulder(policyCheckEnabled bool, commentBuilder CommentBuilder) ProjectCommandContextBuilder {
+ projectCommandContextBuilder := &DefaultProjectCommandContextBuilder{
+ CommentBuilder: commentBuilder,
+ }
+
+ if policyCheckEnabled {
+ return &PolicyCheckProjectCommandContextBuilder{
+ CommentBuilder: commentBuilder,
+ ProjectCommandContextBuilder: projectCommandContextBuilder,
+ }
+ }
+
+ return projectCommandContextBuilder
+}
+
+type ProjectCommandContextBuilder interface {
+ // BuildProjectContext builds project command contexts for atlantis commands
+ BuildProjectContext(
+ ctx *CommandContext,
+ cmdName models.CommandName,
+ prjCfg valid.MergedProjectCfg,
+ commentFlags []string,
+ repoDir string,
+ automerge, parallelApply, parallelPlan, verbose bool,
+ ) []models.ProjectCommandContext
+}
+
+type DefaultProjectCommandContextBuilder struct {
+ CommentBuilder CommentBuilder
+}
+
+func (cb *DefaultProjectCommandContextBuilder) BuildProjectContext(
+ ctx *CommandContext,
+ cmdName models.CommandName,
+ prjCfg valid.MergedProjectCfg,
+ commentFlags []string,
+ repoDir string,
+ automerge, parallelApply, parallelPlan, verbose bool,
+) (projectCmds []models.ProjectCommandContext) {
+ ctx.Log.Debug("Building project command context for %s", cmdName)
+
+ var steps []valid.Step
+ switch cmdName {
+ case models.PlanCommand:
+ steps = prjCfg.Workflow.Plan.Steps
+ case models.ApplyCommand:
+ steps = prjCfg.Workflow.Apply.Steps
+ }
+
+ // If TerraformVersion not defined in config file look for a
+ // terraform.require_version block.
+ if prjCfg.TerraformVersion == nil {
+ prjCfg.TerraformVersion = getTfVersion(ctx, filepath.Join(repoDir, prjCfg.RepoRelDir))
+ }
+
+ projectCmds = append(projectCmds, newProjectCommandContext(
+ ctx,
+ cmdName,
+ cb.CommentBuilder.BuildApplyComment(prjCfg.RepoRelDir, prjCfg.Workspace, prjCfg.Name),
+ cb.CommentBuilder.BuildPlanComment(prjCfg.RepoRelDir, prjCfg.Workspace, prjCfg.Name, commentFlags),
+ prjCfg,
+ steps,
+ prjCfg.PolicySets,
+ escapeArgs(commentFlags),
+ automerge,
+ parallelApply,
+ parallelPlan,
+ verbose,
+ ))
+
+ return
+}
+
+type PolicyCheckProjectCommandContextBuilder struct {
+ ProjectCommandContextBuilder *DefaultProjectCommandContextBuilder
+ CommentBuilder CommentBuilder
+}
+
+func (cb *PolicyCheckProjectCommandContextBuilder) BuildProjectContext(
+ ctx *CommandContext,
+ cmdName models.CommandName,
+ prjCfg valid.MergedProjectCfg,
+ commentFlags []string,
+ repoDir string,
+ automerge, parallelApply, parallelPlan, verbose bool,
+) (projectCmds []models.ProjectCommandContext) {
+ ctx.Log.Debug("PolicyChecks are enabled")
+ projectCmds = cb.ProjectCommandContextBuilder.BuildProjectContext(
+ ctx,
+ cmdName,
+ prjCfg,
+ commentFlags,
+ repoDir,
+ automerge,
+ parallelApply,
+ parallelPlan,
+ verbose,
+ )
+
+ if cmdName == models.PlanCommand {
+ ctx.Log.Debug("Building project command context for %s", models.PolicyCheckCommand)
+ steps := prjCfg.Workflow.PolicyCheck.Steps
+
+ projectCmds = append(projectCmds, newProjectCommandContext(
+ ctx,
+ models.PolicyCheckCommand,
+ cb.CommentBuilder.BuildApplyComment(prjCfg.RepoRelDir, prjCfg.Workspace, prjCfg.Name),
+ cb.CommentBuilder.BuildPlanComment(prjCfg.RepoRelDir, prjCfg.Workspace, prjCfg.Name, commentFlags),
+ prjCfg,
+ steps,
+ prjCfg.PolicySets,
+ escapeArgs(commentFlags),
+ automerge,
+ parallelApply,
+ parallelPlan,
+ verbose,
+ ))
+ }
+
+ return
+}
+
+// newProjectCommandContext is an initializer function that handles constructing the
+// ProjectCommandContext.
+func newProjectCommandContext(ctx *CommandContext,
+ cmd models.CommandName,
+ applyCmd string,
+ planCmd string,
+ projCfg valid.MergedProjectCfg,
+ steps []valid.Step,
+ policySets valid.PolicySets,
+ escapedCommentArgs []string,
+ automergeEnabled bool,
+ parallelApplyEnabled bool,
+ parallelPlanEnabled bool,
+ verbose bool,
+) models.ProjectCommandContext {
+ return models.ProjectCommandContext{
+ CommandName: cmd,
+ ApplyCmd: applyCmd,
+ BaseRepo: ctx.Pull.BaseRepo,
+ EscapedCommentArgs: escapedCommentArgs,
+ AutomergeEnabled: automergeEnabled,
+ ParallelApplyEnabled: parallelApplyEnabled,
+ ParallelPlanEnabled: parallelPlanEnabled,
+ AutoplanEnabled: projCfg.AutoplanEnabled,
+ Steps: steps,
+ HeadRepo: ctx.HeadRepo,
+ Log: ctx.Log,
+ PullMergeable: ctx.PullMergeable,
+ Pull: ctx.Pull,
+ ProjectName: projCfg.Name,
+ ApplyRequirements: projCfg.ApplyRequirements,
+ RePlanCmd: planCmd,
+ RepoRelDir: projCfg.RepoRelDir,
+ RepoConfigVersion: projCfg.RepoCfgVersion,
+ TerraformVersion: projCfg.TerraformVersion,
+ User: ctx.User,
+ Verbose: verbose,
+ Workspace: projCfg.Workspace,
+ PolicySets: policySets,
+ }
+}
+
+func escapeArgs(args []string) []string {
+ var escaped []string
+ for _, arg := range args {
+ var escapedArg string
+ for i := range arg {
+ escapedArg += "\\" + string(arg[i])
+ }
+ escaped = append(escaped, escapedArg)
+ }
+ return escaped
+}
+
+// Extracts required_version from Terraform configuration.
+// Returns nil if unable to determine version from configuration.
+func getTfVersion(ctx *CommandContext, absProjDir string) *version.Version {
+ module, diags := tfconfig.LoadModule(absProjDir)
+ if diags.HasErrors() {
+ ctx.Log.Err("trying to detect required version: %s", diags.Error())
+ return nil
+ }
+
+ if len(module.RequiredCore) != 1 {
+ ctx.Log.Info("cannot determine which version to use from terraform configuration, detected %d possibilities.", len(module.RequiredCore))
+ return nil
+ }
+ requiredVersionSetting := module.RequiredCore[0]
+
+ // We allow `= x.y.z`, `=x.y.z` or `x.y.z` where `x`, `y` and `z` are integers.
+ re := regexp.MustCompile(`^=?\s*([^\s]+)\s*$`)
+ matched := re.FindStringSubmatch(requiredVersionSetting)
+ if len(matched) == 0 {
+ ctx.Log.Debug("did not specify exact version in terraform configuration, found %q", requiredVersionSetting)
+ return nil
+ }
+ ctx.Log.Debug("found required_version setting of %q", requiredVersionSetting)
+ version, err := version.NewVersion(matched[1])
+ if err != nil {
+ ctx.Log.Debug(err.Error())
+ return nil
+ }
+
+ ctx.Log.Info("detected module requires version: %q", version.String())
+ return version
+}
diff --git a/server/events/project_command_context_builder_test.go b/server/events/project_command_context_builder_test.go
new file mode 100644
index 0000000000..79457f0dd2
--- /dev/null
+++ b/server/events/project_command_context_builder_test.go
@@ -0,0 +1 @@
+package events_test
diff --git a/server/events/project_command_pool_executor.go b/server/events/project_command_pool_executor.go
new file mode 100644
index 0000000000..8c83ea0aaa
--- /dev/null
+++ b/server/events/project_command_pool_executor.go
@@ -0,0 +1,52 @@
+package events
+
+import (
+ "sync"
+
+ "github.com/remeh/sizedwaitgroup"
+ "github.com/runatlantis/atlantis/server/events/models"
+)
+
+type prjCmdRunnerFunc func(ctx models.ProjectCommandContext) models.ProjectResult
+
+func runProjectCmdsParallel(
+ cmds []models.ProjectCommandContext,
+ runnerFunc prjCmdRunnerFunc,
+ poolSize int,
+) CommandResult { // runs each command concurrently, bounded by poolSize; result order is nondeterministic
+ var results []models.ProjectResult
+ mux := &sync.Mutex{}
+
+ wg := sizedwaitgroup.New(poolSize) // fix: was hard-coded to 15, silently ignoring the poolSize parameter
+ for _, pCmd := range cmds {
+ pCmd := pCmd
+ var execute func()
+ wg.Add()
+
+ execute = func() {
+ defer wg.Done()
+ res := runnerFunc(pCmd)
+ mux.Lock()
+ results = append(results, res)
+ mux.Unlock()
+ }
+
+ go execute()
+ }
+
+ wg.Wait()
+ return CommandResult{ProjectResults: results}
+}
+
+func runProjectCmds(
+ cmds []models.ProjectCommandContext,
+ runnerFunc prjCmdRunnerFunc,
+) CommandResult { // sequential variant: runs each command in order, results keep input order
+ var results []models.ProjectResult
+ for _, pCmd := range cmds {
+ res := runnerFunc(pCmd)
+
+ results = append(results, res)
+ }
+ return CommandResult{ProjectResults: results}
+}
diff --git a/server/events/project_command_runner.go b/server/events/project_command_runner.go
index cc3a8b0a4c..e10c41f8c7 100644
--- a/server/events/project_command_runner.go
+++ b/server/events/project_command_runner.go
@@ -80,28 +80,50 @@ type WebhooksSender interface {
//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_project_command_runner.go ProjectCommandRunner
-// ProjectCommandRunner runs project commands. A project command is a command
-// for a specific TF project.
-type ProjectCommandRunner interface {
+type ProjectPlanCommandRunner interface {
// Plan runs terraform plan for the project described by ctx.
Plan(ctx models.ProjectCommandContext) models.ProjectResult
+}
+
+type ProjectApplyCommandRunner interface {
// Apply runs terraform apply for the project described by ctx.
Apply(ctx models.ProjectCommandContext) models.ProjectResult
}
+type ProjectPolicyCheckCommandRunner interface {
+ // PolicyCheck runs OPA defined policies for the project described by ctx.
+ PolicyCheck(ctx models.ProjectCommandContext) models.ProjectResult
+}
+
+type ProjectApprovePoliciesCommandRunner interface {
+ // ApprovePolicies approves any failing OPA policies.
+ ApprovePolicies(ctx models.ProjectCommandContext) models.ProjectResult
+}
+
+// ProjectCommandRunner runs project commands. A project command is a command
+// for a specific TF project.
+type ProjectCommandRunner interface {
+ ProjectPlanCommandRunner
+ ProjectApplyCommandRunner
+ ProjectPolicyCheckCommandRunner
+ ProjectApprovePoliciesCommandRunner
+}
+
// DefaultProjectCommandRunner implements ProjectCommandRunner.
type DefaultProjectCommandRunner struct {
- Locker ProjectLocker
- LockURLGenerator LockURLGenerator
- InitStepRunner StepRunner
- PlanStepRunner StepRunner
- ApplyStepRunner StepRunner
- RunStepRunner CustomStepRunner
- EnvStepRunner EnvStepRunner
- PullApprovedChecker runtime.PullApprovedChecker
- WorkingDir WorkingDir
- Webhooks WebhooksSender
- WorkingDirLocker WorkingDirLocker
+ Locker ProjectLocker
+ LockURLGenerator LockURLGenerator
+ InitStepRunner StepRunner
+ PlanStepRunner StepRunner
+ ShowStepRunner StepRunner
+ ApplyStepRunner StepRunner
+ PolicyCheckStepRunner StepRunner
+ RunStepRunner CustomStepRunner
+ EnvStepRunner EnvStepRunner
+ PullApprovedChecker runtime.PullApprovedChecker
+ WorkingDir WorkingDir
+ Webhooks WebhooksSender
+ WorkingDirLocker WorkingDirLocker
}
// Plan runs terraform plan for the project described by ctx.
@@ -118,6 +140,20 @@ func (p *DefaultProjectCommandRunner) Plan(ctx models.ProjectCommandContext) mod
}
}
+// PolicyCheck evaluates policies defined with Rego for the project described by ctx.
+func (p *DefaultProjectCommandRunner) PolicyCheck(ctx models.ProjectCommandContext) models.ProjectResult {
+ policySuccess, failure, err := p.doPolicyCheck(ctx)
+ return models.ProjectResult{
+ Command: models.PolicyCheckCommand,
+ PolicyCheckSuccess: policySuccess,
+ Error: err,
+ Failure: failure,
+ RepoRelDir: ctx.RepoRelDir,
+ Workspace: ctx.Workspace,
+ ProjectName: ctx.ProjectName,
+ }
+}
+
// Apply runs terraform apply for the project described by ctx.
func (p *DefaultProjectCommandRunner) Apply(ctx models.ProjectCommandContext) models.ProjectResult {
applyOut, failure, err := p.doApply(ctx)
@@ -132,6 +168,100 @@ func (p *DefaultProjectCommandRunner) Apply(ctx models.ProjectCommandContext) mo
}
}
+func (p *DefaultProjectCommandRunner) ApprovePolicies(ctx models.ProjectCommandContext) models.ProjectResult {
+ approvedOut, failure, err := p.doApprovePolicies(ctx)
+ return models.ProjectResult{
+ Command: models.PolicyCheckCommand,
+ Failure: failure,
+ Error: err,
+ PolicyCheckSuccess: approvedOut,
+ RepoRelDir: ctx.RepoRelDir,
+ Workspace: ctx.Workspace,
+ ProjectName: ctx.ProjectName,
+ }
+}
+
+func (p *DefaultProjectCommandRunner) doApprovePolicies(ctx models.ProjectCommandContext) (*models.PolicyCheckSuccess, string, error) {
+
+ // TODO: Make this a bit smarter
+ // without checking some sort of state that the policy check has indeed passed this is likely to cause issues
+
+ return &models.PolicyCheckSuccess{
+ PolicyCheckOutput: "Policies approved",
+ }, "", nil
+}
+
+func (p *DefaultProjectCommandRunner) doPolicyCheck(ctx models.ProjectCommandContext) (*models.PolicyCheckSuccess, string, error) {
+ // Acquire Atlantis lock for this repo/dir/workspace.
+ // This should already be acquired from the prior plan operation.
+ // if for some reason an unlock happens between the plan and policy check step
+ // we will attempt to capture the lock here but fail to get the working directory
+ // at which point we will unlock again to preserve functionality
+ // If we fail to capture the lock here (super unlikely) then we error out and the user is forced to replan
+ lockAttempt, err := p.Locker.TryLock(ctx.Log, ctx.Pull, ctx.User, ctx.Workspace, models.NewProject(ctx.Pull.BaseRepo.FullName, ctx.RepoRelDir))
+
+ if err != nil {
+ return nil, "", errors.Wrap(err, "acquiring lock")
+ }
+ if !lockAttempt.LockAcquired {
+ return nil, lockAttempt.LockFailureReason, nil
+ }
+ ctx.Log.Debug("acquired lock for project")
+
+ // Acquire internal lock for the directory we're going to operate in.
+ // We should refactor this to keep the lock for the duration of plan and policy check since as of now
+ // there is a small gap where we don't have the lock and if we can't get this here, we should just unlock the PR.
+ unlockFn, err := p.WorkingDirLocker.TryLock(ctx.Pull.BaseRepo.FullName, ctx.Pull.Num, ctx.Workspace)
+ if err != nil {
+ return nil, "", err
+ }
+ defer unlockFn()
+
+ // we shouldn't attempt to clone this again. If changes occur to the pull request while the plan is happening
+ // that shouldn't affect this particular operation.
+ repoDir, err := p.WorkingDir.GetWorkingDir(ctx.Pull.BaseRepo, ctx.Pull, ctx.Workspace)
+ if err != nil {
+
+ // let's unlock here since something probably nuked our directory between the plan and policy check phase
+ if unlockErr := lockAttempt.UnlockFn(); unlockErr != nil {
+ ctx.Log.Err("error unlocking state after plan error: %v", unlockErr)
+ }
+
+ if os.IsNotExist(err) {
+ return nil, "", errors.New("project has not been cloned–did you run plan?")
+ }
+ return nil, "", err
+ }
+ absPath := filepath.Join(repoDir, ctx.RepoRelDir)
+ if _, err = os.Stat(absPath); os.IsNotExist(err) {
+
+ // let's unlock here since something probably nuked our directory between the plan and policy check phase
+ if unlockErr := lockAttempt.UnlockFn(); unlockErr != nil {
+ ctx.Log.Err("error unlocking state after plan error: %v", unlockErr)
+ }
+
+ return nil, "", DirNotExistErr{RepoRelDir: ctx.RepoRelDir}
+ }
+
+ outputs, err := p.runSteps(ctx.Steps, ctx, absPath)
+ if err != nil {
+ // Note: we are explicitly not unlocking the pr here since a failing policy check will require
+ // approval
+ return nil, "", fmt.Errorf("%s\n%s", err, strings.Join(outputs, "\n"))
+ }
+
+ return &models.PolicyCheckSuccess{
+ LockURL: p.LockURLGenerator.GenerateLockURL(lockAttempt.LockKey),
+ PolicyCheckOutput: strings.Join(outputs, "\n"),
+ RePlanCmd: ctx.RePlanCmd,
+ ApplyCmd: ctx.ApplyCmd,
+
+ // set this to false right now because we don't have this information
+ // TODO: refactor the templates in a sane way so we don't need this
+ HasDiverged: false,
+ }, "", nil
+}
+
func (p *DefaultProjectCommandRunner) doPlan(ctx models.ProjectCommandContext) (*models.PlanSuccess, string, error) {
// Acquire Atlantis lock for this repo/dir/workspace.
lockAttempt, err := p.Locker.TryLock(ctx.Log, ctx.Pull, ctx.User, ctx.Workspace, models.NewProject(ctx.Pull.BaseRepo.FullName, ctx.RepoRelDir))
@@ -180,39 +310,6 @@ func (p *DefaultProjectCommandRunner) doPlan(ctx models.ProjectCommandContext) (
}, "", nil
}
-func (p *DefaultProjectCommandRunner) runSteps(steps []valid.Step, ctx models.ProjectCommandContext, absPath string) ([]string, error) {
- var outputs []string
- envs := make(map[string]string)
- for _, step := range steps {
- var out string
- var err error
- switch step.StepName {
- case "init":
- out, err = p.InitStepRunner.Run(ctx, step.ExtraArgs, absPath, envs)
- case "plan":
- out, err = p.PlanStepRunner.Run(ctx, step.ExtraArgs, absPath, envs)
- case "apply":
- out, err = p.ApplyStepRunner.Run(ctx, step.ExtraArgs, absPath, envs)
- case "run":
- out, err = p.RunStepRunner.Run(ctx, step.RunCommand, absPath, envs)
- case "env":
- out, err = p.EnvStepRunner.Run(ctx, step.RunCommand, step.EnvVarValue, absPath, envs)
- envs[step.EnvVarName] = out
- // We reset out to the empty string because we don't want it to
- // be printed to the PR, it's solely to set the environment variable.
- out = ""
- }
-
- if out != "" {
- outputs = append(outputs, out)
- }
- if err != nil {
- return outputs, err
- }
- }
- return outputs, nil
-}
-
func (p *DefaultProjectCommandRunner) doApply(ctx models.ProjectCommandContext) (applyOut string, failure string, err error) {
repoDir, err := p.WorkingDir.GetWorkingDir(ctx.Pull.BaseRepo, ctx.Pull, ctx.Workspace)
if err != nil {
@@ -263,3 +360,40 @@ func (p *DefaultProjectCommandRunner) doApply(ctx models.ProjectCommandContext)
}
return strings.Join(outputs, "\n"), "", nil
}
+
+func (p *DefaultProjectCommandRunner) runSteps(steps []valid.Step, ctx models.ProjectCommandContext, absPath string) ([]string, error) {
+ var outputs []string
+ envs := make(map[string]string)
+ for _, step := range steps {
+ var out string
+ var err error
+ switch step.StepName {
+ case "init":
+ out, err = p.InitStepRunner.Run(ctx, step.ExtraArgs, absPath, envs)
+ case "plan":
+ out, err = p.PlanStepRunner.Run(ctx, step.ExtraArgs, absPath, envs)
+ case "show":
+ _, err = p.ShowStepRunner.Run(ctx, step.ExtraArgs, absPath, envs)
+ case "policy_check":
+ out, err = p.PolicyCheckStepRunner.Run(ctx, step.ExtraArgs, absPath, envs)
+ case "apply":
+ out, err = p.ApplyStepRunner.Run(ctx, step.ExtraArgs, absPath, envs)
+ case "run":
+ out, err = p.RunStepRunner.Run(ctx, step.RunCommand, absPath, envs)
+ case "env":
+ out, err = p.EnvStepRunner.Run(ctx, step.RunCommand, step.EnvVarValue, absPath, envs)
+ envs[step.EnvVarName] = out
+ // We reset out to the empty string because we don't want it to
+ // be printed to the PR, it's solely to set the environment variable.
+ out = ""
+ }
+
+ if out != "" {
+ outputs = append(outputs, out)
+ }
+ if err != nil {
+ return outputs, err
+ }
+ }
+ return outputs, nil
+}
diff --git a/server/events/pull_updater.go b/server/events/pull_updater.go
new file mode 100644
index 0000000000..418f91711f
--- /dev/null
+++ b/server/events/pull_updater.go
@@ -0,0 +1,32 @@
+package events
+
+import "github.com/runatlantis/atlantis/server/events/vcs"
+
+type PullUpdater struct {
+ HidePrevPlanComments bool // when true, prior plan comments are hidden (not deleted) before posting
+ VCSClient vcs.Client
+ MarkdownRenderer *MarkdownRenderer
+}
+
+func (c *PullUpdater) updatePull(ctx *CommandContext, command PullCommand, res CommandResult) {
+ // Log if we got any errors or failures.
+ if res.Error != nil {
+ ctx.Log.Err(res.Error.Error())
+ } else if res.Failure != "" {
+ ctx.Log.Warn(res.Failure)
+ }
+
+ // HidePrevPlanComments will hide old comments left from previous plan runs to reduce
+ // clutter in a pull/merge request. This will not delete the comment, since the
+ // comment trail may be useful in auditing or backtracing problems.
+ if c.HidePrevPlanComments {
+ if err := c.VCSClient.HidePrevPlanComments(ctx.Pull.BaseRepo, ctx.Pull.Num); err != nil {
+ ctx.Log.Err("unable to hide old comments: %s", err) // best-effort: still post the new comment below
+ }
+ }
+
+ comment := c.MarkdownRenderer.Render(res, command.CommandName(), ctx.Log.History.String(), command.IsVerbose(), ctx.Pull.BaseRepo.VCSHost.Type)
+ if err := c.VCSClient.CreateComment(ctx.Pull.BaseRepo, ctx.Pull.Num, comment, command.CommandName().String()); err != nil {
+ ctx.Log.Err("unable to comment: %s", err)
+ }
+}
diff --git a/server/events/runtime/apply_step_runner.go b/server/events/runtime/apply_step_runner.go
index 1f8cb44640..a847381ebb 100644
--- a/server/events/runtime/apply_step_runner.go
+++ b/server/events/runtime/apply_step_runner.go
@@ -1,7 +1,6 @@
package runtime
import (
- "bytes"
"fmt"
"io/ioutil"
"os"
@@ -38,7 +37,9 @@ func (a *ApplyStepRunner) Run(ctx models.ProjectCommandContext, extraArgs []stri
ctx.Log.Info("starting apply")
var out string
- if a.isRemotePlan(contents) {
+
+ // TODO: Leverage PlanTypeStepRunnerDelegate here
+ if IsRemotePlan(contents) {
args := append(append([]string{"apply", "-input=false", "-no-color"}, extraArgs...), ctx.EscapedCommentArgs...)
out, err = a.runRemoteApply(ctx, args, path, planPath, ctx.TerraformVersion, envs)
if err == nil {
@@ -61,15 +62,6 @@ func (a *ApplyStepRunner) Run(ctx models.ProjectCommandContext, extraArgs []stri
return out, err
}
-// isRemotePlan returns true if planContents are from a plan that was generated
-// using TFE remote operations.
-func (a *ApplyStepRunner) isRemotePlan(planContents []byte) bool {
- // We add a header to plans generated by the remote backend so we can
- // detect that they're remote in the apply phase.
- remoteOpsHeaderBytes := []byte(remoteOpsHeader)
- return bytes.Equal(planContents[:len(remoteOpsHeaderBytes)], remoteOpsHeaderBytes)
-}
-
func (a *ApplyStepRunner) hasTargetFlag(ctx models.ProjectCommandContext, extraArgs []string) bool {
isTargetFlag := func(s string) bool {
if s == "-target" {
diff --git a/server/events/runtime/cache/mocks/matchers/ptr_to_go_version_version.go b/server/events/runtime/cache/mocks/matchers/ptr_to_go_version_version.go
new file mode 100644
index 0000000000..587598c7ad
--- /dev/null
+++ b/server/events/runtime/cache/mocks/matchers/ptr_to_go_version_version.go
@@ -0,0 +1,20 @@
+// Code generated by pegomock. DO NOT EDIT.
+package matchers
+
+import (
+ "reflect"
+ "github.com/petergtz/pegomock"
+ go_version "github.com/hashicorp/go-version"
+)
+
+func AnyPtrToGoVersionVersion() *go_version.Version {
+ pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(*go_version.Version))(nil)).Elem()))
+ var nullValue *go_version.Version
+ return nullValue
+}
+
+func EqPtrToGoVersionVersion(value *go_version.Version) *go_version.Version {
+ pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value})
+ var nullValue *go_version.Version
+ return nullValue
+}
diff --git a/server/events/runtime/cache/mocks/mock_key_serializer.go b/server/events/runtime/cache/mocks/mock_key_serializer.go
new file mode 100644
index 0000000000..e5a65f169c
--- /dev/null
+++ b/server/events/runtime/cache/mocks/mock_key_serializer.go
@@ -0,0 +1,109 @@
+// Code generated by pegomock. DO NOT EDIT.
+// Source: github.com/runatlantis/atlantis/server/events/runtime/cache (interfaces: KeySerializer)
+
+package mocks
+
+import (
+ go_version "github.com/hashicorp/go-version"
+ pegomock "github.com/petergtz/pegomock"
+ "reflect"
+ "time"
+)
+
+type MockKeySerializer struct {
+ fail func(message string, callerSkip ...int)
+}
+
+func NewMockKeySerializer(options ...pegomock.Option) *MockKeySerializer {
+ mock := &MockKeySerializer{}
+ for _, option := range options {
+ option.Apply(mock)
+ }
+ return mock
+}
+
+func (mock *MockKeySerializer) SetFailHandler(fh pegomock.FailHandler) { mock.fail = fh }
+func (mock *MockKeySerializer) FailHandler() pegomock.FailHandler { return mock.fail }
+
+func (mock *MockKeySerializer) Serialize(key *go_version.Version) (string, error) {
+ if mock == nil {
+ panic("mock must not be nil. Use myMock := NewMockKeySerializer().")
+ }
+ params := []pegomock.Param{key}
+ result := pegomock.GetGenericMockFrom(mock).Invoke("Serialize", params, []reflect.Type{reflect.TypeOf((*string)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()})
+ var ret0 string
+ var ret1 error
+ if len(result) != 0 {
+ if result[0] != nil {
+ ret0 = result[0].(string)
+ }
+ if result[1] != nil {
+ ret1 = result[1].(error)
+ }
+ }
+ return ret0, ret1
+}
+
+func (mock *MockKeySerializer) VerifyWasCalledOnce() *VerifierMockKeySerializer {
+ return &VerifierMockKeySerializer{
+ mock: mock,
+ invocationCountMatcher: pegomock.Times(1),
+ }
+}
+
+func (mock *MockKeySerializer) VerifyWasCalled(invocationCountMatcher pegomock.Matcher) *VerifierMockKeySerializer {
+ return &VerifierMockKeySerializer{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ }
+}
+
+func (mock *MockKeySerializer) VerifyWasCalledInOrder(invocationCountMatcher pegomock.Matcher, inOrderContext *pegomock.InOrderContext) *VerifierMockKeySerializer {
+ return &VerifierMockKeySerializer{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ inOrderContext: inOrderContext,
+ }
+}
+
+func (mock *MockKeySerializer) VerifyWasCalledEventually(invocationCountMatcher pegomock.Matcher, timeout time.Duration) *VerifierMockKeySerializer {
+ return &VerifierMockKeySerializer{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ timeout: timeout,
+ }
+}
+
+type VerifierMockKeySerializer struct {
+ mock *MockKeySerializer
+ invocationCountMatcher pegomock.Matcher
+ inOrderContext *pegomock.InOrderContext
+ timeout time.Duration
+}
+
+func (verifier *VerifierMockKeySerializer) Serialize(key *go_version.Version) *MockKeySerializer_Serialize_OngoingVerification {
+ params := []pegomock.Param{key}
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Serialize", params, verifier.timeout)
+ return &MockKeySerializer_Serialize_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations}
+}
+
+type MockKeySerializer_Serialize_OngoingVerification struct {
+ mock *MockKeySerializer
+ methodInvocations []pegomock.MethodInvocation
+}
+
+func (c *MockKeySerializer_Serialize_OngoingVerification) GetCapturedArguments() *go_version.Version {
+ key := c.GetAllCapturedArguments()
+ return key[len(key)-1]
+}
+
+func (c *MockKeySerializer_Serialize_OngoingVerification) GetAllCapturedArguments() (_param0 []*go_version.Version) {
+ params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations)
+ if len(params) > 0 {
+ _param0 = make([]*go_version.Version, len(c.methodInvocations))
+ for u, param := range params[0] {
+ _param0[u] = param.(*go_version.Version)
+ }
+ }
+ return
+}
diff --git a/server/events/runtime/cache/mocks/mock_version_path.go b/server/events/runtime/cache/mocks/mock_version_path.go
new file mode 100644
index 0000000000..a79c3d9b0c
--- /dev/null
+++ b/server/events/runtime/cache/mocks/mock_version_path.go
@@ -0,0 +1,109 @@
+// Code generated by pegomock. DO NOT EDIT.
+// Source: github.com/runatlantis/atlantis/server/events/runtime/cache (interfaces: ExecutionVersionCache)
+
+package mocks
+
+import (
+ go_version "github.com/hashicorp/go-version"
+ pegomock "github.com/petergtz/pegomock"
+ "reflect"
+ "time"
+)
+
+type MockExecutionVersionCache struct {
+ fail func(message string, callerSkip ...int)
+}
+
+func NewMockExecutionVersionCache(options ...pegomock.Option) *MockExecutionVersionCache {
+ mock := &MockExecutionVersionCache{}
+ for _, option := range options {
+ option.Apply(mock)
+ }
+ return mock
+}
+
+func (mock *MockExecutionVersionCache) SetFailHandler(fh pegomock.FailHandler) { mock.fail = fh }
+func (mock *MockExecutionVersionCache) FailHandler() pegomock.FailHandler { return mock.fail }
+
+func (mock *MockExecutionVersionCache) Get(key *go_version.Version) (string, error) {
+ if mock == nil {
+ panic("mock must not be nil. Use myMock := NewMockExecutionVersionCache().")
+ }
+ params := []pegomock.Param{key}
+ result := pegomock.GetGenericMockFrom(mock).Invoke("Get", params, []reflect.Type{reflect.TypeOf((*string)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()})
+ var ret0 string
+ var ret1 error
+ if len(result) != 0 {
+ if result[0] != nil {
+ ret0 = result[0].(string)
+ }
+ if result[1] != nil {
+ ret1 = result[1].(error)
+ }
+ }
+ return ret0, ret1
+}
+
+func (mock *MockExecutionVersionCache) VerifyWasCalledOnce() *VerifierMockExecutionVersionCache {
+ return &VerifierMockExecutionVersionCache{
+ mock: mock,
+ invocationCountMatcher: pegomock.Times(1),
+ }
+}
+
+func (mock *MockExecutionVersionCache) VerifyWasCalled(invocationCountMatcher pegomock.Matcher) *VerifierMockExecutionVersionCache {
+ return &VerifierMockExecutionVersionCache{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ }
+}
+
+func (mock *MockExecutionVersionCache) VerifyWasCalledInOrder(invocationCountMatcher pegomock.Matcher, inOrderContext *pegomock.InOrderContext) *VerifierMockExecutionVersionCache {
+ return &VerifierMockExecutionVersionCache{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ inOrderContext: inOrderContext,
+ }
+}
+
+func (mock *MockExecutionVersionCache) VerifyWasCalledEventually(invocationCountMatcher pegomock.Matcher, timeout time.Duration) *VerifierMockExecutionVersionCache {
+ return &VerifierMockExecutionVersionCache{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ timeout: timeout,
+ }
+}
+
+type VerifierMockExecutionVersionCache struct {
+ mock *MockExecutionVersionCache
+ invocationCountMatcher pegomock.Matcher
+ inOrderContext *pegomock.InOrderContext
+ timeout time.Duration
+}
+
+func (verifier *VerifierMockExecutionVersionCache) Get(key *go_version.Version) *MockExecutionVersionCache_Get_OngoingVerification {
+ params := []pegomock.Param{key}
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Get", params, verifier.timeout)
+ return &MockExecutionVersionCache_Get_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations}
+}
+
+type MockExecutionVersionCache_Get_OngoingVerification struct {
+ mock *MockExecutionVersionCache
+ methodInvocations []pegomock.MethodInvocation
+}
+
+func (c *MockExecutionVersionCache_Get_OngoingVerification) GetCapturedArguments() *go_version.Version {
+ key := c.GetAllCapturedArguments()
+ return key[len(key)-1]
+}
+
+func (c *MockExecutionVersionCache_Get_OngoingVerification) GetAllCapturedArguments() (_param0 []*go_version.Version) {
+ params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations)
+ if len(params) > 0 {
+ _param0 = make([]*go_version.Version, len(c.methodInvocations))
+ for u, param := range params[0] {
+ _param0[u] = param.(*go_version.Version)
+ }
+ }
+ return
+}
diff --git a/server/events/runtime/cache/version_path.go b/server/events/runtime/cache/version_path.go
new file mode 100644
index 0000000000..56c3f21a44
--- /dev/null
+++ b/server/events/runtime/cache/version_path.go
@@ -0,0 +1,132 @@
+package cache
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/go-version"
+ "github.com/pkg/errors"
+ "github.com/runatlantis/atlantis/server/events/runtime/models"
+)
+
+//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_version_path.go ExecutionVersionCache
+//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_key_serializer.go KeySerializer
+
+type ExecutionVersionCache interface {
+ Get(key *version.Version) (string, error)
+}
+
+type KeySerializer interface {
+ Serialize(key *version.Version) (string, error)
+}
+
+type DefaultDiskLookupKeySerializer struct {
+ binaryName string
+}
+
+func (s *DefaultDiskLookupKeySerializer) Serialize(key *version.Version) (string, error) {
+ return fmt.Sprintf("%s%s", s.binaryName, key.Original()), nil
+}
+
+// ExecutionVersionDiskLayer is a cache layer which attempts to find the the version on disk,
+// before calling the configured loading function.
+type ExecutionVersionDiskLayer struct {
+ versionRootDir models.FilePath
+ exec models.Exec
+ keySerializer KeySerializer
+ loader func(v *version.Version, destPath string) (models.FilePath, error)
+ binaryName string
+}
+
+// Gets a path from cache
+func (v *ExecutionVersionDiskLayer) Get(key *version.Version) (string, error) {
+ binaryVersion, err := v.keySerializer.Serialize(key)
+
+ if err != nil {
+ return "", errors.Wrapf(err, "serializing key for disk lookup")
+ }
+
+ // first check for the binary in our path
+ path, err := v.exec.LookPath(binaryVersion)
+
+ if err == nil {
+ return path, nil
+ }
+
+ // if the binary is not in our path, let's look in the version root directory
+ binaryPath := v.versionRootDir.Join(binaryVersion)
+
+ // if the binary doesn't exist there, we need to load it.
+ if binaryPath.NotExists() {
+
+ // load it into a directory first and then sym link it to the serialized key aka binary version
+ loaderPath := v.versionRootDir.Join(v.binaryName, "versions", key.Original())
+
+ loadedBinary, err := v.loader(key, loaderPath.Resolve())
+
+ if err != nil {
+ return "", errors.Wrapf(err, "loading %s", loaderPath)
+ }
+
+ binaryPath, err = loadedBinary.Symlink(binaryPath.Resolve())
+
+ if err != nil {
+ return "", errors.Wrapf(err, "linking %s to %s", loaderPath, loadedBinary) // NOTE(review): message args look off — the failed link was loadedBinary -> binaryPath, not loaderPath; confirm intended wording
+ }
+ }
+
+ return binaryPath.Resolve(), nil
+}
+
+// ExecutionVersionMemoryLayer is an in-memory cache which delegates to a disk layer
+// if a version's path doesn't exist yet.
+type ExecutionVersionMemoryLayer struct {
+ // RWMutex allows us to have separation between reader locks/writer locks which is great
+ // since writing of data shouldn't happen too often
+ lock sync.RWMutex
+ diskLayer ExecutionVersionCache
+ cache map[string]string
+}
+
+func (v *ExecutionVersionMemoryLayer) Get(key *version.Version) (string, error) {
+
+ // If we need to we can rip this out into a KeySerializer impl, for now this
+ // seems overkill
+ serializedKey := key.String()
+
+ v.lock.RLock()
+ _, ok := v.cache[serializedKey]
+ v.lock.RUnlock()
+
+ if !ok {
+ v.lock.Lock()
+ defer v.lock.Unlock()
+ value, err := v.diskLayer.Get(key) // NOTE(review): cache is not re-checked after upgrading to the write lock, so concurrent misses may each hit the disk layer; redundant but not incorrect
+
+ if err != nil {
+ return "", errors.Wrapf(err, "fetching %s from cache", serializedKey)
+ }
+ v.cache[serializedKey] = value
+ }
+ return v.cache[serializedKey], nil
+}
+
+func NewExecutionVersionLayeredLoadingCache(
+ binaryName string,
+ versionRootDir string,
+ loader func(v *version.Version, destPath string) (models.FilePath, error),
+) ExecutionVersionCache {
+
+ diskLayer := &ExecutionVersionDiskLayer{
+ exec: models.LocalExec{},
+ versionRootDir: models.LocalFilePath(versionRootDir),
+ keySerializer: &DefaultDiskLookupKeySerializer{binaryName: binaryName},
+ loader: loader,
+ binaryName: binaryName,
+ }
+
+ return &ExecutionVersionMemoryLayer{
+ diskLayer: diskLayer,
+ cache: make(map[string]string),
+ }
+}
diff --git a/server/events/runtime/cache/version_path_test.go b/server/events/runtime/cache/version_path_test.go
new file mode 100644
index 0000000000..a5f5e08b8b
--- /dev/null
+++ b/server/events/runtime/cache/version_path_test.go
@@ -0,0 +1,243 @@
+package cache
+
+import (
+ "errors"
+ "path/filepath"
+ "testing"
+
+ "github.com/hashicorp/go-version"
+ . "github.com/petergtz/pegomock"
+ cache_mocks "github.com/runatlantis/atlantis/server/events/runtime/cache/mocks"
+ "github.com/runatlantis/atlantis/server/events/runtime/models"
+ models_mocks "github.com/runatlantis/atlantis/server/events/runtime/models/mocks"
+ . "github.com/runatlantis/atlantis/testing"
+)
+
+func TestExecutionVersionDiskLayer(t *testing.T) {
+
+ binaryVersion := "bin1.0"
+ binaryName := "bin"
+
+ expectedPath := "some/path/bin1.0"
+ versionInput, _ := version.NewVersion("1.0")
+
+ RegisterMockTestingT(t)
+
+ mockFilePath := models_mocks.NewMockFilePath()
+ mockExec := models_mocks.NewMockExec()
+ mockSerializer := cache_mocks.NewMockKeySerializer()
+
+ t.Run("serializer error", func(t *testing.T) {
+ subject := &ExecutionVersionDiskLayer{
+ versionRootDir: mockFilePath,
+ exec: mockExec,
+ loader: func(v *version.Version, destPath string) (models.FilePath, error) {
+ if destPath == expectedPath && v == versionInput {
+ return models.LocalFilePath(filepath.Join(destPath, "bin")), nil
+ }
+
+ t.Fatalf("unexpected inputs to loader")
+
+ return models.LocalFilePath(""), nil
+ },
+ keySerializer: mockSerializer,
+ }
+
+ When(mockSerializer.Serialize(versionInput)).ThenReturn("", errors.New("serializer error"))
+ When(mockExec.LookPath(binaryVersion)).ThenReturn(expectedPath, nil)
+
+ _, err := subject.Get(versionInput)
+
+ Assert(t, err != nil, "err is expected")
+
+ mockFilePath.VerifyWasCalled(Never()).Join(AnyString())
+ mockFilePath.VerifyWasCalled(Never()).NotExists()
+ mockFilePath.VerifyWasCalled(Never()).Resolve()
+ mockExec.VerifyWasCalled(Never()).LookPath(AnyString())
+ })
+
+ t.Run("finds in path", func(t *testing.T) {
+ subject := &ExecutionVersionDiskLayer{
+ versionRootDir: mockFilePath,
+ exec: mockExec,
+ loader: func(v *version.Version, destPath string) (models.FilePath, error) {
+ t.Fatalf("shouldn't be called")
+
+ return models.LocalFilePath(""), nil
+ },
+ keySerializer: mockSerializer,
+ }
+
+ When(mockSerializer.Serialize(versionInput)).ThenReturn(binaryVersion, nil)
+ When(mockExec.LookPath(binaryVersion)).ThenReturn(expectedPath, nil)
+
+ resultPath, err := subject.Get(versionInput)
+
+ Ok(t, err)
+
+ Assert(t, resultPath == expectedPath, "path is expected")
+
+ mockFilePath.VerifyWasCalled(Never()).Join(AnyString())
+ mockFilePath.VerifyWasCalled(Never()).Resolve()
+ mockFilePath.VerifyWasCalled(Never()).NotExists()
+ })
+
+ t.Run("finds in version root", func(t *testing.T) {
+ subject := &ExecutionVersionDiskLayer{
+ versionRootDir: mockFilePath,
+ exec: mockExec,
+ loader: func(v *version.Version, destPath string) (models.FilePath, error) {
+
+ t.Fatalf("shouldn't be called")
+
+ return models.LocalFilePath(""), nil
+ },
+ keySerializer: mockSerializer,
+ }
+
+ When(mockSerializer.Serialize(versionInput)).ThenReturn(binaryVersion, nil)
+ When(mockExec.LookPath(binaryVersion)).ThenReturn("", errors.New("error"))
+
+ When(mockFilePath.Join(binaryVersion)).ThenReturn(mockFilePath)
+
+ When(mockFilePath.NotExists()).ThenReturn(false)
+ When(mockFilePath.Resolve()).ThenReturn(expectedPath)
+
+ resultPath, err := subject.Get(versionInput)
+
+ Ok(t, err)
+
+ Assert(t, resultPath == expectedPath, "path is expected")
+ })
+
+ t.Run("loads version", func(t *testing.T) {
+ mockLoaderPath := models_mocks.NewMockFilePath()
+ mockSymlinkPath := models_mocks.NewMockFilePath()
+ mockLoadedBinaryPath := models_mocks.NewMockFilePath()
+ expectedLoaderPath := "/some/path/to/binary"
+ expectedBinaryVersionPath := filepath.Join(expectedPath, binaryVersion)
+
+ subject := &ExecutionVersionDiskLayer{
+ versionRootDir: mockFilePath,
+ exec: mockExec,
+ loader: func(v *version.Version, destPath string) (models.FilePath, error) {
+
+ if destPath == expectedLoaderPath && v == versionInput {
+ return mockLoadedBinaryPath, nil
+ }
+
+ t.Fatalf("unexpected inputs to loader")
+
+ return models.LocalFilePath(""), nil
+ },
+ binaryName: binaryName,
+ keySerializer: mockSerializer,
+ }
+
+ When(mockSerializer.Serialize(versionInput)).ThenReturn(binaryVersion, nil)
+ When(mockExec.LookPath(binaryVersion)).ThenReturn("", errors.New("error"))
+
+ When(mockFilePath.Join(binaryVersion)).ThenReturn(mockFilePath)
+ When(mockFilePath.Resolve()).ThenReturn(expectedBinaryVersionPath)
+
+ When(mockFilePath.NotExists()).ThenReturn(true)
+
+ When(mockFilePath.Join(binaryName, "versions", versionInput.Original())).ThenReturn(mockLoaderPath)
+
+ When(mockLoaderPath.Resolve()).ThenReturn(expectedLoaderPath)
+ When(mockLoadedBinaryPath.Symlink(expectedBinaryVersionPath)).ThenReturn(mockSymlinkPath, nil)
+
+ When(mockSymlinkPath.Resolve()).ThenReturn(expectedPath)
+
+ resultPath, err := subject.Get(versionInput)
+
+ Ok(t, err)
+
+ Assert(t, resultPath == expectedPath, "path is expected")
+ })
+
+ t.Run("loader error", func(t *testing.T) {
+ mockLoaderPath := models_mocks.NewMockFilePath()
+ expectedLoaderPath := "/some/path/to/binary"
+ subject := &ExecutionVersionDiskLayer{
+ versionRootDir: mockFilePath,
+ exec: mockExec,
+ loader: func(v *version.Version, destPath string) (models.FilePath, error) {
+
+ if destPath == expectedLoaderPath && v == versionInput {
+ return models.LocalFilePath(""), errors.New("error")
+ }
+
+ t.Fatalf("unexpected inputs to loader")
+
+ return models.LocalFilePath(""), nil
+ },
+ keySerializer: mockSerializer,
+ binaryName: binaryName,
+ }
+
+ When(mockSerializer.Serialize(versionInput)).ThenReturn(binaryVersion, nil)
+ When(mockExec.LookPath(binaryVersion)).ThenReturn("", errors.New("error"))
+
+ When(mockFilePath.Join(binaryVersion)).ThenReturn(mockFilePath)
+
+ When(mockFilePath.NotExists()).ThenReturn(true)
+
+ When(mockFilePath.Join(binaryName, "versions", versionInput.Original())).ThenReturn(mockLoaderPath)
+
+ When(mockLoaderPath.Resolve()).ThenReturn(expectedLoaderPath)
+
+ _, err := subject.Get(versionInput)
+
+ Assert(t, err != nil, "path is expected")
+ })
+}
+
+func TestExecutionVersionMemoryLayer(t *testing.T) {
+ expectedPath := "some/path"
+ versionInput, _ := version.NewVersion("1.0")
+
+ RegisterMockTestingT(t)
+
+ mockLayer := cache_mocks.NewMockExecutionVersionCache()
+
+ cache := make(map[string]string)
+
+ subject := &ExecutionVersionMemoryLayer{
+ diskLayer: mockLayer,
+ cache: cache,
+ }
+
+ t.Run("exists in cache", func(t *testing.T) {
+ cache[versionInput.String()] = expectedPath
+
+ resultPath, err := subject.Get(versionInput)
+
+ Ok(t, err)
+
+ Assert(t, resultPath == expectedPath, "path is expected")
+ })
+
+ t.Run("disk layer error", func(t *testing.T) {
+ delete(cache, versionInput.String())
+
+ When(mockLayer.Get(versionInput)).ThenReturn("", errors.New("error"))
+
+ _, err := subject.Get(versionInput)
+
+ Assert(t, err != nil, "error is expected")
+ })
+
+ t.Run("disk layer success", func(t *testing.T) {
+ delete(cache, versionInput.String())
+
+ When(mockLayer.Get(versionInput)).ThenReturn(expectedPath, nil)
+
+ resultPath, err := subject.Get(versionInput)
+
+ Ok(t, err)
+
+ Assert(t, resultPath == expectedPath, "path is expected")
+ Assert(t, cache[versionInput.String()] == resultPath, "path is cached")
+ })
+}
diff --git a/server/events/runtime/executor.go b/server/events/runtime/executor.go
new file mode 100644
index 0000000000..80cf3f0961
--- /dev/null
+++ b/server/events/runtime/executor.go
@@ -0,0 +1,25 @@
+package runtime
+
+import (
+ version "github.com/hashicorp/go-version"
+ "github.com/runatlantis/atlantis/server/events/models"
+ "github.com/runatlantis/atlantis/server/logging"
+)
+
+//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_versionedexecutorworkflow.go VersionedExecutorWorkflow
+
+// VersionedExecutorWorkflow defines a versioned execution for a given project context
+type VersionedExecutorWorkflow interface {
+ ExecutorVersionEnsurer
+ Executor
+}
+
+// Executor runs an executable with provided environment variables and arguments and returns its output
+type Executor interface {
+ Run(ctx models.ProjectCommandContext, executablePath string, envs map[string]string, workdir string) (string, error)
+}
+
+// ExecutorVersionEnsurer ensures a given version exists and outputs a path to the executable
+type ExecutorVersionEnsurer interface {
+ EnsureExecutorVersion(log *logging.SimpleLogger, v *version.Version) (string, error)
+}
diff --git a/server/events/runtime/minimum_version_step_runner_delegate.go b/server/events/runtime/minimum_version_step_runner_delegate.go
new file mode 100644
index 0000000000..0ec8acf156
--- /dev/null
+++ b/server/events/runtime/minimum_version_step_runner_delegate.go
@@ -0,0 +1,44 @@
+package runtime
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/go-version"
+ "github.com/pkg/errors"
+ "github.com/runatlantis/atlantis/server/events/models"
+)
+
+// MinimumVersionStepRunnerDelegate ensures that a given step runner can't run unless the command version being used
+// is greater than or equal to a provided minimum
+type MinimumVersionStepRunnerDelegate struct {
+ minimumVersion *version.Version
+ defaultTfVersion *version.Version
+ delegate Runner
+}
+
+func NewMinimumVersionStepRunnerDelegate(minimumVersionStr string, defaultVersion *version.Version, delegate Runner) (Runner, error) {
+ minimumVersion, err := version.NewVersion(minimumVersionStr)
+
+ if err != nil {
+ return &MinimumVersionStepRunnerDelegate{}, errors.Wrap(err, "initializing minimum version")
+ }
+
+ return &MinimumVersionStepRunnerDelegate{
+ minimumVersion: minimumVersion,
+ defaultTfVersion: defaultVersion,
+ delegate: delegate,
+ }, nil
+}
+
+func (r *MinimumVersionStepRunnerDelegate) Run(ctx models.ProjectCommandContext, extraArgs []string, path string, envs map[string]string) (string, error) {
+ tfVersion := r.defaultTfVersion
+ if ctx.TerraformVersion != nil {
+ tfVersion = ctx.TerraformVersion
+ }
+
+ if tfVersion.LessThan(r.minimumVersion) {
+ return fmt.Sprintf("Version: %s is unsupported for this step. Minimum version is: %s", tfVersion.String(), r.minimumVersion.String()), nil
+ }
+
+ return r.delegate.Run(ctx, extraArgs, path, envs)
+}
diff --git a/server/events/runtime/minimum_version_step_runner_delegate_test.go b/server/events/runtime/minimum_version_step_runner_delegate_test.go
new file mode 100644
index 0000000000..423052298b
--- /dev/null
+++ b/server/events/runtime/minimum_version_step_runner_delegate_test.go
@@ -0,0 +1,120 @@
+package runtime
+
+import (
+ "testing"
+
+ "github.com/hashicorp/go-version"
+ . "github.com/petergtz/pegomock"
+ "github.com/runatlantis/atlantis/server/events/models"
+ "github.com/runatlantis/atlantis/server/events/runtime/mocks"
+ . "github.com/runatlantis/atlantis/testing"
+)
+
+func TestRunMinimumVersionDelegate(t *testing.T) {
+ RegisterMockTestingT(t)
+
+ mockDelegate := mocks.NewMockRunner()
+
+ tfVersion12, _ := version.NewVersion("0.12.0")
+ tfVersion11, _ := version.NewVersion("0.11.14")
+
+ // these stay the same for all tests
+ extraArgs := []string{"extra", "args"}
+ envs := map[string]string{}
+ path := ""
+
+ expectedOut := "some valid output from delegate"
+
+ t.Run("default version success", func(t *testing.T) {
+ subject := &MinimumVersionStepRunnerDelegate{
+ defaultTfVersion: tfVersion12,
+ minimumVersion: tfVersion12,
+ delegate: mockDelegate,
+ }
+
+ ctx := models.ProjectCommandContext{}
+
+ When(mockDelegate.Run(ctx, extraArgs, path, envs)).ThenReturn(expectedOut, nil)
+
+ output, err := subject.Run(
+ ctx,
+ extraArgs,
+ path,
+ envs,
+ )
+
+ Equals(t, expectedOut, output)
+ Ok(t, err)
+ })
+
+ t.Run("ctx version success", func(t *testing.T) {
+ subject := &MinimumVersionStepRunnerDelegate{
+ defaultTfVersion: tfVersion11,
+ minimumVersion: tfVersion12,
+ delegate: mockDelegate,
+ }
+
+ ctx := models.ProjectCommandContext{
+ TerraformVersion: tfVersion12,
+ }
+
+ When(mockDelegate.Run(ctx, extraArgs, path, envs)).ThenReturn(expectedOut, nil)
+
+ output, err := subject.Run(
+ ctx,
+ extraArgs,
+ path,
+ envs,
+ )
+
+ Equals(t, expectedOut, output)
+ Ok(t, err)
+ })
+
+ t.Run("default version failure", func(t *testing.T) {
+ subject := &MinimumVersionStepRunnerDelegate{
+ defaultTfVersion: tfVersion11,
+ minimumVersion: tfVersion12,
+ delegate: mockDelegate,
+ }
+
+ ctx := models.ProjectCommandContext{}
+
+ output, err := subject.Run(
+ ctx,
+ extraArgs,
+ path,
+ envs,
+ )
+
+ mockDelegate.VerifyWasCalled(Never())
+
+ Equals(t, "Version: 0.11.14 is unsupported for this step. Minimum version is: 0.12.0", output)
+ Ok(t, err)
+ })
+
+ t.Run("ctx version failure", func(t *testing.T) {
+ subject := &MinimumVersionStepRunnerDelegate{
+ defaultTfVersion: tfVersion12,
+ minimumVersion: tfVersion12,
+ delegate: mockDelegate,
+ }
+
+ ctx := models.ProjectCommandContext{
+ TerraformVersion: tfVersion11,
+ }
+
+ output, err := subject.Run(
+ ctx,
+ extraArgs,
+ path,
+ envs,
+ )
+
+ mockDelegate.VerifyWasCalled(Never())
+
+ Equals(t, "Version: 0.11.14 is unsupported for this step. Minimum version is: 0.12.0", output)
+ Ok(t, err)
+ })
+
+}
diff --git a/server/events/runtime/mocks/matchers/map_of_string_to_string.go b/server/events/runtime/mocks/matchers/map_of_string_to_string.go
new file mode 100644
index 0000000000..65175de1a1
--- /dev/null
+++ b/server/events/runtime/mocks/matchers/map_of_string_to_string.go
@@ -0,0 +1,31 @@
+// Code generated by pegomock. DO NOT EDIT.
+package matchers
+
+import (
+ "github.com/petergtz/pegomock"
+ "reflect"
+)
+
+func AnyMapOfStringToString() map[string]string {
+ pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(map[string]string))(nil)).Elem()))
+ var nullValue map[string]string
+ return nullValue
+}
+
+func EqMapOfStringToString(value map[string]string) map[string]string {
+ pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value})
+ var nullValue map[string]string
+ return nullValue
+}
+
+func NotEqMapOfStringToString(value map[string]string) map[string]string {
+ pegomock.RegisterMatcher(&pegomock.NotEqMatcher{Value: value})
+ var nullValue map[string]string
+ return nullValue
+}
+
+func MapOfStringToStringThat(matcher pegomock.ArgumentMatcher) map[string]string {
+ pegomock.RegisterMatcher(matcher)
+ var nullValue map[string]string
+ return nullValue
+}
diff --git a/server/events/runtime/mocks/matchers/models_projectcommandcontext.go b/server/events/runtime/mocks/matchers/models_projectcommandcontext.go
new file mode 100644
index 0000000000..535f8b9671
--- /dev/null
+++ b/server/events/runtime/mocks/matchers/models_projectcommandcontext.go
@@ -0,0 +1,33 @@
+// Code generated by pegomock. DO NOT EDIT.
+package matchers
+
+import (
+ "github.com/petergtz/pegomock"
+ "reflect"
+
+ models "github.com/runatlantis/atlantis/server/events/models"
+)
+
+func AnyModelsProjectCommandContext() models.ProjectCommandContext {
+ pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(models.ProjectCommandContext))(nil)).Elem()))
+ var nullValue models.ProjectCommandContext
+ return nullValue
+}
+
+func EqModelsProjectCommandContext(value models.ProjectCommandContext) models.ProjectCommandContext {
+ pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value})
+ var nullValue models.ProjectCommandContext
+ return nullValue
+}
+
+func NotEqModelsProjectCommandContext(value models.ProjectCommandContext) models.ProjectCommandContext {
+ pegomock.RegisterMatcher(&pegomock.NotEqMatcher{Value: value})
+ var nullValue models.ProjectCommandContext
+ return nullValue
+}
+
+func ModelsProjectCommandContextThat(matcher pegomock.ArgumentMatcher) models.ProjectCommandContext {
+ pegomock.RegisterMatcher(matcher)
+ var nullValue models.ProjectCommandContext
+ return nullValue
+}
diff --git a/server/events/runtime/mocks/matchers/ptr_to_go_version_version.go b/server/events/runtime/mocks/matchers/ptr_to_go_version_version.go
new file mode 100644
index 0000000000..587598c7ad
--- /dev/null
+++ b/server/events/runtime/mocks/matchers/ptr_to_go_version_version.go
@@ -0,0 +1,20 @@
+// Code generated by pegomock. DO NOT EDIT.
+package matchers
+
+import (
+ "reflect"
+ "github.com/petergtz/pegomock"
+ go_version "github.com/hashicorp/go-version"
+)
+
+func AnyPtrToGoVersionVersion() *go_version.Version {
+ pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(*go_version.Version))(nil)).Elem()))
+ var nullValue *go_version.Version
+ return nullValue
+}
+
+func EqPtrToGoVersionVersion(value *go_version.Version) *go_version.Version {
+ pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value})
+ var nullValue *go_version.Version
+ return nullValue
+}
diff --git a/server/events/runtime/mocks/matchers/ptr_to_logging_simplelogger.go b/server/events/runtime/mocks/matchers/ptr_to_logging_simplelogger.go
new file mode 100644
index 0000000000..04c72791bc
--- /dev/null
+++ b/server/events/runtime/mocks/matchers/ptr_to_logging_simplelogger.go
@@ -0,0 +1,20 @@
+// Code generated by pegomock. DO NOT EDIT.
+package matchers
+
+import (
+ "reflect"
+ "github.com/petergtz/pegomock"
+ logging "github.com/runatlantis/atlantis/server/logging"
+)
+
+func AnyPtrToLoggingSimpleLogger() *logging.SimpleLogger {
+ pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(*logging.SimpleLogger))(nil)).Elem()))
+ var nullValue *logging.SimpleLogger
+ return nullValue
+}
+
+func EqPtrToLoggingSimpleLogger(value *logging.SimpleLogger) *logging.SimpleLogger {
+ pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value})
+ var nullValue *logging.SimpleLogger
+ return nullValue
+}
diff --git a/server/events/runtime/mocks/matchers/slice_of_string.go b/server/events/runtime/mocks/matchers/slice_of_string.go
new file mode 100644
index 0000000000..f9281819dd
--- /dev/null
+++ b/server/events/runtime/mocks/matchers/slice_of_string.go
@@ -0,0 +1,31 @@
+// Code generated by pegomock. DO NOT EDIT.
+package matchers
+
+import (
+ "github.com/petergtz/pegomock"
+ "reflect"
+)
+
+func AnySliceOfString() []string {
+ pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*([]string))(nil)).Elem()))
+ var nullValue []string
+ return nullValue
+}
+
+func EqSliceOfString(value []string) []string {
+ pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value})
+ var nullValue []string
+ return nullValue
+}
+
+func NotEqSliceOfString(value []string) []string {
+ pegomock.RegisterMatcher(&pegomock.NotEqMatcher{Value: value})
+ var nullValue []string
+ return nullValue
+}
+
+func SliceOfStringThat(matcher pegomock.ArgumentMatcher) []string {
+ pegomock.RegisterMatcher(matcher)
+ var nullValue []string
+ return nullValue
+}
diff --git a/server/events/runtime/mocks/mock_runner.go b/server/events/runtime/mocks/mock_runner.go
new file mode 100644
index 0000000000..5a3965b5d6
--- /dev/null
+++ b/server/events/runtime/mocks/mock_runner.go
@@ -0,0 +1,121 @@
+// Code generated by pegomock. DO NOT EDIT.
+// Source: github.com/runatlantis/atlantis/server/events/runtime (interfaces: Runner)
+
+package mocks
+
+import (
+ pegomock "github.com/petergtz/pegomock"
+ models "github.com/runatlantis/atlantis/server/events/models"
+ "reflect"
+ "time"
+)
+
+type MockRunner struct {
+ fail func(message string, callerSkip ...int)
+}
+
+func NewMockRunner(options ...pegomock.Option) *MockRunner {
+ mock := &MockRunner{}
+ for _, option := range options {
+ option.Apply(mock)
+ }
+ return mock
+}
+
+func (mock *MockRunner) SetFailHandler(fh pegomock.FailHandler) { mock.fail = fh }
+func (mock *MockRunner) FailHandler() pegomock.FailHandler { return mock.fail }
+
+func (mock *MockRunner) Run(ctx models.ProjectCommandContext, extraArgs []string, path string, envs map[string]string) (string, error) {
+ if mock == nil {
+ panic("mock must not be nil. Use myMock := NewMockRunner().")
+ }
+ params := []pegomock.Param{ctx, extraArgs, path, envs}
+ result := pegomock.GetGenericMockFrom(mock).Invoke("Run", params, []reflect.Type{reflect.TypeOf((*string)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()})
+ var ret0 string
+ var ret1 error
+ if len(result) != 0 {
+ if result[0] != nil {
+ ret0 = result[0].(string)
+ }
+ if result[1] != nil {
+ ret1 = result[1].(error)
+ }
+ }
+ return ret0, ret1
+}
+
+func (mock *MockRunner) VerifyWasCalledOnce() *VerifierMockRunner {
+ return &VerifierMockRunner{
+ mock: mock,
+ invocationCountMatcher: pegomock.Times(1),
+ }
+}
+
+func (mock *MockRunner) VerifyWasCalled(invocationCountMatcher pegomock.InvocationCountMatcher) *VerifierMockRunner {
+ return &VerifierMockRunner{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ }
+}
+
+func (mock *MockRunner) VerifyWasCalledInOrder(invocationCountMatcher pegomock.InvocationCountMatcher, inOrderContext *pegomock.InOrderContext) *VerifierMockRunner {
+ return &VerifierMockRunner{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ inOrderContext: inOrderContext,
+ }
+}
+
+func (mock *MockRunner) VerifyWasCalledEventually(invocationCountMatcher pegomock.InvocationCountMatcher, timeout time.Duration) *VerifierMockRunner {
+ return &VerifierMockRunner{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ timeout: timeout,
+ }
+}
+
+type VerifierMockRunner struct {
+ mock *MockRunner
+ invocationCountMatcher pegomock.InvocationCountMatcher
+ inOrderContext *pegomock.InOrderContext
+ timeout time.Duration
+}
+
+func (verifier *VerifierMockRunner) Run(ctx models.ProjectCommandContext, extraArgs []string, path string, envs map[string]string) *MockRunner_Run_OngoingVerification {
+ params := []pegomock.Param{ctx, extraArgs, path, envs}
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Run", params, verifier.timeout)
+ return &MockRunner_Run_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations}
+}
+
+type MockRunner_Run_OngoingVerification struct {
+ mock *MockRunner
+ methodInvocations []pegomock.MethodInvocation
+}
+
+func (c *MockRunner_Run_OngoingVerification) GetCapturedArguments() (models.ProjectCommandContext, []string, string, map[string]string) {
+ ctx, extraArgs, path, envs := c.GetAllCapturedArguments()
+ return ctx[len(ctx)-1], extraArgs[len(extraArgs)-1], path[len(path)-1], envs[len(envs)-1]
+}
+
+func (c *MockRunner_Run_OngoingVerification) GetAllCapturedArguments() (_param0 []models.ProjectCommandContext, _param1 [][]string, _param2 []string, _param3 []map[string]string) {
+ params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations)
+ if len(params) > 0 {
+ _param0 = make([]models.ProjectCommandContext, len(c.methodInvocations))
+ for u, param := range params[0] {
+ _param0[u] = param.(models.ProjectCommandContext)
+ }
+ _param1 = make([][]string, len(c.methodInvocations))
+ for u, param := range params[1] {
+ _param1[u] = param.([]string)
+ }
+ _param2 = make([]string, len(c.methodInvocations))
+ for u, param := range params[2] {
+ _param2[u] = param.(string)
+ }
+ _param3 = make([]map[string]string, len(c.methodInvocations))
+ for u, param := range params[3] {
+ _param3[u] = param.(map[string]string)
+ }
+ }
+ return
+}
diff --git a/server/events/runtime/mocks/mock_versionedexecutorworkflow.go b/server/events/runtime/mocks/mock_versionedexecutorworkflow.go
new file mode 100644
index 0000000000..f566d289ec
--- /dev/null
+++ b/server/events/runtime/mocks/mock_versionedexecutorworkflow.go
@@ -0,0 +1,173 @@
+// Code generated by pegomock. DO NOT EDIT.
+// Source: github.com/runatlantis/atlantis/server/events/runtime (interfaces: VersionedExecutorWorkflow)
+
+package mocks
+
+import (
+ go_version "github.com/hashicorp/go-version"
+ pegomock "github.com/petergtz/pegomock"
+ models "github.com/runatlantis/atlantis/server/events/models"
+ logging "github.com/runatlantis/atlantis/server/logging"
+ "reflect"
+ "time"
+)
+
+type MockVersionedExecutorWorkflow struct {
+ fail func(message string, callerSkip ...int)
+}
+
+func NewMockVersionedExecutorWorkflow(options ...pegomock.Option) *MockVersionedExecutorWorkflow {
+ mock := &MockVersionedExecutorWorkflow{}
+ for _, option := range options {
+ option.Apply(mock)
+ }
+ return mock
+}
+
+func (mock *MockVersionedExecutorWorkflow) SetFailHandler(fh pegomock.FailHandler) { mock.fail = fh }
+func (mock *MockVersionedExecutorWorkflow) FailHandler() pegomock.FailHandler { return mock.fail }
+
+func (mock *MockVersionedExecutorWorkflow) EnsureExecutorVersion(log *logging.SimpleLogger, v *go_version.Version) (string, error) {
+ if mock == nil {
+ panic("mock must not be nil. Use myMock := NewMockVersionedExecutorWorkflow().")
+ }
+ params := []pegomock.Param{log, v}
+ result := pegomock.GetGenericMockFrom(mock).Invoke("EnsureExecutorVersion", params, []reflect.Type{reflect.TypeOf((*string)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()})
+ var ret0 string
+ var ret1 error
+ if len(result) != 0 {
+ if result[0] != nil {
+ ret0 = result[0].(string)
+ }
+ if result[1] != nil {
+ ret1 = result[1].(error)
+ }
+ }
+ return ret0, ret1
+}
+
+func (mock *MockVersionedExecutorWorkflow) Run(ctx models.ProjectCommandContext, executablePath string, envs map[string]string, workdir string) (string, error) {
+ if mock == nil {
+ panic("mock must not be nil. Use myMock := NewMockVersionedExecutorWorkflow().")
+ }
+ params := []pegomock.Param{ctx, executablePath, envs, workdir}
+ result := pegomock.GetGenericMockFrom(mock).Invoke("Run", params, []reflect.Type{reflect.TypeOf((*string)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()})
+ var ret0 string
+ var ret1 error
+ if len(result) != 0 {
+ if result[0] != nil {
+ ret0 = result[0].(string)
+ }
+ if result[1] != nil {
+ ret1 = result[1].(error)
+ }
+ }
+ return ret0, ret1
+}
+
+func (mock *MockVersionedExecutorWorkflow) VerifyWasCalledOnce() *VerifierMockVersionedExecutorWorkflow {
+ return &VerifierMockVersionedExecutorWorkflow{
+ mock: mock,
+ invocationCountMatcher: pegomock.Times(1),
+ }
+}
+
+func (mock *MockVersionedExecutorWorkflow) VerifyWasCalled(invocationCountMatcher pegomock.Matcher) *VerifierMockVersionedExecutorWorkflow {
+ return &VerifierMockVersionedExecutorWorkflow{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ }
+}
+
+func (mock *MockVersionedExecutorWorkflow) VerifyWasCalledInOrder(invocationCountMatcher pegomock.Matcher, inOrderContext *pegomock.InOrderContext) *VerifierMockVersionedExecutorWorkflow {
+ return &VerifierMockVersionedExecutorWorkflow{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ inOrderContext: inOrderContext,
+ }
+}
+
+func (mock *MockVersionedExecutorWorkflow) VerifyWasCalledEventually(invocationCountMatcher pegomock.Matcher, timeout time.Duration) *VerifierMockVersionedExecutorWorkflow {
+ return &VerifierMockVersionedExecutorWorkflow{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ timeout: timeout,
+ }
+}
+
+type VerifierMockVersionedExecutorWorkflow struct {
+ mock *MockVersionedExecutorWorkflow
+ invocationCountMatcher pegomock.Matcher
+ inOrderContext *pegomock.InOrderContext
+ timeout time.Duration
+}
+
+func (verifier *VerifierMockVersionedExecutorWorkflow) EnsureExecutorVersion(log *logging.SimpleLogger, v *go_version.Version) *MockVersionedExecutorWorkflow_EnsureExecutorVersion_OngoingVerification {
+ params := []pegomock.Param{log, v}
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "EnsureExecutorVersion", params, verifier.timeout)
+ return &MockVersionedExecutorWorkflow_EnsureExecutorVersion_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations}
+}
+
+type MockVersionedExecutorWorkflow_EnsureExecutorVersion_OngoingVerification struct {
+ mock *MockVersionedExecutorWorkflow
+ methodInvocations []pegomock.MethodInvocation
+}
+
+func (c *MockVersionedExecutorWorkflow_EnsureExecutorVersion_OngoingVerification) GetCapturedArguments() (*logging.SimpleLogger, *go_version.Version) {
+ log, v := c.GetAllCapturedArguments()
+ return log[len(log)-1], v[len(v)-1]
+}
+
+func (c *MockVersionedExecutorWorkflow_EnsureExecutorVersion_OngoingVerification) GetAllCapturedArguments() (_param0 []*logging.SimpleLogger, _param1 []*go_version.Version) {
+ params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations)
+ if len(params) > 0 {
+ _param0 = make([]*logging.SimpleLogger, len(c.methodInvocations))
+ for u, param := range params[0] {
+ _param0[u] = param.(*logging.SimpleLogger)
+ }
+ _param1 = make([]*go_version.Version, len(c.methodInvocations))
+ for u, param := range params[1] {
+ _param1[u] = param.(*go_version.Version)
+ }
+ }
+ return
+}
+
+func (verifier *VerifierMockVersionedExecutorWorkflow) Run(ctx models.ProjectCommandContext, executablePath string, envs map[string]string, workdir string) *MockVersionedExecutorWorkflow_Run_OngoingVerification {
+ params := []pegomock.Param{ctx, executablePath, envs, workdir}
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Run", params, verifier.timeout)
+ return &MockVersionedExecutorWorkflow_Run_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations}
+}
+
+type MockVersionedExecutorWorkflow_Run_OngoingVerification struct {
+ mock *MockVersionedExecutorWorkflow
+ methodInvocations []pegomock.MethodInvocation
+}
+
+func (c *MockVersionedExecutorWorkflow_Run_OngoingVerification) GetCapturedArguments() (models.ProjectCommandContext, string, map[string]string, string) {
+ ctx, executablePath, envs, workdir := c.GetAllCapturedArguments()
+ return ctx[len(ctx)-1], executablePath[len(executablePath)-1], envs[len(envs)-1], workdir[len(workdir)-1]
+}
+
+func (c *MockVersionedExecutorWorkflow_Run_OngoingVerification) GetAllCapturedArguments() (_param0 []models.ProjectCommandContext, _param1 []string, _param2 []map[string]string, _param3 []string) {
+ params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations)
+ if len(params) > 0 {
+ _param0 = make([]models.ProjectCommandContext, len(c.methodInvocations))
+ for u, param := range params[0] {
+ _param0[u] = param.(models.ProjectCommandContext)
+ }
+ _param1 = make([]string, len(c.methodInvocations))
+ for u, param := range params[1] {
+ _param1[u] = param.(string)
+ }
+ _param2 = make([]map[string]string, len(c.methodInvocations))
+ for u, param := range params[2] {
+ _param2[u] = param.(map[string]string)
+ }
+ _param3 = make([]string, len(c.methodInvocations))
+ for u, param := range params[3] {
+ _param3[u] = param.(string)
+ }
+ }
+ return
+}
diff --git a/server/events/runtime/models/exec.go b/server/events/runtime/models/exec.go
new file mode 100644
index 0000000000..6950b731e3
--- /dev/null
+++ b/server/events/runtime/models/exec.go
@@ -0,0 +1,48 @@
+package models
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_exec.go Exec
+
+type Exec interface {
+ LookPath(file string) (string, error)
+ CombinedOutput(args []string, envs map[string]string, workdir string) (string, error)
+}
+
+type LocalExec struct{}
+
+func (e LocalExec) LookPath(file string) (string, error) {
+ return exec.LookPath(file)
+}
+
+// CombinedOutput encapsulates creating a command and running it. We should think about
+// how to flexibly add parameters here, as this is meant to satisfy very simple use cases;
+// for more complex use cases we can add a Command function to this method, which would
+// allow us to edit a Cmd directly.
+func (e LocalExec) CombinedOutput(args []string, envs map[string]string, workdir string) (string, error) {
+ formattedArgs := strings.Join(args, " ")
+
+ envVars := []string{}
+ for key, val := range envs {
+ envVars = append(envVars, fmt.Sprintf("%s=%s", key, val))
+ }
+
+	// TODO: move this os.Environ call out to the server so it
+	// happens once at startup rather than on every command invocation
+ envVars = append(envVars, os.Environ()...)
+
+	// NOTE: sh -c is used here to mirror how the terraform binary is
+	// invoked; revisit whether invoking the executable directly would suffice
+ cmd := exec.Command("sh", "-c", formattedArgs)
+ cmd.Env = envVars
+ cmd.Dir = workdir
+
+ output, err := cmd.CombinedOutput()
+
+ return string(output), err
+}
diff --git a/server/events/runtime/models/filepath.go b/server/events/runtime/models/filepath.go
new file mode 100644
index 0000000000..45e40a37ce
--- /dev/null
+++ b/server/events/runtime/models/filepath.go
@@ -0,0 +1,40 @@
+package models
+
+import (
+ "os"
+ "path/filepath"
+)
+
+//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_filepath.go FilePath
+
+type FilePath interface {
+ NotExists() bool
+ Join(elem ...string) FilePath
+ Symlink(newname string) (FilePath, error)
+ Resolve() string
+}
+
+type LocalFilePath string
+
+func (fp LocalFilePath) NotExists() bool {
+ _, err := os.Stat(string(fp))
+
+ return os.IsNotExist(err)
+}
+
+func (fp LocalFilePath) Join(elem ...string) FilePath {
+ pathComponents := []string{}
+
+ pathComponents = append(pathComponents, string(fp))
+ pathComponents = append(pathComponents, elem...)
+
+ return LocalFilePath(filepath.Join(pathComponents...))
+}
+
+func (fp LocalFilePath) Symlink(newname string) (FilePath, error) {
+ return LocalFilePath(newname), os.Symlink(fp.Resolve(), newname)
+}
+
+func (fp LocalFilePath) Resolve() string {
+ return string(fp)
+}
diff --git a/server/events/runtime/models/mocks/matchers/map_of_string_to_string.go b/server/events/runtime/models/mocks/matchers/map_of_string_to_string.go
new file mode 100644
index 0000000000..4d969915af
--- /dev/null
+++ b/server/events/runtime/models/mocks/matchers/map_of_string_to_string.go
@@ -0,0 +1,21 @@
+// Code generated by pegomock. DO NOT EDIT.
+package matchers
+
+import (
+ "reflect"
+ "github.com/petergtz/pegomock"
+
+
+)
+
+func AnyMapOfStringToString() map[string]string {
+ pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(map[string]string))(nil)).Elem()))
+ var nullValue map[string]string
+ return nullValue
+}
+
+func EqMapOfStringToString(value map[string]string) map[string]string {
+ pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value})
+ var nullValue map[string]string
+ return nullValue
+}
diff --git a/server/events/runtime/models/mocks/matchers/models_filepath.go b/server/events/runtime/models/mocks/matchers/models_filepath.go
new file mode 100644
index 0000000000..0350b20157
--- /dev/null
+++ b/server/events/runtime/models/mocks/matchers/models_filepath.go
@@ -0,0 +1,20 @@
+// Code generated by pegomock. DO NOT EDIT.
+package matchers
+
+import (
+ "reflect"
+ "github.com/petergtz/pegomock"
+ models "github.com/runatlantis/atlantis/server/events/runtime/models"
+)
+
+func AnyModelsFilePath() models.FilePath {
+ pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(models.FilePath))(nil)).Elem()))
+ var nullValue models.FilePath
+ return nullValue
+}
+
+func EqModelsFilePath(value models.FilePath) models.FilePath {
+ pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value})
+ var nullValue models.FilePath
+ return nullValue
+}
diff --git a/server/events/runtime/models/mocks/matchers/slice_of_string.go b/server/events/runtime/models/mocks/matchers/slice_of_string.go
new file mode 100644
index 0000000000..96f9b24ae2
--- /dev/null
+++ b/server/events/runtime/models/mocks/matchers/slice_of_string.go
@@ -0,0 +1,20 @@
+// Code generated by pegomock. DO NOT EDIT.
+package matchers
+
+import (
+ "reflect"
+ "github.com/petergtz/pegomock"
+
+)
+
+func AnySliceOfString() []string {
+ pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*([]string))(nil)).Elem()))
+ var nullValue []string
+ return nullValue
+}
+
+func EqSliceOfString(value []string) []string {
+ pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value})
+ var nullValue []string
+ return nullValue
+}
diff --git a/server/events/runtime/models/mocks/mock_exec.go b/server/events/runtime/models/mocks/mock_exec.go
new file mode 100644
index 0000000000..ccbcb92b57
--- /dev/null
+++ b/server/events/runtime/models/mocks/mock_exec.go
@@ -0,0 +1,162 @@
+// Code generated by pegomock. DO NOT EDIT.
+// Source: github.com/runatlantis/atlantis/server/events/runtime/models (interfaces: Exec)
+
+package mocks
+
+import (
+ pegomock "github.com/petergtz/pegomock"
+ "reflect"
+ "time"
+)
+
+type MockExec struct {
+ fail func(message string, callerSkip ...int)
+}
+
+func NewMockExec(options ...pegomock.Option) *MockExec {
+ mock := &MockExec{}
+ for _, option := range options {
+ option.Apply(mock)
+ }
+ return mock
+}
+
+func (mock *MockExec) SetFailHandler(fh pegomock.FailHandler) { mock.fail = fh }
+func (mock *MockExec) FailHandler() pegomock.FailHandler { return mock.fail }
+
+func (mock *MockExec) LookPath(file string) (string, error) {
+ if mock == nil {
+ panic("mock must not be nil. Use myMock := NewMockExec().")
+ }
+ params := []pegomock.Param{file}
+ result := pegomock.GetGenericMockFrom(mock).Invoke("LookPath", params, []reflect.Type{reflect.TypeOf((*string)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()})
+ var ret0 string
+ var ret1 error
+ if len(result) != 0 {
+ if result[0] != nil {
+ ret0 = result[0].(string)
+ }
+ if result[1] != nil {
+ ret1 = result[1].(error)
+ }
+ }
+ return ret0, ret1
+}
+
+func (mock *MockExec) CombinedOutput(args []string, envs map[string]string, workdir string) (string, error) {
+ if mock == nil {
+ panic("mock must not be nil. Use myMock := NewMockExec().")
+ }
+ params := []pegomock.Param{args, envs, workdir}
+ result := pegomock.GetGenericMockFrom(mock).Invoke("CombinedOutput", params, []reflect.Type{reflect.TypeOf((*string)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()})
+ var ret0 string
+ var ret1 error
+ if len(result) != 0 {
+ if result[0] != nil {
+ ret0 = result[0].(string)
+ }
+ if result[1] != nil {
+ ret1 = result[1].(error)
+ }
+ }
+ return ret0, ret1
+}
+
+func (mock *MockExec) VerifyWasCalledOnce() *VerifierMockExec {
+ return &VerifierMockExec{
+ mock: mock,
+ invocationCountMatcher: pegomock.Times(1),
+ }
+}
+
+func (mock *MockExec) VerifyWasCalled(invocationCountMatcher pegomock.Matcher) *VerifierMockExec {
+ return &VerifierMockExec{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ }
+}
+
+func (mock *MockExec) VerifyWasCalledInOrder(invocationCountMatcher pegomock.Matcher, inOrderContext *pegomock.InOrderContext) *VerifierMockExec {
+ return &VerifierMockExec{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ inOrderContext: inOrderContext,
+ }
+}
+
+func (mock *MockExec) VerifyWasCalledEventually(invocationCountMatcher pegomock.Matcher, timeout time.Duration) *VerifierMockExec {
+ return &VerifierMockExec{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ timeout: timeout,
+ }
+}
+
+type VerifierMockExec struct {
+ mock *MockExec
+ invocationCountMatcher pegomock.Matcher
+ inOrderContext *pegomock.InOrderContext
+ timeout time.Duration
+}
+
+func (verifier *VerifierMockExec) LookPath(file string) *MockExec_LookPath_OngoingVerification {
+ params := []pegomock.Param{file}
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "LookPath", params, verifier.timeout)
+ return &MockExec_LookPath_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations}
+}
+
+type MockExec_LookPath_OngoingVerification struct {
+ mock *MockExec
+ methodInvocations []pegomock.MethodInvocation
+}
+
+func (c *MockExec_LookPath_OngoingVerification) GetCapturedArguments() string {
+ file := c.GetAllCapturedArguments()
+ return file[len(file)-1]
+}
+
+func (c *MockExec_LookPath_OngoingVerification) GetAllCapturedArguments() (_param0 []string) {
+ params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations)
+ if len(params) > 0 {
+ _param0 = make([]string, len(c.methodInvocations))
+ for u, param := range params[0] {
+ _param0[u] = param.(string)
+ }
+ }
+ return
+}
+
+func (verifier *VerifierMockExec) CombinedOutput(args []string, envs map[string]string, workdir string) *MockExec_CombinedOutput_OngoingVerification {
+ params := []pegomock.Param{args, envs, workdir}
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "CombinedOutput", params, verifier.timeout)
+ return &MockExec_CombinedOutput_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations}
+}
+
+type MockExec_CombinedOutput_OngoingVerification struct {
+ mock *MockExec
+ methodInvocations []pegomock.MethodInvocation
+}
+
+func (c *MockExec_CombinedOutput_OngoingVerification) GetCapturedArguments() ([]string, map[string]string, string) {
+ args, envs, workdir := c.GetAllCapturedArguments()
+ return args[len(args)-1], envs[len(envs)-1], workdir[len(workdir)-1]
+}
+
+func (c *MockExec_CombinedOutput_OngoingVerification) GetAllCapturedArguments() (_param0 [][]string, _param1 []map[string]string, _param2 []string) {
+ params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations)
+ if len(params) > 0 {
+ _param0 = make([][]string, len(c.methodInvocations))
+ for u, param := range params[0] {
+ _param0[u] = param.([]string)
+ }
+ _param1 = make([]map[string]string, len(c.methodInvocations))
+ for u, param := range params[1] {
+ _param1[u] = param.(map[string]string)
+ }
+ _param2 = make([]string, len(c.methodInvocations))
+ for u, param := range params[2] {
+ _param2[u] = param.(string)
+ }
+ }
+ return
+}
diff --git a/server/events/runtime/models/mocks/mock_filepath.go b/server/events/runtime/models/mocks/mock_filepath.go
new file mode 100644
index 0000000000..869df1de36
--- /dev/null
+++ b/server/events/runtime/models/mocks/mock_filepath.go
@@ -0,0 +1,226 @@
+// Code generated by pegomock. DO NOT EDIT.
+// Source: github.com/runatlantis/atlantis/server/events/runtime/models (interfaces: FilePath)
+
+package mocks
+
+import (
+ pegomock "github.com/petergtz/pegomock"
+ models "github.com/runatlantis/atlantis/server/events/runtime/models"
+ "reflect"
+ "time"
+)
+
+type MockFilePath struct {
+ fail func(message string, callerSkip ...int)
+}
+
+func NewMockFilePath(options ...pegomock.Option) *MockFilePath {
+ mock := &MockFilePath{}
+ for _, option := range options {
+ option.Apply(mock)
+ }
+ return mock
+}
+
+func (mock *MockFilePath) SetFailHandler(fh pegomock.FailHandler) { mock.fail = fh }
+func (mock *MockFilePath) FailHandler() pegomock.FailHandler { return mock.fail }
+
+func (mock *MockFilePath) NotExists() bool {
+ if mock == nil {
+ panic("mock must not be nil. Use myMock := NewMockFilePath().")
+ }
+ params := []pegomock.Param{}
+ result := pegomock.GetGenericMockFrom(mock).Invoke("NotExists", params, []reflect.Type{reflect.TypeOf((*bool)(nil)).Elem()})
+ var ret0 bool
+ if len(result) != 0 {
+ if result[0] != nil {
+ ret0 = result[0].(bool)
+ }
+ }
+ return ret0
+}
+
+func (mock *MockFilePath) Join(elem ...string) models.FilePath {
+ if mock == nil {
+ panic("mock must not be nil. Use myMock := NewMockFilePath().")
+ }
+ params := []pegomock.Param{}
+ for _, param := range elem {
+ params = append(params, param)
+ }
+ result := pegomock.GetGenericMockFrom(mock).Invoke("Join", params, []reflect.Type{reflect.TypeOf((*models.FilePath)(nil)).Elem()})
+ var ret0 models.FilePath
+ if len(result) != 0 {
+ if result[0] != nil {
+ ret0 = result[0].(models.FilePath)
+ }
+ }
+ return ret0
+}
+
+func (mock *MockFilePath) Symlink(newname string) (models.FilePath, error) {
+ if mock == nil {
+ panic("mock must not be nil. Use myMock := NewMockFilePath().")
+ }
+ params := []pegomock.Param{newname}
+ result := pegomock.GetGenericMockFrom(mock).Invoke("Symlink", params, []reflect.Type{reflect.TypeOf((*models.FilePath)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()})
+ var ret0 models.FilePath
+ var ret1 error
+ if len(result) != 0 {
+ if result[0] != nil {
+ ret0 = result[0].(models.FilePath)
+ }
+ if result[1] != nil {
+ ret1 = result[1].(error)
+ }
+ }
+ return ret0, ret1
+}
+
+func (mock *MockFilePath) Resolve() string {
+ if mock == nil {
+ panic("mock must not be nil. Use myMock := NewMockFilePath().")
+ }
+ params := []pegomock.Param{}
+ result := pegomock.GetGenericMockFrom(mock).Invoke("Resolve", params, []reflect.Type{reflect.TypeOf((*string)(nil)).Elem()})
+ var ret0 string
+ if len(result) != 0 {
+ if result[0] != nil {
+ ret0 = result[0].(string)
+ }
+ }
+ return ret0
+}
+
+func (mock *MockFilePath) VerifyWasCalledOnce() *VerifierMockFilePath {
+ return &VerifierMockFilePath{
+ mock: mock,
+ invocationCountMatcher: pegomock.Times(1),
+ }
+}
+
+func (mock *MockFilePath) VerifyWasCalled(invocationCountMatcher pegomock.Matcher) *VerifierMockFilePath {
+ return &VerifierMockFilePath{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ }
+}
+
+func (mock *MockFilePath) VerifyWasCalledInOrder(invocationCountMatcher pegomock.Matcher, inOrderContext *pegomock.InOrderContext) *VerifierMockFilePath {
+ return &VerifierMockFilePath{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ inOrderContext: inOrderContext,
+ }
+}
+
+func (mock *MockFilePath) VerifyWasCalledEventually(invocationCountMatcher pegomock.Matcher, timeout time.Duration) *VerifierMockFilePath {
+ return &VerifierMockFilePath{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ timeout: timeout,
+ }
+}
+
+type VerifierMockFilePath struct {
+ mock *MockFilePath
+ invocationCountMatcher pegomock.Matcher
+ inOrderContext *pegomock.InOrderContext
+ timeout time.Duration
+}
+
+func (verifier *VerifierMockFilePath) NotExists() *MockFilePath_NotExists_OngoingVerification {
+ params := []pegomock.Param{}
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "NotExists", params, verifier.timeout)
+ return &MockFilePath_NotExists_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations}
+}
+
+type MockFilePath_NotExists_OngoingVerification struct {
+ mock *MockFilePath
+ methodInvocations []pegomock.MethodInvocation
+}
+
+func (c *MockFilePath_NotExists_OngoingVerification) GetCapturedArguments() {
+}
+
+func (c *MockFilePath_NotExists_OngoingVerification) GetAllCapturedArguments() {
+}
+
+func (verifier *VerifierMockFilePath) Join(elem ...string) *MockFilePath_Join_OngoingVerification {
+ params := []pegomock.Param{}
+ for _, param := range elem {
+ params = append(params, param)
+ }
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Join", params, verifier.timeout)
+ return &MockFilePath_Join_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations}
+}
+
+type MockFilePath_Join_OngoingVerification struct {
+ mock *MockFilePath
+ methodInvocations []pegomock.MethodInvocation
+}
+
+func (c *MockFilePath_Join_OngoingVerification) GetCapturedArguments() []string {
+ elem := c.GetAllCapturedArguments()
+ return elem[len(elem)-1]
+}
+
+func (c *MockFilePath_Join_OngoingVerification) GetAllCapturedArguments() (_param0 [][]string) {
+ params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations)
+ if len(params) > 0 {
+ _param0 = make([][]string, len(c.methodInvocations))
+ for u := 0; u < len(c.methodInvocations); u++ {
+ _param0[u] = make([]string, len(params)-0)
+ for x := 0; x < len(params); x++ {
+ if params[x][u] != nil {
+ _param0[u][x-0] = params[x][u].(string)
+ }
+ }
+ }
+ }
+ return
+}
+
+func (verifier *VerifierMockFilePath) Symlink(newname string) *MockFilePath_Symlink_OngoingVerification {
+ params := []pegomock.Param{newname}
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Symlink", params, verifier.timeout)
+ return &MockFilePath_Symlink_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations}
+}
+
+type MockFilePath_Symlink_OngoingVerification struct {
+ mock *MockFilePath
+ methodInvocations []pegomock.MethodInvocation
+}
+
+func (c *MockFilePath_Symlink_OngoingVerification) GetCapturedArguments() string {
+ newname := c.GetAllCapturedArguments()
+ return newname[len(newname)-1]
+}
+
+func (c *MockFilePath_Symlink_OngoingVerification) GetAllCapturedArguments() (_param0 []string) {
+ params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations)
+ if len(params) > 0 {
+ _param0 = make([]string, len(c.methodInvocations))
+ for u, param := range params[0] {
+ _param0[u] = param.(string)
+ }
+ }
+ return
+}
+
+func (verifier *VerifierMockFilePath) Resolve() *MockFilePath_Resolve_OngoingVerification {
+ params := []pegomock.Param{}
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Resolve", params, verifier.timeout)
+ return &MockFilePath_Resolve_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations}
+}
+
+type MockFilePath_Resolve_OngoingVerification struct {
+ mock *MockFilePath
+ methodInvocations []pegomock.MethodInvocation
+}
+
+func (c *MockFilePath_Resolve_OngoingVerification) GetCapturedArguments() {
+}
+
+func (c *MockFilePath_Resolve_OngoingVerification) GetAllCapturedArguments() {
+}
diff --git a/server/events/runtime/plan_type_step_runner_delegate.go b/server/events/runtime/plan_type_step_runner_delegate.go
new file mode 100644
index 0000000000..a372cd2e00
--- /dev/null
+++ b/server/events/runtime/plan_type_step_runner_delegate.go
@@ -0,0 +1,65 @@
+package runtime
+
+import (
+ "io/ioutil"
+ "path/filepath"
+
+ "github.com/pkg/errors"
+ "github.com/runatlantis/atlantis/server/events/models"
+)
+
+// NullRunner is a runner that isn't configured for a given plan type but outputs nothing
+type NullRunner struct{}
+
+// Run is a no-op: it logs that no runner is configured for this plan type and
+// returns empty output with a nil error so the surrounding step still succeeds.
+func (p NullRunner) Run(ctx models.ProjectCommandContext, extraArgs []string, path string, envs map[string]string) (string, error) {
+ ctx.Log.Debug("runner not configured for plan type")
+
+ return "", nil
+}
+
+// RemoteBackendUnsupportedRunner is a runner that is responsible for outputting that the remote backend is unsupported
+type RemoteBackendUnsupportedRunner struct{}
+
+// Run does not execute anything; it returns a fixed user-facing message stating
+// that this step cannot run against a plan produced by a remote backend.
+func (p RemoteBackendUnsupportedRunner) Run(ctx models.ProjectCommandContext, extraArgs []string, path string, envs map[string]string) (string, error) {
+ ctx.Log.Debug("runner not configured for remote backend")
+
+ return "Remote backend is unsupported for this step.", nil
+}
+
+// NewPlanTypeStepRunnerDelegate returns a Runner that routes each invocation to
+// remotePlanRunner when the stored plan file was created by remote operations,
+// and to defaultRunner otherwise.
+func NewPlanTypeStepRunnerDelegate(defaultRunner Runner, remotePlanRunner Runner) Runner {
+ return &PlanTypeStepRunnerDelegate{
+ defaultRunner: defaultRunner,
+ remotePlanRunner: remotePlanRunner,
+ }
+}
+
+// PlanTypeStepRunnerDelegate delegates based on the type of plan, ie. remote backend which doesn't support certain functions
+type PlanTypeStepRunnerDelegate struct {
+ defaultRunner Runner
+ remotePlanRunner Runner
+}
+
+// isRemotePlan reads the stored plan file at planFile and reports whether it was
+// generated by a remote-backend run (determined by IsRemotePlan on its contents).
+// Returns a wrapped error if the plan file cannot be read.
+func (p *PlanTypeStepRunnerDelegate) isRemotePlan(planFile string) (bool, error) {
+ data, err := ioutil.ReadFile(planFile)
+
+ if err != nil {
+ return false, errors.Wrapf(err, "unable to read %s", planFile)
+ }
+
+ return IsRemotePlan(data), nil
+}
+
+// Run locates the plan file for this workspace/project under path, then
+// delegates to the remote-plan runner for remote-backend plans and to the
+// default runner otherwise. Fails if the plan file is missing or unreadable.
+func (p *PlanTypeStepRunnerDelegate) Run(ctx models.ProjectCommandContext, extraArgs []string, path string, envs map[string]string) (string, error) {
+ planFile := filepath.Join(path, GetPlanFilename(ctx.Workspace, ctx.ProjectName))
+ remotePlan, err := p.isRemotePlan(planFile)
+
+ if err != nil {
+ return "", err
+ }
+
+ if remotePlan {
+ return p.remotePlanRunner.Run(ctx, extraArgs, path, envs)
+ }
+
+ return p.defaultRunner.Run(ctx, extraArgs, path, envs)
+}
diff --git a/server/events/runtime/plan_type_step_runner_delegate_test.go b/server/events/runtime/plan_type_step_runner_delegate_test.go
new file mode 100644
index 0000000000..0f54611ab6
--- /dev/null
+++ b/server/events/runtime/plan_type_step_runner_delegate_test.go
@@ -0,0 +1,159 @@
+package runtime
+
+import (
+ "errors"
+ "io/ioutil"
+ "path/filepath"
+ "testing"
+
+ "github.com/hashicorp/go-version"
+ . "github.com/petergtz/pegomock"
+ . "github.com/runatlantis/atlantis/testing"
+
+ "github.com/runatlantis/atlantis/server/events/models"
+ "github.com/runatlantis/atlantis/server/events/runtime/mocks"
+)
+
+// planFileContents is a minimal terraform plan-output fixture. Tests prepend
+// the remote-ops marker line to it to simulate plans created by remote
+// operations; used as-is it represents a locally-created plan.
+var planFileContents = `
+An execution plan has been generated and is shown below.
+Resource actions are indicated with the following symbols:
+  - destroy
+
+Terraform will perform the following actions:
+
+  - null_resource.hi[1]
+
+
+Plan: 0 to add, 0 to change, 1 to destroy.`
+
+// TestRunDelegate verifies that PlanTypeStepRunnerDelegate routes execution to
+// the remote runner when the plan file carries the remote-ops marker and to the
+// default runner otherwise, passing both output and errors through unchanged.
+func TestRunDelegate(t *testing.T) {
+
+ RegisterMockTestingT(t)
+
+ mockDefaultRunner := mocks.NewMockRunner()
+ mockRemoteRunner := mocks.NewMockRunner()
+
+ subject := &PlanTypeStepRunnerDelegate{
+ defaultRunner: mockDefaultRunner,
+ remotePlanRunner: mockRemoteRunner,
+ }
+
+ tfVersion, _ := version.NewVersion("0.12.0")
+
+ // Plan file marked remote-ops: only the remote runner should be invoked.
+ t.Run("Remote Runner Success", func(t *testing.T) {
+ tmpDir, cleanup := TempDir(t)
+ defer cleanup()
+ planPath := filepath.Join(tmpDir, "workspace.tfplan")
+ err := ioutil.WriteFile(planPath, []byte("Atlantis: this plan was created by remote ops\n"+planFileContents), 0644)
+ Ok(t, err)
+
+ ctx := models.ProjectCommandContext{
+ Workspace: "workspace",
+ RepoRelDir: ".",
+ EscapedCommentArgs: []string{"comment", "args"},
+ TerraformVersion: tfVersion,
+ }
+ extraArgs := []string{"extra", "args"}
+ envs := map[string]string{}
+
+ expectedOut := "some random output"
+
+ When(mockRemoteRunner.Run(ctx, extraArgs, tmpDir, envs)).ThenReturn(expectedOut, nil)
+
+ output, err := subject.Run(ctx, extraArgs, tmpDir, envs)
+
+ mockDefaultRunner.VerifyWasCalled(Never())
+
+ Equals(t, expectedOut, output)
+ Ok(t, err)
+
+ })
+
+ // Remote runner errors must propagate to the caller unchanged.
+ t.Run("Remote Runner Failure", func(t *testing.T) {
+ tmpDir, cleanup := TempDir(t)
+ defer cleanup()
+ planPath := filepath.Join(tmpDir, "workspace.tfplan")
+ err := ioutil.WriteFile(planPath, []byte("Atlantis: this plan was created by remote ops\n"+planFileContents), 0644)
+ Ok(t, err)
+
+ ctx := models.ProjectCommandContext{
+ Workspace: "workspace",
+ RepoRelDir: ".",
+ EscapedCommentArgs: []string{"comment", "args"},
+ TerraformVersion: tfVersion,
+ }
+ extraArgs := []string{"extra", "args"}
+ envs := map[string]string{}
+
+ expectedOut := "some random output"
+
+ When(mockRemoteRunner.Run(ctx, extraArgs, tmpDir, envs)).ThenReturn(expectedOut, errors.New("err"))
+
+ output, err := subject.Run(ctx, extraArgs, tmpDir, envs)
+
+ mockDefaultRunner.VerifyWasCalled(Never())
+
+ Equals(t, expectedOut, output)
+ Assert(t, err != nil, "err should not be nil")
+
+ })
+
+ // Plan file without the remote-ops marker: only the default runner runs.
+ t.Run("Local Runner Success", func(t *testing.T) {
+ tmpDir, cleanup := TempDir(t)
+ defer cleanup()
+ planPath := filepath.Join(tmpDir, "workspace.tfplan")
+ err := ioutil.WriteFile(planPath, []byte(planFileContents), 0644)
+ Ok(t, err)
+
+ ctx := models.ProjectCommandContext{
+ Workspace: "workspace",
+ RepoRelDir: ".",
+ EscapedCommentArgs: []string{"comment", "args"},
+ TerraformVersion: tfVersion,
+ }
+ extraArgs := []string{"extra", "args"}
+ envs := map[string]string{}
+
+ expectedOut := "some random output"
+
+ When(mockDefaultRunner.Run(ctx, extraArgs, tmpDir, envs)).ThenReturn(expectedOut, nil)
+
+ output, err := subject.Run(ctx, extraArgs, tmpDir, envs)
+
+ mockRemoteRunner.VerifyWasCalled(Never())
+
+ Equals(t, expectedOut, output)
+ Ok(t, err)
+
+ })
+
+ // Default runner errors must propagate to the caller unchanged.
+ t.Run("Local Runner Failure", func(t *testing.T) {
+ tmpDir, cleanup := TempDir(t)
+ defer cleanup()
+ planPath := filepath.Join(tmpDir, "workspace.tfplan")
+ err := ioutil.WriteFile(planPath, []byte(planFileContents), 0644)
+ Ok(t, err)
+
+ ctx := models.ProjectCommandContext{
+ Workspace: "workspace",
+ RepoRelDir: ".",
+ EscapedCommentArgs: []string{"comment", "args"},
+ TerraformVersion: tfVersion,
+ }
+ extraArgs := []string{"extra", "args"}
+ envs := map[string]string{}
+
+ expectedOut := "some random output"
+
+ When(mockDefaultRunner.Run(ctx, extraArgs, tmpDir, envs)).ThenReturn(expectedOut, errors.New("err"))
+
+ output, err := subject.Run(ctx, extraArgs, tmpDir, envs)
+
+ mockRemoteRunner.VerifyWasCalled(Never())
+
+ Equals(t, expectedOut, output)
+ Assert(t, err != nil, "err should not be nil")
+
+ })
+
+}
diff --git a/server/events/runtime/policy/conftest_client.go b/server/events/runtime/policy/conftest_client.go
new file mode 100644
index 0000000000..053c8a9a76
--- /dev/null
+++ b/server/events/runtime/policy/conftest_client.go
@@ -0,0 +1,243 @@
+package policy
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ version "github.com/hashicorp/go-version"
+ "github.com/pkg/errors"
+ "github.com/runatlantis/atlantis/server/events/models"
+ "github.com/runatlantis/atlantis/server/events/runtime/cache"
+ runtime_models "github.com/runatlantis/atlantis/server/events/runtime/models"
+ "github.com/runatlantis/atlantis/server/events/terraform"
+ "github.com/runatlantis/atlantis/server/events/yaml/valid"
+ "github.com/runatlantis/atlantis/server/logging"
+)
+
+const (
+ // DefaultConftestVersionEnvKey names the env var that carries the default
+ // conftest version (baked into the Docker image at build time).
+ DefaultConftestVersionEnvKey = "DEFAULT_CONFTEST_VERSION"
+ conftestBinaryName = "conftest"
+ conftestDownloadURLPrefix = "https://github.com/open-policy-agent/conftest/releases/download/v"
+ // NOTE(review): arch is hardcoded to x86_64; non-amd64 hosts would need
+ // this parameterized — confirm before supporting other platforms.
+ conftestArch = "x86_64"
+)
+
+// Arg is a single conftest command-line option/value pair, e.g. "-p <path>".
+type Arg struct {
+ Param string
+ Option string
+}
+
+// build renders the arg as its two argv tokens: the option then the value.
+func (a Arg) build() []string {
+ return []string{a.Option, a.Param}
+}
+
+// NewPolicyArg builds a "-p <parameter>" arg pointing conftest at a policy path.
+func NewPolicyArg(parameter string) Arg {
+ return Arg{
+ Param: parameter,
+ Option: "-p",
+ }
+}
+
+// ConftestTestCommandArgs describes a full "conftest test" invocation: the
+// executable path (Command), the policy args, and the plan JSON input file.
+type ConftestTestCommandArgs struct {
+ PolicyArgs []Arg
+ InputFile string
+ Command string
+}
+
+// build serializes the invocation into an argv slice of the shape:
+// <command> test -p <path> [...] <input> --no-color --all-namespaces.
+// It errors when no policies are configured since conftest needs at least one.
+func (c ConftestTestCommandArgs) build() ([]string, error) {
+
+ if len(c.PolicyArgs) == 0 {
+ return []string{}, errors.New("no policies specified")
+ }
+
+ // add the subcommand
+ commandArgs := []string{c.Command, "test"}
+
+ for _, a := range c.PolicyArgs {
+ commandArgs = append(commandArgs, a.build()...)
+ }
+
+ commandArgs = append(commandArgs, c.InputFile, "--no-color", "--all-namespaces")
+
+ return commandArgs, nil
+}
+
+//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_conftest_client.go SourceResolver
+// SourceResolver resolves the policy set to a local fs path
+type SourceResolver interface {
+ Resolve(policySet valid.PolicySet) (string, error)
+}
+
+// LocalSourceResolver resolves a local policy set to a local fs path
+type LocalSourceResolver struct {
+}
+
+// Resolve returns the policy set's configured path unchanged, since a local
+// policy set already lives on the local filesystem.
+func (p *LocalSourceResolver) Resolve(policySet valid.PolicySet) (string, error) {
+ return policySet.Path, nil
+
+}
+
+// SourceResolverProxy proxies to underlying source resolvers dynamically
+type SourceResolverProxy struct {
+ localSourceResolver SourceResolver
+}
+
+// Resolve dispatches to the resolver that matches the policy set's source
+// type. Only local policy sets are currently supported; any other source
+// yields an error naming it.
+func (p *SourceResolverProxy) Resolve(policySet valid.PolicySet) (string, error) {
+ switch source := policySet.Source; source {
+ case valid.LocalPolicySet:
+ return p.localSourceResolver.Resolve(policySet)
+ default:
+ // fmt.Errorf is the idiomatic form of errors.New(fmt.Sprintf(...)) (gosimple S1028).
+ return "", fmt.Errorf("unable to resolve policy set source %s", source)
+ }
+}
+
+// ConfTestVersionDownloader fetches a specific conftest release binary via the
+// shared terraform.Downloader implementation.
+type ConfTestVersionDownloader struct {
+ downloader terraform.Downloader
+}
+
+// downloadConfTestVersion downloads the conftest release tarball for version v
+// into destPath, instructing the downloader to verify it against the release's
+// checksums.txt, and returns the path of the extracted "conftest" binary.
+func (c ConfTestVersionDownloader) downloadConfTestVersion(v *version.Version, destPath string) (runtime_models.FilePath, error) {
+ versionURLPrefix := fmt.Sprintf("%s%s", conftestDownloadURLPrefix, v.Original())
+
+ // download binary in addition to checksum file
+ binURL := fmt.Sprintf("%s/conftest_%s_%s_%s.tar.gz", versionURLPrefix, v.Original(), strings.Title(runtime.GOOS), conftestArch)
+ checksumURL := fmt.Sprintf("%s/checksums.txt", versionURLPrefix)
+
+ // NOTE(review): the "?checksum=file:<url>" query string is a go-getter
+ // convention; this assumes the Downloader implementation is go-getter
+ // backed, which the interface itself does not guarantee — confirm if a
+ // different implementation is ever injected.
+ fullSrcURL := fmt.Sprintf("%s?checksum=file:%s", binURL, checksumURL)
+
+ if err := c.downloader.GetAny(destPath, fullSrcURL); err != nil {
+ return runtime_models.LocalFilePath(""), errors.Wrapf(err, "downloading conftest version %s at %q", v.String(), fullSrcURL)
+ }
+
+ binPath := filepath.Join(destPath, "conftest")
+
+ return runtime_models.LocalFilePath(binPath), nil
+}
+
+// ConfTestExecutorWorkflow runs a versioned conftest binary with the args built from the project context.
+// Project context defines whether conftest runs a local policy set or runs a test on a remote policy set.
+type ConfTestExecutorWorkflow struct {
+ SourceResolver SourceResolver
+ VersionCache cache.ExecutionVersionCache
+ DefaultConftestVersion *version.Version
+ Exec runtime_models.Exec
+}
+
+// NewConfTestExecutorWorkflow wires up a ConfTestExecutorWorkflow with a
+// layered version cache rooted at versionRootDir. The default conftest version
+// is read from the DEFAULT_CONFTEST_VERSION env var and may be left nil when
+// unset; resolution then happens lazily per request.
+func NewConfTestExecutorWorkflow(log *logging.SimpleLogger, versionRootDir string, conftestDownloder terraform.Downloader) *ConfTestExecutorWorkflow {
+ downloader := ConfTestVersionDownloader{
+ downloader: conftestDownloder,
+ }
+ // Named defaultVersion (not "version") to avoid shadowing the imported
+ // github.com/hashicorp/go-version package.
+ defaultVersion, err := getDefaultVersion()
+
+ if err != nil {
+ // conftest default versions are not essential to service startup so let's not block on it.
+ log.Warn("failed to get default conftest version. Will attempt request scoped lazy loads %s", err.Error())
+ }
+
+ versionCache := cache.NewExecutionVersionLayeredLoadingCache(
+ conftestBinaryName,
+ versionRootDir,
+ downloader.downloadConfTestVersion,
+ )
+
+ return &ConfTestExecutorWorkflow{
+ VersionCache: versionCache,
+ DefaultConftestVersion: defaultVersion,
+ SourceResolver: &SourceResolverProxy{
+ localSourceResolver: &LocalSourceResolver{},
+ },
+ Exec: runtime_models.LocalExec{},
+ }
+}
+
+// Run resolves every configured policy set to a local path, builds the
+// "conftest test" argv against the project's rendered plan JSON, executes it,
+// and returns the combined output with the input-file path scrubbed out.
+// Policy sets that fail to resolve are logged and skipped rather than failing
+// the whole step; with no resolvable policies the step is a successful no-op.
+func (c *ConfTestExecutorWorkflow) Run(ctx models.ProjectCommandContext, executablePath string, envs map[string]string, workdir string) (string, error) {
+ policyArgs := []Arg{}
+ policySetNames := []string{}
+ ctx.Log.Debug("policy sets, %s ", ctx.PolicySets)
+ for _, policySet := range ctx.PolicySets.PolicySets {
+ path, err := c.SourceResolver.Resolve(policySet)
+
+ // Let's not fail the whole step because of a single failure. Log and fail silently
+ if err != nil {
+ ctx.Log.Err("Error resolving policyset %s. err: %s", policySet.Name, err.Error())
+ continue
+ }
+
+ policyArg := NewPolicyArg(path)
+ policyArgs = append(policyArgs, policyArg)
+
+ policySetNames = append(policySetNames, policySet.Name)
+ }
+
+ // The plan is expected to already exist as show-result JSON under workdir.
+ inputFile := filepath.Join(workdir, ctx.GetShowResultFileName())
+
+ args := ConftestTestCommandArgs{
+ PolicyArgs: policyArgs,
+ InputFile: inputFile,
+ Command: executablePath,
+ }
+
+ serializedArgs, err := args.build()
+
+ // build only errors when no policies were resolved; treat that as a no-op.
+ if err != nil {
+ ctx.Log.Warn("No policies have been configured")
+ return "", nil
+ // TODO: enable when we can pass policies in otherwise e2e tests with policy checks fail
+ // return "", errors.Wrap(err, "building args")
+ }
+
+ initialOutput := fmt.Sprintf("Checking plan against the following policies: \n %s\n", strings.Join(policySetNames, "\n "))
+ cmdOutput, err := c.Exec.CombinedOutput(serializedArgs, envs, workdir)
+
+ // A non-nil err (e.g. policy failure exit code) is returned together with
+ // the sanitized output so callers can surface conftest's report.
+ return c.sanitizeOutput(inputFile, initialOutput+cmdOutput), err
+
+}
+
+// sanitizeOutput strips the absolute input-file path from conftest's output so
+// user-facing results don't leak the server's filesystem layout.
+func (c *ConfTestExecutorWorkflow) sanitizeOutput(inputFile string, output string) string {
+ return strings.Replace(output, inputFile, "", -1)
+}
+
+// EnsureExecutorVersion returns the local path of the conftest binary for the
+// requested version v, downloading it through the version cache if needed.
+// A nil v falls back to DefaultConftestVersion; if both are nil there is no
+// version to resolve so it errors immediately.
+func (c *ConfTestExecutorWorkflow) EnsureExecutorVersion(log *logging.SimpleLogger, v *version.Version) (string, error) {
+ // we have no information to proceed so fail hard
+ if c.DefaultConftestVersion == nil && v == nil {
+ return "", errors.New("no conftest version configured/specified")
+ }
+
+ var versionToRetrieve *version.Version
+
+ if v == nil {
+ versionToRetrieve = c.DefaultConftestVersion
+ } else {
+ versionToRetrieve = v
+ }
+
+ localPath, err := c.VersionCache.Get(versionToRetrieve)
+
+ if err != nil {
+ return "", err
+ }
+
+ return localPath, nil
+
+}
+
+// getDefaultVersion reads the default conftest version from the
+// DEFAULT_CONFTEST_VERSION env var and parses it into a semver version.
+// It errors when the variable is unset or unparsable; callers may treat that
+// as "no default available" rather than fatal.
+func getDefaultVersion() (*version.Version, error) {
+ // first check for the env var; without it there is no default to offer
+ defaultVersion, exists := os.LookupEnv(DefaultConftestVersionEnvKey)
+
+ if !exists {
+ // fmt.Errorf is the idiomatic form of errors.New(fmt.Sprintf(...)) (gosimple S1028).
+ return nil, fmt.Errorf("%s not set.", DefaultConftestVersionEnvKey)
+ }
+
+ wrappedVersion, err := version.NewVersion(defaultVersion)
+
+ if err != nil {
+ return nil, errors.Wrapf(err, "wrapping version %s", defaultVersion)
+ }
+ return wrappedVersion, nil
+}
diff --git a/server/events/runtime/policy/conftest_client_test.go b/server/events/runtime/policy/conftest_client_test.go
new file mode 100644
index 0000000000..570a89b2a6
--- /dev/null
+++ b/server/events/runtime/policy/conftest_client_test.go
@@ -0,0 +1,246 @@
+package policy
+
+import (
+ "errors"
+ "fmt"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/go-version"
+ . "github.com/petergtz/pegomock"
+ "github.com/runatlantis/atlantis/server/events/models"
+ "github.com/runatlantis/atlantis/server/events/runtime/cache/mocks"
+ models_mocks "github.com/runatlantis/atlantis/server/events/runtime/models/mocks"
+ conftest_mocks "github.com/runatlantis/atlantis/server/events/runtime/policy/mocks"
+ terraform_mocks "github.com/runatlantis/atlantis/server/events/terraform/mocks"
+ "github.com/runatlantis/atlantis/server/events/yaml/valid"
+ "github.com/runatlantis/atlantis/server/logging"
+ . "github.com/runatlantis/atlantis/testing"
+)
+
+// TestConfTestVersionDownloader checks that downloadConfTestVersion builds the
+// correct go-getter URL (tarball + checksum file) and surfaces download errors.
+func TestConfTestVersionDownloader(t *testing.T) {
+
+ version, _ := version.NewVersion("0.21.0")
+ destPath := "some/path"
+
+ fullURL := fmt.Sprintf("https://github.com/open-policy-agent/conftest/releases/download/v0.21.0/conftest_0.21.0_%s_x86_64.tar.gz?checksum=file:https://github.com/open-policy-agent/conftest/releases/download/v0.21.0/checksums.txt", strings.Title(runtime.GOOS))
+
+ RegisterMockTestingT(t)
+
+ mockDownloader := terraform_mocks.NewMockDownloader()
+
+ subject := ConfTestVersionDownloader{downloader: mockDownloader}
+
+ t.Run("success", func(t *testing.T) {
+
+ // FIX: this stub previously targeted GetFile, but the code under test
+ // (and the verification below) uses GetAny, so the stub was dead.
+ When(mockDownloader.GetAny(EqString(destPath), EqString(fullURL))).ThenReturn(nil)
+ binPath, err := subject.downloadConfTestVersion(version, destPath)
+
+ mockDownloader.VerifyWasCalledOnce().GetAny(EqString(destPath), EqString(fullURL))
+
+ Ok(t, err)
+
+ Assert(t, binPath.Resolve() == filepath.Join(destPath, "conftest"), "expected binpath")
+ })
+
+ t.Run("error", func(t *testing.T) {
+
+ When(mockDownloader.GetAny(EqString(destPath), EqString(fullURL))).ThenReturn(errors.New("err"))
+ _, err := subject.downloadConfTestVersion(version, destPath)
+
+ Assert(t, err != nil, "err is expected")
+ })
+}
+
+// TestEnsureExecutorVersion covers version-resolution precedence: an explicit
+// version wins over the default, the default is used when none is given, and
+// having neither (or a cache failure) yields an error.
+func TestEnsureExecutorVersion(t *testing.T) {
+
+ defaultVersion, _ := version.NewVersion("1.0")
+ expectedPath := "some/path"
+
+ RegisterMockTestingT(t)
+
+ mockCache := mocks.NewMockExecutionVersionCache()
+ log := logging.NewNoopLogger()
+
+ t.Run("no specified version or default version", func(t *testing.T) {
+ subject := &ConfTestExecutorWorkflow{
+ VersionCache: mockCache,
+ }
+
+ _, err := subject.EnsureExecutorVersion(log, nil)
+
+ Assert(t, err != nil, "expected error finding version")
+ })
+
+ t.Run("use default version", func(t *testing.T) {
+ subject := &ConfTestExecutorWorkflow{
+ VersionCache: mockCache,
+ DefaultConftestVersion: defaultVersion,
+ }
+
+ When(mockCache.Get(defaultVersion)).ThenReturn(expectedPath, nil)
+
+ path, err := subject.EnsureExecutorVersion(log, nil)
+
+ Ok(t, err)
+
+ Assert(t, path == expectedPath, "path is expected")
+ })
+
+ t.Run("use specified version", func(t *testing.T) {
+ subject := &ConfTestExecutorWorkflow{
+ VersionCache: mockCache,
+ DefaultConftestVersion: defaultVersion,
+ }
+
+ versionInput, _ := version.NewVersion("2.0")
+
+ When(mockCache.Get(versionInput)).ThenReturn(expectedPath, nil)
+
+ path, err := subject.EnsureExecutorVersion(log, versionInput)
+
+ Ok(t, err)
+
+ Assert(t, path == expectedPath, "path is expected")
+ })
+
+ t.Run("cache error", func(t *testing.T) {
+ subject := &ConfTestExecutorWorkflow{
+ VersionCache: mockCache,
+ DefaultConftestVersion: defaultVersion,
+ }
+
+ versionInput, _ := version.NewVersion("2.0")
+
+ When(mockCache.Get(versionInput)).ThenReturn(expectedPath, errors.New("some err"))
+
+ _, err := subject.EnsureExecutorVersion(log, versionInput)
+
+ // FIX: message previously read "path is expected" (copy-paste from the
+ // success cases) but this assertion checks that an error is returned.
+ Assert(t, err != nil, "error is expected")
+ })
+}
+
+// TestRun exercises ConfTestExecutorWorkflow.Run: argv construction from
+// resolved policy sets, graceful skipping of unresolvable sets, the
+// no-policies no-op path, and error propagation from the conftest process.
+func TestRun(t *testing.T) {
+
+ RegisterMockTestingT(t)
+ mockResolver := conftest_mocks.NewMockSourceResolver()
+ mockExec := models_mocks.NewMockExec()
+
+ subject := &ConfTestExecutorWorkflow{
+ SourceResolver: mockResolver,
+ Exec: mockExec,
+ }
+
+ policySetName1 := "policy1"
+ policySetPath1 := "/some/path"
+ localPolicySetPath1 := "/tmp/some/path"
+
+ policySetName2 := "policy2"
+ policySetPath2 := "/some/path2"
+ localPolicySetPath2 := "/tmp/some/path2"
+ executablePath := "/usr/bin/conftest"
+ envs := map[string]string{
+ "key": "val",
+ }
+ workdir := "/some_workdir"
+
+ policySet1 := valid.PolicySet{
+ Source: valid.LocalPolicySet,
+ Path: policySetPath1,
+ Name: policySetName1,
+ }
+
+ policySet2 := valid.PolicySet{
+ Source: valid.LocalPolicySet,
+ Path: policySetPath2,
+ Name: policySetName2,
+ }
+
+ ctx := models.ProjectCommandContext{
+ PolicySets: valid.PolicySets{
+ PolicySets: []valid.PolicySet{
+ policySet1,
+ policySet2,
+ },
+ },
+ ProjectName: "testproj",
+ Workspace: "default",
+ }
+
+ t.Run("success", func(t *testing.T) {
+
+ expectedOutput := "Success"
+ expectedResult := "Checking plan against the following policies: \n policy1\n policy2\nSuccess"
+ expectedArgs := []string{executablePath, "test", "-p", localPolicySetPath1, "-p", localPolicySetPath2, "/some_workdir/testproj-default.json", "--no-color", "--all-namespaces"}
+
+ When(mockResolver.Resolve(policySet1)).ThenReturn(localPolicySetPath1, nil)
+ When(mockResolver.Resolve(policySet2)).ThenReturn(localPolicySetPath2, nil)
+
+ When(mockExec.CombinedOutput(expectedArgs, envs, workdir)).ThenReturn(expectedOutput, nil)
+
+ result, err := subject.Run(ctx, executablePath, envs, workdir)
+
+ Ok(t, err)
+
+ Assert(t, result == expectedResult, "result is expected")
+
+ })
+
+ t.Run("error resolving one policy source", func(t *testing.T) {
+
+ expectedOutput := "Success"
+ expectedResult := "Checking plan against the following policies: \n policy1\nSuccess"
+ expectedArgs := []string{executablePath, "test", "-p", localPolicySetPath1, "/some_workdir/testproj-default.json", "--no-color", "--all-namespaces"}
+
+ When(mockResolver.Resolve(policySet1)).ThenReturn(localPolicySetPath1, nil)
+ When(mockResolver.Resolve(policySet2)).ThenReturn("", errors.New("err"))
+
+ When(mockExec.CombinedOutput(expectedArgs, envs, workdir)).ThenReturn(expectedOutput, nil)
+
+ result, err := subject.Run(ctx, executablePath, envs, workdir)
+
+ Ok(t, err)
+
+ Assert(t, result == expectedResult, "result is expected")
+
+ })
+
+ t.Run("error resolving both policy sources", func(t *testing.T) {
+
+ expectedResult := "Success"
+ expectedArgs := []string{executablePath, "test", "-p", localPolicySetPath1, "/some_workdir/testproj-default.json", "--no-color", "--all-namespaces"}
+
+ When(mockResolver.Resolve(policySet1)).ThenReturn("", errors.New("err"))
+ When(mockResolver.Resolve(policySet2)).ThenReturn("", errors.New("err"))
+
+ // NOTE(review): with both resolutions failing, Run returns early with ""
+ // before invoking the executor, so this stub is never reached; kept only
+ // to guard against a regression that would execute conftest anyway.
+ When(mockExec.CombinedOutput(expectedArgs, envs, workdir)).ThenReturn(expectedResult, nil)
+
+ result, err := subject.Run(ctx, executablePath, envs, workdir)
+
+ Ok(t, err)
+
+ Assert(t, result == "", "result is expected")
+
+ })
+
+ t.Run("error running cmd", func(t *testing.T) {
+ expectedOutput := "FAIL - /some_workdir/testproj-default.json - failure"
+ // NOTE(review): sanitizeOutput removes only the input-file path, leaving
+ // the surrounding spaces — confirm the expected string's spacing around
+ // the removed path matches the actual sanitized output.
+ expectedResult := "Checking plan against the following policies: \n policy1\n policy2\nFAIL - - failure"
+ expectedArgs := []string{executablePath, "test", "-p", localPolicySetPath1, "-p", localPolicySetPath2, "/some_workdir/testproj-default.json", "--no-color", "--all-namespaces"}
+
+ When(mockResolver.Resolve(policySet1)).ThenReturn(localPolicySetPath1, nil)
+ When(mockResolver.Resolve(policySet2)).ThenReturn(localPolicySetPath2, nil)
+
+ When(mockExec.CombinedOutput(expectedArgs, envs, workdir)).ThenReturn(expectedOutput, errors.New("exit status code 1"))
+
+ result, err := subject.Run(ctx, executablePath, envs, workdir)
+
+ // FIX: assertion message typo "rseult" -> "result"; also removed a
+ // leftover debug fmt.Println from the success subtest.
+ Assert(t, result == expectedResult, "result is expected")
+ Assert(t, err != nil, "error is expected")
+
+ })
+}
diff --git a/server/events/runtime/policy/mocks/matchers/valid_policyset.go b/server/events/runtime/policy/mocks/matchers/valid_policyset.go
new file mode 100644
index 0000000000..40e5b7da9b
--- /dev/null
+++ b/server/events/runtime/policy/mocks/matchers/valid_policyset.go
@@ -0,0 +1,20 @@
+// Code generated by pegomock. DO NOT EDIT.
+package matchers
+
+import (
+ "reflect"
+ "github.com/petergtz/pegomock"
+ valid "github.com/runatlantis/atlantis/server/events/yaml/valid"
+)
+
+func AnyValidPolicySet() valid.PolicySet {
+ pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(valid.PolicySet))(nil)).Elem()))
+ var nullValue valid.PolicySet
+ return nullValue
+}
+
+func EqValidPolicySet(value valid.PolicySet) valid.PolicySet {
+ pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value})
+ var nullValue valid.PolicySet
+ return nullValue
+}
diff --git a/server/events/runtime/policy/mocks/mock_conftest_client.go b/server/events/runtime/policy/mocks/mock_conftest_client.go
new file mode 100644
index 0000000000..47fd257bf4
--- /dev/null
+++ b/server/events/runtime/policy/mocks/mock_conftest_client.go
@@ -0,0 +1,109 @@
+// Code generated by pegomock. DO NOT EDIT.
+// Source: github.com/runatlantis/atlantis/server/events/runtime/policy (interfaces: SourceResolver)
+
+package mocks
+
+import (
+ pegomock "github.com/petergtz/pegomock"
+ valid "github.com/runatlantis/atlantis/server/events/yaml/valid"
+ "reflect"
+ "time"
+)
+
+type MockSourceResolver struct {
+ fail func(message string, callerSkip ...int)
+}
+
+func NewMockSourceResolver(options ...pegomock.Option) *MockSourceResolver {
+ mock := &MockSourceResolver{}
+ for _, option := range options {
+ option.Apply(mock)
+ }
+ return mock
+}
+
+func (mock *MockSourceResolver) SetFailHandler(fh pegomock.FailHandler) { mock.fail = fh }
+func (mock *MockSourceResolver) FailHandler() pegomock.FailHandler { return mock.fail }
+
+func (mock *MockSourceResolver) Resolve(policySet valid.PolicySet) (string, error) {
+ if mock == nil {
+ panic("mock must not be nil. Use myMock := NewMockSourceResolver().")
+ }
+ params := []pegomock.Param{policySet}
+ result := pegomock.GetGenericMockFrom(mock).Invoke("Resolve", params, []reflect.Type{reflect.TypeOf((*string)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()})
+ var ret0 string
+ var ret1 error
+ if len(result) != 0 {
+ if result[0] != nil {
+ ret0 = result[0].(string)
+ }
+ if result[1] != nil {
+ ret1 = result[1].(error)
+ }
+ }
+ return ret0, ret1
+}
+
+func (mock *MockSourceResolver) VerifyWasCalledOnce() *VerifierMockSourceResolver {
+ return &VerifierMockSourceResolver{
+ mock: mock,
+ invocationCountMatcher: pegomock.Times(1),
+ }
+}
+
+func (mock *MockSourceResolver) VerifyWasCalled(invocationCountMatcher pegomock.Matcher) *VerifierMockSourceResolver {
+ return &VerifierMockSourceResolver{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ }
+}
+
+func (mock *MockSourceResolver) VerifyWasCalledInOrder(invocationCountMatcher pegomock.Matcher, inOrderContext *pegomock.InOrderContext) *VerifierMockSourceResolver {
+ return &VerifierMockSourceResolver{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ inOrderContext: inOrderContext,
+ }
+}
+
+func (mock *MockSourceResolver) VerifyWasCalledEventually(invocationCountMatcher pegomock.Matcher, timeout time.Duration) *VerifierMockSourceResolver {
+ return &VerifierMockSourceResolver{
+ mock: mock,
+ invocationCountMatcher: invocationCountMatcher,
+ timeout: timeout,
+ }
+}
+
+type VerifierMockSourceResolver struct {
+ mock *MockSourceResolver
+ invocationCountMatcher pegomock.Matcher
+ inOrderContext *pegomock.InOrderContext
+ timeout time.Duration
+}
+
+func (verifier *VerifierMockSourceResolver) Resolve(policySet valid.PolicySet) *MockSourceResolver_Resolve_OngoingVerification {
+ params := []pegomock.Param{policySet}
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Resolve", params, verifier.timeout)
+ return &MockSourceResolver_Resolve_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations}
+}
+
+type MockSourceResolver_Resolve_OngoingVerification struct {
+ mock *MockSourceResolver
+ methodInvocations []pegomock.MethodInvocation
+}
+
+func (c *MockSourceResolver_Resolve_OngoingVerification) GetCapturedArguments() valid.PolicySet {
+ policySet := c.GetAllCapturedArguments()
+ return policySet[len(policySet)-1]
+}
+
+func (c *MockSourceResolver_Resolve_OngoingVerification) GetAllCapturedArguments() (_param0 []valid.PolicySet) {
+ params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations)
+ if len(params) > 0 {
+ _param0 = make([]valid.PolicySet, len(c.methodInvocations))
+ for u, param := range params[0] {
+ _param0[u] = param.(valid.PolicySet)
+ }
+ }
+ return
+}
diff --git a/server/events/runtime/policy_check_step_runner.go b/server/events/runtime/policy_check_step_runner.go
new file mode 100644
index 0000000000..29be03e322
--- /dev/null
+++ b/server/events/runtime/policy_check_step_runner.go
@@ -0,0 +1,38 @@
+package runtime
+
+import (
+ "github.com/hashicorp/go-version"
+ "github.com/pkg/errors"
+ "github.com/runatlantis/atlantis/server/events/models"
+)
+
+// PolicyCheckStepRunner runs a policy check command given a ctx
+type PolicyCheckStepRunner struct {
+ versionEnsurer ExecutorVersionEnsurer
+ executor Executor
+}
+
+// NewPolicyCheckStepRunner creates a new step runner from an executor workflow
+func NewPolicyCheckStepRunner(defaultTfVersion *version.Version, executorWorkflow VersionedExecutorWorkflow) (Runner, error) {
+
+ runner := &PlanTypeStepRunnerDelegate{
+ defaultRunner: &PolicyCheckStepRunner{
+ versionEnsurer: executorWorkflow,
+ executor: executorWorkflow,
+ },
+ remotePlanRunner: RemoteBackendUnsupportedRunner{},
+ }
+
+ return NewMinimumVersionStepRunnerDelegate(minimumShowTfVersion, defaultTfVersion, runner)
+}
+
+// Run ensures a given version for the executable, builds the args from the project context and then runs executable returning the result
+func (p *PolicyCheckStepRunner) Run(ctx models.ProjectCommandContext, extraArgs []string, path string, envs map[string]string) (string, error) {
+ executable, err := p.versionEnsurer.EnsureExecutorVersion(ctx.Log, ctx.PolicySets.Version)
+
+ if err != nil {
+ return "", errors.Wrapf(err, "ensuring policy executor version")
+ }
+
+ return p.executor.Run(ctx, executable, envs, path)
+}
diff --git a/server/events/runtime/policy_check_step_runner_test.go b/server/events/runtime/policy_check_step_runner_test.go
new file mode 100644
index 0000000000..a1b723af90
--- /dev/null
+++ b/server/events/runtime/policy_check_step_runner_test.go
@@ -0,0 +1,76 @@
+package runtime
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/hashicorp/go-version"
+ . "github.com/petergtz/pegomock"
+ "github.com/runatlantis/atlantis/server/events/models"
+ "github.com/runatlantis/atlantis/server/events/runtime/mocks"
+ "github.com/runatlantis/atlantis/server/events/yaml/valid"
+ "github.com/runatlantis/atlantis/server/logging"
+ . "github.com/runatlantis/atlantis/testing"
+)
+
+func TestRun(t *testing.T) {
+ RegisterMockTestingT(t)
+ logger := logging.NewNoopLogger()
+ workspace := "default"
+ v, _ := version.NewVersion("1.0")
+ workdir := "/path"
+ executablePath := "some/path/conftest"
+
+ context := models.ProjectCommandContext{
+ Log: logger,
+ EscapedCommentArgs: []string{"comment", "args"},
+ Workspace: workspace,
+ RepoRelDir: ".",
+ User: models.User{Username: "username"},
+ Pull: models.PullRequest{
+ Num: 2,
+ },
+ BaseRepo: models.Repo{
+ FullName: "owner/repo",
+ Owner: "owner",
+ Name: "repo",
+ },
+ PolicySets: valid.PolicySets{
+ Version: v,
+ PolicySets: []valid.PolicySet{},
+ },
+ }
+
+ executorWorkflow := mocks.NewMockVersionedExecutorWorkflow()
+ s := &PolicyCheckStepRunner{
+ versionEnsurer: executorWorkflow,
+ executor: executorWorkflow,
+ }
+
+ t.Run("success", func(t *testing.T) {
+ When(executorWorkflow.EnsureExecutorVersion(logger, v)).ThenReturn(executablePath, nil)
+ When(executorWorkflow.Run(context, executablePath, map[string]string(nil), workdir)).ThenReturn("Success!", nil)
+
+ output, err := s.Run(context, []string{"extra", "args"}, workdir, map[string]string(nil))
+
+ Ok(t, err)
+ Equals(t, "Success!", output)
+ })
+
+ t.Run("ensure version failure", func(t *testing.T) {
+ expectedErr := errors.New("error ensuring version")
+ When(executorWorkflow.EnsureExecutorVersion(logger, v)).ThenReturn("", expectedErr)
+
+ _, err := s.Run(context, []string{"extra", "args"}, workdir, map[string]string(nil))
+
+ Assert(t, err != nil, "error is not nil")
+ })
+ t.Run("executor failure", func(t *testing.T) {
+ When(executorWorkflow.EnsureExecutorVersion(logger, v)).ThenReturn(executablePath, nil)
+ When(executorWorkflow.Run(context, executablePath, map[string]string(nil), workdir)).ThenReturn("", errors.New("error running executor"))
+
+ _, err := s.Run(context, []string{"extra", "args"}, workdir, map[string]string(nil))
+
+ Assert(t, err != nil, "error is not nil")
+ })
+}
diff --git a/server/events/runtime/runtime.go b/server/events/runtime/runtime.go
index d671a40694..fb8656f5dd 100644
--- a/server/events/runtime/runtime.go
+++ b/server/events/runtime/runtime.go
@@ -3,6 +3,7 @@
package runtime
import (
+ "bytes"
"fmt"
"regexp"
"strings"
@@ -48,6 +49,12 @@ type StatusUpdater interface {
UpdateProject(ctx models.ProjectCommandContext, cmdName models.CommandName, status models.CommitStatus, url string) error
}
+//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_runner.go Runner
+// Runner mirrors events.StepRunner as a way to bring it into this package
+type Runner interface {
+ Run(ctx models.ProjectCommandContext, extraArgs []string, path string, envs map[string]string) (string, error)
+}
+
// MustConstraint returns a constraint. It panics on error.
func MustConstraint(constraint string) version.Constraints {
c, err := version.NewConstraint(constraint)
@@ -67,6 +74,15 @@ func GetPlanFilename(workspace string, projName string) string {
return fmt.Sprintf("%s-%s.tfplan", projName, workspace)
}
+// IsRemotePlan returns true if planContents are from a plan that was generated
+// using TFE remote operations.
+func IsRemotePlan(planContents []byte) bool {
+ // We add a header to plans generated by the remote backend so we can
+ // detect that they're remote in the apply phase.
+ remoteOpsHeaderBytes := []byte(remoteOpsHeader)
+ return bytes.HasPrefix(planContents, remoteOpsHeaderBytes)
+}
+
// ProjectNameFromPlanfile returns the project name that a planfile with name
// filename is for. If filename is for a project without a name then it will
// return an empty string. workspace is the workspace this project is in.
diff --git a/server/events/runtime/show_step_runner.go b/server/events/runtime/show_step_runner.go
new file mode 100644
index 0000000000..7464636ecd
--- /dev/null
+++ b/server/events/runtime/show_step_runner.go
@@ -0,0 +1,60 @@
+package runtime
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/hashicorp/go-version"
+ "github.com/pkg/errors"
+ "github.com/runatlantis/atlantis/server/events/models"
+)
+
+const minimumShowTfVersion string = "0.12.0"
+
+func NewShowStepRunner(executor TerraformExec, defaultTFVersion *version.Version) (Runner, error) {
+ runner := &PlanTypeStepRunnerDelegate{
+ defaultRunner: &ShowStepRunner{
+ TerraformExecutor: executor,
+ DefaultTFVersion: defaultTFVersion,
+ },
+ remotePlanRunner: NullRunner{},
+ }
+
+ return NewMinimumVersionStepRunnerDelegate(minimumShowTfVersion, defaultTFVersion, runner)
+}
+
+// ShowStepRunner runs terraform show on an existing plan file and outputs it to a json file
+type ShowStepRunner struct {
+ TerraformExecutor TerraformExec
+ DefaultTFVersion *version.Version
+}
+
+func (p *ShowStepRunner) Run(ctx models.ProjectCommandContext, extraArgs []string, path string, envs map[string]string) (string, error) {
+ tfVersion := p.DefaultTFVersion
+ if ctx.TerraformVersion != nil {
+ tfVersion = ctx.TerraformVersion
+ }
+
+ planFile := filepath.Join(path, GetPlanFilename(ctx.Workspace, ctx.ProjectName))
+ showResultFile := filepath.Join(path, ctx.GetShowResultFileName())
+
+ output, err := p.TerraformExecutor.RunCommandWithVersion(
+ ctx.Log,
+ path,
+ []string{"show", "-no-color", "-json", filepath.Clean(planFile)},
+ envs,
+ tfVersion,
+ ctx.Workspace,
+ )
+
+ if err != nil {
+ return "", errors.Wrap(err, "running terraform show")
+ }
+
+ if err := ioutil.WriteFile(showResultFile, []byte(output), os.ModePerm); err != nil {
+ return "", errors.Wrap(err, "writing terraform show result")
+ }
+
+ return output, nil
+}
diff --git a/server/events/runtime/show_step_runner_test.go b/server/events/runtime/show_step_runner_test.go
new file mode 100644
index 0000000000..977530278f
--- /dev/null
+++ b/server/events/runtime/show_step_runner_test.go
@@ -0,0 +1,94 @@
+package runtime
+
+import (
+ "errors"
+ "io/ioutil"
+ "path/filepath"
+ "testing"
+
+ "github.com/hashicorp/go-version"
+ . "github.com/petergtz/pegomock"
+ "github.com/runatlantis/atlantis/server/events/models"
+ "github.com/runatlantis/atlantis/server/events/terraform/mocks"
+ "github.com/runatlantis/atlantis/server/logging"
+ . "github.com/runatlantis/atlantis/testing"
+)
+
+func TestShowStepRunner(t *testing.T) {
+ logger := logging.NewNoopLogger()
+ path, _ := ioutil.TempDir("", "")
+ resultPath := filepath.Join(path, "test-default.json")
+ envs := map[string]string{"key": "val"}
+ tfVersion, _ := version.NewVersion("0.12")
+ context := models.ProjectCommandContext{
+ Workspace: "default",
+ ProjectName: "test",
+ Log: logger,
+ }
+
+ RegisterMockTestingT(t)
+
+ mockExecutor := mocks.NewMockClient()
+
+ subject := ShowStepRunner{
+ TerraformExecutor: mockExecutor,
+ DefaultTFVersion: tfVersion,
+ }
+
+ t.Run("success", func(t *testing.T) {
+
+ When(mockExecutor.RunCommandWithVersion(
+ logger, path, []string{"show", "-no-color", "-json", filepath.Join(path, "test-default.tfplan")}, envs, tfVersion, context.Workspace,
+ )).ThenReturn("success", nil)
+
+ r, err := subject.Run(context, []string{}, path, envs)
+
+ Ok(t, err)
+
+ actual, _ := ioutil.ReadFile(resultPath)
+
+ actualStr := string(actual)
+ Assert(t, actualStr == "success", "got expected result")
+ Assert(t, r == "success", "returned expected result")
+
+ })
+
+ t.Run("success w/ version override", func(t *testing.T) {
+
+ v, _ := version.NewVersion("0.13.0")
+
+ contextWithVersionOverride := models.ProjectCommandContext{
+ Workspace: "default",
+ ProjectName: "test",
+ Log: logger,
+ TerraformVersion: v,
+ }
+
+ When(mockExecutor.RunCommandWithVersion(
+ logger, path, []string{"show", "-no-color", "-json", filepath.Join(path, "test-default.tfplan")}, envs, v, context.Workspace,
+ )).ThenReturn("success", nil)
+
+ r, err := subject.Run(contextWithVersionOverride, []string{}, path, envs)
+
+ Ok(t, err)
+
+ actual, _ := ioutil.ReadFile(resultPath)
+
+ actualStr := string(actual)
+ Assert(t, actualStr == "success", "got expected result")
+ Assert(t, r == "success", "returned expected result")
+
+ })
+
+ t.Run("failure running command", func(t *testing.T) {
+ When(mockExecutor.RunCommandWithVersion(
+ logger, path, []string{"show", "-no-color", "-json", filepath.Join(path, "test-default.tfplan")}, envs, tfVersion, context.Workspace,
+ )).ThenReturn("success", errors.New("error"))
+
+ _, err := subject.Run(context, []string{}, path, envs)
+
+ Assert(t, err != nil, "error is returned")
+
+ })
+
+}
diff --git a/server/events/terraform/mocks/mock_downloader.go b/server/events/terraform/mocks/mock_downloader.go
index 30118bd7fa..6ab9467e53 100644
--- a/server/events/terraform/mocks/mock_downloader.go
+++ b/server/events/terraform/mocks/mock_downloader.go
@@ -43,6 +43,24 @@ func (mock *MockDownloader) GetFile(dst string, src string, opts ...go_getter.Cl
return ret0
}
+func (mock *MockDownloader) GetAny(dst string, src string, opts ...go_getter.ClientOption) error {
+ if mock == nil {
+ panic("mock must not be nil. Use myMock := NewMockDownloader().")
+ }
+ params := []pegomock.Param{dst, src}
+ for _, param := range opts {
+ params = append(params, param)
+ }
+ result := pegomock.GetGenericMockFrom(mock).Invoke("GetAny", params, []reflect.Type{reflect.TypeOf((*error)(nil)).Elem()})
+ var ret0 error
+ if len(result) != 0 {
+ if result[0] != nil {
+ ret0 = result[0].(error)
+ }
+ }
+ return ret0
+}
+
func (mock *MockDownloader) VerifyWasCalledOnce() *VerifierMockDownloader {
return &VerifierMockDownloader{
mock: mock,
@@ -122,3 +140,46 @@ func (c *MockDownloader_GetFile_OngoingVerification) GetAllCapturedArguments() (
}
return
}
+
+func (verifier *VerifierMockDownloader) GetAny(dst string, src string, opts ...go_getter.ClientOption) *MockDownloader_GetAny_OngoingVerification {
+ params := []pegomock.Param{dst, src}
+ for _, param := range opts {
+ params = append(params, param)
+ }
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "GetAny", params, verifier.timeout)
+ return &MockDownloader_GetAny_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations}
+}
+
+type MockDownloader_GetAny_OngoingVerification struct {
+ mock *MockDownloader
+ methodInvocations []pegomock.MethodInvocation
+}
+
+func (c *MockDownloader_GetAny_OngoingVerification) GetCapturedArguments() (string, string, []go_getter.ClientOption) {
+ dst, src, opts := c.GetAllCapturedArguments()
+ return dst[len(dst)-1], src[len(src)-1], opts[len(opts)-1]
+}
+
+func (c *MockDownloader_GetAny_OngoingVerification) GetAllCapturedArguments() (_param0 []string, _param1 []string, _param2 [][]go_getter.ClientOption) {
+ params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations)
+ if len(params) > 0 {
+ _param0 = make([]string, len(c.methodInvocations))
+ for u, param := range params[0] {
+ _param0[u] = param.(string)
+ }
+ _param1 = make([]string, len(c.methodInvocations))
+ for u, param := range params[1] {
+ _param1[u] = param.(string)
+ }
+ _param2 = make([][]go_getter.ClientOption, len(c.methodInvocations))
+ for u := 0; u < len(c.methodInvocations); u++ {
+ _param2[u] = make([]go_getter.ClientOption, len(params)-2)
+ for x := 2; x < len(params); x++ {
+ if params[x][u] != nil {
+ _param2[u][x-2] = params[x][u].(go_getter.ClientOption)
+ }
+ }
+ }
+ }
+ return
+}
diff --git a/server/events/terraform/terraform_client.go b/server/events/terraform/terraform_client.go
index 5b0c1f6cc5..4f0310ec86 100644
--- a/server/events/terraform/terraform_client.go
+++ b/server/events/terraform/terraform_client.go
@@ -49,7 +49,9 @@ type Client interface {
type DefaultClient struct {
// defaultVersion is the default version of terraform to use if another
// version isn't specified.
- defaultVersion *version.Version
+ defaultVersion *version.Version
+ // We will run terraform with the TF_PLUGIN_CACHE_DIR env var set to this
+ // directory inside our data dir.
terraformPluginCacheDir string
binDir string
// overrideTF can be used to override the terraform binary during testing
@@ -75,17 +77,9 @@ type DefaultClient struct {
// Downloader is for downloading terraform versions.
type Downloader interface {
GetFile(dst, src string, opts ...getter.ClientOption) error
+ GetAny(dst, src string, opts ...getter.ClientOption) error
}
-const (
- // terraformPluginCacheDir is the name of the dir inside our data dir
- // where we tell terraform to cache plugins and modules.
- terraformPluginCacheDirName = "plugin-cache"
- // binDirName is the name of the directory inside our data dir where
- // we download terraform binaries.
- binDirName = "bin"
-)
-
// versionRegex extracts the version from `terraform version` output.
// Terraform v0.12.0-alpha4 (2c36829d3265661d8edbd5014de8090ea7e2a076)
// => 0.12.0-alpha4
@@ -104,7 +98,8 @@ var versionRegex = regexp.MustCompile("Terraform v(.*?)(\\s.*)?\n")
// Will asynchronously download the required version if it doesn't exist already.
func NewClient(
log *logging.SimpleLogger,
- dataDir string,
+ binDir string,
+ cacheDir string,
tfeToken string,
tfeHostname string,
defaultVersionStr string,
@@ -134,11 +129,6 @@ func NewClient(
}
}
- binDir := filepath.Join(dataDir, binDirName)
- if err := os.MkdirAll(binDir, 0700); err != nil {
- return nil, errors.Wrapf(err, "unable to create terraform bin dir %q", binDir)
- }
-
if defaultVersionStr != "" {
defaultVersion, err := version.NewVersion(defaultVersionStr)
if err != nil {
@@ -168,13 +158,6 @@ func NewClient(
}
}
- // We will run terraform with the TF_PLUGIN_CACHE_DIR env var set to this
- // directory inside our data dir.
- cacheDir := filepath.Join(dataDir, terraformPluginCacheDirName)
- if err := os.MkdirAll(cacheDir, 0700); err != nil {
- return nil, errors.Wrapf(err, "unable to create terraform plugin cache directory at %q", terraformPluginCacheDirName)
- }
-
return &DefaultClient{
defaultVersion: finalDefaultVersion,
terraformPluginCacheDir: cacheDir,
@@ -490,3 +473,8 @@ type DefaultDownloader struct{}
func (d *DefaultDownloader) GetFile(dst, src string, opts ...getter.ClientOption) error {
return getter.GetFile(dst, src, opts...)
}
+
+// See go-getter.GetAny.
+func (d *DefaultDownloader) GetAny(dst, src string, opts ...getter.ClientOption) error {
+ return getter.GetAny(dst, src, opts...)
+}
diff --git a/server/events/terraform/terraform_client_test.go b/server/events/terraform/terraform_client_test.go
index 18ac5525b7..4b0f7ea0bd 100644
--- a/server/events/terraform/terraform_client_test.go
+++ b/server/events/terraform/terraform_client_test.go
@@ -59,7 +59,7 @@ func TestNewClient_LocalTFOnly(t *testing.T) {
Your version of Terraform is out of date! The latest version
is 0.11.13. You can update by downloading from www.terraform.io/downloads.html
`
- tmp, cleanup := TempDir(t)
+ tmp, binDir, cacheDir, cleanup := mkSubDirs(t)
defer cleanup()
// We're testing this by adding our own "fake" terraform binary to path that
@@ -68,7 +68,7 @@ is 0.11.13. You can update by downloading from www.terraform.io/downloads.html
Ok(t, err)
defer tempSetEnv(t, "PATH", fmt.Sprintf("%s:%s", tmp, os.Getenv("PATH")))()
- c, err := terraform.NewClient(nil, tmp, "", "", "", cmd.DefaultTFVersionFlag, cmd.DefaultTFDownloadURL, nil, true)
+ c, err := terraform.NewClient(nil, binDir, cacheDir, "", "", "", cmd.DefaultTFVersionFlag, cmd.DefaultTFDownloadURL, nil, true)
Ok(t, err)
Ok(t, err)
@@ -87,7 +87,7 @@ func TestNewClient_LocalTFMatchesFlag(t *testing.T) {
Your version of Terraform is out of date! The latest version
is 0.11.13. You can update by downloading from www.terraform.io/downloads.html
`
- tmp, cleanup := TempDir(t)
+ tmp, binDir, cacheDir, cleanup := mkSubDirs(t)
defer cleanup()
// We're testing this by adding our own "fake" terraform binary to path that
@@ -96,7 +96,7 @@ is 0.11.13. You can update by downloading from www.terraform.io/downloads.html
Ok(t, err)
defer tempSetEnv(t, "PATH", fmt.Sprintf("%s:%s", tmp, os.Getenv("PATH")))()
- c, err := terraform.NewClient(nil, tmp, "", "", "0.11.10", cmd.DefaultTFVersionFlag, cmd.DefaultTFDownloadURL, nil, true)
+ c, err := terraform.NewClient(nil, binDir, cacheDir, "", "", "0.11.10", cmd.DefaultTFVersionFlag, cmd.DefaultTFDownloadURL, nil, true)
Ok(t, err)
Ok(t, err)
@@ -110,13 +110,13 @@ is 0.11.13. You can update by downloading from www.terraform.io/downloads.html
// Test that if terraform is not in PATH and we didn't set the default-tf flag
// that we error.
func TestNewClient_NoTF(t *testing.T) {
- tmp, cleanup := TempDir(t)
+ tmp, binDir, cacheDir, cleanup := mkSubDirs(t)
defer cleanup()
// Set PATH to only include our empty directory.
defer tempSetEnv(t, "PATH", tmp)()
- _, err := terraform.NewClient(nil, tmp, "", "", "", cmd.DefaultTFVersionFlag, cmd.DefaultTFDownloadURL, nil, true)
+ _, err := terraform.NewClient(nil, binDir, cacheDir, "", "", "", cmd.DefaultTFVersionFlag, cmd.DefaultTFDownloadURL, nil, true)
ErrEquals(t, "terraform not found in $PATH. Set --default-tf-version or download terraform from https://www.terraform.io/downloads.html", err)
}
@@ -124,7 +124,7 @@ func TestNewClient_NoTF(t *testing.T) {
// that we use it.
func TestNewClient_DefaultTFFlagInPath(t *testing.T) {
fakeBinOut := "Terraform v0.11.10\n"
- tmp, cleanup := TempDir(t)
+ tmp, binDir, cacheDir, cleanup := mkSubDirs(t)
defer cleanup()
// We're testing this by adding our own "fake" terraform binary to path that
@@ -133,7 +133,7 @@ func TestNewClient_DefaultTFFlagInPath(t *testing.T) {
Ok(t, err)
defer tempSetEnv(t, "PATH", fmt.Sprintf("%s:%s", tmp, os.Getenv("PATH")))()
- c, err := terraform.NewClient(nil, tmp, "", "", "0.11.10", cmd.DefaultTFVersionFlag, cmd.DefaultTFDownloadURL, nil, true)
+ c, err := terraform.NewClient(nil, binDir, cacheDir, "", "", "0.11.10", cmd.DefaultTFVersionFlag, cmd.DefaultTFDownloadURL, nil, true)
Ok(t, err)
Ok(t, err)
@@ -148,16 +148,15 @@ func TestNewClient_DefaultTFFlagInPath(t *testing.T) {
// bin dir that we use it.
func TestNewClient_DefaultTFFlagInBinDir(t *testing.T) {
fakeBinOut := "Terraform v0.11.10\n"
- tmp, cleanup := TempDir(t)
+ tmp, binDir, cacheDir, cleanup := mkSubDirs(t)
defer cleanup()
// Add our fake binary to {datadir}/bin/terraform{version}.
- Ok(t, os.Mkdir(filepath.Join(tmp, "bin"), 0700))
- err := ioutil.WriteFile(filepath.Join(tmp, "bin", "terraform0.11.10"), []byte(fmt.Sprintf("#!/bin/sh\necho '%s'", fakeBinOut)), 0755)
+ err := ioutil.WriteFile(filepath.Join(binDir, "terraform0.11.10"), []byte(fmt.Sprintf("#!/bin/sh\necho '%s'", fakeBinOut)), 0755)
Ok(t, err)
defer tempSetEnv(t, "PATH", fmt.Sprintf("%s:%s", tmp, os.Getenv("PATH")))()
- c, err := terraform.NewClient(logging.NewNoopLogger(), tmp, "", "", "0.11.10", cmd.DefaultTFVersionFlag, cmd.DefaultTFDownloadURL, nil, true)
+ c, err := terraform.NewClient(logging.NewNoopLogger(), binDir, cacheDir, "", "", "0.11.10", cmd.DefaultTFVersionFlag, cmd.DefaultTFDownloadURL, nil, true)
Ok(t, err)
Ok(t, err)
@@ -171,7 +170,7 @@ func TestNewClient_DefaultTFFlagInBinDir(t *testing.T) {
// Test that if we don't have that version of TF that we download it.
func TestNewClient_DefaultTFFlagDownload(t *testing.T) {
RegisterMockTestingT(t)
- tmp, cleanup := TempDir(t)
+ tmp, binDir, cacheDir, cleanup := mkSubDirs(t)
defer cleanup()
// Set PATH to empty so there's no TF available.
@@ -183,7 +182,7 @@ func TestNewClient_DefaultTFFlagDownload(t *testing.T) {
err := ioutil.WriteFile(params[0].(string), []byte("#!/bin/sh\necho '\nTerraform v0.11.10\n'"), 0755)
return []pegomock.ReturnValue{err}
})
- c, err := terraform.NewClient(nil, tmp, "", "", "0.11.10", cmd.DefaultTFVersionFlag, "https://my-mirror.releases.mycompany.com", mockDownloader, true)
+ c, err := terraform.NewClient(nil, binDir, cacheDir, "", "", "0.11.10", cmd.DefaultTFVersionFlag, "https://my-mirror.releases.mycompany.com", mockDownloader, true)
Ok(t, err)
Ok(t, err)
@@ -205,16 +204,16 @@ func TestNewClient_DefaultTFFlagDownload(t *testing.T) {
// Test that we get an error if the terraform version flag is malformed.
func TestNewClient_BadVersion(t *testing.T) {
- tmp, cleanup := TempDir(t)
+ _, binDir, cacheDir, cleanup := mkSubDirs(t)
defer cleanup()
- _, err := terraform.NewClient(nil, tmp, "", "", "malformed", cmd.DefaultTFVersionFlag, cmd.DefaultTFDownloadURL, nil, true)
+ _, err := terraform.NewClient(nil, binDir, cacheDir, "", "", "malformed", cmd.DefaultTFVersionFlag, cmd.DefaultTFDownloadURL, nil, true)
ErrEquals(t, "Malformed version: malformed", err)
}
// Test that if we run a command with a version we don't have, we download it.
func TestRunCommandWithVersion_DLsTF(t *testing.T) {
RegisterMockTestingT(t)
- tmp, cleanup := TempDir(t)
+ tmp, binDir, cacheDir, cleanup := mkSubDirs(t)
defer cleanup()
mockDownloader := mocks.NewMockDownloader()
@@ -230,7 +229,7 @@ func TestRunCommandWithVersion_DLsTF(t *testing.T) {
return []pegomock.ReturnValue{err}
})
- c, err := terraform.NewClient(nil, tmp, "", "", "0.11.10", cmd.DefaultTFVersionFlag, cmd.DefaultTFDownloadURL, mockDownloader, true)
+ c, err := terraform.NewClient(nil, binDir, cacheDir, "", "", "0.11.10", cmd.DefaultTFVersionFlag, cmd.DefaultTFDownloadURL, mockDownloader, true)
Ok(t, err)
Equals(t, "0.11.10", c.DefaultVersion().String())
@@ -244,12 +243,12 @@ func TestRunCommandWithVersion_DLsTF(t *testing.T) {
// Test the EnsureVersion downloads terraform.
func TestEnsureVersion_downloaded(t *testing.T) {
RegisterMockTestingT(t)
- tmp, cleanup := TempDir(t)
+ tmp, binDir, cacheDir, cleanup := mkSubDirs(t)
defer cleanup()
mockDownloader := mocks.NewMockDownloader()
- c, err := terraform.NewClient(nil, tmp, "", "", "0.11.10", cmd.DefaultTFVersionFlag, cmd.DefaultTFDownloadURL, mockDownloader, true)
+ c, err := terraform.NewClient(nil, binDir, cacheDir, "", "", "0.11.10", cmd.DefaultTFVersionFlag, cmd.DefaultTFDownloadURL, mockDownloader, true)
Ok(t, err)
Equals(t, "0.11.10", c.DefaultVersion().String())
@@ -277,3 +276,17 @@ func tempSetEnv(t *testing.T, key string, value string) func() {
Ok(t, os.Setenv(key, value))
return func() { os.Setenv(key, orig) }
}
+
+// returns parent, bindir, cachedir, cleanup func
+func mkSubDirs(t *testing.T) (string, string, string, func()) {
+ tmp, cleanup := TempDir(t)
+ binDir := filepath.Join(tmp, "bin")
+ err := os.MkdirAll(binDir, 0700)
+ Ok(t, err)
+
+ cachedir := filepath.Join(tmp, "plugin-cache")
+ err = os.MkdirAll(cachedir, 0700)
+ Ok(t, err)
+
+ return tmp, binDir, cachedir, cleanup
+}
diff --git a/server/events/unlock_command_runner.go b/server/events/unlock_command_runner.go
new file mode 100644
index 0000000000..733534a636
--- /dev/null
+++ b/server/events/unlock_command_runner.go
@@ -0,0 +1,39 @@
+package events
+
+import (
+ "github.com/runatlantis/atlantis/server/events/models"
+ "github.com/runatlantis/atlantis/server/events/vcs"
+)
+
+func NewUnlockCommandRunner(
+ deleteLockCommand DeleteLockCommand,
+ vcsClient vcs.Client,
+) *UnlockCommandRunner {
+ return &UnlockCommandRunner{
+ deleteLockCommand: deleteLockCommand,
+ vcsClient: vcsClient,
+ }
+}
+
+type UnlockCommandRunner struct {
+ vcsClient vcs.Client
+ deleteLockCommand DeleteLockCommand
+}
+
+func (u *UnlockCommandRunner) Run(
+ ctx *CommandContext,
+ cmd *CommentCommand,
+) {
+ baseRepo := ctx.Pull.BaseRepo
+ pullNum := ctx.Pull.Num
+
+ vcsMessage := "All Atlantis locks for this PR have been unlocked and plans discarded"
+ err := u.deleteLockCommand.DeleteLocksByPull(baseRepo.FullName, pullNum)
+ if err != nil {
+ vcsMessage = "Failed to delete PR locks"
+ ctx.Log.Err("failed to delete locks by pull %s", err.Error())
+ }
+ if commentErr := u.vcsClient.CreateComment(baseRepo, pullNum, vcsMessage, models.UnlockCommand.String()); commentErr != nil {
+ ctx.Log.Err("unable to comment: %s", commentErr)
+ }
+}
diff --git a/server/events/yaml/parser_validator_test.go b/server/events/yaml/parser_validator_test.go
index 9339481e32..92a1bf3470 100644
--- a/server/events/yaml/parser_validator_test.go
+++ b/server/events/yaml/parser_validator_test.go
@@ -156,8 +156,9 @@ workflows:
Version: 2,
Workflows: map[string]valid.Workflow{
"custom": {
- Name: "custom",
- Apply: valid.DefaultApplyStage,
+ Name: "custom",
+ Apply: valid.DefaultApplyStage,
+ PolicyCheck: valid.DefaultPolicyCheckStage,
Plan: valid.Stage{
Steps: []valid.Step{
{
@@ -333,9 +334,10 @@ workflows:
},
Workflows: map[string]valid.Workflow{
"default": {
- Name: "default",
- Plan: valid.DefaultPlanStage,
- Apply: valid.DefaultApplyStage,
+ Name: "default",
+ Plan: valid.DefaultPlanStage,
+ Apply: valid.DefaultApplyStage,
+ PolicyCheck: valid.DefaultPolicyCheckStage,
},
},
},
@@ -369,9 +371,10 @@ workflows:
},
Workflows: map[string]valid.Workflow{
"myworkflow": {
- Name: "myworkflow",
- Apply: valid.DefaultApplyStage,
- Plan: valid.DefaultPlanStage,
+ Name: "myworkflow",
+ Apply: valid.DefaultApplyStage,
+ Plan: valid.DefaultPlanStage,
+ PolicyCheck: valid.DefaultPolicyCheckStage,
},
},
},
@@ -407,9 +410,10 @@ workflows:
},
Workflows: map[string]valid.Workflow{
"myworkflow": {
- Name: "myworkflow",
- Apply: valid.DefaultApplyStage,
- Plan: valid.DefaultPlanStage,
+ Name: "myworkflow",
+ Apply: valid.DefaultApplyStage,
+ Plan: valid.DefaultPlanStage,
+ PolicyCheck: valid.DefaultPolicyCheckStage,
},
},
},
@@ -445,9 +449,10 @@ workflows:
},
Workflows: map[string]valid.Workflow{
"myworkflow": {
- Name: "myworkflow",
- Apply: valid.DefaultApplyStage,
- Plan: valid.DefaultPlanStage,
+ Name: "myworkflow",
+ Apply: valid.DefaultApplyStage,
+ Plan: valid.DefaultPlanStage,
+ PolicyCheck: valid.DefaultPolicyCheckStage,
},
},
},
@@ -483,9 +488,10 @@ workflows:
},
Workflows: map[string]valid.Workflow{
"myworkflow": {
- Name: "myworkflow",
- Apply: valid.DefaultApplyStage,
- Plan: valid.DefaultPlanStage,
+ Name: "myworkflow",
+ Apply: valid.DefaultApplyStage,
+ Plan: valid.DefaultPlanStage,
+ PolicyCheck: valid.DefaultPolicyCheckStage,
},
},
},
@@ -618,6 +624,10 @@ workflows:
steps:
- init
- plan
+ policy_check:
+ steps:
+ - init
+ - policy_check
apply:
steps:
- plan # NOTE: we don't validate if they make sense
@@ -648,6 +658,16 @@ workflows:
},
},
},
+ PolicyCheck: valid.Stage{
+ Steps: []valid.Step{
+ {
+ StepName: "init",
+ },
+ {
+ StepName: "policy_check",
+ },
+ },
+ },
Apply: valid.Stage{
Steps: []valid.Step{
{
@@ -678,6 +698,11 @@ workflows:
extra_args:
- arg1
- arg2
+ policy_check:
+ steps:
+ - policy_check:
+ extra_args:
+ - arg1
apply:
steps:
- plan:
@@ -712,6 +737,14 @@ workflows:
},
},
},
+ PolicyCheck: valid.Stage{
+ Steps: []valid.Step{
+ {
+ StepName: "policy_check",
+ ExtraArgs: []string{"arg1"},
+ },
+ },
+ },
Apply: valid.Stage{
Steps: []valid.Step{
{
@@ -739,6 +772,9 @@ workflows:
plan:
steps:
- run: "echo \"plan hi\""
+ policy_check:
+ steps:
+ - run: "echo \"opa hi\""
apply:
steps:
- run: echo apply "arg 2"
@@ -766,6 +802,14 @@ workflows:
},
},
},
+ PolicyCheck: valid.Stage{
+ Steps: []valid.Step{
+ {
+ StepName: "run",
+ RunCommand: "echo \"opa hi\"",
+ },
+ },
+ },
Apply: valid.Stage{
Steps: []valid.Step{
{
@@ -791,6 +835,11 @@ workflows:
- env:
name: env_name
value: env_value
+ policy_check:
+ steps:
+ - env:
+ name: env_name
+ value: env_value
apply:
steps:
- env:
@@ -821,6 +870,15 @@ workflows:
},
},
},
+ PolicyCheck: valid.Stage{
+ Steps: []valid.Step{
+ {
+ StepName: "env",
+ EnvVarName: "env_name",
+ EnvVarValue: "env_value",
+ },
+ },
+ },
Apply: valid.Stage{
Steps: []valid.Step{
{
@@ -908,6 +966,21 @@ func TestParseGlobalCfg(t *testing.T) {
},
},
},
+ PolicyCheck: valid.Stage{
+ Steps: []valid.Step{
+ {
+ StepName: "run",
+ RunCommand: "custom command",
+ },
+ {
+ StepName: "plan",
+ ExtraArgs: []string{"extra", "args"},
+ },
+ {
+ StepName: "policy_check",
+ },
+ },
+ },
Apply: valid.Stage{
Steps: []valid.Step{
{
@@ -921,6 +994,8 @@ func TestParseGlobalCfg(t *testing.T) {
},
}
+ conftestVersion, _ := version.NewVersion("v1.0.0")
+
cases := map[string]struct {
input string
expErr string
@@ -979,9 +1054,10 @@ workflows:
Workflows: map[string]valid.Workflow{
"default": defaultCfg.Workflows["default"],
"name": {
- Name: "name",
- Apply: valid.DefaultApplyStage,
- Plan: valid.DefaultPlanStage,
+ Name: "name",
+ Apply: valid.DefaultApplyStage,
+ Plan: valid.DefaultPlanStage,
+ PolicyCheck: valid.DefaultPolicyCheckStage,
},
},
},
@@ -998,9 +1074,10 @@ workflows:
Workflows: map[string]valid.Workflow{
"default": defaultCfg.Workflows["default"],
"name": {
- Name: "name",
- Apply: valid.DefaultApplyStage,
- Plan: valid.DefaultPlanStage,
+ Name: "name",
+ Apply: valid.DefaultApplyStage,
+ Plan: valid.DefaultPlanStage,
+ PolicyCheck: valid.DefaultPolicyCheckStage,
},
},
},
@@ -1018,9 +1095,10 @@ workflows:
Workflows: map[string]valid.Workflow{
"default": defaultCfg.Workflows["default"],
"name": {
- Name: "name",
- Plan: valid.DefaultPlanStage,
- Apply: valid.DefaultApplyStage,
+ Name: "name",
+ Plan: valid.DefaultPlanStage,
+ PolicyCheck: valid.DefaultPolicyCheckStage,
+ Apply: valid.DefaultApplyStage,
},
},
},
@@ -1047,10 +1125,22 @@ workflows:
- init:
extra_args: [extra, args]
- plan
+ policy_check:
+ steps:
+ - run: custom command
+ - plan:
+ extra_args: [extra, args]
+ - policy_check
apply:
steps:
- run: custom command
- apply
+policies:
+ conftest_version: v1.0.0
+ policy_sets:
+ - name: good-policy
+ path: rel/path/to/policy
+ source: local
`,
exp: valid.GlobalCfg{
Repos: []valid.Repo{
@@ -1072,6 +1162,16 @@ workflows:
"default": defaultCfg.Workflows["default"],
"custom1": customWorkflow1,
},
+ PolicySets: valid.PolicySets{
+ Version: conftestVersion,
+ PolicySets: []valid.PolicySet{
+ {
+ Name: "good-policy",
+ Path: "rel/path/to/policy",
+ Source: valid.LocalPolicySet,
+ },
+ },
+ },
},
},
"id regex with trailing slash": {
@@ -1119,6 +1219,8 @@ workflows:
plan:
steps:
- run: custom
+ policy_check:
+ steps: []
apply:
steps: []
`,
@@ -1133,6 +1235,9 @@ workflows:
Apply: valid.Stage{
Steps: nil,
},
+ PolicyCheck: valid.Stage{
+ Steps: nil,
+ },
Plan: valid.Stage{
Steps: []valid.Step{
{
@@ -1166,6 +1271,7 @@ workflows:
},
},
}
+
for name, c := range cases {
t.Run(name, func(t *testing.T) {
r := yaml.ParserValidator{}
@@ -1182,6 +1288,11 @@ workflows:
return
}
Ok(t, err)
+
+ if !act.PolicySets.HasPolicies() {
+ c.exp.PolicySets = act.PolicySets
+ }
+
Equals(t, c.exp, act)
// Have to hand-compare regexes because Equals doesn't do it.
for i, actRepo := range act.Repos {
@@ -1214,6 +1325,17 @@ func TestParserValidator_ParseGlobalCfgJSON(t *testing.T) {
},
},
},
+ PolicyCheck: valid.Stage{
+ Steps: []valid.Step{
+ {
+ StepName: "plan",
+ },
+ {
+ StepName: "run",
+ RunCommand: "custom policy_check",
+ },
+ },
+ },
Apply: valid.Stage{
Steps: []valid.Step{
{
@@ -1224,6 +1346,8 @@ func TestParserValidator_ParseGlobalCfgJSON(t *testing.T) {
},
}
+ conftestVersion, _ := version.NewVersion("v1.0.0")
+
cases := map[string]struct {
json string
exp valid.GlobalCfg
@@ -1262,12 +1386,28 @@ func TestParserValidator_ParseGlobalCfgJSON(t *testing.T) {
{"run": "custom plan"}
]
},
+ "policy_check": {
+ "steps": [
+ "plan",
+ {"run": "custom policy_check"}
+ ]
+ },
"apply": {
"steps": [
{"run": "my custom command"}
]
}
}
+ },
+ "policies": {
+ "conftest_version": "v1.0.0",
+ "policy_sets": [
+ {
+ "name": "good-policy",
+ "source": "local",
+ "path": "rel/path/to/policy"
+ }
+ ]
}
}
`,
@@ -1296,6 +1436,16 @@ func TestParserValidator_ParseGlobalCfgJSON(t *testing.T) {
"default": valid.NewGlobalCfg(false, false, false).Workflows["default"],
"custom": customWorkflow,
},
+ PolicySets: valid.PolicySets{
+ Version: conftestVersion,
+ PolicySets: []valid.PolicySet{
+ {
+ Name: "good-policy",
+ Path: "rel/path/to/policy",
+ Source: valid.LocalPolicySet,
+ },
+ },
+ },
},
},
}
@@ -1308,6 +1458,11 @@ func TestParserValidator_ParseGlobalCfgJSON(t *testing.T) {
return
}
Ok(t, err)
+
+ if !cfg.PolicySets.HasPolicies() {
+ c.exp.PolicySets = cfg.PolicySets
+ }
+
Equals(t, c.exp, cfg)
})
}
diff --git a/server/events/yaml/raw/global_cfg.go b/server/events/yaml/raw/global_cfg.go
index f53cdbcd98..7d5157a757 100644
--- a/server/events/yaml/raw/global_cfg.go
+++ b/server/events/yaml/raw/global_cfg.go
@@ -12,8 +12,9 @@ import (
// GlobalCfg is the raw schema for server-side repo config.
type GlobalCfg struct {
- Repos []Repo `yaml:"repos" json:"repos"`
- Workflows map[string]Workflow `yaml:"workflows" json:"workflows"`
+ Repos []Repo `yaml:"repos" json:"repos"`
+ Workflows map[string]Workflow `yaml:"workflows" json:"workflows"`
+ PolicySets PolicySets `yaml:"policies" json:"policies"`
}
// Repo is the raw schema for repos in the server-side repo config.
@@ -106,9 +107,11 @@ func (g GlobalCfg) ToValid(defaultCfg valid.GlobalCfg) valid.GlobalCfg {
repos = append(repos, r.ToValid(workflows))
}
repos = append(defaultCfg.Repos, repos...)
+
return valid.GlobalCfg{
- Repos: repos,
- Workflows: workflows,
+ Repos: repos,
+ Workflows: workflows,
+ PolicySets: g.PolicySets.ToValid(),
}
}
diff --git a/server/events/yaml/raw/policies.go b/server/events/yaml/raw/policies.go
new file mode 100644
index 0000000000..2506cd66a6
--- /dev/null
+++ b/server/events/yaml/raw/policies.go
@@ -0,0 +1,79 @@
+package raw
+
+import (
+ validation "github.com/go-ozzo/ozzo-validation"
+ "github.com/hashicorp/go-version"
+ "github.com/runatlantis/atlantis/server/events/yaml/valid"
+)
+
+// PolicySets is the raw schema for the policies config block, declaring the conftest version and the policy sets to enforce.
+type PolicySets struct {
+ Version *string `yaml:"conftest_version,omitempty" json:"conftest_version,omitempty"`
+ Owners PolicyOwners `yaml:"owners,omitempty" json:"owners,omitempty"`
+ PolicySets []PolicySet `yaml:"policy_sets" json:"policy_sets"`
+}
+
+func (p PolicySets) Validate() error {
+ return validation.ValidateStruct(&p,
+ validation.Field(&p.Version, validation.By(VersionValidator)),
+ validation.Field(&p.PolicySets, validation.Required.Error("cannot be empty; Declare policies that you would like to enforce")),
+ )
+}
+
+func (p PolicySets) ToValid() valid.PolicySets {
+ policySets := valid.PolicySets{}
+
+ if p.Version != nil {
+ policySets.Version, _ = version.NewVersion(*p.Version)
+ }
+
+ policySets.Owners = p.Owners.ToValid()
+
+ validPolicySets := make([]valid.PolicySet, 0)
+ for _, rawPolicySet := range p.PolicySets {
+ validPolicySets = append(validPolicySets, rawPolicySet.ToValid())
+ }
+ policySets.PolicySets = validPolicySets
+
+ return policySets
+}
+
+type PolicyOwners struct {
+ Users []string `yaml:"users,omitempty" json:"users,omitempty"`
+}
+
+func (o PolicyOwners) ToValid() valid.PolicyOwners {
+ var policyOwners valid.PolicyOwners
+
+ if len(o.Users) > 0 {
+ policyOwners.Users = o.Users
+ }
+ return policyOwners
+}
+
+type PolicySet struct {
+ Path string `yaml:"path" json:"path"`
+ Source string `yaml:"source" json:"source"`
+ Name string `yaml:"name" json:"name"`
+ Owners PolicyOwners `yaml:"owners,omitempty" json:"owners,omitempty"`
+}
+
+func (p PolicySet) Validate() error {
+ return validation.ValidateStruct(&p,
+ validation.Field(&p.Name, validation.Required.Error("is required")),
+ validation.Field(&p.Owners),
+ validation.Field(&p.Path, validation.Required.Error("is required")),
+ validation.Field(&p.Source, validation.In(valid.LocalPolicySet, valid.GithubPolicySet).Error("only 'local' and 'github' source types are supported")),
+ )
+}
+
+func (p PolicySet) ToValid() valid.PolicySet {
+ var policySet valid.PolicySet
+
+ policySet.Name = p.Name
+ policySet.Path = p.Path
+ policySet.Source = p.Source
+ policySet.Owners = p.Owners.ToValid()
+
+ return policySet
+}
diff --git a/server/events/yaml/raw/policies_test.go b/server/events/yaml/raw/policies_test.go
new file mode 100644
index 0000000000..0fe3c2c161
--- /dev/null
+++ b/server/events/yaml/raw/policies_test.go
@@ -0,0 +1,260 @@
+package raw_test
+
+import (
+ "testing"
+
+ "github.com/hashicorp/go-version"
+ "github.com/runatlantis/atlantis/server/events/yaml/raw"
+ "github.com/runatlantis/atlantis/server/events/yaml/valid"
+ . "github.com/runatlantis/atlantis/testing"
+ yaml "gopkg.in/yaml.v2"
+)
+
+func TestPolicySetsConfig_YAMLMarshalling(t *testing.T) {
+ cases := []struct {
+ description string
+ input string
+ exp raw.PolicySets
+ expErr string
+ }{
+ {
+ description: "valid yaml",
+ input: `
+conftest_version: v1.0.0
+policy_sets:
+- name: policy-name
+ source: "local"
+ path: "rel/path/to/policy-set"
+`,
+ exp: raw.PolicySets{
+ Version: String("v1.0.0"),
+ PolicySets: []raw.PolicySet{
+ {
+ Name: "policy-name",
+ Source: valid.LocalPolicySet,
+ Path: "rel/path/to/policy-set",
+ },
+ },
+ },
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.description, func(t *testing.T) {
+ var got raw.PolicySets
+ err := yaml.UnmarshalStrict([]byte(c.input), &got)
+ if c.expErr != "" {
+ ErrEquals(t, c.expErr, err)
+ return
+ }
+ Ok(t, err)
+ Equals(t, c.exp, got)
+
+ _, err = yaml.Marshal(got)
+ Ok(t, err)
+
+ var got2 raw.PolicySets
+ err = yaml.UnmarshalStrict([]byte(c.input), &got2)
+ Ok(t, err)
+ Equals(t, got2, got)
+ })
+ }
+}
+
+func TestPolicySets_Validate(t *testing.T) {
+ cases := []struct {
+ description string
+ input raw.PolicySets
+ expErr string
+ }{
+ // Valid inputs.
+ {
+ description: "policies",
+ input: raw.PolicySets{
+ Version: String("v1.0.0"),
+ PolicySets: []raw.PolicySet{
+ {
+ Name: "policy-name-1",
+ Path: "rel/path/to/source",
+ Source: valid.LocalPolicySet,
+ },
+ {
+ Name: "policy-name-2",
+ Owners: raw.PolicyOwners{
+ Users: []string{
+ "john-doe",
+ "jane-doe",
+ },
+ },
+ Path: "rel/path/to/source",
+ Source: valid.GithubPolicySet,
+ },
+ },
+ },
+ expErr: "",
+ },
+
+ // Invalid inputs.
+ {
+ description: "empty elem",
+ input: raw.PolicySets{},
+ expErr: "policy_sets: cannot be empty; Declare policies that you would like to enforce.",
+ },
+
+ {
+ description: "missing policy name and source path",
+ input: raw.PolicySets{
+ PolicySets: []raw.PolicySet{
+ {},
+ },
+ },
+ expErr: "policy_sets: (0: (name: is required; path: is required.).).",
+ },
+ {
+ description: "invalid source type",
+ input: raw.PolicySets{
+ PolicySets: []raw.PolicySet{
+ {
+ Name: "good-policy",
+ Source: "invalid-source-type",
+ Path: "rel/path/to/source",
+ },
+ },
+ },
+ expErr: "policy_sets: (0: (source: only 'local' and 'github' source types are supported.).).",
+ },
+ {
+ description: "empty string version",
+ input: raw.PolicySets{
+ Version: String(""),
+ PolicySets: []raw.PolicySet{
+ {
+ Name: "policy-name-1",
+ Path: "rel/path/to/source",
+ Source: valid.LocalPolicySet,
+ },
+ },
+ },
+ expErr: "conftest_version: version \"\" could not be parsed: Malformed version: .",
+ },
+ {
+ description: "invalid version",
+ input: raw.PolicySets{
+ Version: String("version123"),
+ PolicySets: []raw.PolicySet{
+ {
+ Name: "policy-name-1",
+ Path: "rel/path/to/source",
+ Source: valid.LocalPolicySet,
+ },
+ },
+ },
+ expErr: "conftest_version: version \"version123\" could not be parsed: Malformed version: version123.",
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.description, func(t *testing.T) {
+ err := c.input.Validate()
+ if c.expErr == "" {
+ Ok(t, err)
+ return
+ }
+ ErrEquals(t, c.expErr, err)
+ })
+ }
+}
+
+func TestPolicySets_ToValid(t *testing.T) {
+ version, _ := version.NewVersion("v1.0.0")
+ cases := []struct {
+ description string
+ input raw.PolicySets
+ exp valid.PolicySets
+ }{
+ {
+ description: "valid policies with owners",
+ input: raw.PolicySets{
+ Version: String("v1.0.0"),
+ Owners: raw.PolicyOwners{
+ Users: []string{
+ "test",
+ },
+ },
+ PolicySets: []raw.PolicySet{
+ {
+ Name: "good-policy",
+ Owners: raw.PolicyOwners{
+ Users: []string{
+ "john-doe",
+ "jane-doe",
+ },
+ },
+ Path: "rel/path/to/source",
+ Source: valid.LocalPolicySet,
+ },
+ },
+ },
+ exp: valid.PolicySets{
+ Version: version,
+ Owners: valid.PolicyOwners{
+ Users: []string{"test"},
+ },
+ PolicySets: []valid.PolicySet{
+ {
+ Name: "good-policy",
+ Owners: valid.PolicyOwners{
+ Users: []string{
+ "john-doe",
+ "jane-doe",
+ },
+ },
+ Path: "rel/path/to/source",
+ Source: "local",
+ },
+ },
+ },
+ },
+ {
+ description: "valid policies without owners",
+ input: raw.PolicySets{
+ Version: String("v1.0.0"),
+ PolicySets: []raw.PolicySet{
+ {
+ Name: "good-policy",
+ Owners: raw.PolicyOwners{
+ Users: []string{
+ "john-doe",
+ "jane-doe",
+ },
+ },
+ Path: "rel/path/to/source",
+ Source: valid.LocalPolicySet,
+ },
+ },
+ },
+ exp: valid.PolicySets{
+ Version: version,
+ PolicySets: []valid.PolicySet{
+ {
+ Name: "good-policy",
+ Owners: valid.PolicyOwners{
+ Users: []string{
+ "john-doe",
+ "jane-doe",
+ },
+ },
+ Path: "rel/path/to/source",
+ Source: "local",
+ },
+ },
+ },
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.description, func(t *testing.T) {
+ Equals(t, c.exp, c.input.ToValid())
+ })
+ }
+}
diff --git a/server/events/yaml/raw/project.go b/server/events/yaml/raw/project.go
index e19fe6e5ca..8573fcf7ac 100644
--- a/server/events/yaml/raw/project.go
+++ b/server/events/yaml/raw/project.go
@@ -36,14 +36,6 @@ func (p Project) Validate() error {
return nil
}
- validTFVersion := func(value interface{}) error {
- strPtr := value.(*string)
- if strPtr == nil {
- return nil
- }
- _, err := version.NewVersion(*strPtr)
- return errors.Wrapf(err, "version %q could not be parsed", *strPtr)
- }
validName := func(value interface{}) error {
strPtr := value.(*string)
if strPtr == nil {
@@ -60,7 +52,7 @@ func (p Project) Validate() error {
return validation.ValidateStruct(&p,
validation.Field(&p.Dir, validation.Required, validation.By(hasDotDot)),
validation.Field(&p.ApplyRequirements, validation.By(validApplyReq)),
- validation.Field(&p.TerraformVersion, validation.By(validTFVersion)),
+ validation.Field(&p.TerraformVersion, validation.By(VersionValidator)),
validation.Field(&p.Name, validation.By(validName)),
)
}
diff --git a/server/events/yaml/raw/raw.go b/server/events/yaml/raw/raw.go
index 2c08e6a820..d10625255c 100644
--- a/server/events/yaml/raw/raw.go
+++ b/server/events/yaml/raw/raw.go
@@ -2,3 +2,19 @@
// supported in atlantis.yaml. The structs here represent the exact data that
// comes from the file before it is parsed/validated further.
package raw
+
+import (
+ version "github.com/hashicorp/go-version"
+ "github.com/pkg/errors"
+)
+
+// VersionValidator is a helper that validates a binary version string so it
+// can be passed to ozzo-validation's validation.By as a custom rule.
+func VersionValidator(value interface{}) error {
+ strPtr := value.(*string)
+ if strPtr == nil {
+ return nil
+ }
+ _, err := version.NewVersion(*strPtr)
+ return errors.Wrapf(err, "version %q could not be parsed", *strPtr)
+}
diff --git a/server/events/yaml/raw/repo_cfg.go b/server/events/yaml/raw/repo_cfg.go
index cd51977270..c8851e853f 100644
--- a/server/events/yaml/raw/repo_cfg.go
+++ b/server/events/yaml/raw/repo_cfg.go
@@ -16,11 +16,15 @@ const DefaultParallelApply = false
// DefaultParallelPlan is the default setting for parallel plan
const DefaultParallelPlan = false
+// DefaultParallelPolicyCheck is the default setting for parallel policy check
+const DefaultParallelPolicyCheck = false
+
// RepoCfg is the raw schema for repo-level atlantis.yaml config.
type RepoCfg struct {
Version *int `yaml:"version,omitempty"`
Projects []Project `yaml:"projects,omitempty"`
Workflows map[string]Workflow `yaml:"workflows,omitempty"`
+ PolicySets PolicySets `yaml:"policies,omitempty"`
Automerge *bool `yaml:"automerge,omitempty"`
ParallelApply *bool `yaml:"parallel_apply,omitempty"`
ParallelPlan *bool `yaml:"parallel_plan,omitempty"`
@@ -71,11 +75,12 @@ func (r RepoCfg) ToValid() valid.RepoCfg {
}
return valid.RepoCfg{
- Version: *r.Version,
- Projects: validProjects,
- Workflows: validWorkflows,
- Automerge: automerge,
- ParallelApply: parallelApply,
- ParallelPlan: parallelPlan,
+ Version: *r.Version,
+ Projects: validProjects,
+ Workflows: validWorkflows,
+ Automerge: automerge,
+ ParallelApply: parallelApply,
+ ParallelPlan: parallelPlan,
+ ParallelPolicyCheck: parallelPlan,
}
}
diff --git a/server/events/yaml/raw/repo_cfg_test.go b/server/events/yaml/raw/repo_cfg_test.go
index 8f0dcaf450..430b8c61b9 100644
--- a/server/events/yaml/raw/repo_cfg_test.go
+++ b/server/events/yaml/raw/repo_cfg_test.go
@@ -141,6 +141,8 @@ workflows:
default:
plan:
steps: []
+ policy_check:
+ steps: []
apply:
steps: []`,
exp: raw.RepoCfg{
@@ -169,6 +171,9 @@ workflows:
Plan: &raw.Stage{
Steps: []raw.Step{},
},
+ PolicyCheck: &raw.Stage{
+ Steps: []raw.Step{},
+ },
},
},
},
@@ -295,8 +300,9 @@ func TestConfig_ToValid(t *testing.T) {
Version: Int(2),
Workflows: map[string]raw.Workflow{
"myworkflow": {
- Plan: &raw.Stage{},
- Apply: nil,
+ Plan: &raw.Stage{},
+ Apply: nil,
+ PolicyCheck: nil,
},
},
},
@@ -308,6 +314,16 @@ func TestConfig_ToValid(t *testing.T) {
"myworkflow": {
Name: "myworkflow",
Plan: valid.DefaultPlanStage,
+ PolicyCheck: valid.Stage{
+ Steps: []valid.Step{
+ {
+ StepName: "show",
+ },
+ {
+ StepName: "policy_check",
+ },
+ },
+ },
Apply: valid.Stage{
Steps: []valid.Step{
{
@@ -334,6 +350,13 @@ func TestConfig_ToValid(t *testing.T) {
},
},
},
+ PolicyCheck: &raw.Stage{
+ Steps: []raw.Step{
+ {
+ Key: String("policy_check"),
+ },
+ },
+ },
Plan: &raw.Stage{
Steps: []raw.Step{
{
@@ -363,6 +386,13 @@ func TestConfig_ToValid(t *testing.T) {
},
},
},
+ PolicyCheck: valid.Stage{
+ Steps: []valid.Step{
+ {
+ StepName: "policy_check",
+ },
+ },
+ },
Plan: valid.Stage{
Steps: []valid.Step{
{
diff --git a/server/events/yaml/raw/step.go b/server/events/yaml/raw/step.go
index 0effbfc2ea..2cbc64443e 100644
--- a/server/events/yaml/raw/step.go
+++ b/server/events/yaml/raw/step.go
@@ -12,21 +12,23 @@ import (
)
const (
- ExtraArgsKey = "extra_args"
- NameArgKey = "name"
- CommandArgKey = "command"
- ValueArgKey = "value"
- RunStepName = "run"
- PlanStepName = "plan"
- ApplyStepName = "apply"
- InitStepName = "init"
- EnvStepName = "env"
+ ExtraArgsKey = "extra_args"
+ NameArgKey = "name"
+ CommandArgKey = "command"
+ ValueArgKey = "value"
+ RunStepName = "run"
+ PlanStepName = "plan"
+ PolicyCheckStepName = "policy_check"
+ ApplyStepName = "apply"
+ InitStepName = "init"
+ EnvStepName = "env"
)
// Step represents a single action/command to perform. In YAML, it can be set as
// 1. A single string for a built-in command:
// - init
// - plan
+// - policy_check
// 2. A map for an env step with name and command or value
// - env:
// name: test
@@ -73,10 +75,18 @@ func (s *Step) MarshalJSON() ([]byte, error) {
return json.Marshal(out)
}
+func (s Step) validStepName(stepName string) bool {
+ return stepName == InitStepName ||
+ stepName == PlanStepName ||
+ stepName == ApplyStepName ||
+ stepName == EnvStepName ||
+ stepName == PolicyCheckStepName
+}
+
func (s Step) Validate() error {
validStep := func(value interface{}) error {
str := *value.(*string)
- if str != InitStepName && str != PlanStepName && str != ApplyStepName && str != EnvStepName {
+ if !s.validStepName(str) {
return fmt.Errorf("%q is not a valid step type, maybe you omitted the 'run' key", str)
}
return nil
@@ -96,7 +106,7 @@ func (s Step) Validate() error {
len(keys), strings.Join(keys, ","))
}
for stepName, args := range elem {
- if stepName != InitStepName && stepName != PlanStepName && stepName != ApplyStepName {
+ if !s.validStepName(stepName) {
return fmt.Errorf("%q is not a valid step type", stepName)
}
var argKeys []string
diff --git a/server/events/yaml/raw/step_test.go b/server/events/yaml/raw/step_test.go
index 37f2e73afe..94737ef002 100644
--- a/server/events/yaml/raw/step_test.go
+++ b/server/events/yaml/raw/step_test.go
@@ -433,6 +433,15 @@ func TestStep_ToValid(t *testing.T) {
StepName: "plan",
},
},
+ {
+ description: "policy_check step",
+ input: raw.Step{
+ Key: String("policy_check"),
+ },
+ exp: valid.Step{
+ StepName: "policy_check",
+ },
+ },
{
description: "apply step",
input: raw.Step{
@@ -486,6 +495,20 @@ func TestStep_ToValid(t *testing.T) {
ExtraArgs: []string{"arg1", "arg2"},
},
},
+ {
+ description: "policy_check extra_args",
+ input: raw.Step{
+ Map: MapType{
+ "policy_check": {
+ "extra_args": []string{"arg1", "arg2"},
+ },
+ },
+ },
+ exp: valid.Step{
+ StepName: "policy_check",
+ ExtraArgs: []string{"arg1", "arg2"},
+ },
+ },
{
description: "apply extra_args",
input: raw.Step{
diff --git a/server/events/yaml/raw/workflow.go b/server/events/yaml/raw/workflow.go
index 399ece21a8..7429453298 100644
--- a/server/events/yaml/raw/workflow.go
+++ b/server/events/yaml/raw/workflow.go
@@ -6,30 +6,35 @@ import (
)
type Workflow struct {
- Apply *Stage `yaml:"apply,omitempty" json:"apply,omitempty"`
- Plan *Stage `yaml:"plan,omitempty" json:"plan,omitempty"`
+ Apply *Stage `yaml:"apply,omitempty" json:"apply,omitempty"`
+ Plan *Stage `yaml:"plan,omitempty" json:"plan,omitempty"`
+ PolicyCheck *Stage `yaml:"policy_check,omitempty" json:"policy_check,omitempty"`
}
func (w Workflow) Validate() error {
return validation.ValidateStruct(&w,
validation.Field(&w.Apply),
validation.Field(&w.Plan),
+ validation.Field(&w.PolicyCheck),
)
}
+func (w Workflow) toValidStage(stage *Stage, defaultStage valid.Stage) valid.Stage {
+ if stage == nil || stage.Steps == nil {
+ return defaultStage
+ }
+
+ return stage.ToValid()
+}
+
func (w Workflow) ToValid(name string) valid.Workflow {
v := valid.Workflow{
Name: name,
}
- if w.Apply == nil || w.Apply.Steps == nil {
- v.Apply = valid.DefaultApplyStage
- } else {
- v.Apply = w.Apply.ToValid()
- }
- if w.Plan == nil || w.Plan.Steps == nil {
- v.Plan = valid.DefaultPlanStage
- } else {
- v.Plan = w.Plan.ToValid()
- }
+
+ v.Apply = w.toValidStage(w.Apply, valid.DefaultApplyStage)
+ v.Plan = w.toValidStage(w.Plan, valid.DefaultPlanStage)
+ v.PolicyCheck = w.toValidStage(w.PolicyCheck, valid.DefaultPolicyCheckStage)
+
return v
}
diff --git a/server/events/yaml/raw/workflow_test.go b/server/events/yaml/raw/workflow_test.go
index b0b3369dc6..5b200540e7 100644
--- a/server/events/yaml/raw/workflow_test.go
+++ b/server/events/yaml/raw/workflow_test.go
@@ -21,16 +21,18 @@ func TestWorkflow_UnmarshalYAML(t *testing.T) {
description: "empty",
input: ``,
exp: raw.Workflow{
- Apply: nil,
- Plan: nil,
+ Apply: nil,
+ PolicyCheck: nil,
+ Plan: nil,
},
},
{
description: "yaml null",
input: `~`,
exp: raw.Workflow{
- Apply: nil,
- Plan: nil,
+ Apply: nil,
+ PolicyCheck: nil,
+ Plan: nil,
},
},
{
@@ -44,17 +46,35 @@ apply:
Plan: nil,
},
},
+ {
+ description: "only plan/policy_check/apply set",
+ input: `
+plan:
+policy_check:
+apply:
+`,
+ exp: raw.Workflow{
+ Apply: nil,
+ PolicyCheck: nil,
+ Plan: nil,
+ },
+ },
{
description: "steps set to null",
input: `
plan:
steps: ~
+policy_check:
+ steps: ~
apply:
steps: ~`,
exp: raw.Workflow{
Plan: &raw.Stage{
Steps: nil,
},
+ PolicyCheck: &raw.Stage{
+ Steps: nil,
+ },
Apply: &raw.Stage{
Steps: nil,
},
@@ -65,12 +85,17 @@ apply:
input: `
plan:
steps: []
+policy_check:
+ steps: []
apply:
steps: []`,
exp: raw.Workflow{
Plan: &raw.Stage{
Steps: []raw.Step{},
},
+ PolicyCheck: &raw.Stage{
+ Steps: []raw.Step{},
+ },
Apply: &raw.Stage{
Steps: []raw.Step{},
},
@@ -120,8 +145,9 @@ func TestWorkflow_ToValid(t *testing.T) {
description: "nothing set",
input: raw.Workflow{},
exp: valid.Workflow{
- Apply: valid.DefaultApplyStage,
- Plan: valid.DefaultPlanStage,
+ Apply: valid.DefaultApplyStage,
+ Plan: valid.DefaultPlanStage,
+ PolicyCheck: valid.DefaultPolicyCheckStage,
},
},
{
@@ -134,6 +160,13 @@ func TestWorkflow_ToValid(t *testing.T) {
},
},
},
+ PolicyCheck: &raw.Stage{
+ Steps: []raw.Step{
+ {
+ Key: String("policy_check"),
+ },
+ },
+ },
Plan: &raw.Stage{
Steps: []raw.Step{
{
@@ -150,6 +183,13 @@ func TestWorkflow_ToValid(t *testing.T) {
},
},
},
+ PolicyCheck: valid.Stage{
+ Steps: []valid.Step{
+ {
+ StepName: "policy_check",
+ },
+ },
+ },
Plan: valid.Stage{
Steps: []valid.Step{
{
diff --git a/server/events/yaml/valid/global_cfg.go b/server/events/yaml/valid/global_cfg.go
index b7305e741b..dac71e19b2 100644
--- a/server/events/yaml/valid/global_cfg.go
+++ b/server/events/yaml/valid/global_cfg.go
@@ -21,8 +21,9 @@ const DefaultWorkflowName = "default"
// GlobalCfg is the final parsed version of server-side repo config.
type GlobalCfg struct {
- Repos []Repo
- Workflows map[string]Workflow
+ Repos []Repo
+ Workflows map[string]Workflow
+ PolicySets PolicySets
}
// Repo is the final parsed version of server-side repo config.
@@ -51,6 +52,7 @@ type MergedProjectCfg struct {
AutoplanEnabled bool
TerraformVersion *version.Version
RepoCfgVersion int
+ PolicySets PolicySets
}
// PreWorkflowHook is a map of custom run commands to run before workflows.
@@ -68,6 +70,18 @@ var DefaultApplyStage = Stage{
},
}
+// DefaultPolicyCheckStage is the Atlantis default policy check stage.
+var DefaultPolicyCheckStage = Stage{
+ Steps: []Step{
+ {
+ StepName: "show",
+ },
+ {
+ StepName: "policy_check",
+ },
+ },
+}
+
// DefaultPlanStage is the Atlantis default plan stage.
var DefaultPlanStage = Stage{
Steps: []Step{
@@ -88,9 +102,10 @@ var DefaultPlanStage = Stage{
// for all repos.
func NewGlobalCfg(allowRepoCfg bool, mergeableReq bool, approvedReq bool) GlobalCfg {
defaultWorkflow := Workflow{
- Name: DefaultWorkflowName,
- Apply: DefaultApplyStage,
- Plan: DefaultPlanStage,
+ Name: DefaultWorkflowName,
+ Apply: DefaultApplyStage,
+ Plan: DefaultPlanStage,
+ PolicyCheck: DefaultPolicyCheckStage,
}
// Must construct slices here instead of using a `var` declaration because
// we treat nil slices differently.
@@ -195,6 +210,7 @@ func (g GlobalCfg) MergeProjectCfg(log logging.SimpleLogging, repoID string, pro
AutoplanEnabled: proj.Autoplan.Enabled,
TerraformVersion: proj.TerraformVersion,
RepoCfgVersion: rCfg.Version,
+ PolicySets: g.PolicySets,
}
}
@@ -211,6 +227,7 @@ func (g GlobalCfg) DefaultProjCfg(log logging.SimpleLogging, repoID string, repo
Name: "",
AutoplanEnabled: DefaultAutoPlanEnabled,
TerraformVersion: nil,
+ PolicySets: g.PolicySets,
}
}
diff --git a/server/events/yaml/valid/global_cfg_test.go b/server/events/yaml/valid/global_cfg_test.go
index 0404816045..2c85205e25 100644
--- a/server/events/yaml/valid/global_cfg_test.go
+++ b/server/events/yaml/valid/global_cfg_test.go
@@ -7,6 +7,7 @@ import (
"regexp"
"testing"
+ "github.com/hashicorp/go-version"
"github.com/mohae/deepcopy"
"github.com/runatlantis/atlantis/server/events/yaml"
"github.com/runatlantis/atlantis/server/events/yaml/valid"
@@ -24,6 +25,16 @@ func TestNewGlobalCfg(t *testing.T) {
},
},
},
+ PolicyCheck: valid.Stage{
+ Steps: []valid.Step{
+ {
+ StepName: "show",
+ },
+ {
+ StepName: "policy_check",
+ },
+ },
+ },
Plan: valid.Stage{
Steps: []valid.Step{
{
@@ -427,7 +438,121 @@ func TestGlobalCfg_ValidateRepoCfg(t *testing.T) {
}
}
+func TestGlobalCfg_WithPolicySets(t *testing.T) {
+ version, _ := version.NewVersion("v1.0.0")
+ cases := map[string]struct {
+ gCfg string
+ proj valid.Project
+ repoID string
+ exp valid.MergedProjectCfg
+ }{
+ "policies are added to MergedProjectCfg when present": {
+ gCfg: `
+repos:
+- id: /.*/
+policies:
+ policy_sets:
+ - name: good-policy
+ source: local
+ path: rel/path/to/source
+`,
+ repoID: "github.com/owner/repo",
+ proj: valid.Project{
+ Dir: ".",
+ Workspace: "default",
+ WorkflowName: String("custom"),
+ },
+ exp: valid.MergedProjectCfg{
+ ApplyRequirements: []string{},
+ Workflow: valid.Workflow{
+ Name: "default",
+ Apply: valid.DefaultApplyStage,
+ Plan: valid.DefaultPlanStage,
+ PolicyCheck: valid.DefaultPolicyCheckStage,
+ },
+ PolicySets: valid.PolicySets{
+ Version: nil,
+ PolicySets: []valid.PolicySet{
+ {
+ Name: "good-policy",
+ Path: "rel/path/to/source",
+ Source: "local",
+ },
+ },
+ },
+ RepoRelDir: ".",
+ Workspace: "default",
+ Name: "",
+ AutoplanEnabled: false,
+ },
+ },
+ "policies set correct version if specified": {
+ gCfg: `
+repos:
+- id: /.*/
+policies:
+ conftest_version: v1.0.0
+ policy_sets:
+ - name: good-policy
+ source: local
+ path: rel/path/to/source
+`,
+ repoID: "github.com/owner/repo",
+ proj: valid.Project{
+ Dir: ".",
+ Workspace: "default",
+ WorkflowName: String("custom"),
+ },
+ exp: valid.MergedProjectCfg{
+ ApplyRequirements: []string{},
+ Workflow: valid.Workflow{
+ Name: "default",
+ Apply: valid.DefaultApplyStage,
+ Plan: valid.DefaultPlanStage,
+ PolicyCheck: valid.DefaultPolicyCheckStage,
+ },
+ PolicySets: valid.PolicySets{
+ Version: version,
+ PolicySets: []valid.PolicySet{
+ {
+ Name: "good-policy",
+ Path: "rel/path/to/source",
+ Source: "local",
+ },
+ },
+ },
+ RepoRelDir: ".",
+ Workspace: "default",
+ Name: "",
+ AutoplanEnabled: false,
+ },
+ },
+ }
+ for name, c := range cases {
+ t.Run(name, func(t *testing.T) {
+ tmp, cleanup := TempDir(t)
+ defer cleanup()
+ var global valid.GlobalCfg
+ if c.gCfg != "" {
+ path := filepath.Join(tmp, "config.yaml")
+ Ok(t, ioutil.WriteFile(path, []byte(c.gCfg), 0600))
+ var err error
+ global, err = (&yaml.ParserValidator{}).ParseGlobalCfg(path, valid.NewGlobalCfg(false, false, false))
+ Ok(t, err)
+ } else {
+ global = valid.NewGlobalCfg(false, false, false)
+ }
+
+ Equals(t,
+ c.exp,
+ global.MergeProjectCfg(logging.NewNoopLogger(), c.repoID, c.proj, valid.RepoCfg{}))
+ })
+ }
+}
+
func TestGlobalCfg_MergeProjectCfg(t *testing.T) {
+ var emptyPolicySets valid.PolicySets
+
cases := map[string]struct {
gCfg string
repoID string
@@ -454,8 +579,9 @@ workflows:
exp: valid.MergedProjectCfg{
ApplyRequirements: []string{},
Workflow: valid.Workflow{
- Name: "custom",
- Apply: valid.DefaultApplyStage,
+ Name: "custom",
+ Apply: valid.DefaultApplyStage,
+ PolicyCheck: valid.DefaultPolicyCheckStage,
Plan: valid.Stage{
Steps: []valid.Step{
{
@@ -468,6 +594,7 @@ workflows:
Workspace: "default",
Name: "",
AutoplanEnabled: false,
+ PolicySets: emptyPolicySets,
},
},
"repo-side apply reqs win out if allowed": {
@@ -487,14 +614,16 @@ repos:
exp: valid.MergedProjectCfg{
ApplyRequirements: []string{"mergeable"},
Workflow: valid.Workflow{
- Name: "default",
- Apply: valid.DefaultApplyStage,
- Plan: valid.DefaultPlanStage,
+ Name: "default",
+ Apply: valid.DefaultApplyStage,
+ PolicyCheck: valid.DefaultPolicyCheckStage,
+ Plan: valid.DefaultPlanStage,
},
RepoRelDir: ".",
Workspace: "default",
Name: "",
AutoplanEnabled: false,
+ PolicySets: emptyPolicySets,
},
},
"last server-side match wins": {
@@ -517,14 +646,16 @@ repos:
exp: valid.MergedProjectCfg{
ApplyRequirements: []string{"approved", "mergeable"},
Workflow: valid.Workflow{
- Name: "default",
- Apply: valid.DefaultApplyStage,
- Plan: valid.DefaultPlanStage,
+ Name: "default",
+ Apply: valid.DefaultApplyStage,
+ PolicyCheck: valid.DefaultPolicyCheckStage,
+ Plan: valid.DefaultPlanStage,
},
RepoRelDir: "mydir",
Workspace: "myworkspace",
Name: "myname",
AutoplanEnabled: false,
+ PolicySets: emptyPolicySets,
},
},
"autoplan is set properly": {
@@ -543,14 +674,16 @@ repos:
exp: valid.MergedProjectCfg{
ApplyRequirements: []string{},
Workflow: valid.Workflow{
- Name: "default",
- Apply: valid.DefaultApplyStage,
- Plan: valid.DefaultPlanStage,
+ Name: "default",
+ Apply: valid.DefaultApplyStage,
+ PolicyCheck: valid.DefaultPolicyCheckStage,
+ Plan: valid.DefaultPlanStage,
},
RepoRelDir: "mydir",
Workspace: "myworkspace",
Name: "myname",
AutoplanEnabled: true,
+ PolicySets: emptyPolicySets,
},
},
}
@@ -569,6 +702,7 @@ repos:
global = valid.NewGlobalCfg(false, false, false)
}
+ global.PolicySets = emptyPolicySets
Equals(t, c.exp, global.MergeProjectCfg(logging.NewNoopLogger(), c.repoID, c.proj, valid.RepoCfg{Workflows: c.repoWorkflows}))
})
}
diff --git a/server/events/yaml/valid/policies.go b/server/events/yaml/valid/policies.go
new file mode 100644
index 0000000000..e41bf2a78a
--- /dev/null
+++ b/server/events/yaml/valid/policies.go
@@ -0,0 +1,44 @@
+package valid
+
+import (
+ "github.com/hashicorp/go-version"
+)
+
+const (
+ LocalPolicySet string = "local"
+ GithubPolicySet string = "github"
+)
+
+// PolicySets defines the version of the policy checker binary (conftest) and a list of
+// PolicySet objects. PolicySets struct is used by PolicyCheck workflow to build
+// context to enforce policies.
+type PolicySets struct {
+ Version *version.Version
+ Owners PolicyOwners
+ PolicySets []PolicySet
+}
+
+type PolicyOwners struct {
+ Users []string
+}
+
+type PolicySet struct {
+ Source string
+ Path string
+ Name string
+ Owners PolicyOwners
+}
+
+func (p *PolicySets) HasPolicies() bool {
+ return len(p.PolicySets) > 0
+}
+
+func (p *PolicySets) IsOwner(username string) bool {
+ for _, uname := range p.Owners.Users {
+ if uname == username {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/server/events/yaml/valid/repo_cfg.go b/server/events/yaml/valid/repo_cfg.go
index b05b6b1249..78234cd1e0 100644
--- a/server/events/yaml/valid/repo_cfg.go
+++ b/server/events/yaml/valid/repo_cfg.go
@@ -2,17 +2,24 @@
// after it's been parsed and validated.
package valid
-import version "github.com/hashicorp/go-version"
+import (
+ "fmt"
+ "strings"
+
+ version "github.com/hashicorp/go-version"
+)
// RepoCfg is the atlantis.yaml config after it's been parsed and validated.
type RepoCfg struct {
// Version is the version of the atlantis YAML file.
- Version int
- Projects []Project
- Workflows map[string]Workflow
- Automerge bool
- ParallelApply bool
- ParallelPlan bool
+ Version int
+ Projects []Project
+ Workflows map[string]Workflow
+ PolicySets PolicySets
+ Automerge bool
+ ParallelApply bool
+ ParallelPlan bool
+ ParallelPolicyCheck bool
}
func (r RepoCfg) FindProjectsByDirWorkspace(repoRelDir string, workspace string) []Project {
@@ -45,6 +52,36 @@ func (r RepoCfg) FindProjectByName(name string) *Project {
return nil
}
+// ValidateWorkspaceAllowed returns an error if repoCfg defines projects in
+// repoRelDir but none of them use workspace. We want this to be an error
+// because if users have gone to the trouble of defining projects in repoRelDir
+// then it's likely that if we're running a command for a workspace that isn't
+// defined then they probably just typed the workspace name wrong.
+func (r RepoCfg) ValidateWorkspaceAllowed(repoRelDir string, workspace string) error {
+ projects := r.FindProjectsByDir(repoRelDir)
+
+ // If that directory doesn't have any projects configured then we don't
+ // enforce workspace names.
+ if len(projects) == 0 {
+ return nil
+ }
+
+ var configuredSpaces []string
+ for _, p := range projects {
+ if p.Workspace == workspace {
+ return nil
+ }
+ configuredSpaces = append(configuredSpaces, p.Workspace)
+ }
+
+ return fmt.Errorf(
+ "running commands in workspace %q is not allowed because this"+
+ " directory is only configured for the following workspaces: %s",
+ workspace,
+ strings.Join(configuredSpaces, ", "),
+ )
+}
+
type Project struct {
Dir string
Workspace string
@@ -87,7 +124,8 @@ type Step struct {
}
type Workflow struct {
- Name string
- Apply Stage
- Plan Stage
+ Name string
+ Apply Stage
+ Plan Stage
+ PolicyCheck Stage
}
diff --git a/server/events_controller_e2e_test.go b/server/events_controller_e2e_test.go
index 2a396cc8d9..fdadc2ec2f 100644
--- a/server/events_controller_e2e_test.go
+++ b/server/events_controller_e2e_test.go
@@ -25,6 +25,7 @@ import (
"github.com/runatlantis/atlantis/server/events/mocks/matchers"
"github.com/runatlantis/atlantis/server/events/models"
"github.com/runatlantis/atlantis/server/events/runtime"
+ "github.com/runatlantis/atlantis/server/events/runtime/policy"
"github.com/runatlantis/atlantis/server/events/terraform"
vcsmocks "github.com/runatlantis/atlantis/server/events/vcs/mocks"
"github.com/runatlantis/atlantis/server/events/webhooks"
@@ -40,7 +41,19 @@ func (m *NoopTFDownloader) GetFile(dst, src string, opts ...getter.ClientOption)
return nil
}
+func (m *NoopTFDownloader) GetAny(dst, src string, opts ...getter.ClientOption) error {
+ return nil
+}
+
+type LocalConftestCache struct {
+}
+
+func (m *LocalConftestCache) Get(key *version.Version) (string, error) {
+ return exec.LookPath("conftest0.21.0")
+}
+
func TestGitHubWorkflow(t *testing.T) {
+
if testing.Short() {
t.SkipNow()
}
@@ -62,6 +75,10 @@ func TestGitHubWorkflow(t *testing.T) {
ExpAutoplan bool
// ExpParallel is true if we expect Atlantis to run parallel plans or applies.
ExpParallel bool
+ // ExpMergeable is true if we expect Atlantis to be able to merge.
+ // If for instance policy check is failing and there are no approvals
+ // ExpMergeable should be false
+ ExpMergeable bool
// ExpReplies is a list of files containing the expected replies that
// Atlantis writes to the pull request in order. A reply from a parallel operation
// will be matched using a substring check.
@@ -181,22 +198,6 @@ func TestGitHubWorkflow(t *testing.T) {
{"exp-output-merge.txt"},
},
},
- {
- Description: "simple with atlantis.yaml and plan/apply all",
- RepoDir: "simple-yaml",
- ModifiedFiles: []string{"main.tf"},
- ExpAutoplan: true,
- Comments: []string{
- "atlantis plan",
- "atlantis apply",
- },
- ExpReplies: [][]string{
- {"exp-output-autoplan.txt"},
- {"exp-output-autoplan.txt"},
- {"exp-output-apply-all.txt"},
- {"exp-output-merge.txt"},
- },
- },
{
Description: "modules staging only",
RepoDir: "modules",
@@ -336,7 +337,152 @@ func TestGitHubWorkflow(t *testing.T) {
t.Run(c.Description, func(t *testing.T) {
RegisterMockTestingT(t)
- ctrl, vcsClient, githubGetter, atlantisWorkspace := setupE2E(t, c.RepoDir)
+ ctrl, vcsClient, githubGetter, atlantisWorkspace := setupE2E(t, c.RepoDir, false)
+ // Set the repo to be cloned through the testing backdoor.
+ repoDir, headSHA, cleanup := initializeRepo(t, c.RepoDir)
+ defer cleanup()
+ atlantisWorkspace.TestingOverrideHeadCloneURL = fmt.Sprintf("file://%s", repoDir)
+
+ // Setup test dependencies.
+ w := httptest.NewRecorder()
+ When(githubGetter.GetPullRequest(AnyRepo(), AnyInt())).ThenReturn(GitHubPullRequestParsed(headSHA), nil)
+ When(vcsClient.GetModifiedFiles(AnyRepo(), matchers.AnyModelsPullRequest())).ThenReturn(c.ModifiedFiles, nil)
+
+ // First, send the open pull request event which triggers autoplan.
+ pullOpenedReq := GitHubPullRequestOpenedEvent(t, headSHA)
+ ctrl.Post(w, pullOpenedReq)
+ responseContains(t, w, 200, "Processing...")
+
+ // Now send any other comments.
+ for _, comment := range c.Comments {
+ commentReq := GitHubCommentEvent(t, comment)
+ w = httptest.NewRecorder()
+ ctrl.Post(w, commentReq)
+ responseContains(t, w, 200, "Processing...")
+ }
+
+ // Send the "pull closed" event which would be triggered by the
+ // automerge or a manual merge.
+ pullClosedReq := GitHubPullRequestClosedEvent(t)
+ w = httptest.NewRecorder()
+ ctrl.Post(w, pullClosedReq)
+ responseContains(t, w, 200, "Pull request cleaned successfully")
+
+ // Now we're ready to verify Atlantis made all the comments back (or
+ // replies) that we expect. We expect each plan to have 1 comment,
+ // and each apply to have 1 for each comment, plus one for the locks deleted at the
+ // end.
+ expNumReplies := len(c.Comments) + 1
+
+ if c.ExpAutoplan {
+ expNumReplies++
+ }
+
+ if c.ExpAutomerge {
+ expNumReplies++
+ }
+
+ _, _, actReplies, _ := vcsClient.VerifyWasCalled(Times(expNumReplies)).CreateComment(AnyRepo(), AnyInt(), AnyString(), AnyString()).GetAllCapturedArguments()
+ Assert(t, len(c.ExpReplies) == len(actReplies), "missing expected replies, got %d but expected %d", len(actReplies), len(c.ExpReplies))
+ for i, expReply := range c.ExpReplies {
+ assertCommentEquals(t, expReply, actReplies[i], c.RepoDir, c.ExpParallel)
+ }
+
+ if c.ExpAutomerge {
+ // Verify that the merge API call was made.
+ vcsClient.VerifyWasCalledOnce().MergePull(matchers.AnyModelsPullRequest())
+ } else {
+ vcsClient.VerifyWasCalled(Never()).MergePull(matchers.AnyModelsPullRequest())
+ }
+ })
+ }
+}
+
+func TestGitHubWorkflowWithPolicyCheck(t *testing.T) {
+ if testing.Short() {
+ t.SkipNow()
+ }
+ // Ensure we have >= TF 0.12 locally.
+ ensureRunning012(t)
+ // Ensure we have >= Conftest 0.21 locally.
+ ensureRunningConftest(t)
+
+ cases := []struct {
+ Description string
+ // RepoDir is relative to testfixtures/test-repos.
+ RepoDir string
+ // ModifiedFiles are the list of files that have been modified in this
+ // pull request.
+ ModifiedFiles []string
+ // Comments are what our mock user writes to the pull request.
+ Comments []string
+ // ExpAutomerge is true if we expect Atlantis to automerge.
+ ExpAutomerge bool
+ // ExpAutoplan is true if we expect Atlantis to autoplan.
+ ExpAutoplan bool
+ // ExpParallel is true if we expect Atlantis to run parallel plans or applies.
+ ExpParallel bool
+ // ExpReplies is a list of files containing the expected replies that
+ // Atlantis writes to the pull request in order. A reply from a parallel operation
+ // will be matched using a substring check.
+ ExpReplies [][]string
+ }{
+ {
+ Description: "failing policy approved by the owner",
+ RepoDir: "policy-checks",
+ ModifiedFiles: []string{"main.tf"},
+ ExpAutoplan: true,
+ Comments: []string{
+ "atlantis approve_policies",
+ "atlantis apply",
+ },
+ ExpReplies: [][]string{
+ {"exp-output-autoplan.txt"},
+ {"exp-output-auto-policy-check.txt"},
+ {"exp-output-approve-policies.txt"},
+ {"exp-output-apply.txt"},
+ {"exp-output-merge.txt"},
+ },
+ },
+ {
+ Description: "failing policy without approval",
+ RepoDir: "policy-checks",
+ ModifiedFiles: []string{"main.tf"},
+ ExpAutoplan: true,
+ Comments: []string{
+ "atlantis apply",
+ },
+ ExpReplies: [][]string{
+ {"exp-output-autoplan.txt"},
+ {"exp-output-auto-policy-check.txt"},
+ {"exp-output-apply-failed.txt"},
+ {"exp-output-merge.txt"},
+ },
+ },
+ {
+ Description: "failing policy approved by non owner",
+ RepoDir: "policy-checks-diff-owner",
+ ModifiedFiles: []string{"main.tf"},
+ ExpAutoplan: true,
+ Comments: []string{
+ "atlantis approve_policies",
+ "atlantis apply",
+ },
+ ExpReplies: [][]string{
+ {"exp-output-autoplan.txt"},
+ {"exp-output-auto-policy-check.txt"},
+ {"exp-output-approve-policies.txt"},
+ {"exp-output-apply-failed.txt"},
+ {"exp-output-merge.txt"},
+ },
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.Description, func(t *testing.T) {
+ RegisterMockTestingT(t)
+
+ ctrl, vcsClient, githubGetter, atlantisWorkspace := setupE2E(t, c.RepoDir, true)
// Set the repo to be cloned through the testing backdoor.
repoDir, headSHA, cleanup := initializeRepo(t, c.RepoDir)
defer cleanup()
@@ -344,6 +490,7 @@ func TestGitHubWorkflow(t *testing.T) {
// Setup test dependencies.
w := httptest.NewRecorder()
+ When(vcsClient.PullIsMergeable(AnyRepo(), matchers.AnyModelsPullRequest())).ThenReturn(true, nil)
When(githubGetter.GetPullRequest(AnyRepo(), AnyInt())).ThenReturn(GitHubPullRequestParsed(headSHA), nil)
When(vcsClient.GetModifiedFiles(AnyRepo(), matchers.AnyModelsPullRequest())).ThenReturn(c.ModifiedFiles, nil)
@@ -367,14 +514,24 @@ func TestGitHubWorkflow(t *testing.T) {
ctrl.Post(w, pullClosedReq)
responseContains(t, w, 200, "Pull request cleaned successfully")
- // Now we're ready to verify Atlantis made all the comments back
- // (or replies) that we expect.
- // We expect replies for each comment plus one for the locks deleted
- // at the end.
+ // Now we're ready to verify Atlantis made all the comments back (or
+ // replies) that we expect. We expect each plan to have 2 comments,
+ // one for plan, one for policy check, and each apply to have 1 for each
+ // comment plus one for the locks deleted at the end.
expNumReplies := len(c.Comments) + 1
+
if c.ExpAutoplan {
expNumReplies++
+ expNumReplies++
}
+
+ var planRegex = regexp.MustCompile("plan")
+ for _, comment := range c.Comments {
+ if planRegex.MatchString(comment) {
+ expNumReplies++
+ }
+ }
+
if c.ExpAutomerge {
expNumReplies++
}
@@ -395,11 +552,18 @@ func TestGitHubWorkflow(t *testing.T) {
}
}
-func setupE2E(t *testing.T, repoDir string) (server.EventsController, *vcsmocks.MockClient, *mocks.MockGithubPullGetter, *events.FileWorkspace) {
+func setupE2E(t *testing.T, repoDir string, policyChecksEnabled bool) (server.EventsController, *vcsmocks.MockClient, *mocks.MockGithubPullGetter, *events.FileWorkspace) {
allowForkPRs := false
- dataDir, cleanup := TempDir(t)
+ dataDir, binDir, cacheDir, cleanup := mkSubDirs(t)
defer cleanup()
+ //env vars
+
+ if policyChecksEnabled {
+ // need this to be set or we'll fail the policy check step
+ os.Setenv(policy.DefaultConftestVersionEnvKey, "0.21.0")
+ }
+
// Mocks.
e2eVCSClient := vcsmocks.NewMockClient()
e2eStatusUpdater := &events.DefaultCommitStatusUpdater{Client: e2eVCSClient}
@@ -407,7 +571,7 @@ func setupE2E(t *testing.T, repoDir string) (server.EventsController, *vcsmocks.
e2eGitlabGetter := mocks.NewMockGitlabMergeRequestGetter()
// Real dependencies.
- logger := logging.NewSimpleLogger("server", true, logging.Debug)
+ logger := logging.NewSimpleLogger("server", true, logging.Error)
eventParser := &events.EventParser{
GithubUser: "github-user",
GithubToken: "github-token",
@@ -418,7 +582,7 @@ func setupE2E(t *testing.T, repoDir string) (server.EventsController, *vcsmocks.
GithubUser: "github-user",
GitlabUser: "gitlab-user",
}
- terraformClient, err := terraform.NewClient(logger, dataDir, "", "", "", "default-tf-version", "https://releases.hashicorp.com", &NoopTFDownloader{}, false)
+ terraformClient, err := terraform.NewClient(logger, binDir, cacheDir, "", "", "", "default-tf-version", "https://releases.hashicorp.com", &NoopTFDownloader{}, false)
Ok(t, err)
boltdb, err := db.New(dataDir)
Ok(t, err)
@@ -442,6 +606,9 @@ func setupE2E(t *testing.T, repoDir string) (server.EventsController, *vcsmocks.
Ok(t, err)
}
drainer := &events.Drainer{}
+
+ parallelPoolSize := 1
+
preWorkflowHooksCommandRunner := &events.DefaultPreWorkflowHooksCommandRunner{
VCSClient: e2eVCSClient,
GlobalCfg: globalCfg,
@@ -451,55 +618,146 @@ func setupE2E(t *testing.T, repoDir string) (server.EventsController, *vcsmocks.
Drainer: drainer,
PreWorkflowHookRunner: &runtime.PreWorkflowHookRunner{},
}
- commandRunner := &events.DefaultCommandRunner{
- ProjectCommandRunner: &events.DefaultProjectCommandRunner{
- Locker: projectLocker,
- LockURLGenerator: &mockLockURLGenerator{},
- InitStepRunner: &runtime.InitStepRunner{
- TerraformExecutor: terraformClient,
- DefaultTFVersion: defaultTFVersion,
- },
- PlanStepRunner: &runtime.PlanStepRunner{
- TerraformExecutor: terraformClient,
- DefaultTFVersion: defaultTFVersion,
- },
- ApplyStepRunner: &runtime.ApplyStepRunner{
- TerraformExecutor: terraformClient,
- },
- RunStepRunner: &runtime.RunStepRunner{
- TerraformExecutor: terraformClient,
- DefaultTFVersion: defaultTFVersion,
- },
- PullApprovedChecker: e2eVCSClient,
- WorkingDir: workingDir,
- Webhooks: &mockWebhookSender{},
- WorkingDirLocker: locker,
+ projectCommandBuilder := events.NewProjectCommandBuilder(
+ policyChecksEnabled,
+ parser,
+ &events.DefaultProjectFinder{},
+ e2eVCSClient,
+ workingDir,
+ locker,
+ globalCfg,
+ &events.DefaultPendingPlanFinder{},
+ commentParser,
+ false,
+ )
+
+ showStepRunner, err := runtime.NewShowStepRunner(terraformClient, defaultTFVersion)
+
+ Ok(t, err)
+
+ conftestVersion, _ := version.NewVersion("0.21.0")
+
+ conftestExec := policy.NewConfTestExecutorWorkflow(logger, binDir, &NoopTFDownloader{})
+
+ // swapping out version cache to something that always returns the local conftest
+ // binary
+ conftestExec.VersionCache = &LocalConftestCache{}
+
+ policyCheckRunner, err := runtime.NewPolicyCheckStepRunner(
+ conftestVersion,
+ conftestExec,
+ )
+
+ Ok(t, err)
+
+ projectCommandRunner := &events.DefaultProjectCommandRunner{
+ Locker: projectLocker,
+ LockURLGenerator: &mockLockURLGenerator{},
+ InitStepRunner: &runtime.InitStepRunner{
+ TerraformExecutor: terraformClient,
+ DefaultTFVersion: defaultTFVersion,
+ },
+ PlanStepRunner: &runtime.PlanStepRunner{
+ TerraformExecutor: terraformClient,
+ DefaultTFVersion: defaultTFVersion,
},
- EventParser: eventParser,
- VCSClient: e2eVCSClient,
- GithubPullGetter: e2eGithubGetter,
- GitlabMergeRequestGetter: e2eGitlabGetter,
- CommitStatusUpdater: e2eStatusUpdater,
- MarkdownRenderer: &events.MarkdownRenderer{},
- Logger: logger,
- AllowForkPRs: allowForkPRs,
- AllowForkPRsFlag: "allow-fork-prs",
- ProjectCommandBuilder: &events.DefaultProjectCommandBuilder{
- ParserValidator: parser,
- ProjectFinder: &events.DefaultProjectFinder{},
- VCSClient: e2eVCSClient,
- WorkingDir: workingDir,
- WorkingDirLocker: locker,
- PendingPlanFinder: &events.DefaultPendingPlanFinder{},
- CommentBuilder: commentParser,
- GlobalCfg: globalCfg,
- SkipCloneNoChanges: false,
+ ShowStepRunner: showStepRunner,
+ PolicyCheckStepRunner: policyCheckRunner,
+ ApplyStepRunner: &runtime.ApplyStepRunner{
+ TerraformExecutor: terraformClient,
},
- DB: boltdb,
- PendingPlanFinder: &events.DefaultPendingPlanFinder{},
- GlobalAutomerge: false,
- WorkingDir: workingDir,
- Drainer: drainer,
+ RunStepRunner: &runtime.RunStepRunner{
+ TerraformExecutor: terraformClient,
+ DefaultTFVersion: defaultTFVersion,
+ },
+ PullApprovedChecker: e2eVCSClient,
+ WorkingDir: workingDir,
+ Webhooks: &mockWebhookSender{},
+ WorkingDirLocker: locker,
+ }
+
+ dbUpdater := &events.DBUpdater{
+ DB: boltdb,
+ }
+
+ pullUpdater := &events.PullUpdater{
+ HidePrevPlanComments: false,
+ VCSClient: e2eVCSClient,
+ MarkdownRenderer: &events.MarkdownRenderer{},
+ }
+
+ autoMerger := &events.AutoMerger{
+ VCSClient: e2eVCSClient,
+ GlobalAutomerge: false,
+ }
+
+ policyCheckCommandRunner := events.NewPolicyCheckCommandRunner(
+ dbUpdater,
+ pullUpdater,
+ e2eStatusUpdater,
+ projectCommandRunner,
+ parallelPoolSize,
+ )
+
+ planCommandRunner := events.NewPlanCommandRunner(
+ false,
+ e2eVCSClient,
+ &events.DefaultPendingPlanFinder{},
+ workingDir,
+ e2eStatusUpdater,
+ projectCommandBuilder,
+ projectCommandRunner,
+ dbUpdater,
+ pullUpdater,
+ policyCheckCommandRunner,
+ autoMerger,
+ parallelPoolSize,
+ )
+
+ applyCommandRunner := events.NewApplyCommandRunner(
+ e2eVCSClient,
+ false,
+ false,
+ e2eStatusUpdater,
+ projectCommandBuilder,
+ projectCommandRunner,
+ autoMerger,
+ pullUpdater,
+ dbUpdater,
+ boltdb,
+ parallelPoolSize,
+ )
+
+ approvePoliciesCommandRunner := events.NewApprovePoliciesCommandRunner(
+ e2eStatusUpdater,
+ projectCommandBuilder,
+ projectCommandRunner,
+ pullUpdater,
+ dbUpdater,
+ )
+
+ unlockCommandRunner := events.NewUnlockCommandRunner(
+ mocks.NewMockDeleteLockCommand(),
+ e2eVCSClient,
+ )
+
+ commentCommandRunnerByCmd := map[models.CommandName]events.CommentCommandRunner{
+ models.PlanCommand: planCommandRunner,
+ models.ApplyCommand: applyCommandRunner,
+ models.ApprovePoliciesCommand: approvePoliciesCommandRunner,
+ models.UnlockCommand: unlockCommandRunner,
+ }
+
+ commandRunner := &events.DefaultCommandRunner{
+ EventParser: eventParser,
+ VCSClient: e2eVCSClient,
+ GithubPullGetter: e2eGithubGetter,
+ GitlabMergeRequestGetter: e2eGitlabGetter,
+ Logger: logger,
+ AllowForkPRs: allowForkPRs,
+ AllowForkPRsFlag: "allow-fork-prs",
+ CommentCommandRunnerByCmd: commentCommandRunnerByCmd,
+ Drainer: drainer,
}
repoAllowlistChecker, err := events.NewRepoAllowlistChecker("*")
@@ -705,6 +963,48 @@ func assertCommentEquals(t *testing.T, expReplies []string, act string, repoDir
}
}
+// mkSubDirs returns parent, bindir, cachedir and a cleanup func
+func mkSubDirs(t *testing.T) (string, string, string, func()) {
+ tmp, cleanup := TempDir(t)
+ binDir := filepath.Join(tmp, "bin")
+ err := os.MkdirAll(binDir, 0700)
+ Ok(t, err)
+
+ cachedir := filepath.Join(tmp, "plugin-cache")
+ err = os.MkdirAll(cachedir, 0700)
+ Ok(t, err)
+
+ return tmp, binDir, cachedir, cleanup
+}
+
+// Will fail test if conftest0.21.0 isn't in path or isn't version >= 0.21.0
+func ensureRunningConftest(t *testing.T) {
+ localPath, err := exec.LookPath("conftest0.21.0")
+ if err != nil {
+ t.Log("conftest >= 0.21 must be installed to run this test")
+ t.FailNow()
+ }
+ versionOutBytes, err := exec.Command(localPath, "--version").Output() // #nosec
+ if err != nil {
+ t.Logf("error running conftest version: %s", err)
+ t.FailNow()
+ }
+ versionOutput := string(versionOutBytes)
+ match := versionConftestRegex.FindStringSubmatch(versionOutput)
+ if len(match) <= 1 {
+ t.Logf("could not parse conftest version from %s", versionOutput)
+ t.FailNow()
+ }
+ localVersion, err := version.NewVersion(match[1])
+ Ok(t, err)
+ minVersion, err := version.NewVersion("0.21.0")
+ Ok(t, err)
+ if localVersion.LessThan(minVersion) {
+ t.Logf("must have conftest version >= %s, you have %s", minVersion, localVersion)
+ t.FailNow()
+ }
+}
+
// Will fail test if terraform isn't in path and isn't version >= 0.12
func ensureRunning012(t *testing.T) {
localPath, err := exec.LookPath("terraform")
@@ -740,3 +1040,5 @@ func ensureRunning012(t *testing.T) {
// Terraform v0.11.10
// => 0.11.10
var versionRegex = regexp.MustCompile("Terraform v(.*?)(\\s.*)?\n")
+
+var versionConftestRegex = regexp.MustCompile("Version: (.*?)(\\s.*)?\n")
diff --git a/server/server.go b/server/server.go
index d32241685a..153ce6bfc9 100644
--- a/server/server.go
+++ b/server/server.go
@@ -25,6 +25,7 @@ import (
"net/url"
"os"
"os/signal"
+ "path/filepath"
"sort"
"strings"
"syscall"
@@ -41,6 +42,7 @@ import (
"github.com/runatlantis/atlantis/server/events/locking"
"github.com/runatlantis/atlantis/server/events/models"
"github.com/runatlantis/atlantis/server/events/runtime"
+ "github.com/runatlantis/atlantis/server/events/runtime/policy"
"github.com/runatlantis/atlantis/server/events/terraform"
"github.com/runatlantis/atlantis/server/events/vcs"
"github.com/runatlantis/atlantis/server/events/vcs/bitbucketcloud"
@@ -62,6 +64,14 @@ const (
// route. ex:
// mux.Router.Get(LockViewRouteName).URL(LockViewRouteIDQueryParam, "my id")
LockViewRouteIDQueryParam = "id"
+
+ // BinDirName is the name of the directory inside our data dir where
+ // we download binaries.
+ BinDirName = "bin"
+
+ // TerraformPluginCacheDirName is the name of the dir inside our data dir
+ // where we tell terraform to cache plugins and modules.
+ TerraformPluginCacheDirName = "plugin-cache"
)
// Server runs the Atlantis web server.
@@ -123,6 +133,13 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) {
var bitbucketCloudClient *bitbucketcloud.Client
var bitbucketServerClient *bitbucketserver.Client
var azuredevopsClient *vcs.AzureDevopsClient
+
+ policyChecksEnabled := false
+ if userConfig.EnablePolicyChecksFlag {
+ logger.Info("Policy Checks are enabled")
+ policyChecksEnabled = true
+ }
+
if userConfig.GithubUser != "" || userConfig.GithubAppID != 0 {
supportedVCSHosts = append(supportedVCSHosts, models.Github)
if userConfig.GithubUser != "" {
@@ -234,9 +251,23 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) {
}
vcsClient := vcs.NewClientProxy(githubClient, gitlabClient, bitbucketCloudClient, bitbucketServerClient, azuredevopsClient)
commitStatusUpdater := &events.DefaultCommitStatusUpdater{Client: vcsClient, StatusName: userConfig.VCSStatusName}
+
+ binDir, err := mkSubDir(userConfig.DataDir, BinDirName)
+
+ if err != nil {
+ return nil, err
+ }
+
+ cacheDir, err := mkSubDir(userConfig.DataDir, TerraformPluginCacheDirName)
+
+ if err != nil {
+ return nil, err
+ }
+
terraformClient, err := terraform.NewClient(
logger,
- userConfig.DataDir,
+ binDir,
+ cacheDir,
userConfig.TFEToken,
userConfig.TFEHostname,
userConfig.DefaultTFVersion,
@@ -372,69 +403,150 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) {
Drainer: drainer,
PreWorkflowHookRunner: &runtime.PreWorkflowHookRunner{},
}
- commandRunner := &events.DefaultCommandRunner{
- VCSClient: vcsClient,
- GithubPullGetter: githubClient,
- GitlabMergeRequestGetter: gitlabClient,
- AzureDevopsPullGetter: azuredevopsClient,
- CommitStatusUpdater: commitStatusUpdater,
- EventParser: eventParser,
- MarkdownRenderer: markdownRenderer,
- Logger: logger,
- AllowForkPRs: userConfig.AllowForkPRs,
- AllowForkPRsFlag: config.AllowForkPRsFlag,
- HidePrevPlanComments: userConfig.HidePrevPlanComments,
- SilenceForkPRErrors: userConfig.SilenceForkPRErrors,
- SilenceForkPRErrorsFlag: config.SilenceForkPRErrorsFlag,
- SilenceVCSStatusNoPlans: userConfig.SilenceVCSStatusNoPlans,
- DisableApplyAll: userConfig.DisableApplyAll,
- DisableApply: userConfig.DisableApply,
- DisableAutoplan: userConfig.DisableAutoplan,
- ParallelPoolSize: userConfig.ParallelPoolSize,
- ProjectCommandBuilder: &events.DefaultProjectCommandBuilder{
- ParserValidator: validator,
- ProjectFinder: &events.DefaultProjectFinder{},
- VCSClient: vcsClient,
- WorkingDir: workingDir,
- WorkingDirLocker: workingDirLocker,
- GlobalCfg: globalCfg,
- PendingPlanFinder: pendingPlanFinder,
- CommentBuilder: commentParser,
- SkipCloneNoChanges: userConfig.SkipCloneNoChanges,
+ projectCommandBuilder := events.NewProjectCommandBuilder(
+ policyChecksEnabled,
+ validator,
+ &events.DefaultProjectFinder{},
+ vcsClient,
+ workingDir,
+ workingDirLocker,
+ globalCfg,
+ pendingPlanFinder,
+ commentParser,
+ userConfig.SkipCloneNoChanges,
+ )
+
+ showStepRunner, err := runtime.NewShowStepRunner(terraformClient, defaultTfVersion)
+
+ if err != nil {
+ return nil, errors.Wrap(err, "initializing show step runner")
+ }
+
+ policyCheckRunner, err := runtime.NewPolicyCheckStepRunner(
+ defaultTfVersion,
+ policy.NewConfTestExecutorWorkflow(logger, binDir, &terraform.DefaultDownloader{}),
+ )
+
+ if err != nil {
+ return nil, errors.Wrap(err, "initializing policy check runner")
+ }
+
+ projectCommandRunner := &events.DefaultProjectCommandRunner{
+ Locker: projectLocker,
+ LockURLGenerator: router,
+ InitStepRunner: &runtime.InitStepRunner{
+ TerraformExecutor: terraformClient,
+ DefaultTFVersion: defaultTfVersion,
},
- ProjectCommandRunner: &events.DefaultProjectCommandRunner{
- Locker: projectLocker,
- LockURLGenerator: router,
- InitStepRunner: &runtime.InitStepRunner{
- TerraformExecutor: terraformClient,
- DefaultTFVersion: defaultTfVersion,
- },
- PlanStepRunner: &runtime.PlanStepRunner{
- TerraformExecutor: terraformClient,
- DefaultTFVersion: defaultTfVersion,
- CommitStatusUpdater: commitStatusUpdater,
- AsyncTFExec: terraformClient,
- },
- ApplyStepRunner: &runtime.ApplyStepRunner{
- TerraformExecutor: terraformClient,
- CommitStatusUpdater: commitStatusUpdater,
- AsyncTFExec: terraformClient,
- },
+ PlanStepRunner: &runtime.PlanStepRunner{
+ TerraformExecutor: terraformClient,
+ DefaultTFVersion: defaultTfVersion,
+ CommitStatusUpdater: commitStatusUpdater,
+ AsyncTFExec: terraformClient,
+ },
+ ShowStepRunner: showStepRunner,
+ PolicyCheckStepRunner: policyCheckRunner,
+ ApplyStepRunner: &runtime.ApplyStepRunner{
+ TerraformExecutor: terraformClient,
+ CommitStatusUpdater: commitStatusUpdater,
+ AsyncTFExec: terraformClient,
+ },
+ RunStepRunner: runStepRunner,
+ EnvStepRunner: &runtime.EnvStepRunner{
RunStepRunner: runStepRunner,
- EnvStepRunner: &runtime.EnvStepRunner{
- RunStepRunner: runStepRunner,
- },
- PullApprovedChecker: vcsClient,
- WorkingDir: workingDir,
- Webhooks: webhooksManager,
- WorkingDirLocker: workingDirLocker,
},
- WorkingDir: workingDir,
- PendingPlanFinder: pendingPlanFinder,
- DB: boltdb,
- DeleteLockCommand: deleteLockCommand,
- GlobalAutomerge: userConfig.Automerge,
- Drainer: drainer,
+ PullApprovedChecker: vcsClient,
+ WorkingDir: workingDir,
+ Webhooks: webhooksManager,
+ WorkingDirLocker: workingDirLocker,
+ }
+
+ dbUpdater := &events.DBUpdater{
+ DB: boltdb,
+ }
+
+ pullUpdater := &events.PullUpdater{
+ HidePrevPlanComments: userConfig.HidePrevPlanComments,
+ VCSClient: vcsClient,
+ MarkdownRenderer: markdownRenderer,
+ }
+
+ autoMerger := &events.AutoMerger{
+ VCSClient: vcsClient,
+ GlobalAutomerge: userConfig.Automerge,
+ }
+
+ policyCheckCommandRunner := events.NewPolicyCheckCommandRunner(
+ dbUpdater,
+ pullUpdater,
+ commitStatusUpdater,
+ projectCommandRunner,
+ userConfig.ParallelPoolSize,
+ )
+
+ planCommandRunner := events.NewPlanCommandRunner(
+ userConfig.SilenceVCSStatusNoPlans,
+ vcsClient,
+ pendingPlanFinder,
+ workingDir,
+ commitStatusUpdater,
+ projectCommandBuilder,
+ projectCommandRunner,
+ dbUpdater,
+ pullUpdater,
+ policyCheckCommandRunner,
+ autoMerger,
+ userConfig.ParallelPoolSize,
+ )
+
+ applyCommandRunner := events.NewApplyCommandRunner(
+ vcsClient,
+ userConfig.DisableApplyAll,
+ userConfig.DisableApply,
+ commitStatusUpdater,
+ projectCommandBuilder,
+ projectCommandRunner,
+ autoMerger,
+ pullUpdater,
+ dbUpdater,
+ boltdb,
+ userConfig.ParallelPoolSize,
+ )
+
+ approvePoliciesCommandRunner := events.NewApprovePoliciesCommandRunner(
+ commitStatusUpdater,
+ projectCommandBuilder,
+ projectCommandRunner,
+ pullUpdater,
+ dbUpdater,
+ )
+
+ unlockCommandRunner := events.NewUnlockCommandRunner(
+ deleteLockCommand,
+ vcsClient,
+ )
+
+ commentCommandRunnerByCmd := map[models.CommandName]events.CommentCommandRunner{
+ models.PlanCommand: planCommandRunner,
+ models.ApplyCommand: applyCommandRunner,
+ models.ApprovePoliciesCommand: approvePoliciesCommandRunner,
+ models.UnlockCommand: unlockCommandRunner,
+ }
+
+ commandRunner := &events.DefaultCommandRunner{
+ VCSClient: vcsClient,
+ GithubPullGetter: githubClient,
+ GitlabMergeRequestGetter: gitlabClient,
+ AzureDevopsPullGetter: azuredevopsClient,
+ CommentCommandRunnerByCmd: commentCommandRunnerByCmd,
+ EventParser: eventParser,
+ Logger: logger,
+ AllowForkPRs: userConfig.AllowForkPRs,
+ AllowForkPRsFlag: config.AllowForkPRsFlag,
+ SilenceForkPRErrors: userConfig.SilenceForkPRErrors,
+ SilenceForkPRErrorsFlag: config.SilenceForkPRErrorsFlag,
+ DisableAutoplan: userConfig.DisableAutoplan,
+ Drainer: drainer,
}
repoAllowlist, err := events.NewRepoAllowlistChecker(userConfig.RepoAllowlist)
if err != nil {
@@ -612,6 +724,15 @@ func (s *Server) Index(w http.ResponseWriter, _ *http.Request) {
}
}
+func mkSubDir(parentDir string, subDir string) (string, error) {
+ fullDir := filepath.Join(parentDir, subDir)
+ if err := os.MkdirAll(fullDir, 0700); err != nil {
+ return "", errors.Wrapf(err, "unable to create dir %q", fullDir)
+ }
+
+ return fullDir, nil
+}
+
// Healthz returns the health check response. It always returns a 200 currently.
func (s *Server) Healthz(w http.ResponseWriter, _ *http.Request) {
data, err := json.MarshalIndent(&struct {
diff --git a/server/server_test.go b/server/server_test.go
index dfd2b870e3..7be7947af2 100644
--- a/server/server_test.go
+++ b/server/server_test.go
@@ -20,14 +20,10 @@ import (
"net/http"
"net/http/httptest"
"net/url"
- "path/filepath"
"strings"
"testing"
"time"
- "github.com/runatlantis/atlantis/server/events"
- "github.com/runatlantis/atlantis/server/events/yaml/valid"
-
"github.com/gorilla/mux"
. "github.com/petergtz/pegomock"
"github.com/runatlantis/atlantis/server"
@@ -49,35 +45,6 @@ func TestNewServer(t *testing.T) {
}
// todo: test what happens if we set different flags. The generated config should be different.
-func TestRepoConfig(t *testing.T) {
- t.SkipNow()
- tmpDir, err := ioutil.TempDir("", "")
- Ok(t, err)
-
- repoYaml := `
-repos:
-- id: "https://github.com/runatlantis/atlantis"
-`
- expConfig := valid.GlobalCfg{
- Repos: []valid.Repo{
- {
- ID: "https://github.com/runatlantis/atlantis",
- },
- },
- Workflows: map[string]valid.Workflow{},
- }
- repoFileLocation := filepath.Join(tmpDir, "repos.yaml")
- err = ioutil.WriteFile(repoFileLocation, []byte(repoYaml), 0600)
- Ok(t, err)
-
- s, err := server.NewServer(server.UserConfig{
- DataDir: tmpDir,
- RepoConfig: repoFileLocation,
- AtlantisURL: "http://example.com",
- }, server.Config{})
- Ok(t, err)
- Equals(t, expConfig, s.CommandRunner.ProjectCommandBuilder.(*events.DefaultProjectCommandBuilder).GlobalCfg)
-}
func TestNewServer_InvalidAtlantisURL(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "")
diff --git a/server/testfixtures/test-repos/modules-yaml/production/main.tf b/server/testfixtures/test-repos/modules-yaml/production/main.tf
index e8c380ec41..9d09972041 100644
--- a/server/testfixtures/test-repos/modules-yaml/production/main.tf
+++ b/server/testfixtures/test-repos/modules-yaml/production/main.tf
@@ -1,7 +1,7 @@
module "null" {
source = "../modules/null"
- var = "production"
+ var = "production"
}
output "var" {
value = module.null.var
-}
\ No newline at end of file
+}
diff --git a/server/testfixtures/test-repos/modules-yaml/staging/main.tf b/server/testfixtures/test-repos/modules-yaml/staging/main.tf
index be20bbab3d..2f12d35c8d 100644
--- a/server/testfixtures/test-repos/modules-yaml/staging/main.tf
+++ b/server/testfixtures/test-repos/modules-yaml/staging/main.tf
@@ -1,7 +1,7 @@
module "null" {
source = "../modules/null"
- var = "staging"
+ var = "staging"
}
output "var" {
value = module.null.var
-}
\ No newline at end of file
+}
diff --git a/server/testfixtures/test-repos/modules/production/main.tf b/server/testfixtures/test-repos/modules/production/main.tf
index e8c380ec41..9d09972041 100644
--- a/server/testfixtures/test-repos/modules/production/main.tf
+++ b/server/testfixtures/test-repos/modules/production/main.tf
@@ -1,7 +1,7 @@
module "null" {
source = "../modules/null"
- var = "production"
+ var = "production"
}
output "var" {
value = module.null.var
-}
\ No newline at end of file
+}
diff --git a/server/testfixtures/test-repos/modules/staging/main.tf b/server/testfixtures/test-repos/modules/staging/main.tf
index be20bbab3d..2f12d35c8d 100644
--- a/server/testfixtures/test-repos/modules/staging/main.tf
+++ b/server/testfixtures/test-repos/modules/staging/main.tf
@@ -1,7 +1,7 @@
module "null" {
source = "../modules/null"
- var = "staging"
+ var = "staging"
}
output "var" {
value = module.null.var
-}
\ No newline at end of file
+}
diff --git a/server/testfixtures/test-repos/policy-checks-diff-owner/atlantis.yaml b/server/testfixtures/test-repos/policy-checks-diff-owner/atlantis.yaml
new file mode 100644
index 0000000000..8435733cd2
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks-diff-owner/atlantis.yaml
@@ -0,0 +1,4 @@
+version: 3
+projects:
+- dir: .
+ workspace: default
diff --git a/server/testfixtures/test-repos/policy-checks-diff-owner/exp-output-apply-failed.txt b/server/testfixtures/test-repos/policy-checks-diff-owner/exp-output-apply-failed.txt
new file mode 100644
index 0000000000..fbb8325fc7
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks-diff-owner/exp-output-apply-failed.txt
@@ -0,0 +1,4 @@
+Ran Apply for dir: `.` workspace: `default`
+
+**Apply Failed**: Pull request must be mergeable before running apply.
+
diff --git a/server/testfixtures/test-repos/policy-checks-diff-owner/exp-output-approve-policies.txt b/server/testfixtures/test-repos/policy-checks-diff-owner/exp-output-approve-policies.txt
new file mode 100644
index 0000000000..1b72496de1
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks-diff-owner/exp-output-approve-policies.txt
@@ -0,0 +1,4 @@
+**Approve Policies Error**
+```
+contact policy owners to approve failing policies
+```
diff --git a/server/testfixtures/test-repos/policy-checks-diff-owner/exp-output-auto-policy-check.txt b/server/testfixtures/test-repos/policy-checks-diff-owner/exp-output-auto-policy-check.txt
new file mode 100644
index 0000000000..a922cceca2
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks-diff-owner/exp-output-auto-policy-check.txt
@@ -0,0 +1,15 @@
+Ran Policy Check for dir: `.` workspace: `default`
+
+**Policy Check Error**
+```
+exit status 1
+Checking plan against the following policies:
+ test_policy
+FAIL - - WARNING: Null Resource creation is prohibited.
+
+1 test, 0 passed, 0 warnings, 1 failure, 0 exceptions
+
+```
+* :heavy_check_mark: To **approve** failing policies either request an approval from approvers or address the failure by modifying the codebase.
+
+
diff --git a/server/testfixtures/test-repos/policy-checks-diff-owner/exp-output-autoplan.txt b/server/testfixtures/test-repos/policy-checks-diff-owner/exp-output-autoplan.txt
new file mode 100644
index 0000000000..d278415b40
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks-diff-owner/exp-output-autoplan.txt
@@ -0,0 +1,36 @@
+Ran Plan for dir: `.` workspace: `default`
+
+Show Output
+
+```diff
+
+An execution plan has been generated and is shown below.
+Resource actions are indicated with the following symbols:
++ create
+
+Terraform will perform the following actions:
+
+ # null_resource.simple[0] will be created
++ resource "null_resource" "simple" {
+ + id = (known after apply)
+ }
+
+Plan: 1 to add, 0 to change, 0 to destroy.
+
+Changes to Outputs:
++ workspace = "default"
+
+```
+
+* :arrow_forward: To **apply** this plan, comment:
+ * `atlantis apply -d .`
+* :put_litter_in_its_place: To **delete** this plan click [here](lock-url)
+* :repeat: To **plan** this project again, comment:
+ * `atlantis plan -d .`
+
+
+---
+* :fast_forward: To **apply** all unapplied plans from this pull request, comment:
+ * `atlantis apply`
+* :put_litter_in_its_place: To delete all plans and locks for the PR, comment:
+ * `atlantis unlock`
diff --git a/server/testfixtures/test-repos/policy-checks-diff-owner/exp-output-merge.txt b/server/testfixtures/test-repos/policy-checks-diff-owner/exp-output-merge.txt
new file mode 100644
index 0000000000..872c5ee40c
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks-diff-owner/exp-output-merge.txt
@@ -0,0 +1,3 @@
+Locks and plans deleted for the projects and workspaces modified in this pull request:
+
+- dir: `.` workspace: `default`
diff --git a/server/testfixtures/test-repos/policy-checks-diff-owner/main.tf b/server/testfixtures/test-repos/policy-checks-diff-owner/main.tf
new file mode 100644
index 0000000000..582f9ea01d
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks-diff-owner/main.tf
@@ -0,0 +1,7 @@
+resource "null_resource" "simple" {
+ count = 1
+}
+
+output "workspace" {
+ value = terraform.workspace
+}
diff --git a/server/testfixtures/test-repos/policy-checks-diff-owner/policies/policy.rego b/server/testfixtures/test-repos/policy-checks-diff-owner/policies/policy.rego
new file mode 100644
index 0000000000..126c2e4591
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks-diff-owner/policies/policy.rego
@@ -0,0 +1,28 @@
+package main
+
+import input as tfplan
+
+deny[reason] {
+ num_deletes.null_resource > 0
+ reason := "WARNING: Null Resource creation is prohibited."
+}
+
+resource_types = {"null_resource"}
+
+resources[resource_type] = all {
+ some resource_type
+ resource_types[resource_type]
+ all := [name |
+ name := tfplan.resource_changes[_]
+ name.type == resource_type
+ ]
+}
+
+# number of creations of resources of a given type (counts "create" actions)
+num_deletes[resource_type] = num {
+ some resource_type
+ resource_types[resource_type]
+ all := resources[resource_type]
+ deletions := [res | res := all[_]; res.change.actions[_] == "create"]
+ num := count(deletions)
+}
diff --git a/server/testfixtures/test-repos/policy-checks-diff-owner/repos.yaml b/server/testfixtures/test-repos/policy-checks-diff-owner/repos.yaml
new file mode 100644
index 0000000000..a535795f68
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks-diff-owner/repos.yaml
@@ -0,0 +1,12 @@
+repos:
+ - id: /.*/
+ apply_requirements: [mergeable]
+policies:
+ owners:
+ users:
+ - someoneelse
+ policy_sets:
+ - name: test_policy
+ path: policies/policy.rego
+ source: local
+
diff --git a/server/testfixtures/test-repos/policy-checks/atlantis.yaml b/server/testfixtures/test-repos/policy-checks/atlantis.yaml
new file mode 100644
index 0000000000..8435733cd2
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks/atlantis.yaml
@@ -0,0 +1,4 @@
+version: 3
+projects:
+- dir: .
+ workspace: default
diff --git a/server/testfixtures/test-repos/policy-checks/exp-output-apply-failed.txt b/server/testfixtures/test-repos/policy-checks/exp-output-apply-failed.txt
new file mode 100644
index 0000000000..fbb8325fc7
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks/exp-output-apply-failed.txt
@@ -0,0 +1,4 @@
+Ran Apply for dir: `.` workspace: `default`
+
+**Apply Failed**: Pull request must be mergeable before running apply.
+
diff --git a/server/testfixtures/test-repos/policy-checks/exp-output-apply.txt b/server/testfixtures/test-repos/policy-checks/exp-output-apply.txt
new file mode 100644
index 0000000000..e6e44deb94
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks/exp-output-apply.txt
@@ -0,0 +1,24 @@
+Ran Apply for dir: `.` workspace: `default`
+
+Show Output
+
+```diff
+null_resource.simple:
+null_resource.simple:
+
+Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
+
+The state of your infrastructure has been saved to the path
+below. This state is required to modify and destroy your
+infrastructure, so keep it safe. To inspect the complete state
+use the `terraform show` command.
+
+State path: terraform.tfstate
+
+Outputs:
+
+workspace = "default"
+
+```
+
+
diff --git a/server/testfixtures/test-repos/policy-checks/exp-output-approve-policies.txt b/server/testfixtures/test-repos/policy-checks/exp-output-approve-policies.txt
new file mode 100644
index 0000000000..f5e100c23e
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks/exp-output-approve-policies.txt
@@ -0,0 +1,5 @@
+Approved Policies for 1 projects:
+
+1. dir: `.` workspace: `default`
+
+
diff --git a/server/testfixtures/test-repos/policy-checks/exp-output-auto-policy-check.txt b/server/testfixtures/test-repos/policy-checks/exp-output-auto-policy-check.txt
new file mode 100644
index 0000000000..a922cceca2
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks/exp-output-auto-policy-check.txt
@@ -0,0 +1,15 @@
+Ran Policy Check for dir: `.` workspace: `default`
+
+**Policy Check Error**
+```
+exit status 1
+Checking plan against the following policies:
+ test_policy
+FAIL - - WARNING: Null Resource creation is prohibited.
+
+1 test, 0 passed, 0 warnings, 1 failure, 0 exceptions
+
+```
+* :heavy_check_mark: To **approve** failing policies either request an approval from approvers or address the failure by modifying the codebase.
+
+
diff --git a/server/testfixtures/test-repos/policy-checks/exp-output-autoplan.txt b/server/testfixtures/test-repos/policy-checks/exp-output-autoplan.txt
new file mode 100644
index 0000000000..d278415b40
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks/exp-output-autoplan.txt
@@ -0,0 +1,36 @@
+Ran Plan for dir: `.` workspace: `default`
+
+Show Output
+
+```diff
+
+An execution plan has been generated and is shown below.
+Resource actions are indicated with the following symbols:
++ create
+
+Terraform will perform the following actions:
+
+ # null_resource.simple[0] will be created
++ resource "null_resource" "simple" {
+ + id = (known after apply)
+ }
+
+Plan: 1 to add, 0 to change, 0 to destroy.
+
+Changes to Outputs:
++ workspace = "default"
+
+```
+
+* :arrow_forward: To **apply** this plan, comment:
+ * `atlantis apply -d .`
+* :put_litter_in_its_place: To **delete** this plan click [here](lock-url)
+* :repeat: To **plan** this project again, comment:
+ * `atlantis plan -d .`
+
+
+---
+* :fast_forward: To **apply** all unapplied plans from this pull request, comment:
+ * `atlantis apply`
+* :put_litter_in_its_place: To delete all plans and locks for the PR, comment:
+ * `atlantis unlock`
diff --git a/server/testfixtures/test-repos/policy-checks/exp-output-merge.txt b/server/testfixtures/test-repos/policy-checks/exp-output-merge.txt
new file mode 100644
index 0000000000..872c5ee40c
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks/exp-output-merge.txt
@@ -0,0 +1,3 @@
+Locks and plans deleted for the projects and workspaces modified in this pull request:
+
+- dir: `.` workspace: `default`
diff --git a/server/testfixtures/test-repos/policy-checks/main.tf b/server/testfixtures/test-repos/policy-checks/main.tf
new file mode 100644
index 0000000000..582f9ea01d
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks/main.tf
@@ -0,0 +1,7 @@
+resource "null_resource" "simple" {
+ count = 1
+}
+
+output "workspace" {
+ value = terraform.workspace
+}
diff --git a/server/testfixtures/test-repos/policy-checks/policies/policy.rego b/server/testfixtures/test-repos/policy-checks/policies/policy.rego
new file mode 100644
index 0000000000..126c2e4591
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks/policies/policy.rego
@@ -0,0 +1,28 @@
+package main
+
+import input as tfplan
+
+deny[reason] {
+ num_deletes.null_resource > 0
+ reason := "WARNING: Null Resource creation is prohibited."
+}
+
+resource_types = {"null_resource"}
+
+resources[resource_type] = all {
+ some resource_type
+ resource_types[resource_type]
+ all := [name |
+ name := tfplan.resource_changes[_]
+ name.type == resource_type
+ ]
+}
+
+# number of creations of resources of a given type (counts "create" actions)
+num_deletes[resource_type] = num {
+ some resource_type
+ resource_types[resource_type]
+ all := resources[resource_type]
+ deletions := [res | res := all[_]; res.change.actions[_] == "create"]
+ num := count(deletions)
+}
diff --git a/server/testfixtures/test-repos/policy-checks/repos.yaml b/server/testfixtures/test-repos/policy-checks/repos.yaml
new file mode 100644
index 0000000000..b1a44de4ca
--- /dev/null
+++ b/server/testfixtures/test-repos/policy-checks/repos.yaml
@@ -0,0 +1,12 @@
+repos:
+ - id: /.*/
+ apply_requirements: [mergeable]
+policies:
+ owners:
+ users:
+ - runatlantis
+ policy_sets:
+ - name: test_policy
+ path: policies/policy.rego
+ source: local
+
diff --git a/server/testfixtures/test-repos/simple-yaml/main.tf b/server/testfixtures/test-repos/simple-yaml/main.tf
index 5244684b7f..b71b9e786e 100644
--- a/server/testfixtures/test-repos/simple-yaml/main.tf
+++ b/server/testfixtures/test-repos/simple-yaml/main.tf
@@ -12,4 +12,4 @@ output "var" {
output "workspace" {
value = terraform.workspace
-}
\ No newline at end of file
+}
diff --git a/server/testfixtures/test-repos/simple/exp-output-auto-policy-check.txt b/server/testfixtures/test-repos/simple/exp-output-auto-policy-check.txt
new file mode 100644
index 0000000000..6f8ae8098c
--- /dev/null
+++ b/server/testfixtures/test-repos/simple/exp-output-auto-policy-check.txt
@@ -0,0 +1,17 @@
+Ran Policy Check for dir: `.` workspace: `default`
+
+```diff
+
+```
+
+* :arrow_forward: To **apply** this plan, comment:
+ * `atlantis apply -d .`
+* :put_litter_in_its_place: To **delete** this plan click [here](lock-url)
+* :repeat: To re-run policies **plan** this project again by commenting:
+ * `atlantis plan -d .`
+
+---
+* :fast_forward: To **apply** all unapplied plans from this pull request, comment:
+ * `atlantis apply`
+* :put_litter_in_its_place: To delete all plans and locks for the PR, comment:
+ * `atlantis unlock`
diff --git a/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/main.tf b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/main.tf
index 72fbd2bec1..4acc30b31e 100644
--- a/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/main.tf
+++ b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/main.tf
@@ -16,4 +16,4 @@ output "var" {
output "workspace" {
value = terraform.workspace
-}
\ No newline at end of file
+}
diff --git a/server/testfixtures/test-repos/tfvars-yaml/main.tf b/server/testfixtures/test-repos/tfvars-yaml/main.tf
index 72fbd2bec1..4acc30b31e 100644
--- a/server/testfixtures/test-repos/tfvars-yaml/main.tf
+++ b/server/testfixtures/test-repos/tfvars-yaml/main.tf
@@ -16,4 +16,4 @@ output "var" {
output "workspace" {
value = terraform.workspace
-}
\ No newline at end of file
+}
diff --git a/server/user_config.go b/server/user_config.go
index 7a00fabb10..81a3eeee86 100644
--- a/server/user_config.go
+++ b/server/user_config.go
@@ -27,6 +27,7 @@ type UserConfig struct {
DisableAutoplan bool `mapstructure:"disable-autoplan"`
DisableMarkdownFolding bool `mapstructure:"disable-markdown-folding"`
DisableRepoLocking bool `mapstructure:"disable-repo-locking"`
+ EnablePolicyChecksFlag bool `mapstructure:"enable-policy-checks"`
GithubHostname string `mapstructure:"gh-hostname"`
GithubToken string `mapstructure:"gh-token"`
GithubUser string `mapstructure:"gh-user"`
diff --git a/testing/Dockerfile b/testing/Dockerfile
index 4c2e235d85..de149d50e2 100644
--- a/testing/Dockerfile
+++ b/testing/Dockerfile
@@ -11,4 +11,19 @@ RUN curl -LOks https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/ter
sudo unzip terraform_${TERRAFORM_VERSION}_linux_amd64.zip -d /usr/local/bin/tf/versions/${TERRAFORM_VERSION} && \
sudo ln -s /usr/local/bin/tf/versions/${TERRAFORM_VERSION}/terraform /usr/local/bin/terraform && \
rm terraform_${TERRAFORM_VERSION}_linux_amd64.zip
+
+# Install conftest
+ENV DEFAULT_CONFTEST_VERSION=0.21.0
+
+RUN AVAILABLE_CONFTEST_VERSIONS="${DEFAULT_CONFTEST_VERSION}" && \
+ for VERSION in ${AVAILABLE_CONFTEST_VERSIONS}; do \
+ curl -LOs https://github.com/open-policy-agent/conftest/releases/download/v${VERSION}/conftest_${VERSION}_Linux_x86_64.tar.gz && \
+ curl -LOs https://github.com/open-policy-agent/conftest/releases/download/v${VERSION}/checksums.txt && \
+ sed -n "/conftest_${VERSION}_Linux_x86_64.tar.gz/p" checksums.txt | sha256sum -c && \
+ sudo mkdir -p /usr/local/bin/cft/versions/${VERSION} && \
+ sudo tar -C /usr/local/bin/cft/versions/${VERSION} -xzf conftest_${VERSION}_Linux_x86_64.tar.gz && \
+ sudo ln -s /usr/local/bin/cft/versions/${VERSION}/conftest /usr/local/bin/conftest${VERSION} && \
+ rm conftest_${VERSION}_Linux_x86_64.tar.gz && \
+ rm checksums.txt; \
+ done
RUN go get golang.org/x/tools/cmd/goimports
diff --git a/vendor/modules.txt b/vendor/modules.txt
index d0eb8ba54e..960e2f9e6c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -202,6 +202,10 @@ github.com/konsorten/go-windows-terminal-sequences
# github.com/leodido/go-urn v1.2.0
## explicit
github.com/leodido/go-urn
+# github.com/lusis/go-slackbot v0.0.0-20180109053408-401027ccfef5
+## explicit
+# github.com/lusis/slack-test v0.0.0-20190426140909-c40012f20018
+## explicit
# github.com/magiconair/properties v1.8.1
github.com/magiconair/properties
# github.com/mattn/go-colorable v0.0.9