From d9807ca6b1cfd229225030b7536aa98c3a0787a4 Mon Sep 17 00:00:00 2001 From: Finn Arne Gangstad Date: Wed, 7 Jun 2023 16:48:02 +0200 Subject: [PATCH 1/9] Safer handling of merging with an updated upstream. We used to call forceClone() to update with upstream, but this deletes the checked out directory. This is inefficient, can delete existing plan files, and is very surprising if you are working manually in the working directory. We now fetch an updated upstream, and re-do the merge operation. This leaves any working files intact. --- server/events/working_dir.go | 92 ++++++++++++++++++++++++------------ 1 file changed, 63 insertions(+), 29 deletions(-) diff --git a/server/events/working_dir.go b/server/events/working_dir.go index 5c57bd6a21..f6eba30c5b 100644 --- a/server/events/working_dir.go +++ b/server/events/working_dir.go @@ -95,7 +95,6 @@ func (w *FileWorkspace) Clone( p models.PullRequest, workspace string) (string, bool, error) { cloneDir := w.cloneDir(p.BaseRepo, p, workspace) - hasDiverged := false defer func() { w.SafeToReClone = false }() // If the directory already exists, check if it's at the right commit. @@ -125,8 +124,8 @@ func (w *FileWorkspace) Clone( // commit, only a 12 character prefix. if strings.HasPrefix(currCommit, p.HeadCommit) { if w.SafeToReClone && w.CheckoutMerge && w.recheckDiverged(log, p, headRepo, cloneDir) { - log.Info("base branch has been updated, using merge strategy and will clone again") - hasDiverged = true + log.Info("base branch has been updated, using merge strategy and will merge again") + return cloneDir, true, w.mergeAgain(log, cloneDir, headRepo, p) } else { log.Debug("repo is at correct commit %q so will not re-clone", p.HeadCommit) return cloneDir, false, nil @@ -138,7 +137,7 @@ func (w *FileWorkspace) Clone( } // Otherwise we clone the repo. - return cloneDir, hasDiverged, w.forceClone(log, cloneDir, headRepo, p) + return cloneDir, false, w.forceClone(log, cloneDir, headRepo, p) } // recheckDiverged returns true if the branch we're merging into has diverged @@ -242,26 +241,7 @@ func (w *FileWorkspace) forceClone(log logging.SimpleLogging, baseCloneURL = w.TestingOverrideBaseCloneURL } - runGit := func(args ...string) error { - cmd := exec.Command("git", args...) // nolint: gosec - cmd.Dir = cloneDir - // The git merge command requires these env vars are set. - cmd.Env = append(os.Environ(), []string{ - "EMAIL=atlantis@runatlantis.io", - "GIT_AUTHOR_NAME=atlantis", - "GIT_COMMITTER_NAME=atlantis", - }...) - - cmdStr := w.sanitizeGitCredentials(strings.Join(cmd.Args, " "), p.BaseRepo, headRepo) - output, err := cmd.CombinedOutput() - sanitizedOutput := w.sanitizeGitCredentials(string(output), p.BaseRepo, headRepo) - if err != nil { - sanitizedErrMsg := w.sanitizeGitCredentials(err.Error(), p.BaseRepo, headRepo) - return fmt.Errorf("running %s: %s: %s", cmdStr, sanitizedOutput, sanitizedErrMsg) - } - log.Debug("ran: %s. 
Output: %s", cmdStr, strings.TrimSuffix(sanitizedOutput, "\n")) - return nil - } + runGit := w.makeGitRunner(log, cloneDir, headRepo, p) // if branch strategy, use depth=1 if !w.CheckoutMerge { @@ -284,7 +264,66 @@ func (w *FileWorkspace) forceClone(log logging.SimpleLogging, if err := runGit("remote", "add", "head", headCloneURL); err != nil { return err } + if w.GpgNoSigningEnabled { + if err := runGit("config", "--local", "commit.gpgsign", "false"); err != nil { + return err + } + } + + return w.mergeToBaseBranch(p, runGit) +} + +// There is a new upstream update that we need, and we want to update to it +// without deleting any existing plans +func (w *FileWorkspace) mergeAgain(log logging.SimpleLogging, + cloneDir string, + headRepo models.Repo, + p models.PullRequest) error { + value, _ := cloneLocks.LoadOrStore(cloneDir, new(sync.Mutex)) + mutex := value.(*sync.Mutex) + + defer mutex.Unlock() + if locked := mutex.TryLock(); !locked { + mutex.Lock() + return nil + } + + runGit := w.makeGitRunner(log, cloneDir, headRepo, p) + + // Reset branch as if it was cloned again + if err := runGit("reset", "--hard", fmt.Sprintf("refs/remotes/head/%s", p.BaseBranch)); err != nil { + return err + } + + return w.mergeToBaseBranch(p, runGit) +} + +func (w *FileWorkspace) makeGitRunner(log logging.SimpleLogging, cloneDir string, headRepo models.Repo, p models.PullRequest) func(args ...string) error { + return func(args ...string) error { + cmd := exec.Command("git", args...) // nolint: gosec + cmd.Dir = cloneDir + // The git merge command requires these env vars are set. + cmd.Env = append(os.Environ(), []string{ + "EMAIL=atlantis@runatlantis.io", + "GIT_AUTHOR_NAME=atlantis", + "GIT_COMMITTER_NAME=atlantis", + }...) + + cmdStr := w.sanitizeGitCredentials(strings.Join(cmd.Args, " "), p.BaseRepo, headRepo) + output, err := cmd.CombinedOutput() + sanitizedOutput := w.sanitizeGitCredentials(string(output), p.BaseRepo, headRepo) + if err != nil { + sanitizedErrMsg := w.sanitizeGitCredentials(err.Error(), p.BaseRepo, headRepo) + return fmt.Errorf("running %s: %s: %s", cmdStr, sanitizedOutput, sanitizedErrMsg) + } + log.Debug("ran: %s. Output: %s", cmdStr, strings.TrimSuffix(sanitizedOutput, "\n")) + return nil + } +} + +// Merge the PR into the base branch. +func (w *FileWorkspace) mergeToBaseBranch(p models.PullRequest, runGit func(args ...string) error) error { fetchRef := fmt.Sprintf("+refs/heads/%s:", p.HeadBranch) fetchRemote := "head" if w.GithubAppEnabled { @@ -303,11 +342,6 @@ func (w *FileWorkspace) forceClone(log logging.SimpleLogging, } } - if w.GpgNoSigningEnabled { - if err := runGit("config", "--local", "commit.gpgsign", "false"); err != nil { - return err - } - } if err := runGit("merge-base", p.BaseBranch, "FETCH_HEAD"); err != nil { // git merge-base returning error means that we did not receive enough commits in shallow clone. // Fall back to retrieving full repo history. From e093ca71a5dcc14d1ee83ef61bcfca7c2831d3f4 Mon Sep 17 00:00:00 2001 From: Finn Arne Gangstad Date: Wed, 7 Jun 2023 17:00:20 +0200 Subject: [PATCH 2/9] Rename SafeToReClone -> CheckForUpstreamChanges It's never safe to clone again. But sometimes we need to check for upstream changes to avoid reverting changes. The flag is now used to know when we need to merge again non-destructively with new changes. 
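For reference, the call order this flag assumes is the one wired up in doPlan below: take the project lock, set the flag, then Clone. A minimal Go sketch of that order (planWithUpstreamCheck and its signature are illustrative only; WorkingDir, command.ProjectContext and the Clone signature are the ones used in this series):

// Sketch: the flag is only meaningful while the project lock is held and is
// consumed (and reset) by the very next Clone call for this pull request.
func planWithUpstreamCheck(ctx command.ProjectContext, wd events.WorkingDir) (bool, error) {
	wd.SetCheckForUpstreamChanges()
	// Clone is idempotent; with the merge checkout strategy it now re-merges
	// in place instead of deleting the working directory.
	_, hasDiverged, err := wd.Clone(ctx.Log, ctx.HeadRepo, ctx.Pull, ctx.Workspace)
	return hasDiverged, err
}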
--- server/events/mock_workingdir_test.go | 6 +++--- server/events/mocks/mock_working_dir.go | 6 +++--- server/events/project_command_runner.go | 2 +- server/events/working_dir.go | 16 ++++++++-------- server/events/working_dir_test.go | 4 ++-- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/server/events/mock_workingdir_test.go b/server/events/mock_workingdir_test.go index b5a9b7335d..553c422e0b 100644 --- a/server/events/mock_workingdir_test.go +++ b/server/events/mock_workingdir_test.go @@ -132,12 +132,12 @@ func (mock *MockWorkingDir) HasDiverged(_param0 logging.SimpleLogging, _param1 s return ret0 } -func (mock *MockWorkingDir) SetSafeToReClone() { +func (mock *MockWorkingDir) SetCheckForUpstreamChanges() { if mock == nil { panic("mock must not be nil. Use myMock := NewMockWorkingDir().") } params := []pegomock.Param{} - pegomock.GetGenericMockFrom(mock).Invoke("SetSafeToReClone", params, []reflect.Type{}) + pegomock.GetGenericMockFrom(mock).Invoke("SetCheckForUpstreamChanges", params, []reflect.Type{}) } func (mock *MockWorkingDir) VerifyWasCalledOnce() *VerifierMockWorkingDir { @@ -381,7 +381,7 @@ func (c *MockWorkingDir_HasDiverged_OngoingVerification) GetAllCapturedArguments func (verifier *VerifierMockWorkingDir) SetSafeToReClone() *MockWorkingDir_SetSafeToReClone_OngoingVerification { params := []pegomock.Param{} - methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "SetSafeToReClone", params, verifier.timeout) + methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "SetCheckForUpstreamChanges", params, verifier.timeout) return &MockWorkingDir_SetSafeToReClone_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} } diff --git a/server/events/mocks/mock_working_dir.go b/server/events/mocks/mock_working_dir.go index 362b43f7f9..853c8f077f 100644 --- a/server/events/mocks/mock_working_dir.go +++ b/server/events/mocks/mock_working_dir.go @@ -132,12 +132,12 @@ func (mock *MockWorkingDir) HasDiverged(_param0 logging.SimpleLogging, _param1 s return ret0 } -func (mock *MockWorkingDir) SetSafeToReClone() { +func (mock *MockWorkingDir) SetCheckForUpstreamChanges() { if mock == nil { panic("mock must not be nil. 
Use myMock := NewMockWorkingDir().") } params := []pegomock.Param{} - pegomock.GetGenericMockFrom(mock).Invoke("SetSafeToReClone", params, []reflect.Type{}) + pegomock.GetGenericMockFrom(mock).Invoke("SetCheckForUpstreamChanges", params, []reflect.Type{}) } func (mock *MockWorkingDir) VerifyWasCalledOnce() *VerifierMockWorkingDir { @@ -381,7 +381,7 @@ func (c *MockWorkingDir_HasDiverged_OngoingVerification) GetAllCapturedArguments func (verifier *VerifierMockWorkingDir) SetSafeToReClone() *MockWorkingDir_SetSafeToReClone_OngoingVerification { params := []pegomock.Param{} - methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "SetSafeToReClone", params, verifier.timeout) + methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "SetCheckForUpstreamChanges", params, verifier.timeout) return &MockWorkingDir_SetSafeToReClone_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} } diff --git a/server/events/project_command_runner.go b/server/events/project_command_runner.go index 8c9a7592f0..e7b8655fce 100644 --- a/server/events/project_command_runner.go +++ b/server/events/project_command_runner.go @@ -528,7 +528,7 @@ func (p *DefaultProjectCommandRunner) doPlan(ctx command.ProjectContext) (*model } defer unlockFn() - p.WorkingDir.SetSafeToReClone() + p.WorkingDir.SetCheckForUpstreamChanges() // Clone is idempotent so okay to run even if the repo was already cloned. repoDir, hasDiverged, cloneErr := p.WorkingDir.Clone(ctx.Log, ctx.HeadRepo, ctx.Pull, ctx.Workspace) if cloneErr != nil { diff --git a/server/events/working_dir.go b/server/events/working_dir.go index f6eba30c5b..8d97faf362 100644 --- a/server/events/working_dir.go +++ b/server/events/working_dir.go @@ -52,7 +52,7 @@ type WorkingDir interface { // Set a flag in the workingdir so Clone() can know that it is safe to re-clone the workingdir if // the upstream branch has been modified. This is only safe after grabbing the project lock // and before running any plans - SetSafeToReClone() + SetCheckForUpstreamChanges() } // FileWorkspace implements WorkingDir with the file system. @@ -79,8 +79,8 @@ type FileWorkspace struct { GithubAppEnabled bool // use the global setting without overriding GpgNoSigningEnabled bool - // flag indicating if a re-clone will be safe (project lock held, about to run plan) - SafeToReClone bool + // flag indicating if we have to merge with potential new changes upstream (directly after grabbing project lock) + CheckForUpstreamChanges bool } // Clone git clones headRepo, checks out the branch and then returns the absolute @@ -95,7 +95,7 @@ func (w *FileWorkspace) Clone( p models.PullRequest, workspace string) (string, bool, error) { cloneDir := w.cloneDir(p.BaseRepo, p, workspace) - defer func() { w.SafeToReClone = false }() + defer func() { w.CheckForUpstreamChanges = false }() // If the directory already exists, check if it's at the right commit. // If so, then we do nothing. @@ -123,7 +123,7 @@ func (w *FileWorkspace) Clone( // We're prefix matching here because BitBucket doesn't give us the full // commit, only a 12 character prefix. 
if strings.HasPrefix(currCommit, p.HeadCommit) { - if w.SafeToReClone && w.CheckoutMerge && w.recheckDiverged(log, p, headRepo, cloneDir) { + if w.CheckForUpstreamChanges && w.CheckoutMerge && w.recheckDiverged(log, p, headRepo, cloneDir) { log.Info("base branch has been updated, using merge strategy and will merge again") return cloneDir, true, w.mergeAgain(log, cloneDir, headRepo, p) } else { @@ -403,7 +403,7 @@ func (w *FileWorkspace) sanitizeGitCredentials(s string, base models.Repo, head return strings.Replace(baseReplaced, head.CloneURL, head.SanitizedCloneURL, -1) } -// Set the flag that indicates it is safe to re-clone if necessary -func (w *FileWorkspace) SetSafeToReClone() { - w.SafeToReClone = true +// Set the flag that indicates we need to check for upstream changes (if using merge checkout strategy) +func (w *FileWorkspace) SetCheckForUpstreamChanges() { + w.CheckForUpstreamChanges = true } diff --git a/server/events/working_dir_test.go b/server/events/working_dir_test.go index 5bd8ff7f4c..c81938c7a3 100644 --- a/server/events/working_dir_test.go +++ b/server/events/working_dir_test.go @@ -480,7 +480,7 @@ func TestClone_MasterHasDiverged(t *testing.T) { Assert(t, hasDiverged == false, "Clone with CheckoutMerge=false should not merge") wd.CheckoutMerge = true - wd.SetSafeToReClone() + wd.SetCheckForUpstreamChanges() // Run the clone twice with the merge strategy, the first run should // return true for hasDiverged, subsequent runs should // return false since the first call is supposed to merge. @@ -492,7 +492,7 @@ func TestClone_MasterHasDiverged(t *testing.T) { Ok(t, err) Assert(t, hasDiverged == true, "First clone with CheckoutMerge=true with diverged base should have merged") - wd.SetSafeToReClone() + wd.SetCheckForUpstreamChanges() _, hasDiverged, err = wd.Clone(logging.NewNoopLogger(t), models.Repo{CloneURL: repoDir}, models.PullRequest{ BaseRepo: models.Repo{CloneURL: repoDir}, HeadBranch: "second-pr", From 00ad2bb365e066bf4d5b6abe41c54873c2f9e5f6 Mon Sep 17 00:00:00 2001 From: nitrocode <7775707+nitrocode@users.noreply.github.com> Date: Thu, 8 Jun 2023 07:50:02 -0500 Subject: [PATCH 3/9] Update fixtures.go --- server/events/vcs/testdata/fixtures.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/events/vcs/testdata/fixtures.go b/server/events/vcs/testdata/fixtures.go index b618730804..ac66d58a99 100644 --- a/server/events/vcs/testdata/fixtures.go +++ b/server/events/vcs/testdata/fixtures.go @@ -22,7 +22,7 @@ import ( "testing" "github.com/golang-jwt/jwt/v5" - "github.com/google/go-github/v52/github" + "github.com/google/go-github/v53/github" "github.com/mcdafydd/go-azuredevops/azuredevops" ) From c0e6b20b6da53e67d0ebadde97c3c7729168c9fa Mon Sep 17 00:00:00 2001 From: Finn Arne Gangstad Date: Thu, 8 Jun 2023 16:27:14 +0200 Subject: [PATCH 4/9] Add test to make sure plans are not wiped out As long as the branch itself has not been updated, plans should be kept. Even if upstream has changed. 
--- server/events/working_dir_test.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/server/events/working_dir_test.go b/server/events/working_dir_test.go index c81938c7a3..88d1783c6b 100644 --- a/server/events/working_dir_test.go +++ b/server/events/working_dir_test.go @@ -3,6 +3,7 @@ package events_test import ( "crypto/tls" "fmt" + "github.com/stretchr/testify/assert" "net/http" "os" "path/filepath" @@ -469,6 +470,13 @@ func TestClone_MasterHasDiverged(t *testing.T) { GpgNoSigningEnabled: true, } + // Pretend terraform has created a plan file, we'll check for it later + planFile := filepath.Join(repoDir, "repos/0/default/default.tfplan") + assert.NoFileExists(t, planFile) + _, err := os.Create(planFile) + Assert(t, err == nil, "creating plan file: %v", err) + assert.FileExists(t, planFile) + // Run the clone without the checkout merge strategy. It should return // false for hasDiverged _, hasDiverged, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ @@ -478,6 +486,7 @@ func TestClone_MasterHasDiverged(t *testing.T) { }, "default") Ok(t, err) Assert(t, hasDiverged == false, "Clone with CheckoutMerge=false should not merge") + assert.FileExists(t, planFile, "Existing plan file should not be deleted by Clone with merge disabled") wd.CheckoutMerge = true wd.SetCheckForUpstreamChanges() @@ -490,6 +499,7 @@ func TestClone_MasterHasDiverged(t *testing.T) { BaseBranch: "main", }, "default") Ok(t, err) + assert.FileExists(t, planFile, "Existing plan file should not be deleted by merging again") Assert(t, hasDiverged == true, "First clone with CheckoutMerge=true with diverged base should have merged") wd.SetCheckForUpstreamChanges() @@ -500,6 +510,7 @@ func TestClone_MasterHasDiverged(t *testing.T) { }, "default") Ok(t, err) Assert(t, hasDiverged == false, "Second clone with CheckoutMerge=true and initially diverged base should not merge again") + assert.FileExists(t, planFile, "Existing plan file should not have been deleted") } func TestHasDiverged_MasterHasDiverged(t *testing.T) { From 88eac0a2bc9bede7f327aecd57872c80e9eef5c5 Mon Sep 17 00:00:00 2001 From: Finn Arne Gangstad Date: Thu, 8 Jun 2023 17:04:17 +0200 Subject: [PATCH 5/9] renamed HasDiverged to MergedAgain in PlanResult and from Clone() This flag was only set to true in case a call to Clone() ended up merging with an updated upstream, so the new name better represents what it means. 
--- server/events/markdown_renderer_test.go | 4 +- server/events/mock_workingdir_test.go | 10 ++--- server/events/mocks/mock_working_dir.go | 10 ++--- server/events/models/models.go | 8 ++-- server/events/project_command_runner.go | 4 +- server/events/templates/diverged.tmpl | 2 +- server/events/working_dir_test.go | 52 ++++++++++++------------- 7 files changed, 45 insertions(+), 45 deletions(-) diff --git a/server/events/markdown_renderer_test.go b/server/events/markdown_renderer_test.go index 33381e8b62..95aa12b3dd 100644 --- a/server/events/markdown_renderer_test.go +++ b/server/events/markdown_renderer_test.go @@ -191,7 +191,7 @@ $$$ LockURL: "lock-url", RePlanCmd: "atlantis plan -d path -w workspace", ApplyCmd: "atlantis apply -d path -w workspace", - HasDiverged: true, + MergedAgain: true, }, Workspace: "workspace", RepoRelDir: "path", @@ -1975,7 +1975,7 @@ $$$ LockURL: "lock-url", RePlanCmd: "atlantis plan -d path -w workspace", ApplyCmd: "atlantis apply -d path -w workspace", - HasDiverged: true, + MergedAgain: true, }, Workspace: "workspace", RepoRelDir: "path", diff --git a/server/events/mock_workingdir_test.go b/server/events/mock_workingdir_test.go index 553c422e0b..aa224f119b 100644 --- a/server/events/mock_workingdir_test.go +++ b/server/events/mock_workingdir_test.go @@ -379,19 +379,19 @@ func (c *MockWorkingDir_HasDiverged_OngoingVerification) GetAllCapturedArguments return } -func (verifier *VerifierMockWorkingDir) SetSafeToReClone() *MockWorkingDir_SetSafeToReClone_OngoingVerification { +func (verifier *VerifierMockWorkingDir) SetCheckForUpstreamChanges() *MockWorkingDir_SetCheckForUpstreamChanges_OngoingVerification { params := []pegomock.Param{} methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "SetCheckForUpstreamChanges", params, verifier.timeout) - return &MockWorkingDir_SetSafeToReClone_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} + return &MockWorkingDir_SetCheckForUpstreamChanges_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} } -type MockWorkingDir_SetSafeToReClone_OngoingVerification struct { +type MockWorkingDir_SetCheckForUpstreamChanges_OngoingVerification struct { mock *MockWorkingDir methodInvocations []pegomock.MethodInvocation } -func (c *MockWorkingDir_SetSafeToReClone_OngoingVerification) GetCapturedArguments() { +func (c *MockWorkingDir_SetCheckForUpstreamChanges_OngoingVerification) GetCapturedArguments() { } -func (c *MockWorkingDir_SetSafeToReClone_OngoingVerification) GetAllCapturedArguments() { +func (c *MockWorkingDir_SetCheckForUpstreamChanges_OngoingVerification) GetAllCapturedArguments() { } diff --git a/server/events/mocks/mock_working_dir.go b/server/events/mocks/mock_working_dir.go index 853c8f077f..e1fd6ae9a1 100644 --- a/server/events/mocks/mock_working_dir.go +++ b/server/events/mocks/mock_working_dir.go @@ -379,19 +379,19 @@ func (c *MockWorkingDir_HasDiverged_OngoingVerification) GetAllCapturedArguments return } -func (verifier *VerifierMockWorkingDir) SetSafeToReClone() *MockWorkingDir_SetSafeToReClone_OngoingVerification { +func (verifier *VerifierMockWorkingDir) SetCheckForUpstreamChanges() *MockWorkingDir_SetCheckForUpstreamChanges_OngoingVerification { params := []pegomock.Param{} methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "SetCheckForUpstreamChanges", params, verifier.timeout) - return 
&MockWorkingDir_SetSafeToReClone_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} + return &MockWorkingDir_SetCheckForUpstreamChanges_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} } -type MockWorkingDir_SetSafeToReClone_OngoingVerification struct { +type MockWorkingDir_SetCheckForUpstreamChanges_OngoingVerification struct { mock *MockWorkingDir methodInvocations []pegomock.MethodInvocation } -func (c *MockWorkingDir_SetSafeToReClone_OngoingVerification) GetCapturedArguments() { +func (c *MockWorkingDir_SetCheckForUpstreamChanges_OngoingVerification) GetCapturedArguments() { } -func (c *MockWorkingDir_SetSafeToReClone_OngoingVerification) GetAllCapturedArguments() { +func (c *MockWorkingDir_SetCheckForUpstreamChanges_OngoingVerification) GetAllCapturedArguments() { } diff --git a/server/events/models/models.go b/server/events/models/models.go index 011aaa6bac..0b94ac1a83 100644 --- a/server/events/models/models.go +++ b/server/events/models/models.go @@ -361,10 +361,10 @@ type PlanSuccess struct { RePlanCmd string // ApplyCmd is the command that users should run to apply this plan. ApplyCmd string - // HasDiverged is true if we're using the checkout merge strategy and the - // branch we're merging into has been updated since we cloned and merged - // it. - HasDiverged bool + // MergedAgain is true if we're using the checkout merge strategy and the + // branch we're merging into had been updated, and we had to merge again + // before planning + MergedAgain bool } type PolicySetResult struct { diff --git a/server/events/project_command_runner.go b/server/events/project_command_runner.go index e7b8655fce..1d457c27de 100644 --- a/server/events/project_command_runner.go +++ b/server/events/project_command_runner.go @@ -530,7 +530,7 @@ func (p *DefaultProjectCommandRunner) doPlan(ctx command.ProjectContext) (*model p.WorkingDir.SetCheckForUpstreamChanges() // Clone is idempotent so okay to run even if the repo was already cloned. - repoDir, hasDiverged, cloneErr := p.WorkingDir.Clone(ctx.Log, ctx.HeadRepo, ctx.Pull, ctx.Workspace) + repoDir, mergedAgain, cloneErr := p.WorkingDir.Clone(ctx.Log, ctx.HeadRepo, ctx.Pull, ctx.Workspace) if cloneErr != nil { if unlockErr := lockAttempt.UnlockFn(); unlockErr != nil { ctx.Log.Err("error unlocking state after plan error: %v", unlockErr) @@ -561,7 +561,7 @@ func (p *DefaultProjectCommandRunner) doPlan(ctx command.ProjectContext) (*model TerraformOutput: strings.Join(outputs, "\n"), RePlanCmd: ctx.RePlanCmd, ApplyCmd: ctx.ApplyCmd, - HasDiverged: hasDiverged, + MergedAgain: mergedAgain, }, "", nil } diff --git a/server/events/templates/diverged.tmpl b/server/events/templates/diverged.tmpl index f0f4be0ed2..a6cd892de6 100644 --- a/server/events/templates/diverged.tmpl +++ b/server/events/templates/diverged.tmpl @@ -1,5 +1,5 @@ {{ define "diverged" -}} -{{ if .HasDiverged }} +{{ if .MergedAgain }} :warning: The branch we're merging into is ahead, it is recommended to pull new commits first. 
{{ end -}} {{ end -}} diff --git a/server/events/working_dir_test.go b/server/events/working_dir_test.go index 88d1783c6b..ad0849a49b 100644 --- a/server/events/working_dir_test.go +++ b/server/events/working_dir_test.go @@ -92,13 +92,13 @@ func TestClone_CheckoutMergeNoneExisting(t *testing.T) { GpgNoSigningEnabled: true, } - cloneDir, hasDiverged, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ + cloneDir, mergedAgain, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ BaseRepo: models.Repo{}, HeadBranch: "branch", BaseBranch: "main", }, "default") Ok(t, err) - Equals(t, false, hasDiverged) + Equals(t, false, mergedAgain) // Check the commits. actBaseCommit := runCmd(t, cloneDir, "git", "rev-parse", "HEAD~1") @@ -141,25 +141,25 @@ func TestClone_CheckoutMergeNoReclone(t *testing.T) { GpgNoSigningEnabled: true, } - _, hasDiverged, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ + _, mergedAgain, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ BaseRepo: models.Repo{}, HeadBranch: "branch", BaseBranch: "main", }, "default") Ok(t, err) - Equals(t, false, hasDiverged) + Equals(t, false, mergedAgain) // Create a file that we can use to check if the repo was recloned. runCmd(t, dataDir, "touch", "repos/0/default/proof") // Now run the clone again. - cloneDir, hasDiverged, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ + cloneDir, mergedAgain, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ BaseRepo: models.Repo{}, HeadBranch: "branch", BaseBranch: "main", }, "default") Ok(t, err) - Equals(t, false, hasDiverged) + Equals(t, false, mergedAgain) // Check that our proof file is still there, proving that we didn't reclone. _, err = os.Stat(filepath.Join(cloneDir, "proof")) @@ -191,25 +191,25 @@ func TestClone_CheckoutMergeNoRecloneFastForward(t *testing.T) { GpgNoSigningEnabled: true, } - _, hasDiverged, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ + _, mergedAgain, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ BaseRepo: models.Repo{}, HeadBranch: "branch", BaseBranch: "main", }, "default") Ok(t, err) - Equals(t, false, hasDiverged) + Equals(t, false, mergedAgain) // Create a file that we can use to check if the repo was recloned. runCmd(t, dataDir, "touch", "repos/0/default/proof") // Now run the clone again. - cloneDir, hasDiverged, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ + cloneDir, mergedAgain, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ BaseRepo: models.Repo{}, HeadBranch: "branch", BaseBranch: "main", }, "default") Ok(t, err) - Equals(t, false, hasDiverged) + Equals(t, false, mergedAgain) // Check that our proof file is still there, proving that we didn't reclone. 
_, err = os.Stat(filepath.Join(cloneDir, "proof")) @@ -303,13 +303,13 @@ func TestClone_CheckoutMergeShallow(t *testing.T) { GpgNoSigningEnabled: true, } - cloneDir, hasDiverged, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ + cloneDir, mergedAgain, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ BaseRepo: models.Repo{}, HeadBranch: "branch", BaseBranch: "main", }, "default") Ok(t, err) - Equals(t, false, hasDiverged) + Equals(t, false, mergedAgain) gotBaseCommitType := runCmd(t, cloneDir, "git", "cat-file", "-t", baseCommit) Assert(t, gotBaseCommitType == "commit\n", "should have merge-base in shallow repo") @@ -331,13 +331,13 @@ func TestClone_CheckoutMergeShallow(t *testing.T) { GpgNoSigningEnabled: true, } - cloneDir, hasDiverged, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ + cloneDir, mergedAgain, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ BaseRepo: models.Repo{}, HeadBranch: "branch", BaseBranch: "main", }, "default") Ok(t, err) - Equals(t, false, hasDiverged) + Equals(t, false, mergedAgain) gotBaseCommitType := runCmd(t, cloneDir, "git", "cat-file", "-t", baseCommit) Assert(t, gotBaseCommitType == "commit\n", "should have merge-base in full repo") @@ -364,12 +364,12 @@ func TestClone_NoReclone(t *testing.T) { TestingOverrideHeadCloneURL: fmt.Sprintf("file://%s", repoDir), GpgNoSigningEnabled: true, } - cloneDir, hasDiverged, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ + cloneDir, mergedAgain, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ BaseRepo: models.Repo{}, HeadBranch: "branch", }, "default") Ok(t, err) - Equals(t, false, hasDiverged) + Equals(t, false, mergedAgain) // Check that our proof file is still there. _, err = os.Stat(filepath.Join(cloneDir, "proof")) @@ -399,13 +399,13 @@ func TestClone_RecloneWrongCommit(t *testing.T) { TestingOverrideHeadCloneURL: fmt.Sprintf("file://%s", repoDir), GpgNoSigningEnabled: true, } - cloneDir, hasDiverged, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ + cloneDir, mergedAgain, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ BaseRepo: models.Repo{}, HeadBranch: "branch", HeadCommit: expCommit, }, "default") Ok(t, err) - Equals(t, false, hasDiverged) + Equals(t, false, mergedAgain) // Use rev-parse to verify at correct commit. actCommit := runCmd(t, cloneDir, "git", "rev-parse", "HEAD") @@ -478,38 +478,38 @@ func TestClone_MasterHasDiverged(t *testing.T) { assert.FileExists(t, planFile) // Run the clone without the checkout merge strategy. 
It should return - // false for hasDiverged - _, hasDiverged, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ + // false for mergedAgain + _, mergedAgain, err := wd.Clone(logging.NewNoopLogger(t), models.Repo{}, models.PullRequest{ BaseRepo: models.Repo{}, HeadBranch: "second-pr", BaseBranch: "main", }, "default") Ok(t, err) - Assert(t, hasDiverged == false, "Clone with CheckoutMerge=false should not merge") + Assert(t, mergedAgain == false, "Clone with CheckoutMerge=false should not merge") assert.FileExists(t, planFile, "Existing plan file should not be deleted by Clone with merge disabled") wd.CheckoutMerge = true wd.SetCheckForUpstreamChanges() // Run the clone twice with the merge strategy, the first run should - // return true for hasDiverged, subsequent runs should + // return true for mergedAgain, subsequent runs should // return false since the first call is supposed to merge. - _, hasDiverged, err = wd.Clone(logging.NewNoopLogger(t), models.Repo{CloneURL: repoDir}, models.PullRequest{ + _, mergedAgain, err = wd.Clone(logging.NewNoopLogger(t), models.Repo{CloneURL: repoDir}, models.PullRequest{ BaseRepo: models.Repo{CloneURL: repoDir}, HeadBranch: "second-pr", BaseBranch: "main", }, "default") Ok(t, err) assert.FileExists(t, planFile, "Existing plan file should not be deleted by merging again") - Assert(t, hasDiverged == true, "First clone with CheckoutMerge=true with diverged base should have merged") + Assert(t, mergedAgain == true, "First clone with CheckoutMerge=true with diverged base should have merged") wd.SetCheckForUpstreamChanges() - _, hasDiverged, err = wd.Clone(logging.NewNoopLogger(t), models.Repo{CloneURL: repoDir}, models.PullRequest{ + _, mergedAgain, err = wd.Clone(logging.NewNoopLogger(t), models.Repo{CloneURL: repoDir}, models.PullRequest{ BaseRepo: models.Repo{CloneURL: repoDir}, HeadBranch: "second-pr", BaseBranch: "main", }, "default") Ok(t, err) - Assert(t, hasDiverged == false, "Second clone with CheckoutMerge=true and initially diverged base should not merge again") + Assert(t, mergedAgain == false, "Second clone with CheckoutMerge=true and initially diverged base should not merge again") assert.FileExists(t, planFile, "Existing plan file should not have been deleted") } From ef5d2e26390277a1de68cb3bf0198e17e6f9b745 Mon Sep 17 00:00:00 2001 From: Finn Arne Gangstad Date: Thu, 8 Jun 2023 17:20:17 +0200 Subject: [PATCH 6/9] Test that Clone on branch update wipes old plans This complements the test that Clone with unmodified branch but modified upstream does _not_ wipe plans. 
--- server/events/working_dir_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/server/events/working_dir_test.go b/server/events/working_dir_test.go index ad0849a49b..6f536e41bb 100644 --- a/server/events/working_dir_test.go +++ b/server/events/working_dir_test.go @@ -393,6 +393,13 @@ func TestClone_RecloneWrongCommit(t *testing.T) { runCmd(t, repoDir, "git", "commit", "-m", "newfile") expCommit := runCmd(t, repoDir, "git", "rev-parse", "HEAD") + // Pretend that terraform has created a plan file, we'll check for it later + planFile := filepath.Join(dataDir, "repos/0/default/default.tfplan") + assert.NoFileExists(t, planFile) + _, err := os.Create(planFile) + Assert(t, err == nil, "creating plan file: %v", err) + assert.FileExists(t, planFile) + wd := &events.FileWorkspace{ DataDir: dataDir, CheckoutMerge: false, @@ -406,6 +413,7 @@ func TestClone_RecloneWrongCommit(t *testing.T) { }, "default") Ok(t, err) Equals(t, false, mergedAgain) + assert.NoFileExists(t, planFile, "Plan file should have been wiped out by Clone") // Use rev-parse to verify at correct commit. actCommit := runCmd(t, cloneDir, "git", "rev-parse", "HEAD") From 74040a738c5a70549cf2caf7ac3e9ad3b25cf41a Mon Sep 17 00:00:00 2001 From: Finn Arne Gangstad Date: Mon, 7 Aug 2023 16:06:29 +0200 Subject: [PATCH 7/9] runGit now runs git instead of returning a function that runs git --- server/events/working_dir.go | 70 +++++++++++++++++------------------- 1 file changed, 32 insertions(+), 38 deletions(-) diff --git a/server/events/working_dir.go b/server/events/working_dir.go index d67863e5c0..80bca96c9c 100644 --- a/server/events/working_dir.go +++ b/server/events/working_dir.go @@ -240,36 +240,34 @@ func (w *FileWorkspace) forceClone(cloneDir string, headRepo models.Repo, p mode baseCloneURL = w.TestingOverrideBaseCloneURL } - runGit := w.makeGitRunner(cloneDir, headRepo, p) - // if branch strategy, use depth=1 if !w.CheckoutMerge { - return runGit("clone", "--depth=1", "--branch", p.HeadBranch, "--single-branch", headCloneURL, cloneDir) + return w.runGit(cloneDir, headRepo, p, "clone", "--depth=1", "--branch", p.HeadBranch, "--single-branch", headCloneURL, cloneDir) } // if merge strategy... 
// if no checkout depth, omit depth arg if w.CheckoutDepth == 0 { - if err := runGit("clone", "--branch", p.BaseBranch, "--single-branch", baseCloneURL, cloneDir); err != nil { + if err := w.runGit(cloneDir, headRepo, p, "clone", "--branch", p.BaseBranch, "--single-branch", baseCloneURL, cloneDir); err != nil { return err } } else { - if err := runGit("clone", "--depth", fmt.Sprint(w.CheckoutDepth), "--branch", p.BaseBranch, "--single-branch", baseCloneURL, cloneDir); err != nil { + if err := w.runGit(cloneDir, headRepo, p, "clone", "--depth", fmt.Sprint(w.CheckoutDepth), "--branch", p.BaseBranch, "--single-branch", baseCloneURL, cloneDir); err != nil { return err } } - if err := runGit("remote", "add", "head", headCloneURL); err != nil { + if err := w.runGit(cloneDir, headRepo, p, "remote", "add", "head", headCloneURL); err != nil { return err } if w.GpgNoSigningEnabled { - if err := runGit("config", "--local", "commit.gpgsign", "false"); err != nil { + if err := w.runGit(cloneDir, headRepo, p, "config", "--local", "commit.gpgsign", "false"); err != nil { return err } } - return w.mergeToBaseBranch(p, runGit) + return w.mergeToBaseBranch(cloneDir, headRepo, p) } // There is a new upstream update that we need, and we want to update to it @@ -284,41 +282,37 @@ func (w *FileWorkspace) mergeAgain(cloneDir string, headRepo models.Repo, p mode return nil } - runGit := w.makeGitRunner(cloneDir, headRepo, p) - // Reset branch as if it was cloned again - if err := runGit("reset", "--hard", fmt.Sprintf("refs/remotes/head/%s", p.BaseBranch)); err != nil { + if err := w.runGit(cloneDir, headRepo, p, "reset", "--hard", fmt.Sprintf("refs/remotes/head/%s", p.BaseBranch)); err != nil { return err } - return w.mergeToBaseBranch(p, runGit) + return w.mergeToBaseBranch(cloneDir, headRepo, p) } -func (w *FileWorkspace) makeGitRunner(cloneDir string, headRepo models.Repo, p models.PullRequest) func(args ...string) error { - return func(args ...string) error { - cmd := exec.Command("git", args...) // nolint: gosec - cmd.Dir = cloneDir - // The git merge command requires these env vars are set. - cmd.Env = append(os.Environ(), []string{ - "EMAIL=atlantis@runatlantis.io", - "GIT_AUTHOR_NAME=atlantis", - "GIT_COMMITTER_NAME=atlantis", - }...) - - cmdStr := w.sanitizeGitCredentials(strings.Join(cmd.Args, " "), p.BaseRepo, headRepo) - output, err := cmd.CombinedOutput() - sanitizedOutput := w.sanitizeGitCredentials(string(output), p.BaseRepo, headRepo) - if err != nil { - sanitizedErrMsg := w.sanitizeGitCredentials(err.Error(), p.BaseRepo, headRepo) - return fmt.Errorf("running %s: %s: %s", cmdStr, sanitizedOutput, sanitizedErrMsg) - } - w.Logger.Debug("ran: %s. Output: %s", cmdStr, strings.TrimSuffix(sanitizedOutput, "\n")) - return nil +func (w *FileWorkspace) runGit(cloneDir string, headRepo models.Repo, p models.PullRequest, args ...string) error { + cmd := exec.Command("git", args...) // nolint: gosec + cmd.Dir = cloneDir + // The git merge command requires these env vars are set. + cmd.Env = append(os.Environ(), []string{ + "EMAIL=atlantis@runatlantis.io", + "GIT_AUTHOR_NAME=atlantis", + "GIT_COMMITTER_NAME=atlantis", + }...) 
+ + cmdStr := w.sanitizeGitCredentials(strings.Join(cmd.Args, " "), p.BaseRepo, headRepo) + output, err := cmd.CombinedOutput() + sanitizedOutput := w.sanitizeGitCredentials(string(output), p.BaseRepo, headRepo) + if err != nil { + sanitizedErrMsg := w.sanitizeGitCredentials(err.Error(), p.BaseRepo, headRepo) + return fmt.Errorf("running %s: %s: %s", cmdStr, sanitizedOutput, sanitizedErrMsg) } + w.Logger.Debug("ran: %s. Output: %s", cmdStr, strings.TrimSuffix(sanitizedOutput, "\n")) + return nil } // Merge the PR into the base branch. -func (w *FileWorkspace) mergeToBaseBranch(p models.PullRequest, runGit func(args ...string) error) error { +func (w *FileWorkspace) mergeToBaseBranch(cloneDir string, headRepo models.Repo, p models.PullRequest) error { fetchRef := fmt.Sprintf("+refs/heads/%s:", p.HeadBranch) fetchRemote := "head" if w.GithubAppEnabled { @@ -328,19 +322,19 @@ func (w *FileWorkspace) mergeToBaseBranch(p models.PullRequest, runGit func(args // if no checkout depth, omit depth arg if w.CheckoutDepth == 0 { - if err := runGit("fetch", fetchRemote, fetchRef); err != nil { + if err := w.runGit(cloneDir, headRepo, p, "fetch", fetchRemote, fetchRef); err != nil { return err } } else { - if err := runGit("fetch", "--depth", fmt.Sprint(w.CheckoutDepth), fetchRemote, fetchRef); err != nil { + if err := w.runGit(cloneDir, headRepo, p, "fetch", "--depth", fmt.Sprint(w.CheckoutDepth), fetchRemote, fetchRef); err != nil { return err } } - if err := runGit("merge-base", p.BaseBranch, "FETCH_HEAD"); err != nil { + if err := w.runGit(cloneDir, headRepo, p, "merge-base", p.BaseBranch, "FETCH_HEAD"); err != nil { // git merge-base returning error means that we did not receive enough commits in shallow clone. // Fall back to retrieving full repo history. - if err := runGit("fetch", "--unshallow"); err != nil { + if err := w.runGit(cloneDir, headRepo, p, "fetch", "--unshallow"); err != nil { return err } } @@ -351,7 +345,7 @@ func (w *FileWorkspace) mergeToBaseBranch(p models.PullRequest, runGit func(args // git rev-parse HEAD^2 to get the head commit because it will // always succeed whereas without --no-ff, if the merge was fast // forwarded then git rev-parse HEAD^2 would fail. - return runGit("merge", "-q", "--no-ff", "-m", "atlantis-merge", "FETCH_HEAD") + return w.runGit(cloneDir, headRepo, p, "merge", "-q", "--no-ff", "-m", "atlantis-merge", "FETCH_HEAD") } // GetWorkingDir returns the path to the workspace for this repo and pull. 
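In practice, call sites change from building a per-clone closure to passing the clone directory, head repo and pull request on every call; a short sketch of the two styles (the fetch arguments here are only an example invocation, error handling elided):

// Before this patch: build a closure bound to one clone, then reuse it.
runGit := w.makeGitRunner(cloneDir, headRepo, p)
err := runGit("fetch", "head")

// After this patch: pass the per-clone arguments explicitly on each call.
err = w.runGit(cloneDir, headRepo, p, "fetch", "head")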
From cf336812e1817aefe628f3c3896d42275a5c3c15 Mon Sep 17 00:00:00 2001 From: Finn Arne Gangstad Date: Mon, 7 Aug 2023 16:09:18 +0200 Subject: [PATCH 8/9] Updated template to merged again instead of diverged
This is no longer a warning, but expected behavior in merge checkout mode
--- server/events/markdown_renderer_test.go | 4 ++-- server/events/templates/diverged.tmpl | 5 ----- server/events/templates/merged_again.tmpl | 5 +++++ server/events/templates/plan_success_unwrapped.tmpl | 2 +- server/events/templates/plan_success_wrapped.tmpl | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) delete mode 100644 server/events/templates/diverged.tmpl create mode 100644 server/events/templates/merged_again.tmpl
diff --git a/server/events/markdown_renderer_test.go b/server/events/markdown_renderer_test.go index 89f10694da..548ce03cdb 100644 --- a/server/events/markdown_renderer_test.go +++ b/server/events/markdown_renderer_test.go @@ -210,7 +210,7 @@ $$$ * :repeat: To **plan** this project again, comment: * $atlantis plan -d path -w workspace$ -:warning: The branch we're merging into is ahead, it is recommended to pull new commits first. +:twisted_rightwards_arrows: Upstream was modified, a new merge was performed. --- * :fast_forward: To **apply** all unapplied plans from this pull request, comment: @@ -1992,7 +1992,7 @@ $$$ * :repeat: To **plan** this project again, comment: * $atlantis plan -d path -w workspace$ -:warning: The branch we're merging into is ahead, it is recommended to pull new commits first. +:twisted_rightwards_arrows: Upstream was modified, a new merge was performed. --- * :fast_forward: To **apply** all unapplied plans from this pull request, comment:
diff --git a/server/events/templates/diverged.tmpl b/server/events/templates/diverged.tmpl deleted file mode 100644 index a6cd892de6..0000000000 --- a/server/events/templates/diverged.tmpl +++ /dev/null @@ -1,5 +0,0 @@ -{{ define "diverged" -}} -{{ if .MergedAgain }} -:warning: The branch we're merging into is ahead, it is recommended to pull new commits first. -{{ end -}} -{{ end -}}
diff --git a/server/events/templates/merged_again.tmpl b/server/events/templates/merged_again.tmpl new file mode 100644 index 0000000000..796afe552a --- /dev/null +++ b/server/events/templates/merged_again.tmpl @@ -0,0 +1,5 @@ +{{ define "mergedAgain" -}} +{{ if .MergedAgain }} +:twisted_rightwards_arrows: Upstream was modified, a new merge was performed. +{{ end -}} +{{ end -}}
diff --git a/server/events/templates/plan_success_unwrapped.tmpl b/server/events/templates/plan_success_unwrapped.tmpl index 1f34216a0d..6bd81de233 100644 --- a/server/events/templates/plan_success_unwrapped.tmpl +++ b/server/events/templates/plan_success_unwrapped.tmpl @@ -16,5 +16,5 @@ This plan was not saved because one or more projects failed and automerge requir * :repeat: To **plan** this project again, comment: * `{{ .RePlanCmd }}` {{ end -}} -{{ template "diverged" . }} +{{ template "mergedAgain" . }} {{ end -}}
diff --git a/server/events/templates/plan_success_wrapped.tmpl b/server/events/templates/plan_success_wrapped.tmpl index 9630de1b2e..cef96d0609 100644 --- a/server/events/templates/plan_success_wrapped.tmpl +++ b/server/events/templates/plan_success_wrapped.tmpl @@ -20,5 +20,5 @@ This plan was not saved because one or more projects failed and automerge requir {{ end -}} {{ .PlanSummary -}} -{{ template "diverged" . -}} +{{ template "mergedAgain" . 
-}} {{ end -}} From 2c0260d6ad267e3bcf0c8cd9940811e6128cf45b Mon Sep 17 00:00:00 2001 From: Finn Arne Gangstad Date: Fri, 11 Aug 2023 20:25:03 +0200 Subject: [PATCH 9/9] Rename git wrapper to wrappedGit, add a type for static config Every call to wrappedGit for the same PR uses identical setup for directory, head repo and PR, so passing the --- server/events/working_dir.go | 80 ++++++++++++++++++++---------------- 1 file changed, 45 insertions(+), 35 deletions(-) diff --git a/server/events/working_dir.go b/server/events/working_dir.go index 80bca96c9c..6d6430effb 100644 --- a/server/events/working_dir.go +++ b/server/events/working_dir.go @@ -100,6 +100,7 @@ func (w *FileWorkspace) Clone( cloneDir := w.cloneDir(p.BaseRepo, p, workspace) defer func() { w.CheckForUpstreamChanges = false }() + c := wrappedGitContext{cloneDir, headRepo, p} // If the directory already exists, check if it's at the right commit. // If so, then we do nothing. if _, err := os.Stat(cloneDir); err == nil { @@ -119,7 +120,7 @@ func (w *FileWorkspace) Clone( outputRevParseCmd, err := revParseCmd.CombinedOutput() if err != nil { w.Logger.Warn("will re-clone repo, could not determine if was at correct commit: %s: %s: %s", strings.Join(revParseCmd.Args, " "), err, string(outputRevParseCmd)) - return cloneDir, false, w.forceClone(cloneDir, headRepo, p) + return cloneDir, false, w.forceClone(c) } currCommit := strings.Trim(string(outputRevParseCmd), "\n") @@ -128,7 +129,7 @@ func (w *FileWorkspace) Clone( if strings.HasPrefix(currCommit, p.HeadCommit) { if w.CheckForUpstreamChanges && w.CheckoutMerge && w.recheckDiverged(p, headRepo, cloneDir) { w.Logger.Info("base branch has been updated, using merge strategy and will clone again") - return cloneDir, true, w.mergeAgain(cloneDir, headRepo, p) + return cloneDir, true, w.mergeAgain(c) } else { w.Logger.Debug("repo is at correct commit %q so will not re-clone", p.HeadCommit) return cloneDir, false, nil @@ -140,7 +141,7 @@ func (w *FileWorkspace) Clone( } // Otherwise we clone the repo. - return cloneDir, false, w.forceClone(cloneDir, headRepo, p) + return cloneDir, false, w.forceClone(c) } // recheckDiverged returns true if the branch we're merging into has diverged @@ -209,8 +210,8 @@ func (w *FileWorkspace) HasDiverged(cloneDir string) bool { return hasDiverged } -func (w *FileWorkspace) forceClone(cloneDir string, headRepo models.Repo, p models.PullRequest) error { - value, _ := cloneLocks.LoadOrStore(cloneDir, new(sync.Mutex)) +func (w *FileWorkspace) forceClone(c wrappedGitContext) error { + value, _ := cloneLocks.LoadOrStore(c.dir, new(sync.Mutex)) mutex := value.(*sync.Mutex) defer mutex.Unlock() @@ -219,61 +220,61 @@ func (w *FileWorkspace) forceClone(cloneDir string, headRepo models.Repo, p mode return nil } - err := os.RemoveAll(cloneDir) + err := os.RemoveAll(c.dir) if err != nil { - return errors.Wrapf(err, "deleting dir %q before cloning", cloneDir) + return errors.Wrapf(err, "deleting dir %q before cloning", c.dir) } // Create the directory and parents if necessary. - w.Logger.Info("creating dir %q", cloneDir) - if err := os.MkdirAll(cloneDir, 0700); err != nil { + w.Logger.Info("creating dir %q", c.dir) + if err := os.MkdirAll(c.dir, 0700); err != nil { return errors.Wrap(err, "creating new workspace") } // During testing, we mock some of this out. 
- headCloneURL := headRepo.CloneURL + headCloneURL := c.head.CloneURL if w.TestingOverrideHeadCloneURL != "" { headCloneURL = w.TestingOverrideHeadCloneURL } - baseCloneURL := p.BaseRepo.CloneURL + baseCloneURL := c.pr.BaseRepo.CloneURL if w.TestingOverrideBaseCloneURL != "" { baseCloneURL = w.TestingOverrideBaseCloneURL } // if branch strategy, use depth=1 if !w.CheckoutMerge { - return w.runGit(cloneDir, headRepo, p, "clone", "--depth=1", "--branch", p.HeadBranch, "--single-branch", headCloneURL, cloneDir) + return w.wrappedGit(c, "clone", "--depth=1", "--branch", c.pr.HeadBranch, "--single-branch", headCloneURL, c.dir) } // if merge strategy... // if no checkout depth, omit depth arg if w.CheckoutDepth == 0 { - if err := w.runGit(cloneDir, headRepo, p, "clone", "--branch", p.BaseBranch, "--single-branch", baseCloneURL, cloneDir); err != nil { + if err := w.wrappedGit(c, "clone", "--branch", c.pr.BaseBranch, "--single-branch", baseCloneURL, c.dir); err != nil { return err } } else { - if err := w.runGit(cloneDir, headRepo, p, "clone", "--depth", fmt.Sprint(w.CheckoutDepth), "--branch", p.BaseBranch, "--single-branch", baseCloneURL, cloneDir); err != nil { + if err := w.wrappedGit(c, "clone", "--depth", fmt.Sprint(w.CheckoutDepth), "--branch", c.pr.BaseBranch, "--single-branch", baseCloneURL, c.dir); err != nil { return err } } - if err := w.runGit(cloneDir, headRepo, p, "remote", "add", "head", headCloneURL); err != nil { + if err := w.wrappedGit(c, "remote", "add", "head", headCloneURL); err != nil { return err } if w.GpgNoSigningEnabled { - if err := w.runGit(cloneDir, headRepo, p, "config", "--local", "commit.gpgsign", "false"); err != nil { + if err := w.wrappedGit(c, "config", "--local", "commit.gpgsign", "false"); err != nil { return err } } - return w.mergeToBaseBranch(cloneDir, headRepo, p) + return w.mergeToBaseBranch(c) } // There is a new upstream update that we need, and we want to update to it // without deleting any existing plans -func (w *FileWorkspace) mergeAgain(cloneDir string, headRepo models.Repo, p models.PullRequest) error { - value, _ := cloneLocks.LoadOrStore(cloneDir, new(sync.Mutex)) +func (w *FileWorkspace) mergeAgain(c wrappedGitContext) error { + value, _ := cloneLocks.LoadOrStore(c.dir, new(sync.Mutex)) mutex := value.(*sync.Mutex) defer mutex.Unlock() @@ -283,28 +284,37 @@ func (w *FileWorkspace) mergeAgain(cloneDir string, headRepo models.Repo, p mode } // Reset branch as if it was cloned again - if err := w.runGit(cloneDir, headRepo, p, "reset", "--hard", fmt.Sprintf("refs/remotes/head/%s", p.BaseBranch)); err != nil { + if err := w.wrappedGit(c, "reset", "--hard", fmt.Sprintf("refs/remotes/head/%s", c.pr.BaseBranch)); err != nil { return err } - return w.mergeToBaseBranch(cloneDir, headRepo, p) + return w.mergeToBaseBranch(c) } -func (w *FileWorkspace) runGit(cloneDir string, headRepo models.Repo, p models.PullRequest, args ...string) error { +// wrappedGitContext is the configuration for wrappedGit that is typically unchanged +// for a series of calls to wrappedGit +type wrappedGitContext struct { + dir string + head models.Repo + pr models.PullRequest +} + +// wrappedGit runs git with additional environment settings required for git merge, +// and with sanitized error logging to avoid leaking git credentials +func (w *FileWorkspace) wrappedGit(c wrappedGitContext, args ...string) error { cmd := exec.Command("git", args...) // nolint: gosec - cmd.Dir = cloneDir + cmd.Dir = c.dir // The git merge command requires these env vars are set. 
cmd.Env = append(os.Environ(), []string{ "EMAIL=atlantis@runatlantis.io", "GIT_AUTHOR_NAME=atlantis", "GIT_COMMITTER_NAME=atlantis", }...) - - cmdStr := w.sanitizeGitCredentials(strings.Join(cmd.Args, " "), p.BaseRepo, headRepo) + cmdStr := w.sanitizeGitCredentials(strings.Join(cmd.Args, " "), c.pr.BaseRepo, c.head) output, err := cmd.CombinedOutput() - sanitizedOutput := w.sanitizeGitCredentials(string(output), p.BaseRepo, headRepo) + sanitizedOutput := w.sanitizeGitCredentials(string(output), c.pr.BaseRepo, c.head) if err != nil { - sanitizedErrMsg := w.sanitizeGitCredentials(err.Error(), p.BaseRepo, headRepo) + sanitizedErrMsg := w.sanitizeGitCredentials(err.Error(), c.pr.BaseRepo, c.head) return fmt.Errorf("running %s: %s: %s", cmdStr, sanitizedOutput, sanitizedErrMsg) } w.Logger.Debug("ran: %s. Output: %s", cmdStr, strings.TrimSuffix(sanitizedOutput, "\n")) @@ -312,29 +322,29 @@ func (w *FileWorkspace) runGit(cloneDir string, headRepo models.Repo, p models.P } // Merge the PR into the base branch. -func (w *FileWorkspace) mergeToBaseBranch(cloneDir string, headRepo models.Repo, p models.PullRequest) error { - fetchRef := fmt.Sprintf("+refs/heads/%s:", p.HeadBranch) +func (w *FileWorkspace) mergeToBaseBranch(c wrappedGitContext) error { + fetchRef := fmt.Sprintf("+refs/heads/%s:", c.pr.HeadBranch) fetchRemote := "head" if w.GithubAppEnabled { - fetchRef = fmt.Sprintf("pull/%d/head:", p.Num) + fetchRef = fmt.Sprintf("pull/%d/head:", c.pr.Num) fetchRemote = "origin" } // if no checkout depth, omit depth arg if w.CheckoutDepth == 0 { - if err := w.runGit(cloneDir, headRepo, p, "fetch", fetchRemote, fetchRef); err != nil { + if err := w.wrappedGit(c, "fetch", fetchRemote, fetchRef); err != nil { return err } } else { - if err := w.runGit(cloneDir, headRepo, p, "fetch", "--depth", fmt.Sprint(w.CheckoutDepth), fetchRemote, fetchRef); err != nil { + if err := w.wrappedGit(c, "fetch", "--depth", fmt.Sprint(w.CheckoutDepth), fetchRemote, fetchRef); err != nil { return err } } - if err := w.runGit(cloneDir, headRepo, p, "merge-base", p.BaseBranch, "FETCH_HEAD"); err != nil { + if err := w.wrappedGit(c, "merge-base", c.pr.BaseBranch, "FETCH_HEAD"); err != nil { // git merge-base returning error means that we did not receive enough commits in shallow clone. // Fall back to retrieving full repo history. - if err := w.runGit(cloneDir, headRepo, p, "fetch", "--unshallow"); err != nil { + if err := w.wrappedGit(c, "fetch", "--unshallow"); err != nil { return err } } @@ -345,7 +355,7 @@ func (w *FileWorkspace) mergeToBaseBranch(cloneDir string, headRepo models.Repo, // git rev-parse HEAD^2 to get the head commit because it will // always succeed whereas without --no-ff, if the merge was fast // forwarded then git rev-parse HEAD^2 would fail. - return w.runGit(cloneDir, headRepo, p, "merge", "-q", "--no-ff", "-m", "atlantis-merge", "FETCH_HEAD") + return w.wrappedGit(c, "merge", "-q", "--no-ff", "-m", "atlantis-merge", "FETCH_HEAD") } // GetWorkingDir returns the path to the workspace for this repo and pull.
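Taken together, the refresh path added in this series (mergeAgain followed by mergeToBaseBranch, shown above in their final wrappedGit form) reduces to roughly the sequence below. This is a condensed sketch under stated assumptions: refreshSketch is a hypothetical name, the per-directory mutex, shallow-fetch depth handling, the merge-base/unshallow fallback, and the GitHub-app fetch ref (pull/<num>/head: on origin) are all omitted, and only the happy path is kept:

// Condensed sketch of the non-destructive refresh used when the base branch has moved.
func (w *FileWorkspace) refreshSketch(c wrappedGitContext) error {
	// Drop the previous atlantis-merge commit by resetting to the fetched base
	// branch; untracked files such as generated plan files are left in place.
	if err := w.wrappedGit(c, "reset", "--hard", fmt.Sprintf("refs/remotes/head/%s", c.pr.BaseBranch)); err != nil {
		return err
	}
	// Re-fetch the PR head and merge it on top of the updated base, the same
	// merge that mergeToBaseBranch performs after a fresh clone.
	if err := w.wrappedGit(c, "fetch", "head", fmt.Sprintf("+refs/heads/%s:", c.pr.HeadBranch)); err != nil {
		return err
	}
	return w.wrappedGit(c, "merge", "-q", "--no-ff", "-m", "atlantis-merge", "FETCH_HEAD")
}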