Incremental backup and point in time recovery for XtraBackup (#13156)
* incremental backup always uses the 'builtin' engine

Signed-off-by: Shlomi Noach <[email protected]>

* restore: use 'builtin' for incremental restore

Signed-off-by: Shlomi Noach <[email protected]>

* test all backup types

Signed-off-by: Shlomi Noach <[email protected]>

* format code

Signed-off-by: Shlomi Noach <[email protected]>

* Populate PurgedPosition

Signed-off-by: Shlomi Noach <[email protected]>

* cleanup backups at the end of each test case

Signed-off-by: Shlomi Noach <[email protected]>

* improved cleanup

Signed-off-by: Shlomi Noach <[email protected]>

* rename variable

Signed-off-by: Shlomi Noach <[email protected]>

* record all backups

Signed-off-by: Shlomi Noach <[email protected]>

* no need to clean up backups between test cases, since each new case has a completely different UUID

Signed-off-by: Shlomi Noach <[email protected]>

* install xtrabackup in the backup_pitr tests

Signed-off-by: Shlomi Noach <[email protected]>

* use pgzip for xtrabackup

Signed-off-by: Shlomi Noach <[email protected]>

* more debug info

Signed-off-by: Shlomi Noach <[email protected]>

* builtin engine: store gtid_purged in manifest

Signed-off-by: Shlomi Noach <[email protected]>

* use backupfrom-GTID as incremental-from-GTID if the first binary log has an empty Previous-GTIDs

Signed-off-by: Shlomi Noach <[email protected]>

* more unit tests

Signed-off-by: Shlomi Noach <[email protected]>

* improve error message

Signed-off-by: Shlomi Noach <[email protected]>

* capture MySQL's stderr, and read and log it if non-empty (see the sketch at the end of this commit message)

Signed-off-by: Shlomi Noach <[email protected]>

* At the end of an XtraBackup restore, validate that @@gtid_purged (and thereby @@gtid_executed) is set to the backup's @@gtid_purged, and set it if not (see the sketch after the changed-files summary below)

Signed-off-by: Shlomi Noach <[email protected]>

* add compression details to the test case; fix GTID validation of the manifest file

Signed-off-by: Shlomi Noach <[email protected]>

* check manifest

Signed-off-by: Shlomi Noach <[email protected]>

* Refactor into function

Signed-off-by: Shlomi Noach <[email protected]>

* check manifest.Position.GTIDSet

Signed-off-by: Shlomi Noach <[email protected]>

* fix wrangler tests

Signed-off-by: Shlomi Noach <[email protected]>

* typo

Signed-off-by: Shlomi Noach <[email protected]>

* Update go/vt/mysqlctl/backup.go

Co-authored-by: Matt Lord <[email protected]>
Signed-off-by: Shlomi Noach <[email protected]>

* Update go/vt/mysqlctl/backup.go

Co-authored-by: Matt Lord <[email protected]>
Signed-off-by: Shlomi Noach <[email protected]>

* Update go/vt/mysqlctl/backup.go

Co-authored-by: Matt Lord <[email protected]>
Signed-off-by: Shlomi Noach <[email protected]>

* typo

Signed-off-by: Shlomi Noach <[email protected]>

* Update go/vt/mysqlctl/mysqld.go

Co-authored-by: Matt Lord <[email protected]>
Signed-off-by: Shlomi Noach <[email protected]>

* Update go/vt/mysqlctl/mysqld.go

Co-authored-by: Matt Lord <[email protected]>
Signed-off-by: Shlomi Noach <[email protected]>

* Update go/vt/mysqlctl/mysqld.go

Co-authored-by: Matt Lord <[email protected]>
Signed-off-by: Shlomi Noach <[email protected]>

---------

Signed-off-by: Shlomi Noach <[email protected]>
Co-authored-by: Matt Lord <[email protected]>
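
The stderr-capture item above boils down to wiring the spawned MySQL binary's stderr into a buffer and logging it only when something was written. The following is a hedged sketch of that idea, not the code added to go/vt/mysqlctl; the package and function names are hypothetical.

package mysqlctlsketch

import (
	"bytes"
	"log"
	"os/exec"
	"strings"
)

// runMySQLBinary runs a MySQL-related binary, capturing its stderr. If the process
// wrote anything to stderr, the output is logged; otherwise nothing is printed.
func runMySQLBinary(name string, args ...string) error {
	cmd := exec.Command(name, args...)
	var stderr bytes.Buffer
	cmd.Stderr = &stderr

	err := cmd.Run()
	if out := strings.TrimSpace(stderr.String()); out != "" {
		log.Printf("%s stderr: %s", name, out)
	}
	return err
}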
shlomi-noach and mattlord authored Jun 6, 2023
1 parent 4ba60af commit bd50b93
Showing 13 changed files with 392 additions and 203 deletions.
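
For the @@gtid_purged validation mentioned in the commit message, here is a minimal sketch of the idea, assuming a plain database/sql connection and a manifest whose PurgedPosition has already been encoded as a GTID-set string. It is an illustration only, not the Vitess implementation, and the helper name is hypothetical.

package restoresketch

import (
	"database/sql"
	"fmt"

	// Assumption: any MySQL driver works here; this one is commonly used.
	_ "github.com/go-sql-driver/mysql"
)

// ensureGTIDPurged checks that @@gtid_purged matches the GTID set recorded in the
// backup manifest and, if it does not, forces it to that value.
func ensureGTIDPurged(db *sql.DB, manifestPurgedGTIDs string) error {
	var purged string
	if err := db.QueryRow("SELECT @@global.gtid_purged").Scan(&purged); err != nil {
		return err
	}
	// A plain string comparison stands in for a proper GTID-set comparison.
	if purged == manifestPurgedGTIDs {
		return nil
	}
	// RESET MASTER clears gtid_executed so that gtid_purged can be set explicitly.
	if _, err := db.Exec("RESET MASTER"); err != nil {
		return err
	}
	_, err := db.Exec(fmt.Sprintf("SET GLOBAL gtid_purged = '%s'", manifestPurgedGTIDs))
	return err
}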
17 changes: 10 additions & 7 deletions .github/workflows/cluster_endtoend_backup_pitr.yml
@@ -84,15 +84,16 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
# Setup MySQL 8.0
wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
# Setup Percona Server for MySQL 8.0
sudo apt-get update
sudo apt-get install -y lsb-release gnupg2 curl
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo percona-release setup ps80
sudo apt-get update
# Install everything else we need, and configure
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils libncurses5
sudo service mysql stop
sudo service etcd stop
@@ -103,6 +104,8 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
sudo apt-get install percona-xtrabackup-80 lz4
- name: Setup launchable dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
16 changes: 16 additions & 0 deletions .github/workflows/cluster_endtoend_backup_pitr_mysql57.yml
@@ -13,6 +13,10 @@ env:
LAUNCHABLE_WORKSPACE: "vitess-app"
GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"

# This is used if we need to pin the xtrabackup version used in tests.
# If this is NOT set then the latest version available will be used.
#XTRABACKUP_VERSION: "2.4.24-1"

jobs:
build:
name: Run endtoend tests on Cluster (backup_pitr) mysql57
@@ -114,6 +118,18 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
wget "https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb"
sudo apt-get install -y gnupg2
sudo dpkg -i "percona-release_latest.$(lsb_release -sc)_all.deb"
sudo apt-get update
if [[ -n $XTRABACKUP_VERSION ]]; then
debfile="percona-xtrabackup-24_$XTRABACKUP_VERSION.$(lsb_release -sc)_amd64.deb"
wget "https://repo.percona.com/pxb-24/apt/pool/main/p/percona-xtrabackup-24/$debfile"
sudo apt install -y "./$debfile"
else
sudo apt-get install -y percona-xtrabackup-24
fi
- name: Setup launchable dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
301 changes: 162 additions & 139 deletions go/test/endtoend/backup/pitr/backup_mysqlctld_pitr_test.go
@@ -52,161 +52,184 @@ func waitForReplica(t *testing.T) {
}

// TestIncrementalBackupMysqlctld - tests incremental backups using mysqlctld
func TestIncrementalBackupMysqlctld(t *testing.T) {
func TestIncrementalBackup(t *testing.T) {
defer cluster.PanicHandler(t)
// setup cluster for the testing
code, err := backup.LaunchCluster(backup.Mysqlctld, "xbstream", 0, nil)
require.NoError(t, err, "setup failed with status code %d", code)
defer backup.TearDownCluster()

backup.InitTestTable(t)

rowsPerPosition := map[string]int{}
backupPositions := []string{}

recordRowsPerPosition := func(t *testing.T) {
pos := backup.GetReplicaPosition(t)
msgs := backup.ReadRowsFromReplica(t)
if _, ok := rowsPerPosition[pos]; !ok {
backupPositions = append(backupPositions, pos)
rowsPerPosition[pos] = len(msgs)
}
}

var fullBackupPos mysql.Position
t.Run("full backup", func(t *testing.T) {
backup.InsertRowOnPrimary(t, "before-full-backup")
waitForReplica(t)
manifest, _ := backup.TestReplicaFullBackup(t)
fullBackupPos = manifest.Position
require.False(t, fullBackupPos.IsZero())
//
msgs := backup.ReadRowsFromReplica(t)
pos := mysql.EncodePosition(fullBackupPos)
backupPositions = append(backupPositions, pos)
rowsPerPosition[pos] = len(msgs)
})

lastBackupPos := fullBackupPos
backup.InsertRowOnPrimary(t, "before-incremental-backups")

tt := []struct {
name string
writeBeforeBackup bool
fromFullPosition bool
autoPosition bool
expectError string
tcases := []struct {
name string
setupType int
comprss *backup.CompressionDetails
}{
{
name: "first incremental backup",
"BuiltinBackup", backup.BuiltinBackup, nil,
},
{
name: "make writes, succeed",
writeBeforeBackup: true,
"XtraBackup", backup.XtraBackup, &backup.CompressionDetails{
CompressorEngineName: "pgzip",
},
},
{
name: "fail, no binary logs to backup",
expectError: "no binary logs to backup",
},
{
name: "make writes again, succeed",
writeBeforeBackup: true,
},
{
name: "auto position, succeed",
writeBeforeBackup: true,
autoPosition: true,
},
{
name: "fail auto position, no binary logs to backup",
autoPosition: true,
expectError: "no binary logs to backup",
},
{
name: "auto position, make writes again, succeed",
writeBeforeBackup: true,
autoPosition: true,
},
{
name: "from full backup position",
fromFullPosition: true,
"Mysqlctld", backup.Mysqlctld, nil,
},
}
var fromFullPositionBackups []string
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
if tc.writeBeforeBackup {
backup.InsertRowOnPrimary(t, "")
}
// we wait for 1 second because backups are written to a directory named after the current timestamp,
// in 1 second resolution. We want to avoid two backups that have the same pathname. Realistically this
// is only ever a problem in this end-to-end test, not in production.
// Also, we give the replica a chance to catch up.
time.Sleep(1100 * time.Millisecond)
waitForReplica(t)
recordRowsPerPosition(t)
// configure --incremental-from-pos to either:
// - auto
// - explicit last backup pos
// - back in history to the original full backup
var incrementalFromPos mysql.Position
if !tc.autoPosition {
incrementalFromPos = lastBackupPos
if tc.fromFullPosition {
incrementalFromPos = fullBackupPos
for _, tcase := range tcases {
t.Run(tcase.name, func(t *testing.T) {
// setup cluster for the testing
code, err := backup.LaunchCluster(tcase.setupType, "xbstream", 0, tcase.comprss)
require.NoError(t, err, "setup failed with status code %d", code)
defer backup.TearDownCluster()

backup.InitTestTable(t)

rowsPerPosition := map[string]int{}
backupPositions := []string{}

recordRowsPerPosition := func(t *testing.T) {
pos := backup.GetReplicaPosition(t)
msgs := backup.ReadRowsFromReplica(t)
if _, ok := rowsPerPosition[pos]; !ok {
backupPositions = append(backupPositions, pos)
rowsPerPosition[pos] = len(msgs)
}
}
manifest, backupName := backup.TestReplicaIncrementalBackup(t, incrementalFromPos, tc.expectError)
if tc.expectError != "" {
return
}
defer func() {
lastBackupPos = manifest.Position
}()
if tc.fromFullPosition {
fromFullPositionBackups = append(fromFullPositionBackups, backupName)
}
require.False(t, manifest.FromPosition.IsZero())
require.NotEqual(t, manifest.Position, manifest.FromPosition)
require.True(t, manifest.Position.GTIDSet.Contains(manifest.FromPosition.GTIDSet))

gtidPurgedPos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, backup.GetReplicaGtidPurged(t))
require.NoError(t, err)
fromPositionIncludingPurged := manifest.FromPosition.GTIDSet.Union(gtidPurgedPos.GTIDSet)
var fullBackupPos mysql.Position
t.Run("full backup", func(t *testing.T) {
backup.InsertRowOnPrimary(t, "before-full-backup")
waitForReplica(t)

manifest, _ := backup.TestReplicaFullBackup(t)
fullBackupPos = manifest.Position
require.False(t, fullBackupPos.IsZero())
//
msgs := backup.ReadRowsFromReplica(t)
pos := mysql.EncodePosition(fullBackupPos)
backupPositions = append(backupPositions, pos)
rowsPerPosition[pos] = len(msgs)
})

expectFromPosition := lastBackupPos.GTIDSet.Union(gtidPurgedPos.GTIDSet)
if !incrementalFromPos.IsZero() {
expectFromPosition = incrementalFromPos.GTIDSet.Union(gtidPurgedPos.GTIDSet)
lastBackupPos := fullBackupPos
backup.InsertRowOnPrimary(t, "before-incremental-backups")

tt := []struct {
name string
writeBeforeBackup bool
fromFullPosition bool
autoPosition bool
expectError string
}{
{
name: "first incremental backup",
},
{
name: "make writes, succeed",
writeBeforeBackup: true,
},
{
name: "fail, no binary logs to backup",
expectError: "no binary logs to backup",
},
{
name: "make writes again, succeed",
writeBeforeBackup: true,
},
{
name: "auto position, succeed",
writeBeforeBackup: true,
autoPosition: true,
},
{
name: "fail auto position, no binary logs to backup",
autoPosition: true,
expectError: "no binary logs to backup",
},
{
name: "auto position, make writes again, succeed",
writeBeforeBackup: true,
autoPosition: true,
},
{
name: "from full backup position",
fromFullPosition: true,
},
}
var fromFullPositionBackups []string
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
if tc.writeBeforeBackup {
backup.InsertRowOnPrimary(t, "")
}
// we wait for 1 second because backups are written to a directory named after the current timestamp,
// in 1 second resolution. We want to avoid two backups that have the same pathname. Realistically this
// is only ever a problem in this end-to-end test, not in production.
// Also, we give the replica a chance to catch up.
time.Sleep(1100 * time.Millisecond)
waitForReplica(t)
recordRowsPerPosition(t)
// configure --incremental-from-pos to either:
// - auto
// - explicit last backup pos
// - back in history to the original full backup
var incrementalFromPos mysql.Position
if !tc.autoPosition {
incrementalFromPos = lastBackupPos
if tc.fromFullPosition {
incrementalFromPos = fullBackupPos
}
}
manifest, backupName := backup.TestReplicaIncrementalBackup(t, incrementalFromPos, tc.expectError)
if tc.expectError != "" {
return
}
defer func() {
lastBackupPos = manifest.Position
}()
if tc.fromFullPosition {
fromFullPositionBackups = append(fromFullPositionBackups, backupName)
}
require.False(t, manifest.FromPosition.IsZero())
require.NotEqual(t, manifest.Position, manifest.FromPosition)
require.True(t, manifest.Position.GTIDSet.Union(manifest.PurgedPosition.GTIDSet).Contains(manifest.FromPosition.GTIDSet))

gtidPurgedPos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, backup.GetReplicaGtidPurged(t))
require.NoError(t, err)
fromPositionIncludingPurged := manifest.FromPosition.GTIDSet.Union(gtidPurgedPos.GTIDSet)

expectFromPosition := lastBackupPos.GTIDSet.Union(gtidPurgedPos.GTIDSet)
if !incrementalFromPos.IsZero() {
expectFromPosition = incrementalFromPos.GTIDSet.Union(gtidPurgedPos.GTIDSet)
}
require.Equalf(t, expectFromPosition, fromPositionIncludingPurged, "expected: %v, found: %v, gtid_purged: %v, manifest.Position: %v", expectFromPosition, fromPositionIncludingPurged, gtidPurgedPos, manifest.Position)
})
}
require.Equalf(t, expectFromPosition, fromPositionIncludingPurged, "expected: %v, found: %v", expectFromPosition, fromPositionIncludingPurged)
})
}

testRestores := func(t *testing.T) {
for _, r := range rand.Perm(len(backupPositions)) {
pos := backupPositions[r]
testName := fmt.Sprintf("%s, %d records", pos, rowsPerPosition[pos])
t.Run(testName, func(t *testing.T) {
restoreToPos, err := mysql.DecodePosition(pos)
require.NoError(t, err)
backup.TestReplicaRestoreToPos(t, restoreToPos, "")
msgs := backup.ReadRowsFromReplica(t)
count, ok := rowsPerPosition[pos]
require.True(t, ok)
assert.Equalf(t, count, len(msgs), "messages: %v", msgs)
testRestores := func(t *testing.T) {
for _, r := range rand.Perm(len(backupPositions)) {
pos := backupPositions[r]
testName := fmt.Sprintf("%s, %d records", pos, rowsPerPosition[pos])
t.Run(testName, func(t *testing.T) {
restoreToPos, err := mysql.DecodePosition(pos)
require.NoError(t, err)
backup.TestReplicaRestoreToPos(t, restoreToPos, "")
msgs := backup.ReadRowsFromReplica(t)
count, ok := rowsPerPosition[pos]
require.True(t, ok)
assert.Equalf(t, count, len(msgs), "messages: %v", msgs)
})
}
}
t.Run("PITR", func(t *testing.T) {
testRestores(t)
})
}
t.Run("remove full position backups", func(t *testing.T) {
// Delete the fromFullPosition backup(s), which leaves us with less restore options. Try again.
for _, backupName := range fromFullPositionBackups {
backup.RemoveBackup(t, backupName)
}
})
t.Run("PITR-2", func(t *testing.T) {
testRestores(t)
})
})
}
t.Run("PITR", func(t *testing.T) {
testRestores(t)
})
t.Run("remove full position backups", func(t *testing.T) {
// Delete the fromFullPosition backup(s), which leaves us with less restore options. Try again.
for _, backupName := range fromFullPositionBackups {
backup.RemoveBackup(t, backupName)
}
})
t.Run("PITR-2", func(t *testing.T) {
testRestores(t)
})
}