backup range by table info (pingcap#72)
* backup range by table info

* update integration test
3pointer authored and overvenus committed Dec 5, 2019
1 parent d8f45dc commit 5266cd4
Showing 7 changed files with 170 additions and 43 deletions.
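
At a glance, the backup client no longer derives one range per table from the table ID alone; it now builds KV ranges from the table's TableInfo: one range for the row data plus one per public index, repeated per partition when the table is partitioned. As a minimal sketch outside this commit (the helper name rowKeyRange is ours, not part of the change), this is the row-data range the new code is expected to produce for a single table, mirroring the expectations added to client_test.go below:

package main

import (
	"fmt"
	"math"

	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/tablecodec"
	"github.com/pingcap/tidb/util/codec"
)

// rowKeyRange mirrors the expected row-data range for one table:
// every integer handle from MinInt64 to MaxInt64 under the table's row prefix.
func rowKeyRange(tableID int64) kv.KeyRange {
	low := codec.EncodeInt(nil, math.MinInt64)
	high := kv.Key(codec.EncodeInt(nil, math.MaxInt64)).PrefixNext()
	return kv.KeyRange{
		StartKey: tablecodec.EncodeRowKey(tableID, low),
		EndKey:   tablecodec.EncodeRowKey(tableID, high),
	}
}

func main() {
	r := rowKeyRange(7)
	fmt.Printf("table 7 rows: [%x, %x)\n", r.StartKey, r.EndKey)
}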
65 changes: 37 additions & 28 deletions pkg/backup/client.go
@@ -15,13 +15,14 @@ import (
"github.com/pingcap/parser/model"
pd "github.com/pingcap/pd/client"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/ranger"
"go.uber.org/zap"

"github.com/pingcap/br/pkg/meta"
@@ -144,36 +145,40 @@ func (bc *Client) SaveBackupMeta(path string) error {
return bc.storage.Write(utils.MetaFile, backupMetaData)
}

type tableRange struct {
startID, endID int64
}

func (tr tableRange) Range() Range {
startKey := tablecodec.GenTablePrefix(tr.startID)
endKey := tablecodec.GenTablePrefix(tr.endID)
return Range{
StartKey: []byte(startKey),
EndKey: []byte(endKey),
}
}

func buildTableRanges(tbl *model.TableInfo) []tableRange {
func buildTableRanges(tbl *model.TableInfo) ([]kv.KeyRange, error) {
pis := tbl.GetPartitionInfo()
if pis == nil {
// Short path, no partition.
tableID := tbl.ID
return []tableRange{{startID: tableID, endID: tableID + 1}}
return appendRanges(tbl, tbl.ID)
}

ranges := make([]tableRange, 0, len(pis.Definitions))
ranges := make([]kv.KeyRange, 0, len(pis.Definitions)*(len(tbl.Indices)+1)+1)
for _, def := range pis.Definitions {
ranges = append(ranges,
tableRange{
startID: def.ID,
endID: def.ID + 1,
})
rgs, err := appendRanges(tbl, def.ID)
if err != nil {
return nil, err
}
ranges = append(ranges, rgs...)
}
return ranges
return ranges, nil
}

func appendRanges(tbl *model.TableInfo, tblID int64) ([]kv.KeyRange, error) {
ranges := ranger.FullIntRange(false)
kvRanges := distsql.TableRangesToKVRanges(tblID, ranges, nil)
for _, index := range tbl.Indices {
if index.State != model.StatePublic {
continue
}
ranges = ranger.FullRange()
idxRanges, err := distsql.IndexRangesToKVRanges(nil, tblID, index.ID, ranges, nil)
if err != nil {
return nil, errors.Trace(err)
}
kvRanges = append(kvRanges, idxRanges...)
}
return kvRanges, nil

}
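
Compared with the removed tableRange, which covered the whole table prefix [t<tableID>, t<tableID+1>), appendRanges narrows the backup to the row-data range plus one range per public index; indices still being added or dropped are skipped. That is presumably what allows br_full_index to re-enable --fastchecksum further down, since no half-built index data is backed up anymore. A minimal sketch (not part of this commit, and assuming the tablecodec helpers GenTableRecordPrefix and EncodeTableIndexPrefix are available in this TiDB version) contrasting the old and new coverage for table 7 with index 1:

package main

import (
	"fmt"

	"github.com/pingcap/tidb/tablecodec"
)

func main() {
	// Old behaviour: a single range over the whole table prefix, which also
	// swept up data of indices that were not yet public.
	fmt.Printf("whole table: [%x, %x)\n",
		tablecodec.GenTablePrefix(7), tablecodec.GenTablePrefix(8))

	// New behaviour: separate prefixes for row data and for each public index.
	fmt.Printf("row data:    %x\n", tablecodec.GenTableRecordPrefix(7))
	fmt.Printf("index 1:     %x\n", tablecodec.EncodeTableIndexPrefix(7, 1))
}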

// BuildBackupRangeAndSchema gets the range and schema of tables.
Expand Down Expand Up @@ -255,11 +260,15 @@ LoadDb:
}
backupSchemas.pushPending(schema, dbInfo.Name.L, tableInfo.Name.L)

// TODO: We may need to include [t<tableID>, t<tableID+1>)
// in order to backup global index.
tableRanges := buildTableRanges(tableInfo)
tableRanges, err := buildTableRanges(tableInfo)
if err != nil {
return nil, nil, err
}
for _, r := range tableRanges {
ranges = append(ranges, r.Range())
ranges = append(ranges, Range{
StartKey: r.StartKey,
EndKey: r.EndKey,
})
}
}
}
37 changes: 25 additions & 12 deletions pkg/backup/client_test.go
@@ -2,10 +2,15 @@ package backup

import (
"context"
"math"
"net/http"
"testing"
"time"

"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/util/codec"

"github.com/pingcap/br/pkg/meta"

. "github.com/pingcap/check"
@@ -96,18 +101,22 @@ func (r *testBackup) TestGetTS(c *C) {
func (r *testBackup) TestBuildTableRange(c *C) {
type Case struct {
ids []int64
trs []tableRange
trs []kv.KeyRange
}
low := codec.EncodeInt(nil, math.MinInt64)
high := kv.Key(codec.EncodeInt(nil, math.MaxInt64)).PrefixNext()
cases := []Case{
{ids: []int64{1}, trs: []tableRange{{startID: 1, endID: 2}}},
{ids: []int64{1, 2, 3}, trs: []tableRange{
{startID: 1, endID: 2},
{startID: 2, endID: 3},
{startID: 3, endID: 4},
{ids: []int64{1}, trs: []kv.KeyRange{
{StartKey: tablecodec.EncodeRowKey(1, low), EndKey: tablecodec.EncodeRowKey(1, high)}},
},
{ids: []int64{1, 2, 3}, trs: []kv.KeyRange{
{StartKey: tablecodec.EncodeRowKey(1, low), EndKey: tablecodec.EncodeRowKey(1, high)},
{StartKey: tablecodec.EncodeRowKey(2, low), EndKey: tablecodec.EncodeRowKey(2, high)},
{StartKey: tablecodec.EncodeRowKey(3, low), EndKey: tablecodec.EncodeRowKey(3, high)},
}},
{ids: []int64{1, 3}, trs: []tableRange{
{startID: 1, endID: 2},
{startID: 3, endID: 4},
{ids: []int64{1, 3}, trs: []kv.KeyRange{
{StartKey: tablecodec.EncodeRowKey(1, low), EndKey: tablecodec.EncodeRowKey(1, high)},
{StartKey: tablecodec.EncodeRowKey(3, low), EndKey: tablecodec.EncodeRowKey(3, high)},
}},
}
for _, cs := range cases {
@@ -117,13 +126,17 @@ func (r *testBackup) TestBuildTableRange(c *C) {
tbl.Partition.Definitions = append(tbl.Partition.Definitions,
model.PartitionDefinition{ID: id})
}
ranges := buildTableRanges(tbl)
ranges, err := buildTableRanges(tbl)
c.Assert(err, IsNil)
c.Assert(ranges, DeepEquals, cs.trs)
}

tbl := &model.TableInfo{ID: 7}
ranges := buildTableRanges(tbl)
c.Assert(ranges, DeepEquals, []tableRange{{startID: 7, endID: 8}})
ranges, err := buildTableRanges(tbl)
c.Assert(err, IsNil)
c.Assert(ranges, DeepEquals, []kv.KeyRange{
{StartKey: tablecodec.EncodeRowKey(7, low), EndKey: tablecodec.EncodeRowKey(7, high)},
})

}
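
The new cases only cover tables without indices. A hypothetical extra test (not in this commit; it would have to live in the same package, since buildTableRanges is unexported) could check that only public indices get a range:

func (r *testBackup) TestBuildTableRangeWithIndex(c *C) {
	// One public index and one index still in a DDL state: the public index
	// should contribute a range, the other should be skipped.
	tbl := &model.TableInfo{
		ID: 7,
		Indices: []*model.IndexInfo{
			{ID: 1, State: model.StatePublic},
			{ID: 2, State: model.StateDeleteOnly},
		},
	}
	ranges, err := buildTableRanges(tbl)
	c.Assert(err, IsNil)
	// Expected: the row-data range plus one range for the public index.
	c.Assert(ranges, HasLen, 2)
}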

8 changes: 7 additions & 1 deletion pkg/utils/schema.go
@@ -85,6 +85,12 @@ func LoadBackupTables(meta *backup.BackupMeta) (map[string]*Database, error) {
if err != nil {
return nil, errors.Trace(err)
}
partitions := make(map[int64]bool)
if tableInfo.Partition != nil {
for _, p := range tableInfo.Partition.Definitions {
partitions[p.ID] = true
}
}
// Find the files belong to the table
tableFiles := make([]*backup.File, 0)
for _, file := range meta.Files {
@@ -95,7 +101,7 @@ func LoadBackupTables(meta *backup.BackupMeta) (map[string]*Database, error) {
}
startTableID := tablecodec.DecodeTableID(file.GetStartKey())
// If the file contains a part of the data of the table, append it to the slice.
if startTableID == tableInfo.ID {
if ok := partitions[startTableID]; ok || startTableID == tableInfo.ID {
tableFiles = append(tableFiles, file)
}
}
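
In other words, a backup file is now attributed to a table when its start key decodes either to the table's own ID or to the ID of one of the table's partitions. A hypothetical helper (not in this commit) condensing the matching rule above:

func fileBelongsToTable(file *backup.File, tableInfo *model.TableInfo) bool {
	partitions := make(map[int64]bool)
	if tableInfo.Partition != nil {
		for _, p := range tableInfo.Partition.Definitions {
			partitions[p.ID] = true
		}
	}
	startTableID := tablecodec.DecodeTableID(file.GetStartKey())
	return partitions[startTableID] || startTableID == tableInfo.ID
}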
5 changes: 5 additions & 0 deletions tests/br_full/run.sh
@@ -58,3 +58,8 @@ if $fail; then
else
echo "TEST: [$TEST_NAME] successed!"
fi

for i in $(seq $DB_COUNT); do
run_sql "DROP DATABASE $DB${i};"
done

71 changes: 71 additions & 0 deletions tests/br_full_ddl/run.sh
@@ -0,0 +1,71 @@
#!/bin/sh
#
# Copyright 2019 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# See the License for the specific language governing permissions and
# limitations under the License.

set -eu
DB="$TEST_NAME"
TABLE="usertable"
DDL_COUNT=10
LOG=/$TEST_DIR/$DB/backup.log

run_sql "CREATE DATABASE $DB;"
go-ycsb load mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB

row_count_ori=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')

for i in $(seq $DDL_COUNT); do
run_sql "USE $DB; ALTER TABLE $TABLE ADD INDEX (FIELD$i);"
done

for i in $(seq $DDL_COUNT); do
if (( RANDOM % 2 )); then
run_sql "USE $DB; ALTER TABLE $TABLE DROP INDEX FIELD$i;"
fi
done

# backup full
echo "backup start..."
run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --fastchecksum true --log-file $LOG

checksum_count=$(cat $LOG | grep "fast checksum success" | wc -l | xargs)

if [ "${checksum_count}" != "1" ];then
echo "TEST: [$TEST_NAME] fail on fast checksum"
echo $(cat $LOG | grep checksum)
exit 1
fi

run_sql "DROP DATABASE $DB;"

# restore full
echo "restore start..."
run_br restore full -s "local://$TEST_DIR/$DB" --pd $PD_ADDR

row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')

fail=false
if [ "${row_count_ori}" != "${row_count_new}" ];then
fail=true
echo "TEST: [$TEST_NAME] fail on database $DB${i}"
fi
echo "database $DB$ [original] row count: ${row_count_ori}, [after br] row count: ${row_count_new}"

if $fail; then
echo "TEST: [$TEST_NAME] failed!"
exit 1
else
echo "TEST: [$TEST_NAME] successed!"
fi

run_sql "DROP DATABASE $DB;"
13 changes: 13 additions & 0 deletions tests/br_full_ddl/workload
@@ -0,0 +1,13 @@
recordcount=10000
operationcount=0
workload=core
fieldcount=100

readallfields=true

readproportion=0
updateproportion=0
scanproportion=0
insertproportion=0

requestdistribution=uniform
14 changes: 12 additions & 2 deletions tests/br_full_index/run.sh
@@ -17,6 +17,7 @@ set -eu
DB="$TEST_NAME"
TABLE="usertable"
DB_COUNT=3
LOG=/$TEST_DIR/$DB/backup.log

for i in $(seq $DB_COUNT); do
run_sql "CREATE DATABASE $DB${i};"
@@ -35,8 +36,15 @@ done

# backup full
echo "backup start..."
# TODO: Enable fastchecksum. For now, backup calculates extra data that fails in fastchecksum
run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 #--fastchecksum true
run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --fastchecksum true --log-file $LOG

checksum_count=$(cat $LOG | grep "fast checksum success" | wc -l | xargs)

if [ "${checksum_count}" != "$DB_COUNT" ];then
echo "TEST: [$TEST_NAME] fail on fast checksum"
echo $(cat $LOG | grep checksum)
exit 1
fi

for i in $(seq $DB_COUNT); do
run_sql "DROP DATABASE $DB${i};"
@@ -65,3 +73,5 @@ if $fail; then
else
echo "TEST: [$TEST_NAME] successed!"
fi

run_sql "DROP DATABASE IF EXISTS $DB;"
