Skip to content

Commit

Permalink
update
Browse files Browse the repository at this point in the history
  • Loading branch information
Mercybudda committed Dec 9, 2021
1 parent b019eb7 commit 2b0d305
Show file tree
Hide file tree
Showing 8 changed files with 227 additions and 40 deletions.
14 changes: 10 additions & 4 deletions cmd/geth/snapshot.go
Original file line number Diff line number Diff line change
Expand Up @@ -171,30 +171,34 @@ It's also usable without snapshot enabled.

func pruneBlock(ctx *cli.Context) error {
stack, config := makeConfigNode(ctx)
//defer stack.Close()
defer stack.Close()

chaindb := utils.MakeChainDatabase(ctx, stack, false)
chaindb.Close()

// Make sure we have a valid genesis JSON
genesisPath := ctx.GlobalString(utils.GenesisFlag.Name)
if len(genesisPath) == 0 {
utils.Fatalf("Must supply path to genesis JSON file")
}
genesis := new(core.Genesis)
file, err := os.Open(genesisPath)
if err != nil {
utils.Fatalf("Failed to read genesis file: %v", err)
}
defer file.Close()

genesis := new(core.Genesis)
if err := json.NewDecoder(file).Decode(genesis); err != nil {
utils.Fatalf("invalid genesis file: %v", err)
}
if err != nil {
utils.Fatalf("Failed to decode genesis: %v", err)
}

freezer := config.Eth.DatabaseFreezer

for _, name := range []string{"chaindata"} {
root := stack.ResolvePath(name) // /Users/user/storage/Private_BSC_Storage/build/bin/node/geth/chaindata
root := stack.ResolvePath(name)
switch {
case freezer == "":
freezer = filepath.Join(root, "ancient")
Expand All @@ -206,11 +210,13 @@ func pruneBlock(ctx *cli.Context) error {
utils.Fatalf("Failed to create block pruner", err)
}
backfreezer := filepath.Join(root, "ancient_back_up")
if err := pruner.BlockPruneBackUp(name, config.Eth.DatabaseCache, utils.MakeDatabaseHandles(), backfreezer, "", false); err != nil {

if err := pruner.BlockPruneBackUp(name, config.Eth.DatabaseCache, utils.MakeDatabaseHandles(), backfreezer, freezer, "", false); err != nil {
log.Error("Failed to back up block", "err", err)
return err
}
}

log.Info("geth block offline pruning backup successfully")
oldAncientPath := ctx.GlobalString(utils.AncientFlag.Name)
newAncientPath := ctx.GlobalString(utils.AncientBackUpFlag.Name)
Expand Down
15 changes: 14 additions & 1 deletion core/rawdb/accessors_chain.go
Original file line number Diff line number Diff line change
Expand Up @@ -294,6 +294,20 @@ func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValu
return nil // Can't find the data anywhere.
}

// ReadOffSetOfAncientFreezer reads the number of items previously removed from
// the head of the ancient freezer by an offline block prune. It returns 0 when
// the marker has never been written.
//
// The marker may be stored as a minimal big-endian big.Int encoding (see
// WriteOffSetOfAncientFreezer), which can be shorter than 8 bytes and is empty
// for zero. Decoding through big.Int therefore avoids the panic that
// binary.BigEndian.Uint64 would raise on a short slice.
func ReadOffSetOfAncientFreezer(db ethdb.KeyValueStore) uint64 {
	offset, _ := db.Get(offSetOfAncientFreezer)
	if len(offset) == 0 {
		// Either the key is absent or a zero was stored as an empty encoding.
		return 0
	}
	return new(big.Int).SetBytes(offset).Uint64()
}

// WriteOffSetOfAncientFreezer stores the freezer offset marker, i.e. the
// number of items cut off the head of the ancient store by an offline prune.
//
// The value is encoded as a fixed 8-byte big-endian integer so that readers
// relying on binary.BigEndian.Uint64 always receive a full-width slice. The
// previous big.Int encoding emitted fewer than 8 bytes for small values and
// an empty slice for zero, which would panic such readers.
func WriteOffSetOfAncientFreezer(db ethdb.KeyValueStore, offset uint64) {
	var enc [8]byte
	binary.BigEndian.PutUint64(enc[:], offset)
	if err := db.Put(offSetOfAncientFreezer, enc[:]); err != nil {
		log.Crit("Failed to store offSetOfAncientFreezer", "err", err)
	}
}

// HasHeader verifies the existence of a block header corresponding to the hash.
func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
Expand Down Expand Up @@ -724,7 +738,6 @@ func WriteAncientBlock(db ethdb.AncientWriter, block *types.Block, receipts type
return len(headerBlob) + len(bodyBlob) + len(receiptBlob) + len(tdBlob) + common.HashLength
}


// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
DeleteReceipts(db, hash, number)
Expand Down
89 changes: 84 additions & 5 deletions core/rawdb/database.go
Original file line number Diff line number Diff line change
Expand Up @@ -122,11 +122,6 @@ func (db *nofreezedb) AppendAncient(number uint64, hash, header, body, receipts,
return errNotSupported
}

// AppendAncientNoBody returns an error as we don't have a backing chain freezer.
// All arguments are ignored: nofreezedb is a key-value-only database, so every
// ancient-store mutation uniformly reports errNotSupported.
func (db *nofreezedb) AppendAncientNoBody(number uint64, hash, header, receipts, td []byte) error {
	return errNotSupported
}

// TruncateAncients returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateAncients(items uint64) error {
return errNotSupported
Expand Down Expand Up @@ -236,6 +231,47 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
}, nil
}

// NewDatabaseWithFreezerForPruneBlock creates a high level database on top of
// a given key-value data store combined with an idle freezer, intended for
// offline block pruning. Unlike NewDatabaseWithFreezer it does NOT start the
// background freeze goroutine, so no chain segments are moved into cold
// storage while the prune is in progress.
func NewDatabaseWithFreezerForPruneBlock(db ethdb.KeyValueStore, freezer string, namespace string, readonly bool) (ethdb.Database, error) {
	// Create the idle freezer instance.
	frdb, err := newFreezer(freezer, namespace, readonly)
	if err != nil {
		return nil, err
	}

	return &freezerdb{
		KeyValueStore: db,
		AncientStore:  frdb,
	}, nil
}

// NewDatabaseWithFreezerBackup creates a high level database on top of a given
// key-value data store with a freezer moving immutable chain segments into
// cold storage. The supplied offset is assigned to the freezer so that item
// numbering in the backup freezer starts where the pruned-away history ends.
func NewDatabaseWithFreezerBackup(offset uint64, db ethdb.KeyValueStore, freezer string, namespace string, readonly bool) (ethdb.Database, error) {
	// Create the idle freezer instance.
	frdb, err := newFreezer(freezer, namespace, readonly)
	if err != nil {
		return nil, err
	}

	// Assign the new offset to the backup freezer while creating it.
	frdb.offset = offset

	// Freezer is consistent with the key-value database, permit combining the two.
	if !frdb.readonly {
		go frdb.freeze(db)
	}

	return &freezerdb{
		KeyValueStore: db,
		AncientStore:  frdb,
	}, nil
}

// NewMemoryDatabase creates an ephemeral in-memory key-value database without a
// freezer moving immutable chain segments into cold storage.
func NewMemoryDatabase() ethdb.Database {
Expand Down Expand Up @@ -274,6 +310,49 @@ func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer
return frdb, nil
}

// NewLevelDBDatabaseWithFreezerForPruneBlock creates a persistent key-value
// database combined with an idle freezer (no background freeze goroutine),
// intended for offline block pruning.
func NewLevelDBDatabaseWithFreezerForPruneBlock(file string, cache int, handles int, freezer string, namespace string, readonly bool) (ethdb.Database, error) {
	kvdb, err := leveldb.New(file, cache, handles, namespace, readonly)
	if err != nil {
		return nil, err
	}
	frdb, err := NewDatabaseWithFreezerForPruneBlock(kvdb, freezer, namespace, readonly)
	if err != nil {
		// Release the key-value store if the freezer cannot be opened.
		kvdb.Close()
		return nil, err
	}
	return frdb, nil
}

// NewLevelDBDatabaseWithFreezerBackup creates a persistent key-value database
// with a backup freezer whose item numbering starts at the given offset,
// moving immutable chain segments into cold storage.
func NewLevelDBDatabaseWithFreezerBackup(offset uint64, file string, cache int, handles int, freezer string, namespace string, readonly bool) (ethdb.Database, error) {
	kvdb, err := leveldb.New(file, cache, handles, namespace, readonly)
	if err != nil {
		return nil, err
	}
	db, err := NewDatabaseWithFreezerBackup(offset, kvdb, freezer, namespace, readonly)
	if err != nil {
		// Release the key-value store if the freezer cannot be opened.
		kvdb.Close()
		return nil, err
	}

	return db, nil
}

// ReOpenDatabaseWithFreezerBackup re-opens the key-value store at file and
// combines it with an already-open ancient store frdb into a single database.
// Only the leveldb instance is created here; the caller retains ownership of
// frdb and is responsible for its lifecycle.
// NOTE(review): the readonly flag applies to the key-value store only — the
// ancient store keeps whatever mode it was originally opened with; confirm
// callers never mix modes.
func ReOpenDatabaseWithFreezerBackup(frdb ethdb.AncientStore, file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
	kvdb, err := leveldb.New(file, cache, handles, namespace, readonly)
	if err != nil {
		return nil, err
	}

	return &freezerdb{
		KeyValueStore: kvdb,
		AncientStore:  frdb,
	}, nil
}

type counter uint64

func (c counter) String() string {
Expand Down
13 changes: 10 additions & 3 deletions core/rawdb/freezer.go
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,8 @@ type freezer struct {

quit chan struct{}
closeOnce sync.Once

offset uint64
}

// newFreezer creates a chain freezer that moves ancient chain data into
Expand Down Expand Up @@ -164,15 +166,15 @@ func (f *freezer) Close() error {
// in the freezer.
func (f *freezer) HasAncient(kind string, number uint64) (bool, error) {
if table := f.tables[kind]; table != nil {
return table.has(number), nil
return table.has(number - f.offset), nil
}
return false, nil
}

// Ancient retrieves an ancient binary blob from the append-only immutable files.
func (f *freezer) Ancient(kind string, number uint64) ([]byte, error) {
if table := f.tables[kind]; table != nil {
return table.Retrieve(number)
return table.Retrieve(number - f.offset)
}
return nil, errUnknownTable
}
Expand Down Expand Up @@ -201,7 +203,7 @@ func (f *freezer) AppendAncient(number uint64, hash, header, body, receipts, td
return errReadOnly
}
// Ensure the binary blobs we are appending is continuous with freezer.
if atomic.LoadUint64(&f.frozen) != number {
if atomic.LoadUint64(&f.frozen) != number-f.offset {
return errOutOrderInsertion
}
// Rollback all inserted data if any insertion below failed to ensure
Expand Down Expand Up @@ -313,6 +315,11 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
continue
}
number := ReadHeaderNumber(nfdb, hash)

//minus the freezer offset
if number != nil {
*number = *number - f.offset
}
threshold := atomic.LoadUint64(&f.threshold)

switch {
Expand Down
1 change: 1 addition & 0 deletions core/rawdb/schema.go
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,7 @@ var (
// fastTxLookupLimitKey tracks the transaction lookup limit during fast sync.
fastTxLookupLimitKey = []byte("FastTransactionLookupLimit")

offSetOfAncientFreezer = []byte("OffSetOfAncientFreezer")
// badBlockKey tracks the list of bad blocks seen by local
badBlockKey = []byte("InvalidBlock")

Expand Down
68 changes: 42 additions & 26 deletions core/state/pruner/pruner.go
Original file line number Diff line number Diff line change
Expand Up @@ -258,62 +258,78 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
}

// Prune block body data
func (p *BlockPruner) BlockPruneBackUp(name string, cache, handles int, backFreezer, namespace string, readonly bool) error {
func (p *BlockPruner) BlockPruneBackUp(name string, cache, handles int, backFreezer, freezer, namespace string, readonly bool) error {
//Back-up the necessary data within original ancient directory, create new freezer backup directory backFreezer
//db, err = rawdb.NewLevelDBDatabaseWithFreezer(root, cache, handles, backFreezer, namespace, readonly)
start := time.Now()
chainDbBack, err := p.n.OpenDatabaseWithFreezer(name, cache, handles, backFreezer, namespace, readonly)
if err != nil {
log.Error("Failed to open ancient database: %v", err)
return err
}

//write back-up data to new chainDb
//write genesis block firstly
genesis := p.genesis
if _, _, err := core.SetupGenesisBlock(chainDbBack, genesis); err != nil {
log.Error("Failed to write genesis block: %v", err)
return err
}

//write the latest 128 blocks data of the ancient db
// If we can't access the freezer or it's empty, abort
oldOffSet := rawdb.ReadOffSetOfAncientFreezer(p.db)
frozen, err := p.db.Ancients()

if err != nil || frozen == 0 {
return errors.New("Can't access the freezer or it's empty, abort")
return errors.New("can't access the freezer or it's empty, abort")
}
start_index := frozen - 128
if start_index < 0 {
start_index = 0
startBlockNumber := frozen + oldOffSet - 128

newOffSet := oldOffSet + frozen - 128

chainDb, err := p.n.OpenDatabaseWithFreezer(name, cache, handles, freezer, namespace, readonly)
if err != nil {
log.Error("Failed to open ancient database: %v", err)
return err
}
//write the new offset into db for new freezer usage
rawdb.WriteOffSetOfAncientFreezer(chainDb, newOffSet)
chainDb.Close()

blockList := make([]*types.Block, 0, 128)
receiptsList := make([]types.Receipts, 0, 128)
externTdList := make([]*big.Int, 0, 128)

chainDb, err = p.n.OpenDatabaseWithFreezerForPruneBlock(name, cache, handles, freezer, namespace, readonly)
if err != nil {
log.Error("Failed to open ancient database: %v", err)
return err
}
//All ancient data within the most recent 128 blocks write into new ancient_back directory
chainDb := p.db
for blockNumber := start_index; blockNumber < frozen; blockNumber++ {
for blockNumber := startBlockNumber; blockNumber < frozen+oldOffSet; blockNumber++ {
blockHash := rawdb.ReadCanonicalHash(chainDb, blockNumber)
block := rawdb.ReadBlock(chainDb, blockHash, blockNumber)
blockList = append(blockList, block)
receipts := rawdb.ReadRawReceipts(chainDb, blockHash, blockNumber)
receiptsList = append(receiptsList, receipts)
// Calculate the total difficulty of the block
td := rawdb.ReadTd(chainDb, blockHash, blockNumber)
if td == nil {
return consensus.ErrUnknownAncestor
}
externTd := new(big.Int).Add(block.Difficulty(), td)
rawdb.WriteAncientBlock(chainDbBack, block, receipts, externTd)
externTdList = append(externTdList, externTd)
}
//chainDb.TruncateAncients(start_index - 1)

chainDb.Close()
chainDbBack.Close()

log.Info("Block pruning BackUp successful", common.PrettyDuration(time.Since(start)))
chainDbBack, err := p.n.OpenDatabaseWithFreezerBackup(newOffSet, name, cache, handles, backFreezer, namespace, readonly)
if err != nil {
log.Error("Failed to open ancient database: %v", err)
return err
}

//Write into ancient_backup
for id := 0; id < len(blockList); id++ {
rawdb.WriteAncientBlock(chainDbBack, blockList[id], receiptsList[id], externTdList[id])
}

chainDbBack.Close()
log.Info("Block pruning BackUp successfully", common.PrettyDuration(time.Since(start)))
return nil
}

func BlockPrune(oldAncientPath, newAncientPath string) error {
//Delete directly for the old ancientdb, e.g.: path ../chaindb/ancient
if err := os.RemoveAll(oldAncientPath); err != nil {
log.Error("Failed to remove old ancient directory %v", err)

return err
}

Expand Down
1 change: 0 additions & 1 deletion ethdb/database.go
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,6 @@ type AncientWriter interface {

// Sync flushes all in-memory ancient store data to disk.
Sync() error

}

// Reader contains the methods required to read data from both key-value as well as
Expand Down
Loading

0 comments on commit 2b0d305

Please sign in to comment.