diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index b8c6814040a4..08a7260d264f 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -61,11 +61,13 @@ type SimulatedBackend struct {
database ethdb.Database // In memory database to store our testing data
blockchain *core.BlockChain // Ethereum blockchain to handle the consensus
- mu sync.Mutex
- pendingBlock *types.Block // Currently pending block that will be imported on request
- pendingState *state.StateDB // Currently pending state that will be the active on on request
+ mu sync.Mutex
+ pendingBlock *types.Block // Currently pending block that will be imported on request
+ pendingState *state.StateDB // Currently pending state that will be the active on request
+ pendingReceipts types.Receipts // Currently receipts for the pending block
- events *filters.EventSystem // Event system for filtering log events live
+ events *filters.EventSystem // for filtering log events live
+ filterSystem *filters.FilterSystem // for filtering database logs
config *params.ChainConfig
}
@@ -94,9 +96,7 @@ func SimulateWalletAddressAndSignFn() (common.Address, func(account accounts.Acc
// XDC simulated backend for testing purpose.
func NewXDCSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64, chainConfig *params.ChainConfig) *SimulatedBackend {
- // database := ethdb.NewMemDatabase()
database := rawdb.NewMemoryDatabase()
-
genesis := core.Genesis{
GasLimit: gasLimit, // need this big, support initial smart contract
Config: chainConfig,
@@ -126,8 +126,12 @@ func NewXDCSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64, chainConfi
database: database,
blockchain: blockchain,
config: genesis.Config,
- events: filters.NewEventSystem(new(event.TypeMux), &filterBackend{database, blockchain}, false),
}
+
+ filterBackend := &filterBackend{database, blockchain, backend}
+ backend.filterSystem = filters.NewFilterSystem(filterBackend, filters.Config{})
+ backend.events = filters.NewEventSystem(backend.filterSystem, false)
+
blockchain.Client = backend
backend.rollback()
return backend
@@ -146,8 +150,12 @@ func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
database: database,
blockchain: blockchain,
config: genesis.Config,
- events: filters.NewEventSystem(new(event.TypeMux), &filterBackend{database, blockchain}, false),
}
+
+ filterBackend := &filterBackend{database, blockchain, backend}
+ backend.filterSystem = filters.NewFilterSystem(filterBackend, filters.Config{})
+ backend.events = filters.NewEventSystem(backend.filterSystem, false)
+
backend.rollback()
return backend
}
@@ -400,7 +408,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
}
// Include tx in chain.
- blocks, _ := core.GenerateChain(b.config, block, b.blockchain.Engine(), b.database, 1, func(number int, block *core.BlockGen) {
+ blocks, receipts := core.GenerateChain(b.config, block, b.blockchain.Engine(), b.database, 1, func(number int, block *core.BlockGen) {
for _, tx := range b.pendingBlock.Transactions() {
block.AddTxWithChain(b.blockchain, tx)
}
@@ -410,6 +418,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
+ b.pendingReceipts = receipts[0]
return nil
}
@@ -421,7 +430,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query XDPoSChain.Filt
var filter *filters.Filter
if query.BlockHash != nil {
// Block filter requested, construct a single-shot filter
- filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics)
+ filter = b.filterSystem.NewBlockFilter(*query.BlockHash, query.Addresses, query.Topics)
} else {
// Initialize unset filter boundaried to run from genesis to chain head
from := int64(0)
@@ -433,7 +442,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query XDPoSChain.Filt
to = query.ToBlock.Int64()
}
// Construct the range filter
- filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)
+ filter = b.filterSystem.NewRangeFilter(from, to, query.Addresses, query.Topics)
}
// Run the filter and return all the logs
logs, err := filter.Logs(ctx)
@@ -523,8 +532,9 @@ func (m callMsg) AccessList() types.AccessList { return m.CallMsg.AccessList }
// filterBackend implements filters.Backend to support filtering for logs without
// taking bloom-bits acceleration structures into account.
type filterBackend struct {
- db ethdb.Database
- bc *core.BlockChain
+ db ethdb.Database
+ bc *core.BlockChain
+ backend *SimulatedBackend
}
func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db }
@@ -545,35 +555,51 @@ func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (typ
return core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash)), nil
}
-func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
- receipts := core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash))
- if receipts == nil {
- return nil, nil
- }
- logs := make([][]*types.Log, len(receipts))
- for i, receipt := range receipts {
- logs[i] = receipt.Logs
+func (fb *filterBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
+ if body := fb.bc.GetBody(hash); body != nil {
+ return body, nil
}
+ return nil, errors.New("block body not found")
+}
+
+func (fb *filterBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
+ return fb.backend.pendingBlock, fb.backend.pendingReceipts
+}
+
+func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
+ logs := rawdb.ReadLogs(fb.db, hash, number)
return logs, nil
}
func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
- return event.NewSubscription(func(quit <-chan struct{}) error {
- <-quit
- return nil
- })
+ return nullSubscription()
}
+
func (fb *filterBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
return fb.bc.SubscribeChainEvent(ch)
}
+
func (fb *filterBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
return fb.bc.SubscribeRemovedLogsEvent(ch)
}
+
func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
return fb.bc.SubscribeLogsEvent(ch)
}
+func (fb *filterBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
+ return nullSubscription()
+}
+
func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 }
+
func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) {
panic("not supported")
}
+
+func nullSubscription() event.Subscription {
+ return event.NewSubscription(func(quit <-chan struct{}) error {
+ <-quit
+ return nil
+ })
+}
diff --git a/cmd/XDC/main.go b/cmd/XDC/main.go
index ee6e47dac755..90bc3f6382e2 100644
--- a/cmd/XDC/main.go
+++ b/cmd/XDC/main.go
@@ -93,6 +93,7 @@ var (
utils.CacheDatabaseFlag,
//utils.CacheGCFlag,
//utils.TrieCacheGenFlag,
+ utils.CacheLogSizeFlag,
utils.FDLimitFlag,
utils.ListenPortFlag,
utils.MaxPeersFlag,
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index a8b55145619a..ec727e20bef9 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -42,8 +42,10 @@ import (
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/eth/downloader"
"github.com/XinFinOrg/XDPoSChain/eth/ethconfig"
+ "github.com/XinFinOrg/XDPoSChain/eth/filters"
"github.com/XinFinOrg/XDPoSChain/eth/gasprice"
"github.com/XinFinOrg/XDPoSChain/ethdb"
+ "github.com/XinFinOrg/XDPoSChain/internal/ethapi"
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/metrics"
"github.com/XinFinOrg/XDPoSChain/metrics/exp"
@@ -54,6 +56,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/p2p/nat"
"github.com/XinFinOrg/XDPoSChain/p2p/netutil"
"github.com/XinFinOrg/XDPoSChain/params"
+ "github.com/XinFinOrg/XDPoSChain/rpc"
whisper "github.com/XinFinOrg/XDPoSChain/whisper/whisperv6"
gopsutil "github.com/shirou/gopsutil/mem"
"gopkg.in/urfave/cli.v1"
@@ -316,6 +319,11 @@ var (
Usage: "Percentage of cache memory allowance to use for trie pruning",
Value: 25,
}
+ CacheLogSizeFlag = &cli.IntFlag{
+ Name: "cache.blocklogs",
+ Usage: "Size (in number of blocks) of the log cache for filtering",
+ Value: ethconfig.Defaults.FilterLogCacheSize,
+ }
FDLimitFlag = cli.IntFlag{
Name: "fdlimit",
Usage: "Raise the open file descriptor resource limit (default = system fd limit)",
@@ -1244,6 +1252,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.GlobalIsSet(GasPriceFlag.Name) {
cfg.GasPrice = GlobalBig(ctx, GasPriceFlag.Name)
}
+ if ctx.IsSet(CacheLogSizeFlag.Name) {
+ cfg.FilterLogCacheSize = ctx.Int(CacheLogSizeFlag.Name)
+ }
if ctx.GlobalIsSet(VMEnableDebugFlag.Name) {
// TODO(fjl): force-enable this in --dev mode
cfg.EnablePreimageRecording = ctx.GlobalBool(VMEnableDebugFlag.Name)
@@ -1443,6 +1454,19 @@ func WalkMatch(root, pattern string) ([]string, error) {
return matches, nil
}
+// RegisterFilterAPI adds the eth log filtering RPC API to the node.
+func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconfig.Config) *filters.FilterSystem {
+ isLightClient := ethcfg.SyncMode == downloader.LightSync
+ filterSystem := filters.NewFilterSystem(backend, filters.Config{
+ LogCacheSize: ethcfg.FilterLogCacheSize,
+ })
+ stack.RegisterAPIs([]rpc.API{{
+ Namespace: "eth",
+ Service: filters.NewFilterAPI(filterSystem, isLightClient),
+ }})
+ return filterSystem
+}
+
func SetupMetrics(ctx *cli.Context) {
if metrics.Enabled {
log.Info("Enabling metrics collection")
diff --git a/cmd/utils/utils.go b/cmd/utils/utils.go
index 406d15ebfc83..a329fc8c4d2e 100644
--- a/cmd/utils/utils.go
+++ b/cmd/utils/utils.go
@@ -47,8 +47,7 @@ func RegisterShhService(stack *node.Node, cfg *whisper.Config) {
}
}
-// RegisterEthStatsService configures the Ethereum Stats daemon and adds it to
-// th egiven node.
+// RegisterEthStatsService configures the Ethereum Stats daemon and adds it to the node.
func RegisterEthStatsService(stack *node.Node, url string) {
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
// Retrieve both eth and les services
diff --git a/core/bench_test.go b/core/bench_test.go
index 7cfed07f45a0..1129142b1af6 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -235,12 +235,12 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) {
ReceiptHash: types.EmptyRootHash,
}
hash = header.Hash()
- WriteHeader(db, header)
- WriteCanonicalHash(db, hash, n)
+ rawdb.WriteHeader(db, header)
+ rawdb.WriteCanonicalHash(db, hash, n)
WriteTd(db, hash, n, big.NewInt(int64(n+1)))
if full || n == 0 {
block := types.NewBlockWithHeader(header)
- WriteBody(db, hash, n, block.Body())
+ rawdb.WriteBody(db, hash, n, block.Body())
WriteBlockReceipts(db, hash, n, nil)
}
}
diff --git a/core/blockchain.go b/core/blockchain.go
index 0eaa0823197b..e6aedc143c39 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -39,6 +39,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/consensus/XDPoS"
"github.com/XinFinOrg/XDPoSChain/consensus/XDPoS/utils"
contractValidator "github.com/XinFinOrg/XDPoSChain/contracts/validator/contract"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/state"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/core/vm"
@@ -437,9 +438,7 @@ func (bc *BlockChain) SetHead(head uint64) error {
}
currentBlock := bc.CurrentBlock()
currentFastBlock := bc.CurrentFastBlock()
- if err := WriteHeadBlockHash(bc.db, currentBlock.Hash()); err != nil {
- log.Crit("Failed to reset head full block", "err", err)
- }
+ rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
if err := WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()); err != nil {
log.Crit("Failed to reset head fast block", "err", err)
}
@@ -586,9 +585,7 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
log.Crit("Failed to write genesis block TD", "err", err)
}
- if err := WriteBlock(bc.db, genesis); err != nil {
- log.Crit("Failed to write genesis block", "err", err)
- }
+ rawdb.WriteBlock(bc.db, genesis)
bc.genesisBlock = genesis
bc.insert(bc.genesisBlock, false)
bc.currentBlock.Store(bc.genesisBlock)
@@ -685,17 +682,9 @@ func (bc *BlockChain) insert(block *types.Block, writeBlock bool) {
updateHeads := GetCanonicalHash(bc.db, block.NumberU64()) != block.Hash()
// Add the block to the canonical chain number scheme and mark as the head
- if err := WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()); err != nil {
- log.Crit("Failed to insert block number", "err", err)
- }
- if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil {
- log.Crit("Failed to insert head block hash", "err", err)
- }
- if writeBlock {
- if err := WriteBlock(bc.db, block); err != nil {
- log.Crit("Failed to insert block", "err", err)
- }
- }
+ rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
+ rawdb.WriteHeadBlockHash(bc.db, block.Hash())
+ rawdb.WriteBlock(bc.db, block)
bc.currentBlock.Store(block)
// save cache BlockSigners
@@ -1044,7 +1033,7 @@ func (bc *BlockChain) Rollback(chain []common.Hash) {
if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
bc.currentBlock.Store(newBlock)
- WriteHeadBlockHash(bc.db, newBlock.Hash())
+ rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
}
}
}
@@ -1134,9 +1123,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
return i, fmt.Errorf("failed to set receipts data: %v", err)
}
// Write all the data out into the database
- if err := WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil {
- return i, fmt.Errorf("failed to write block body: %v", err)
- }
+ rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
return i, fmt.Errorf("failed to write block receipts: %v", err)
}
@@ -1196,9 +1183,7 @@ func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (e
if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
return err
}
- if err := WriteBlock(bc.db, block); err != nil {
- return err
- }
+ rawdb.WriteBlock(bc.db, block)
return nil
}
@@ -1226,9 +1211,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
}
// Write other block data using a batch.
batch := bc.db.NewBatch()
- if err := WriteBlock(batch, block); err != nil {
- return NonStatTy, err
- }
+ rawdb.WriteBlock(batch, block)
root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
if err != nil {
return NonStatTy, err
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index c19a68dcc129..fab043c15608 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -130,7 +130,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
}
blockchain.mu.Lock()
WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTdByHash(block.ParentHash())))
- WriteBlock(blockchain.db, block)
+ rawdb.WriteBlock(blockchain.db, block)
statedb.Commit(true)
blockchain.mu.Unlock()
}
@@ -148,7 +148,7 @@ func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error
// Manually insert the header into the database, but don't reorganise (allows subsequent testing)
blockchain.mu.Lock()
WriteTd(blockchain.db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, blockchain.GetTdByHash(header.ParentHash)))
- WriteHeader(blockchain.db, header)
+ rawdb.WriteHeader(blockchain.db, header)
blockchain.mu.Unlock()
}
return nil
diff --git a/core/chain_indexer_test.go b/core/chain_indexer_test.go
index c042a8319da0..4d44f3018a6f 100644
--- a/core/chain_indexer_test.go
+++ b/core/chain_indexer_test.go
@@ -18,12 +18,13 @@ package core
import (
"fmt"
- "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"math/big"
"math/rand"
"testing"
"time"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
+
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/core/types"
)
@@ -94,8 +95,8 @@ func testChainIndexer(t *testing.T, count int) {
if number > 0 {
header.ParentHash = GetCanonicalHash(db, number-1)
}
- WriteHeader(db, header)
- WriteCanonicalHash(db, header.Hash(), number)
+ rawdb.WriteHeader(db, header)
+ rawdb.WriteCanonicalHash(db, header.Hash(), number)
}
// Start indexer with an already existing chain
for i := uint64(0); i <= 100; i++ {
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 348d68d1687a..7a4012945f7d 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -113,6 +113,15 @@ func (b *BlockGen) AddTxWithChain(bc *BlockChain, tx *types.Transaction) {
}
}
+// AddUncheckedTx forcefully adds a transaction to the block without any
+// validation.
+//
+// AddUncheckedTx will cause consensus failures when used during real
+// chain processing. This is best used in conjunction with raw block insertion.
+func (b *BlockGen) AddUncheckedTx(tx *types.Transaction) {
+ b.txs = append(b.txs, tx)
+}
+
// Number returns the block number of the block being generated.
func (b *BlockGen) Number() *big.Int {
return new(big.Int).Set(b.header.Number)
@@ -235,6 +244,19 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
return blocks, receipts
}
+// GenerateChainWithGenesis is a wrapper of GenerateChain which will initialize
+// genesis block to database first according to the provided genesis specification
+// then generate chain on top.
+func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts) {
+ db := rawdb.NewMemoryDatabase()
+ _, err := genesis.Commit(db)
+ if err != nil {
+ panic(err)
+ }
+ blocks, receipts := GenerateChain(genesis.Config, genesis.ToBlock(nil), engine, db, n, gen)
+ return db, blocks, receipts
+}
+
func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header {
var time *big.Int
if parent.Time() == nil {
diff --git a/core/database_util.go b/core/database_util.go
index 647d6afd3b59..9d48ffec6ce2 100644
--- a/core/database_util.go
+++ b/core/database_util.go
@@ -356,15 +356,6 @@ func GetBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash)
return db.Get(key)
}
-// WriteCanonicalHash stores the canonical hash for the given block number.
-func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) error {
- key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)
- if err := db.Put(key, hash.Bytes()); err != nil {
- log.Crit("Failed to store number to hash mapping", "err", err)
- }
- return nil
-}
-
// WriteHeadHeaderHash stores the head header's hash.
func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) error {
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
@@ -373,14 +364,6 @@ func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) error {
return nil
}
-// WriteHeadBlockHash stores the head block's hash.
-func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) error {
- if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
- log.Crit("Failed to store last block's hash", "err", err)
- }
- return nil
-}
-
// WriteHeadFastBlockHash stores the fast head block's hash.
func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) error {
if err := db.Put(headFastKey, hash.Bytes()); err != nil {
@@ -398,44 +381,6 @@ func WriteTrieSyncProgress(db ethdb.KeyValueWriter, count uint64) error {
return nil
}
-// WriteHeader serializes a block header into the database.
-func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) error {
- data, err := rlp.EncodeToBytes(header)
- if err != nil {
- return err
- }
- hash := header.Hash().Bytes()
- num := header.Number.Uint64()
- encNum := encodeBlockNumber(num)
- key := append(blockHashPrefix, hash...)
- if err := db.Put(key, encNum); err != nil {
- log.Crit("Failed to store hash to number mapping", "err", err)
- }
- key = append(append(headerPrefix, encNum...), hash...)
- if err := db.Put(key, data); err != nil {
- log.Crit("Failed to store header", "err", err)
- }
- return nil
-}
-
-// WriteBody serializes the body of a block into the database.
-func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) error {
- data, err := rlp.EncodeToBytes(body)
- if err != nil {
- return err
- }
- return WriteBodyRLP(db, hash, number, data)
-}
-
-// WriteBodyRLP writes a serialized body of a block into the database.
-func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) error {
- key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
- if err := db.Put(key, rlp); err != nil {
- log.Crit("Failed to store block body", "err", err)
- }
- return nil
-}
-
// WriteTd serializes the total difficulty of a block into the database.
func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) error {
data, err := rlp.EncodeToBytes(td)
@@ -449,19 +394,6 @@ func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.I
return nil
}
-// WriteBlock serializes a block into the database, header and body separately.
-func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) error {
- // Store the body first to retain database consistency
- if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil {
- return err
- }
- // Store the header too, signaling full block ownership
- if err := WriteHeader(db, block.Header()); err != nil {
- return err
- }
- return nil
-}
-
// WriteBlockReceipts stores all the transaction receipts belonging to a block
// as a single receipt slice. This is used during chain reorganisations for
// rescheduling dropped transactions.
diff --git a/core/database_util_test.go b/core/database_util_test.go
index ecd843a3e880..a0d5a9ec8371 100644
--- a/core/database_util_test.go
+++ b/core/database_util_test.go
@@ -38,9 +38,7 @@ func TestHeaderStorage(t *testing.T) {
t.Fatalf("Non existent header returned: %v", entry)
}
// Write and verify the header in the database
- if err := WriteHeader(db, header); err != nil {
- t.Fatalf("Failed to write header into database: %v", err)
- }
+ rawdb.WriteHeader(db, header)
if entry := GetHeader(db, header.Hash(), header.Number.Uint64()); entry == nil {
t.Fatalf("Stored header not found")
} else if entry.Hash() != header.Hash() {
@@ -78,9 +76,7 @@ func TestBodyStorage(t *testing.T) {
t.Fatalf("Non existent body returned: %v", entry)
}
// Write and verify the body in the database
- if err := WriteBody(db, hash, 0, body); err != nil {
- t.Fatalf("Failed to write body into database: %v", err)
- }
+ rawdb.WriteBody(db, hash, 0, body)
if entry := GetBody(db, hash, 0); entry == nil {
t.Fatalf("Stored body not found")
} else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
@@ -124,9 +120,7 @@ func TestBlockStorage(t *testing.T) {
t.Fatalf("Non existent body returned: %v", entry)
}
// Write and verify the block in the database
- if err := WriteBlock(db, block); err != nil {
- t.Fatalf("Failed to write block into database: %v", err)
- }
+ rawdb.WriteBlock(db, block)
if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry == nil {
t.Fatalf("Stored block not found")
} else if entry.Hash() != block.Hash() {
@@ -165,30 +159,22 @@ func TestPartialBlockStorage(t *testing.T) {
ReceiptHash: types.EmptyRootHash,
})
// Store a header and check that it's not recognized as a block
- if err := WriteHeader(db, block.Header()); err != nil {
- t.Fatalf("Failed to write header into database: %v", err)
- }
+ rawdb.WriteHeader(db, block.Header())
if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry != nil {
t.Fatalf("Non existent block returned: %v", entry)
}
DeleteHeader(db, block.Hash(), block.NumberU64())
// Store a body and check that it's not recognized as a block
- if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil {
- t.Fatalf("Failed to write body into database: %v", err)
- }
+ rawdb.WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry != nil {
t.Fatalf("Non existent block returned: %v", entry)
}
DeleteBody(db, block.Hash(), block.NumberU64())
// Store a header and a body separately and check reassembly
- if err := WriteHeader(db, block.Header()); err != nil {
- t.Fatalf("Failed to write header into database: %v", err)
- }
- if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil {
- t.Fatalf("Failed to write body into database: %v", err)
- }
+ rawdb.WriteHeader(db, block.Header())
+ rawdb.WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry == nil {
t.Fatalf("Stored block not found")
} else if entry.Hash() != block.Hash() {
@@ -231,9 +217,7 @@ func TestCanonicalMappingStorage(t *testing.T) {
t.Fatalf("Non existent canonical mapping returned: %v", entry)
}
// Write and verify the TD in the database
- if err := WriteCanonicalHash(db, hash, number); err != nil {
- t.Fatalf("Failed to write canonical mapping into database: %v", err)
- }
+ rawdb.WriteCanonicalHash(db, hash, number)
if entry := GetCanonicalHash(db, number); entry == (common.Hash{}) {
t.Fatalf("Stored canonical mapping not found")
} else if entry != hash {
@@ -268,9 +252,7 @@ func TestHeadStorage(t *testing.T) {
if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil {
t.Fatalf("Failed to write head header hash: %v", err)
}
- if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil {
- t.Fatalf("Failed to write head block hash: %v", err)
- }
+ rawdb.WriteHeadBlockHash(db, blockFull.Hash())
if err := WriteHeadFastBlockHash(db, blockFast.Hash()); err != nil {
t.Fatalf("Failed to write fast head block hash: %v", err)
}
@@ -304,9 +286,7 @@ func TestLookupStorage(t *testing.T) {
}
}
// Insert all the transactions into the database, and verify contents
- if err := WriteBlock(db, block); err != nil {
- t.Fatalf("failed to write block contents: %v", err)
- }
+ rawdb.WriteBlock(db, block)
if err := WriteTxLookupEntries(db, block); err != nil {
t.Fatalf("failed to write transactions: %v", err)
}
diff --git a/core/events.go b/core/events.go
index 60dc8d7ddd36..bf7e7027e5c9 100644
--- a/core/events.go
+++ b/core/events.go
@@ -30,11 +30,6 @@ type OrderTxPreEvent struct{ Tx *types.OrderTransaction }
// LendingTxPreEvent is posted when a order transaction enters the order transaction pool.
type LendingTxPreEvent struct{ Tx *types.LendingTransaction }
-// PendingLogsEvent is posted pre mining and notifies of pending logs.
-type PendingLogsEvent struct {
- Logs []*types.Log
-}
-
// PendingStateEvent is posted pre mining and notifies of pending state changes.
type PendingStateEvent struct{}
diff --git a/core/genesis.go b/core/genesis.go
index 205074b7141f..2156ddc7259d 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -280,18 +280,12 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
if err := WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty); err != nil {
return nil, err
}
- if err := WriteBlock(db, block); err != nil {
- return nil, err
- }
+ rawdb.WriteBlock(db, block)
if err := WriteBlockReceipts(db, block.Hash(), block.NumberU64(), nil); err != nil {
return nil, err
}
- if err := WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
- return nil, err
- }
- if err := WriteHeadBlockHash(db, block.Hash()); err != nil {
- return nil, err
- }
+ rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
+ rawdb.WriteHeadBlockHash(db, block.Hash())
if err := WriteHeadHeaderHash(db, block.Hash()); err != nil {
return nil, err
}
diff --git a/core/headerchain.go b/core/headerchain.go
index 0dbc47e1a845..c0870f49cbc7 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -28,6 +28,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/consensus"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/ethdb"
"github.com/XinFinOrg/XDPoSChain/log"
@@ -66,9 +67,10 @@ type HeaderChain struct {
}
// NewHeaderChain creates a new HeaderChain structure.
-// getValidator should return the parent's validator
-// procInterrupt points to the parent's interrupt semaphore
-// wg points to the parent's shutdown wait group
+//
+// getValidator should return the parent's validator
+// procInterrupt points to the parent's interrupt semaphore
+// wg points to the parent's shutdown wait group
func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
headerCache, _ := lru.New(headerCacheLimit)
tdCache, _ := lru.New(tdCacheLimit)
@@ -147,9 +149,7 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
if err := hc.WriteTd(hash, number, externTd); err != nil {
log.Crit("Failed to write header total difficulty", "err", err)
}
- if err := WriteHeader(hc.chainDb, header); err != nil {
- log.Crit("Failed to write header content", "err", err)
- }
+ rawdb.WriteHeader(hc.chainDb, header)
// If the total difficulty is higher than our known, add it to the canonical chain
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
@@ -169,16 +169,14 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
headHeader = hc.GetHeader(headHash, headNumber)
)
for GetCanonicalHash(hc.chainDb, headNumber) != headHash {
- WriteCanonicalHash(hc.chainDb, headHash, headNumber)
+ rawdb.WriteCanonicalHash(hc.chainDb, headHash, headNumber)
headHash = headHeader.ParentHash
headNumber = headHeader.Number.Uint64() - 1
headHeader = hc.GetHeader(headHash, headNumber)
}
// Extend the canonical chain with the new header
- if err := WriteCanonicalHash(hc.chainDb, hash, number); err != nil {
- log.Crit("Failed to insert header number", "err", err)
- }
+ rawdb.WriteCanonicalHash(hc.chainDb, hash, number)
if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil {
log.Crit("Failed to insert head header hash", "err", err)
}
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
new file mode 100644
index 000000000000..bb79f9b71a42
--- /dev/null
+++ b/core/rawdb/accessors_chain.go
@@ -0,0 +1,321 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+
+ "github.com/XinFinOrg/XDPoSChain/common"
+ "github.com/XinFinOrg/XDPoSChain/core/types"
+ "github.com/XinFinOrg/XDPoSChain/ethdb"
+ "github.com/XinFinOrg/XDPoSChain/log"
+ "github.com/XinFinOrg/XDPoSChain/params"
+ "github.com/XinFinOrg/XDPoSChain/rlp"
+)
+
+// WriteCanonicalHash stores the hash assigned to a canonical block number.
+func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+ if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
+ log.Crit("Failed to store number to hash mapping", "err", err)
+ }
+}
+
+// WriteHeaderNumber stores the hash->number mapping.
+func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+ key := headerNumberKey(hash)
+ enc := encodeBlockNumber(number)
+ if err := db.Put(key, enc); err != nil {
+ log.Crit("Failed to store hash to number mapping", "err", err)
+ }
+}
+
+// ReadHeaderNumber returns the header number assigned to a hash.
+func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
+ data, _ := db.Get(headerNumberKey(hash))
+ if len(data) != 8 {
+ return nil
+ }
+ number := binary.BigEndian.Uint64(data)
+ return &number
+}
+
+// WriteHeadBlockHash stores the head block's hash.
+func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
+ log.Crit("Failed to store last block's hash", "err", err)
+ }
+}
+
+// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
+func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
+ // First try to look up the data in ancient database. Extra hash
+ // comparison is necessary since ancient database only maintains
+ // the canonical data.
+ data, _ := db.Ancient(freezerBodiesTable, number)
+ if len(data) > 0 {
+ h, _ := db.Ancient(freezerHashTable, number)
+ if common.BytesToHash(h) == hash {
+ return data
+ }
+ }
+ // Then try to look up the data in leveldb.
+ data, _ = db.Get(blockBodyKey(number, hash))
+ if len(data) > 0 {
+ return data
+ }
+ // In the background freezer is moving data from leveldb to flatten files.
+ // So during the first check for ancient db, the data is not yet in there,
+ // but when we reach into leveldb, the data was already moved. That would
+ // result in a not found error.
+ data, _ = db.Ancient(freezerBodiesTable, number)
+ if len(data) > 0 {
+ h, _ := db.Ancient(freezerHashTable, number)
+ if common.BytesToHash(h) == hash {
+ return data
+ }
+ }
+ return nil // Can't find the data anywhere.
+}
+
+// WriteBodyRLP stores an RLP encoded block body into the database.
+func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
+ if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
+ log.Crit("Failed to store block body", "err", err)
+ }
+}
+
+// ReadBody retrieves the block body corresponding to the hash.
+func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
+ data := ReadBodyRLP(db, hash, number)
+ if len(data) == 0 {
+ return nil
+ }
+ body := new(types.Body)
+ if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
+ log.Error("Invalid block body RLP", "hash", hash, "err", err)
+ return nil
+ }
+ return body
+}
+
+// WriteBody stores a block body into the database.
+func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) {
+ data, err := rlp.EncodeToBytes(body)
+ if err != nil {
+ log.Crit("Failed to RLP encode body", "err", err)
+ }
+ WriteBodyRLP(db, hash, number, data)
+}
+
+// WriteHeader stores a block header into the database and also stores the hash-
+// to-number mapping.
+func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
+ var (
+ hash = header.Hash()
+ number = header.Number.Uint64()
+ )
+ // Write the hash -> number mapping
+ WriteHeaderNumber(db, hash, number)
+
+ // Write the encoded header
+ data, err := rlp.EncodeToBytes(header)
+ if err != nil {
+ log.Crit("Failed to RLP encode header", "err", err)
+ }
+ key := headerKey(number, hash)
+ if err := db.Put(key, data); err != nil {
+ log.Crit("Failed to store header", "err", err)
+ }
+}
+
+// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
+func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
+ // First try to look up the data in ancient database. Extra hash
+ // comparison is necessary since ancient database only maintains
+ // the canonical data.
+ data, _ := db.Ancient(freezerReceiptTable, number)
+ if len(data) > 0 {
+ h, _ := db.Ancient(freezerHashTable, number)
+ if common.BytesToHash(h) == hash {
+ return data
+ }
+ }
+ // Then try to look up the data in leveldb.
+ data, _ = db.Get(blockReceiptsKey(number, hash))
+ if len(data) > 0 {
+ return data
+ }
+ // In the background freezer is moving data from leveldb to flatten files.
+ // So during the first check for ancient db, the data is not yet in there,
+ // but when we reach into leveldb, the data was already moved. That would
+ // result in a not found error.
+ data, _ = db.Ancient(freezerReceiptTable, number)
+ if len(data) > 0 {
+ h, _ := db.Ancient(freezerHashTable, number)
+ if common.BytesToHash(h) == hash {
+ return data
+ }
+ }
+ return nil // Can't find the data anywhere.
+}
+
+// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
+// The receipt metadata fields are not guaranteed to be populated, so they
+// should not be used. Use ReadReceipts instead if the metadata is needed.
+func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts {
+ // Retrieve the flattened receipt slice
+ data := ReadReceiptsRLP(db, hash, number)
+ if len(data) == 0 {
+ return nil
+ }
+ // Convert the receipts from their storage form to their internal representation
+ storageReceipts := []*types.ReceiptForStorage{}
+ if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
+ log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
+ return nil
+ }
+ receipts := make(types.Receipts, len(storageReceipts))
+ for i, storageReceipt := range storageReceipts {
+ receipts[i] = (*types.Receipt)(storageReceipt)
+ }
+ return receipts
+}
+
+// ReadReceipts retrieves all the transaction receipts belonging to a block, including
+// its corresponding metadata fields. If it is unable to populate these metadata
+// fields then nil is returned.
+//
+// The current implementation populates these metadata fields by reading the receipts'
+// corresponding block body, so if the block body is not found it will return nil even
+// if the receipt itself is stored.
+func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts {
+ // We're deriving many fields from the block body, retrieve beside the receipt
+ receipts := ReadRawReceipts(db, hash, number)
+ if receipts == nil {
+ return nil
+ }
+ body := ReadBody(db, hash, number)
+ if body == nil {
+ log.Error("Missing body but have receipt", "hash", hash, "number", number)
+ return nil
+ }
+ if err := receipts.DeriveFields(config, hash, number, body.Transactions); err != nil {
+ log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
+ return nil
+ }
+ return receipts
+}
+
+// WriteReceipts stores all the transaction receipts belonging to a block.
+func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) {
+ // Convert the receipts into their storage form and serialize them
+ storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
+ for i, receipt := range receipts {
+ storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
+ }
+ bytes, err := rlp.EncodeToBytes(storageReceipts)
+ if err != nil {
+ log.Crit("Failed to encode block receipts", "err", err)
+ }
+ // Store the flattened receipt slice
+ if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
+ log.Crit("Failed to store block receipts", "err", err)
+ }
+}
+
+// WriteBlock serializes a block into the database, header and body separately.
+func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
+ WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
+ WriteHeader(db, block.Header())
+}
+
+// storedReceiptRLP is the storage encoding of a receipt.
+// Re-definition in core/types/receipt.go.
+type storedReceiptRLP struct {
+ PostStateOrStatus []byte
+ CumulativeGasUsed uint64
+ Bloom types.Bloom
+ TxHash common.Hash
+ ContractAddress common.Address
+ Logs []*types.LogForStorage
+ GasUsed uint64
+}
+
+// receiptLogs is a barebone version of ReceiptForStorage which only keeps
+// the list of logs. When decoding a stored receipt into this object we
+// avoid creating the bloom filter.
+type receiptLogs struct {
+ Logs []*types.Log
+}
+
+// DecodeRLP implements rlp.Decoder.
+func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error {
+ var stored storedReceiptRLP
+ if err := s.Decode(&stored); err != nil {
+ return err
+ }
+ r.Logs = make([]*types.Log, len(stored.Logs))
+ for i, log := range stored.Logs {
+ r.Logs[i] = (*types.Log)(log)
+ }
+ return nil
+}
+
+// deriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc.
+func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, txs types.Transactions) error {
+ logIndex := uint(0)
+ if len(txs) != len(receipts) {
+ return errors.New("transaction and receipt count mismatch")
+ }
+ for i := 0; i < len(receipts); i++ {
+ txHash := txs[i].Hash()
+ // The derived log fields can simply be set from the block and transaction
+ for j := 0; j < len(receipts[i].Logs); j++ {
+ receipts[i].Logs[j].BlockNumber = number
+ receipts[i].Logs[j].BlockHash = hash
+ receipts[i].Logs[j].TxHash = txHash
+ receipts[i].Logs[j].TxIndex = uint(i)
+ receipts[i].Logs[j].Index = logIndex
+ logIndex++
+ }
+ }
+ return nil
+}
+
+// ReadLogs retrieves the logs for all transactions in a block. In case
+// the receipts are not found, nil is returned.
+// Note: ReadLogs does not derive unstored log fields.
+func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log {
+ // Retrieve the flattened receipt slice
+ data := ReadReceiptsRLP(db, hash, number)
+ if len(data) == 0 {
+ return nil
+ }
+ receipts := []*receiptLogs{}
+ if err := rlp.DecodeBytes(data, &receipts); err != nil {
+ log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
+ return nil
+ }
+
+ logs := make([][]*types.Log, len(receipts))
+ for i, receipt := range receipts {
+ logs[i] = receipt.Logs
+ }
+ return logs
+}
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
new file mode 100644
index 000000000000..a514c5857fdc
--- /dev/null
+++ b/core/rawdb/accessors_chain_test.go
@@ -0,0 +1,239 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "bytes"
+ "encoding/hex"
+ "io/ioutil"
+ "math/big"
+ "testing"
+
+ "github.com/XinFinOrg/XDPoSChain/common"
+ "github.com/XinFinOrg/XDPoSChain/core/types"
+ "github.com/XinFinOrg/XDPoSChain/params"
+ "github.com/XinFinOrg/XDPoSChain/rlp"
+)
+
+type fullLogRLP struct {
+ Address common.Address
+ Topics []common.Hash
+ Data []byte
+ BlockNumber uint64
+ TxHash common.Hash
+ TxIndex uint
+ BlockHash common.Hash
+ Index uint
+}
+
+func newFullLogRLP(l *types.Log) *fullLogRLP {
+ return &fullLogRLP{
+ Address: l.Address,
+ Topics: l.Topics,
+ Data: l.Data,
+ BlockNumber: l.BlockNumber,
+ TxHash: l.TxHash,
+ TxIndex: l.TxIndex,
+ BlockHash: l.BlockHash,
+ Index: l.Index,
+ }
+}
+
+// Tests that logs associated with a single block can be retrieved.
+func TestReadLogs(t *testing.T) {
+ db := NewMemoryDatabase()
+
+ // Create a live block since we need metadata to reconstruct the receipt
+ tx1 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil)
+ tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil)
+
+ body := &types.Body{Transactions: types.Transactions{tx1, tx2}}
+
+ // Create the two receipts to manage afterwards
+ receipt1 := &types.Receipt{
+ Status: types.ReceiptStatusFailed,
+ CumulativeGasUsed: 1,
+ Logs: []*types.Log{
+ {Address: common.BytesToAddress([]byte{0x11})},
+ {Address: common.BytesToAddress([]byte{0x01, 0x11})},
+ },
+ TxHash: tx1.Hash(),
+ ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
+ GasUsed: 111111,
+ }
+ receipt1.Bloom = types.CreateBloom(types.Receipts{receipt1})
+
+ receipt2 := &types.Receipt{
+ PostState: common.Hash{2}.Bytes(),
+ CumulativeGasUsed: 2,
+ Logs: []*types.Log{
+ {Address: common.BytesToAddress([]byte{0x22})},
+ {Address: common.BytesToAddress([]byte{0x02, 0x22})},
+ },
+ TxHash: tx2.Hash(),
+ ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
+ GasUsed: 222222,
+ }
+ receipt2.Bloom = types.CreateBloom(types.Receipts{receipt2})
+ receipts := []*types.Receipt{receipt1, receipt2}
+
+ hash := common.BytesToHash([]byte{0x03, 0x14})
+ // Check that no receipt entries are in a pristine database
+ if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) != 0 {
+ t.Fatalf("non existent receipts returned: %v", rs)
+ }
+ // Insert the body that corresponds to the receipts
+ WriteBody(db, hash, 0, body)
+
+ // Insert the receipt slice into the database and check presence
+ WriteReceipts(db, hash, 0, receipts)
+
+ logs := ReadLogs(db, hash, 0)
+ if len(logs) == 0 {
+ t.Fatalf("no logs returned")
+ }
+ if have, want := len(logs), 2; have != want {
+ t.Fatalf("unexpected number of logs returned, have %d want %d", have, want)
+ }
+ if have, want := len(logs[0]), 2; have != want {
+ t.Fatalf("unexpected number of logs[0] returned, have %d want %d", have, want)
+ }
+ if have, want := len(logs[1]), 2; have != want {
+ t.Fatalf("unexpected number of logs[1] returned, have %d want %d", have, want)
+ }
+
+ for i, pr := range receipts {
+ for j, pl := range pr.Logs {
+ rlpHave, err := rlp.EncodeToBytes(newFullLogRLP(logs[i][j]))
+ if err != nil {
+ t.Fatal(err)
+ }
+ rlpWant, err := rlp.EncodeToBytes(newFullLogRLP(pl))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(rlpHave, rlpWant) {
+ t.Fatalf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant))
+ }
+ }
+ }
+}
+
+func TestDeriveLogFields(t *testing.T) {
+ // Create a few transactions to have receipts for
+ to2 := common.HexToAddress("0x2")
+ to3 := common.HexToAddress("0x3")
+ txs := types.Transactions{
+ types.NewTx(&types.LegacyTx{
+ Nonce: 1,
+ Value: big.NewInt(1),
+ Gas: 1,
+ GasPrice: big.NewInt(1),
+ }),
+ types.NewTx(&types.LegacyTx{
+ To: &to2,
+ Nonce: 2,
+ Value: big.NewInt(2),
+ Gas: 2,
+ GasPrice: big.NewInt(2),
+ }),
+ types.NewTx(&types.AccessListTx{
+ To: &to3,
+ Nonce: 3,
+ Value: big.NewInt(3),
+ Gas: 3,
+ GasPrice: big.NewInt(3),
+ }),
+ }
+ // Create the corresponding receipts
+ receipts := []*receiptLogs{
+ {
+ Logs: []*types.Log{
+ {Address: common.BytesToAddress([]byte{0x11})},
+ {Address: common.BytesToAddress([]byte{0x01, 0x11})},
+ },
+ },
+ {
+ Logs: []*types.Log{
+ {Address: common.BytesToAddress([]byte{0x22})},
+ {Address: common.BytesToAddress([]byte{0x02, 0x22})},
+ },
+ },
+ {
+ Logs: []*types.Log{
+ {Address: common.BytesToAddress([]byte{0x33})},
+ {Address: common.BytesToAddress([]byte{0x03, 0x33})},
+ },
+ },
+ }
+
+ // Derive log metadata fields
+ number := big.NewInt(1)
+ hash := common.BytesToHash([]byte{0x03, 0x14})
+ if err := deriveLogFields(receipts, hash, number.Uint64(), txs); err != nil {
+ t.Fatal(err)
+ }
+
+ // Iterate over all the computed fields and check that they're correct
+ logIndex := uint(0)
+ for i := range receipts {
+ for j := range receipts[i].Logs {
+ if receipts[i].Logs[j].BlockNumber != number.Uint64() {
+ t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, receipts[i].Logs[j].BlockNumber, number.Uint64())
+ }
+ if receipts[i].Logs[j].BlockHash != hash {
+ t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, receipts[i].Logs[j].BlockHash.String(), hash.String())
+ }
+ if receipts[i].Logs[j].TxHash != txs[i].Hash() {
+ t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String())
+ }
+ if receipts[i].Logs[j].TxIndex != uint(i) {
+ t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i)
+ }
+ if receipts[i].Logs[j].Index != logIndex {
+ t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, receipts[i].Logs[j].Index, logIndex)
+ }
+ logIndex++
+ }
+ }
+}
+
+func BenchmarkDecodeRLPLogs(b *testing.B) {
+ // Encoded receipts from block 0x14ee094309fbe8f70b65f45ebcc08fb33f126942d97464aad5eb91cfd1e2d269
+ buf, err := ioutil.ReadFile("testdata/stored_receipts.bin")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.Run("ReceiptForStorage", func(b *testing.B) {
+ b.ReportAllocs()
+ var r []*types.ReceiptForStorage
+ for i := 0; i < b.N; i++ {
+ if err := rlp.DecodeBytes(buf, &r); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ b.Run("rlpLogs", func(b *testing.B) {
+ b.ReportAllocs()
+ var r []*receiptLogs
+ for i := 0; i < b.N; i++ {
+ if err := rlp.DecodeBytes(buf, &r); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
new file mode 100644
index 000000000000..0a08eeed69ed
--- /dev/null
+++ b/core/rawdb/schema.go
@@ -0,0 +1,79 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package rawdb contains a collection of low level database accessors.
+package rawdb
+
+import (
+ "encoding/binary"
+
+ "github.com/XinFinOrg/XDPoSChain/common"
+)
+
+// The fields below define the low level database schema prefixing.
+var (
+ headBlockKey = []byte("LastBlock")
+ // Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
+ headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
+ headerHashSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + headerHashSuffix -> hash
+ headerNumberPrefix = []byte("H") // headerNumberPrefix + hash -> num (uint64 big endian)
+
+ blockBodyPrefix = []byte("b") // blockBodyPrefix + num (uint64 big endian) + hash -> block body
+ blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
+)
+
+const (
+ // freezerHashTable indicates the name of the freezer canonical hash table.
+ freezerHashTable = "hashes"
+
+ // freezerBodiesTable indicates the name of the freezer block body table.
+ freezerBodiesTable = "bodies"
+
+ // freezerReceiptTable indicates the name of the freezer receipts table.
+ freezerReceiptTable = "receipts"
+)
+
+// encodeBlockNumber encodes a block number as big endian uint64
+func encodeBlockNumber(number uint64) []byte {
+ enc := make([]byte, 8)
+ binary.BigEndian.PutUint64(enc, number)
+ return enc
+}
+
+// headerKey = headerPrefix + num (uint64 big endian) + hash
+func headerKey(number uint64, hash common.Hash) []byte {
+ return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
+}
+
+// headerHashKey = headerPrefix + num (uint64 big endian) + headerHashSuffix
+func headerHashKey(number uint64) []byte {
+ return append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...)
+}
+
+// headerNumberKey = headerNumberPrefix + hash
+func headerNumberKey(hash common.Hash) []byte {
+ return append(headerNumberPrefix, hash.Bytes()...)
+}
+
+// blockBodyKey = blockBodyPrefix + num (uint64 big endian) + hash
+func blockBodyKey(number uint64, hash common.Hash) []byte {
+ return append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
+}
+
+// blockReceiptsKey = blockReceiptsPrefix + num (uint64 big endian) + hash
+func blockReceiptsKey(number uint64, hash common.Hash) []byte {
+ return append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
+}
diff --git a/core/types/receipt.go b/core/types/receipt.go
index 9600ad1d794e..76507b521064 100644
--- a/core/types/receipt.go
+++ b/core/types/receipt.go
@@ -26,6 +26,8 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/common/hexutil"
+ "github.com/XinFinOrg/XDPoSChain/crypto"
+ "github.com/XinFinOrg/XDPoSChain/params"
"github.com/XinFinOrg/XDPoSChain/rlp"
)
@@ -328,3 +330,47 @@ func (r Receipts) GetRlp(i int) []byte {
}
return bytes
}
+
+// DeriveFields fills the receipts with their computed fields based on consensus
+// data and contextual infos like containing block and transactions.
+func (r Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, txs Transactions) error {
+ signer := MakeSigner(config, new(big.Int).SetUint64(number))
+
+ logIndex := uint(0)
+ if len(txs) != len(r) {
+ return errors.New("transaction and receipt count mismatch")
+ }
+ for i := 0; i < len(r); i++ {
+ // The transaction type and hash can be retrieved from the transaction itself
+ r[i].Type = txs[i].Type()
+ r[i].TxHash = txs[i].Hash()
+
+ // block location fields
+ r[i].BlockHash = hash
+ r[i].BlockNumber = new(big.Int).SetUint64(number)
+ r[i].TransactionIndex = uint(i)
+
+ // The contract address can be derived from the transaction itself
+ if txs[i].To() == nil {
+ // Deriving the signer is expensive, only do if it's actually needed
+ from, _ := Sender(signer, txs[i])
+ r[i].ContractAddress = crypto.CreateAddress(from, txs[i].Nonce())
+ }
+ // The used gas can be calculated based on previous r
+ if i == 0 {
+ r[i].GasUsed = r[i].CumulativeGasUsed
+ } else {
+ r[i].GasUsed = r[i].CumulativeGasUsed - r[i-1].CumulativeGasUsed
+ }
+ // The derived log fields can simply be set from the block and transaction
+ for j := 0; j < len(r[i].Logs); j++ {
+ r[i].Logs[j].BlockNumber = number
+ r[i].Logs[j].BlockHash = hash
+ r[i].Logs[j].TxHash = r[i].TxHash
+ r[i].Logs[j].TxIndex = uint(i)
+ r[i].Logs[j].Index = logIndex
+ logIndex++
+ }
+ }
+ return nil
+}
diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go
index 82fec06c9667..1ca5a2864774 100644
--- a/core/types/receipt_test.go
+++ b/core/types/receipt_test.go
@@ -18,11 +18,14 @@ package types
import (
"bytes"
+ "math"
"math/big"
"reflect"
"testing"
"github.com/XinFinOrg/XDPoSChain/common"
+ "github.com/XinFinOrg/XDPoSChain/crypto"
+ "github.com/XinFinOrg/XDPoSChain/params"
"github.com/XinFinOrg/XDPoSChain/rlp"
)
@@ -141,6 +144,169 @@ func encodeAsV3StoredReceiptRLP(want *Receipt) ([]byte, error) {
return rlp.EncodeToBytes(stored)
}
+// Tests that receipt data can be correctly derived from the contextual infos
+func TestDeriveFields(t *testing.T) {
+ // Create a few transactions to have receipts for
+ to2 := common.HexToAddress("0x2")
+ to3 := common.HexToAddress("0x3")
+ txs := Transactions{
+ NewTx(&LegacyTx{
+ Nonce: 1,
+ Value: big.NewInt(1),
+ Gas: 1,
+ GasPrice: big.NewInt(1),
+ }),
+ NewTx(&LegacyTx{
+ To: &to2,
+ Nonce: 2,
+ Value: big.NewInt(2),
+ Gas: 2,
+ GasPrice: big.NewInt(2),
+ }),
+ NewTx(&AccessListTx{
+ To: &to3,
+ Nonce: 3,
+ Value: big.NewInt(3),
+ Gas: 3,
+ GasPrice: big.NewInt(3),
+ }),
+ }
+ // Create the corresponding receipts
+ receipts := Receipts{
+ &Receipt{
+ Status: ReceiptStatusFailed,
+ CumulativeGasUsed: 1,
+ Logs: []*Log{
+ {Address: common.BytesToAddress([]byte{0x11})},
+ {Address: common.BytesToAddress([]byte{0x01, 0x11})},
+ },
+ TxHash: txs[0].Hash(),
+ ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
+ GasUsed: 1,
+ },
+ &Receipt{
+ PostState: common.Hash{2}.Bytes(),
+ CumulativeGasUsed: 3,
+ Logs: []*Log{
+ {Address: common.BytesToAddress([]byte{0x22})},
+ {Address: common.BytesToAddress([]byte{0x02, 0x22})},
+ },
+ TxHash: txs[1].Hash(),
+ ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
+ GasUsed: 2,
+ },
+ &Receipt{
+ Type: AccessListTxType,
+ PostState: common.Hash{3}.Bytes(),
+ CumulativeGasUsed: 6,
+ Logs: []*Log{
+ {Address: common.BytesToAddress([]byte{0x33})},
+ {Address: common.BytesToAddress([]byte{0x03, 0x33})},
+ },
+ TxHash: txs[2].Hash(),
+ ContractAddress: common.BytesToAddress([]byte{0x03, 0x33, 0x33}),
+ GasUsed: 3,
+ },
+ }
+ // Clear all the computed fields and re-derive them
+ number := big.NewInt(1)
+ hash := common.BytesToHash([]byte{0x03, 0x14})
+
+ clearComputedFieldsOnReceipts(t, receipts)
+ if err := receipts.DeriveFields(params.TestChainConfig, hash, number.Uint64(), txs); err != nil {
+ t.Fatalf("DeriveFields(...) = %v, want ", err)
+ }
+ // Iterate over all the computed fields and check that they're correct
+ signer := MakeSigner(params.TestChainConfig, number)
+
+ logIndex := uint(0)
+ for i := range receipts {
+ if receipts[i].Type != txs[i].Type() {
+ t.Errorf("receipts[%d].Type = %d, want %d", i, receipts[i].Type, txs[i].Type())
+ }
+ if receipts[i].TxHash != txs[i].Hash() {
+ t.Errorf("receipts[%d].TxHash = %s, want %s", i, receipts[i].TxHash.String(), txs[i].Hash().String())
+ }
+ if receipts[i].BlockHash != hash {
+ t.Errorf("receipts[%d].BlockHash = %s, want %s", i, receipts[i].BlockHash.String(), hash.String())
+ }
+ if receipts[i].BlockNumber.Cmp(number) != 0 {
+ t.Errorf("receipts[%d].BlockNumber = %s, want %s", i, receipts[i].BlockNumber.String(), number.String())
+ }
+ if receipts[i].TransactionIndex != uint(i) {
+ t.Errorf("receipts[%d].TransactionIndex = %d, want %d", i, receipts[i].TransactionIndex, i)
+ }
+ if receipts[i].GasUsed != txs[i].Gas() {
+ t.Errorf("receipts[%d].GasUsed = %d, want %d", i, receipts[i].GasUsed, txs[i].Gas())
+ }
+ if txs[i].To() != nil && receipts[i].ContractAddress != (common.Address{}) {
+ t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, receipts[i].ContractAddress.String(), (common.Address{}).String())
+ }
+ from, _ := Sender(signer, txs[i])
+ contractAddress := crypto.CreateAddress(from, txs[i].Nonce())
+ if txs[i].To() == nil && receipts[i].ContractAddress != contractAddress {
+ t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, receipts[i].ContractAddress.String(), contractAddress.String())
+ }
+ for j := range receipts[i].Logs {
+ if receipts[i].Logs[j].BlockNumber != number.Uint64() {
+ t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, receipts[i].Logs[j].BlockNumber, number.Uint64())
+ }
+ if receipts[i].Logs[j].BlockHash != hash {
+ t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, receipts[i].Logs[j].BlockHash.String(), hash.String())
+ }
+ if receipts[i].Logs[j].TxHash != txs[i].Hash() {
+ t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String())
+ }
+ if receipts[i].Logs[j].TxIndex != uint(i) {
+ t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i)
+ }
+ if receipts[i].Logs[j].Index != logIndex {
+ t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, receipts[i].Logs[j].Index, logIndex)
+ }
+ logIndex++
+ }
+ }
+}
+
+func clearComputedFieldsOnReceipts(t *testing.T, receipts Receipts) {
+ t.Helper()
+
+ for _, receipt := range receipts {
+ clearComputedFieldsOnReceipt(t, receipt)
+ }
+}
+
+func clearComputedFieldsOnReceipt(t *testing.T, receipt *Receipt) {
+ t.Helper()
+
+ receipt.TxHash = common.Hash{}
+ receipt.BlockHash = common.Hash{}
+ receipt.BlockNumber = big.NewInt(math.MaxUint32)
+ receipt.TransactionIndex = math.MaxUint32
+ receipt.ContractAddress = common.Address{}
+ receipt.GasUsed = 0
+
+ clearComputedFieldsOnLogs(t, receipt.Logs)
+}
+
+func clearComputedFieldsOnLogs(t *testing.T, logs []*Log) {
+ t.Helper()
+
+ for _, log := range logs {
+ clearComputedFieldsOnLog(t, log)
+ }
+}
+
+func clearComputedFieldsOnLog(t *testing.T, log *Log) {
+ t.Helper()
+
+ log.BlockNumber = math.MaxUint32
+ log.BlockHash = common.Hash{}
+ log.TxHash = common.Hash{}
+ log.TxIndex = math.MaxUint32
+ log.Index = math.MaxUint32
+}
+
// TestTypedReceiptEncodingDecoding reproduces a flaw that existed in the receipt
// rlp decoder, which failed due to a shadowing error.
func TestTypedReceiptEncodingDecoding(t *testing.T) {
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 1a9ebc417091..f9a585ba5e7f 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -39,6 +39,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/contracts"
"github.com/XinFinOrg/XDPoSChain/core"
"github.com/XinFinOrg/XDPoSChain/core/bloombits"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/state"
stateDatabase "github.com/XinFinOrg/XDPoSChain/core/state"
"github.com/XinFinOrg/XDPoSChain/core/types"
@@ -158,6 +159,17 @@ func (b *EthApiBackend) BlockByHash(ctx context.Context, hash common.Hash) (*typ
return b.eth.blockchain.GetBlockByHash(hash), nil
}
+// GetBody returns body of a block. It does not resolve special block numbers.
+func (b *EthApiBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
+ if number < 0 || hash == (common.Hash{}) {
+ return nil, errors.New("invalid arguments; expect hash and no special block numbers")
+ }
+ if body := b.eth.blockchain.GetBody(hash); body != nil {
+ return body, nil
+ }
+ return nil, errors.New("block body not found")
+}
+
func (b *EthApiBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) {
if blockNr, ok := blockNrOrHash.Number(); ok {
return b.BlockByNumber(ctx, blockNr)
@@ -179,6 +191,10 @@ func (b *EthApiBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash r
return nil, errors.New("invalid arguments; neither block nor hash specified")
}
+func (b *EthApiBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
+ return b.eth.miner.PendingBlockAndReceipts()
+}
+
func (b *EthApiBackend) StateAndHeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*state.StateDB, *types.Header, error) {
// Pending state is only known by the miner
if blockNr == rpc.PendingBlockNumber {
@@ -228,16 +244,8 @@ func (b *EthApiBackend) GetReceipts(ctx context.Context, blockHash common.Hash)
return core.GetBlockReceipts(b.eth.chainDb, blockHash, core.GetBlockNumber(b.eth.chainDb, blockHash)), nil
}
-func (b *EthApiBackend) GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) {
- receipts := core.GetBlockReceipts(b.eth.chainDb, blockHash, core.GetBlockNumber(b.eth.chainDb, blockHash))
- if receipts == nil {
- return nil, nil
- }
- logs := make([][]*types.Log, len(receipts))
- for i, receipt := range receipts {
- logs[i] = receipt.Logs
- }
- return logs, nil
+func (b *EthApiBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
+ return rawdb.ReadLogs(b.eth.chainDb, hash, number), nil
}
func (b *EthApiBackend) GetTd(blockHash common.Hash) *big.Int {
@@ -258,6 +266,10 @@ func (b *EthApiBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEven
return b.eth.BlockChain().SubscribeRemovedLogsEvent(ch)
}
+func (b *EthApiBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
+ return b.eth.miner.SubscribePendingLogs(ch)
+}
+
func (b *EthApiBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
return b.eth.BlockChain().SubscribeChainEvent(ch)
}
diff --git a/eth/backend.go b/eth/backend.go
index f9063d5127e2..2b8cc288b39a 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -405,7 +405,7 @@ func (s *Ethereum) APIs() []rpc.API {
}, {
Namespace: "eth",
Version: "1.0",
- Service: filters.NewPublicFilterAPI(s.ApiBackend, false),
+ Service: filters.NewFilterAPI(filters.NewFilterSystem(s.ApiBackend, filters.Config{LogCacheSize: s.config.FilterLogCacheSize}), false),
Public: true,
}, {
Namespace: "admin",
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index b246a9b332a6..73cc4e7cddbe 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -60,12 +60,13 @@ var Defaults = Config{
DatasetsInMem: 1,
DatasetsOnDisk: 2,
},
- NetworkId: 88,
- LightPeers: 100,
- DatabaseCache: 768,
- TrieCache: 256,
- TrieTimeout: 5 * time.Minute,
- GasPrice: big.NewInt(0.25 * params.Shannon),
+ NetworkId: 88,
+ LightPeers: 100,
+ DatabaseCache: 768,
+ TrieCache: 256,
+ TrieTimeout: 5 * time.Minute,
+ FilterLogCacheSize: 32,
+ GasPrice: big.NewInt(0.25 * params.Shannon),
TxPool: core.DefaultTxPoolConfig,
RPCGasCap: 25000000,
@@ -111,6 +112,9 @@ type Config struct {
TrieCache int
TrieTimeout time.Duration
+ // This is the number of blocks for which logs will be cached in the filter system.
+ FilterLogCacheSize int
+
// Mining-related options
Etherbase common.Address `toml:",omitempty"`
MinerThreads int `toml:",omitempty"`
diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go
index d24b3251c05b..6b27542f1933 100644
--- a/eth/ethconfig/gen_config.go
+++ b/eth/ethconfig/gen_config.go
@@ -31,6 +31,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
MinerThreads int `toml:",omitempty"`
ExtraData []byte `toml:",omitempty"`
GasPrice *big.Int
+ FilterLogCacheSize int
Ethash ethash.Config
TxPool core.TxPoolConfig
GPO gasprice.Config
@@ -55,6 +56,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.MinerThreads = c.MinerThreads
enc.ExtraData = c.ExtraData
enc.GasPrice = c.GasPrice
+ enc.FilterLogCacheSize = c.FilterLogCacheSize
enc.Ethash = c.Ethash
enc.TxPool = c.TxPool
enc.GPO = c.GPO
@@ -83,6 +85,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
MinerThreads *int `toml:",omitempty"`
ExtraData []byte `toml:",omitempty"`
GasPrice *big.Int
+ FilterLogCacheSize *int
Ethash *ethash.Config
TxPool *core.TxPoolConfig
GPO *gasprice.Config
@@ -140,6 +143,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.GasPrice != nil {
c.GasPrice = dec.GasPrice
}
+ if dec.FilterLogCacheSize != nil {
+ c.FilterLogCacheSize = *dec.FilterLogCacheSize
+ }
if dec.Ethash != nil {
c.Ethash = *dec.Ethash
}
diff --git a/eth/filters/api.go b/eth/filters/api.go
index 04256a109620..7762ee199286 100644
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -29,8 +29,8 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/common/hexutil"
"github.com/XinFinOrg/XDPoSChain/core/types"
- "github.com/XinFinOrg/XDPoSChain/ethdb"
- "github.com/XinFinOrg/XDPoSChain/event"
+
+ // "github.com/XinFinOrg/XDPoSChain/ethdb"
"github.com/XinFinOrg/XDPoSChain/rpc"
)
@@ -41,10 +41,6 @@ var (
// The maximum number of topic criteria allowed, vm.LOG4 - vm.LOG0
const maxTopics = 4
-var (
- deadline = 5 * time.Minute // consider a filter inactive if it has not been polled for within deadline
-)
-
// filter is a helper struct that holds meta information over the filter type
// and associated subscription in the event system.
type filter struct {
@@ -56,49 +52,55 @@ type filter struct {
s *Subscription // associated subscription in event system
}
-// PublicFilterAPI offers support to create and manage filters. This will allow external clients to retrieve various
+// FilterAPI offers support to create and manage filters. This will allow external clients to retrieve various
// information related to the Ethereum protocol such als blocks, transactions and logs.
-type PublicFilterAPI struct {
- backend Backend
- mux *event.TypeMux
- quit chan struct{}
- chainDb ethdb.Database
+type FilterAPI struct {
+ sys *FilterSystem
events *EventSystem
filtersMu sync.Mutex
filters map[rpc.ID]*filter
+ timeout time.Duration
}
-// NewPublicFilterAPI returns a new PublicFilterAPI instance.
-func NewPublicFilterAPI(backend Backend, lightMode bool) *PublicFilterAPI {
- api := &PublicFilterAPI{
- backend: backend,
- mux: backend.EventMux(),
- chainDb: backend.ChainDb(),
- events: NewEventSystem(backend.EventMux(), backend, lightMode),
+// NewFilterAPI returns a new FilterAPI instance.
+func NewFilterAPI(system *FilterSystem, lightMode bool) *FilterAPI {
+ api := &FilterAPI{
+ sys: system,
+ events: NewEventSystem(system, lightMode),
filters: make(map[rpc.ID]*filter),
+ timeout: system.cfg.Timeout,
}
- go api.timeoutLoop()
+ go api.timeoutLoop(system.cfg.Timeout)
return api
}
// timeoutLoop runs every 5 minutes and deletes filters that have not been recently used.
// Tt is started when the api is created.
-func (api *PublicFilterAPI) timeoutLoop() {
- ticker := time.NewTicker(5 * time.Minute)
+func (api *FilterAPI) timeoutLoop(timeout time.Duration) {
+ var toUninstall []*Subscription
+ ticker := time.NewTicker(timeout)
for {
<-ticker.C
api.filtersMu.Lock()
for id, f := range api.filters {
select {
case <-f.deadline.C:
- f.s.Unsubscribe()
+ toUninstall = append(toUninstall, f.s)
delete(api.filters, id)
default:
continue
}
}
api.filtersMu.Unlock()
+
+ // Unsubscribes are processed outside the lock to avoid the following scenario:
+ // event loop attempts broadcasting events to still active filters while
+ // Unsubscribe is waiting for it to process the uninstall request.
+ for _, s := range toUninstall {
+ s.Unsubscribe()
+ }
+ toUninstall = nil
}
}
@@ -109,14 +111,14 @@ func (api *PublicFilterAPI) timeoutLoop() {
// `eth_getFilterChanges` polling method that is also used for log filters.
//
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newpendingtransactionfilter
-func (api *PublicFilterAPI) NewPendingTransactionFilter() rpc.ID {
+func (api *FilterAPI) NewPendingTransactionFilter() rpc.ID {
var (
pendingTxs = make(chan []common.Hash)
pendingTxSub = api.events.SubscribePendingTxs(pendingTxs)
)
api.filtersMu.Lock()
- api.filters[pendingTxSub.ID] = &filter{typ: PendingTransactionsSubscription, deadline: time.NewTimer(deadline), hashes: make([]common.Hash, 0), s: pendingTxSub}
+ api.filters[pendingTxSub.ID] = &filter{typ: PendingTransactionsSubscription, deadline: time.NewTimer(api.timeout), hashes: make([]common.Hash, 0), s: pendingTxSub}
api.filtersMu.Unlock()
go func() {
@@ -142,7 +144,7 @@ func (api *PublicFilterAPI) NewPendingTransactionFilter() rpc.ID {
// NewPendingTransactions creates a subscription that is triggered each time a transaction
// enters the transaction pool and was signed from one of the transactions this nodes manages.
-func (api *PublicFilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Subscription, error) {
+func (api *FilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Subscription, error) {
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
@@ -179,14 +181,14 @@ func (api *PublicFilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Su
// It is part of the filter package since polling goes with eth_getFilterChanges.
//
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newblockfilter
-func (api *PublicFilterAPI) NewBlockFilter() rpc.ID {
+func (api *FilterAPI) NewBlockFilter() rpc.ID {
var (
headers = make(chan *types.Header)
headerSub = api.events.SubscribeNewHeads(headers)
)
api.filtersMu.Lock()
- api.filters[headerSub.ID] = &filter{typ: BlocksSubscription, deadline: time.NewTimer(deadline), hashes: make([]common.Hash, 0), s: headerSub}
+ api.filters[headerSub.ID] = &filter{typ: BlocksSubscription, deadline: time.NewTimer(api.timeout), hashes: make([]common.Hash, 0), s: headerSub}
api.filtersMu.Unlock()
go func() {
@@ -211,7 +213,7 @@ func (api *PublicFilterAPI) NewBlockFilter() rpc.ID {
}
// NewHeads send a notification each time a new (header) block is appended to the chain.
-func (api *PublicFilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, error) {
+func (api *FilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, error) {
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
@@ -241,7 +243,7 @@ func (api *PublicFilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, er
}
// Logs creates a subscription that fires for all new log that match the given filter criteria.
-func (api *PublicFilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subscription, error) {
+func (api *FilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subscription, error) {
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
@@ -295,7 +297,7 @@ type FilterCriteria ethereum.FilterQuery
// In case "fromBlock" > "toBlock" an error is returned.
//
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newfilter
-func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
+func (api *FilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
logs := make(chan []*types.Log)
logsSub, err := api.events.SubscribeLogs(ethereum.FilterQuery(crit), logs)
if err != nil {
@@ -303,7 +305,7 @@ func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
}
api.filtersMu.Lock()
- api.filters[logsSub.ID] = &filter{typ: LogsSubscription, crit: crit, deadline: time.NewTimer(deadline), logs: make([]*types.Log, 0), s: logsSub}
+ api.filters[logsSub.ID] = &filter{typ: LogsSubscription, crit: crit, deadline: time.NewTimer(api.timeout), logs: make([]*types.Log, 0), s: logsSub}
api.filtersMu.Unlock()
go func() {
@@ -330,7 +332,7 @@ func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
// GetLogs returns logs matching the given argument that are stored within the state.
//
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs
-func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) {
+func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) {
if len(crit.Topics) > maxTopics {
return nil, errExceedMaxTopics
}
@@ -338,7 +340,7 @@ func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([
var filter *Filter
if crit.BlockHash != nil {
// Block filter requested, construct a single-shot filter
- filter = NewBlockFilter(api.backend, *crit.BlockHash, crit.Addresses, crit.Topics)
+ filter = api.sys.NewBlockFilter(*crit.BlockHash, crit.Addresses, crit.Topics)
} else {
// Convert the RPC block numbers into internal representations
begin := rpc.LatestBlockNumber.Int64()
@@ -350,7 +352,7 @@ func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([
end = crit.ToBlock.Int64()
}
// Construct the range filter
- filter = NewRangeFilter(api.backend, begin, end, crit.Addresses, crit.Topics)
+ filter = api.sys.NewRangeFilter(begin, end, crit.Addresses, crit.Topics)
}
// Run the filter and return all the logs
logs, err := filter.Logs(ctx)
@@ -363,7 +365,7 @@ func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([
// UninstallFilter removes the filter with the given filter id.
//
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_uninstallfilter
-func (api *PublicFilterAPI) UninstallFilter(id rpc.ID) bool {
+func (api *FilterAPI) UninstallFilter(id rpc.ID) bool {
api.filtersMu.Lock()
f, found := api.filters[id]
if found {
@@ -381,7 +383,7 @@ func (api *PublicFilterAPI) UninstallFilter(id rpc.ID) bool {
// If the filter could not be found an empty array of logs is returned.
//
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getfilterlogs
-func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Log, error) {
+func (api *FilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Log, error) {
api.filtersMu.Lock()
f, found := api.filters[id]
api.filtersMu.Unlock()
@@ -393,7 +395,7 @@ func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*ty
var filter *Filter
if f.crit.BlockHash != nil {
// Block filter requested, construct a single-shot filter
- filter = NewBlockFilter(api.backend, *f.crit.BlockHash, f.crit.Addresses, f.crit.Topics)
+ filter = api.sys.NewBlockFilter(*f.crit.BlockHash, f.crit.Addresses, f.crit.Topics)
} else {
// Convert the RPC block numbers into internal representations
begin := rpc.LatestBlockNumber.Int64()
@@ -405,7 +407,7 @@ func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*ty
end = f.crit.ToBlock.Int64()
}
// Construct the range filter
- filter = NewRangeFilter(api.backend, begin, end, f.crit.Addresses, f.crit.Topics)
+ filter = api.sys.NewRangeFilter(begin, end, f.crit.Addresses, f.crit.Topics)
}
// Run the filter and return all the logs
logs, err := filter.Logs(ctx)
@@ -422,7 +424,7 @@ func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*ty
// (pending)Log filters return []Log.
//
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getfilterchanges
-func (api *PublicFilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) {
+func (api *FilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) {
api.filtersMu.Lock()
defer api.filtersMu.Unlock()
@@ -432,14 +434,14 @@ func (api *PublicFilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) {
// receive timer value and reset timer
<-f.deadline.C
}
- f.deadline.Reset(deadline)
+ f.deadline.Reset(api.timeout)
switch f.typ {
case PendingTransactionsSubscription, BlocksSubscription:
hashes := f.hashes
f.hashes = nil
return returnHashes(hashes), nil
- case LogsSubscription:
+ case LogsSubscription, MinedAndPendingLogsSubscription:
logs := f.logs
f.logs = nil
return returnLogs(logs), nil
diff --git a/eth/filters/bench_test.go b/eth/filters/bench_test.go
index 98635b9de7b9..f465d67f7069 100644
--- a/eth/filters/bench_test.go
+++ b/eth/filters/bench_test.go
@@ -30,7 +30,6 @@ import (
"github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/ethdb"
- "github.com/XinFinOrg/XDPoSChain/event"
"github.com/XinFinOrg/XDPoSChain/node"
)
@@ -124,21 +123,25 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
b.Log("Running filter benchmarks...")
start = time.Now()
- mux := new(event.TypeMux)
- var backend *testBackend
+
+ var (
+ backend *testBackend
+ sys *FilterSystem
+ )
for i := 0; i < benchFilterCnt; i++ {
if i%20 == 0 {
db.Close()
db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "")
- backend = &testBackend{mux, db, cnt, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
+ backend = &testBackend{db: db, sections: cnt}
+ sys = NewFilterSystem(backend, Config{})
}
var addr common.Address
addr[0] = byte(i)
addr[1] = byte(i / 256)
- filter := NewRangeFilter(backend, 0, int64(cnt*sectionSize-1), []common.Address{addr}, nil)
+ filter := sys.NewRangeFilter(0, int64(cnt*sectionSize-1), []common.Address{addr}, nil)
if _, err := filter.Logs(context.Background()); err != nil {
- b.Error("filter.Find error:", err)
+ b.Error("filter.Logs error:", err)
}
}
d = time.Since(start)
@@ -188,11 +191,11 @@ func BenchmarkNoBloomBits(b *testing.B) {
clearBloomBits(db)
+ _, sys := newTestFilterSystem(b, db, Config{})
+
b.Log("Running filter benchmarks...")
start := time.Now()
- mux := new(event.TypeMux)
- backend := &testBackend{mux, db, 0, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
- filter := NewRangeFilter(backend, 0, int64(headNum), []common.Address{{}}, nil)
+ filter := sys.NewRangeFilter(0, int64(headNum), []common.Address{{}}, nil)
filter.Logs(context.Background())
d := time.Since(start)
b.Log("Finished running filter benchmarks")
diff --git a/eth/filters/filter.go b/eth/filters/filter.go
index 4d47bafc0fc7..3defe854f86f 100644
--- a/eth/filters/filter.go
+++ b/eth/filters/filter.go
@@ -22,36 +22,15 @@ import (
"math/big"
"github.com/XinFinOrg/XDPoSChain/common"
- "github.com/XinFinOrg/XDPoSChain/core"
"github.com/XinFinOrg/XDPoSChain/core/bloombits"
"github.com/XinFinOrg/XDPoSChain/core/types"
- "github.com/XinFinOrg/XDPoSChain/ethdb"
- "github.com/XinFinOrg/XDPoSChain/event"
"github.com/XinFinOrg/XDPoSChain/rpc"
)
-type Backend interface {
- ChainDb() ethdb.Database
- EventMux() *event.TypeMux
- HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)
- HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error)
- GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
- GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error)
-
- SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
- SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
- SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
- SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription
-
- BloomStatus() (uint64, uint64)
- ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)
-}
-
// Filter can be used to retrieve and filter logs.
type Filter struct {
- backend Backend
+ sys *FilterSystem
- db ethdb.Database
addresses []common.Address
topics [][]common.Hash
@@ -63,7 +42,7 @@ type Filter struct {
// NewRangeFilter creates a new filter which uses a bloom filter on blocks to
// figure out whether a particular block is interesting or not.
-func NewRangeFilter(backend Backend, begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter {
+func (sys *FilterSystem) NewRangeFilter(begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter {
// Flatten the address and topic filter clauses into a single bloombits filter
// system. Since the bloombits are not positional, nil topics are permitted,
// which get flattened into a nil byte slice.
@@ -82,10 +61,10 @@ func NewRangeFilter(backend Backend, begin, end int64, addresses []common.Addres
}
filters = append(filters, filter)
}
- size, _ := backend.BloomStatus()
+ size, _ := sys.backend.BloomStatus()
// Create a generic filter and convert it into a range filter
- filter := newFilter(backend, addresses, topics)
+ filter := newFilter(sys, addresses, topics)
filter.matcher = bloombits.NewMatcher(size, filters)
filter.begin = begin
@@ -96,21 +75,20 @@ func NewRangeFilter(backend Backend, begin, end int64, addresses []common.Addres
// NewBlockFilter creates a new filter which directly inspects the contents of
// a block to figure out whether it is interesting or not.
-func NewBlockFilter(backend Backend, block common.Hash, addresses []common.Address, topics [][]common.Hash) *Filter {
+func (sys *FilterSystem) NewBlockFilter(block common.Hash, addresses []common.Address, topics [][]common.Hash) *Filter {
// Create a generic filter and convert it into a block filter
- filter := newFilter(backend, addresses, topics)
+ filter := newFilter(sys, addresses, topics)
filter.block = block
return filter
}
// newFilter creates a generic filter that can either filter based on a block hash,
// or based on range queries. The search criteria needs to be explicitly set.
-func newFilter(backend Backend, addresses []common.Address, topics [][]common.Hash) *Filter {
+func newFilter(sys *FilterSystem, addresses []common.Address, topics [][]common.Hash) *Filter {
return &Filter{
- backend: backend,
+ sys: sys,
addresses: addresses,
topics: topics,
- db: backend.ChainDb(),
}
}
@@ -119,7 +97,7 @@ func newFilter(backend Backend, addresses []common.Address, topics [][]common.Ha
func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
// If we're doing singleton block filtering, execute and return
if f.block != (common.Hash{}) {
- header, err := f.backend.HeaderByHash(ctx, f.block)
+ header, err := f.sys.backend.HeaderByHash(ctx, f.block)
if err != nil {
return nil, err
}
@@ -128,26 +106,35 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
}
return f.blockLogs(ctx, header)
}
+ // Short-cut if all we care about is pending logs
+ if f.begin == rpc.PendingBlockNumber.Int64() {
+ if f.end != rpc.PendingBlockNumber.Int64() {
+ return nil, errors.New("invalid block range")
+ }
+ return f.pendingLogs()
+ }
// Figure out the limits of the filter range
- header, _ := f.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
+ header, _ := f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
if header == nil {
return nil, nil
}
- head := header.Number.Uint64()
-
- if f.begin == -1 {
+ var (
+ head = header.Number.Uint64()
+ end = uint64(f.end)
+ pending = f.end == rpc.PendingBlockNumber.Int64()
+ )
+ if f.begin == rpc.LatestBlockNumber.Int64() {
f.begin = int64(head)
}
- end := uint64(f.end)
- if f.end == -1 {
+ if f.end == rpc.LatestBlockNumber.Int64() || f.end == rpc.PendingBlockNumber.Int64() {
end = head
}
// Gather all indexed logs, and finish with non indexed ones
var (
- logs []*types.Log
- err error
+ logs []*types.Log
+ err error
+ size, sections = f.sys.backend.BloomStatus()
)
- size, sections := f.backend.BloomStatus()
if indexed := sections * size; indexed > uint64(f.begin) {
if indexed > end {
logs, err = f.indexedLogs(ctx, end)
@@ -160,6 +147,13 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
}
rest, err := f.unindexedLogs(ctx, end)
logs = append(logs, rest...)
+ if pending {
+ pendingLogs, err := f.pendingLogs()
+ if err != nil {
+ return nil, err
+ }
+ logs = append(logs, pendingLogs...)
+ }
return logs, err
}
@@ -175,7 +169,7 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err
}
defer session.Close()
- f.backend.ServiceFilter(ctx, session)
+ f.sys.backend.ServiceFilter(ctx, session)
// Iterate over the matches until exhausted or context closed
var logs []*types.Log
@@ -194,11 +188,11 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err
f.begin = int64(number) + 1
// Retrieve the suggested block and pull any truly matching logs
- header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(number))
+ header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(number))
if header == nil || err != nil {
return logs, err
}
- found, err := f.checkMatches(ctx, header)
+ found, err := f.blockLogs(ctx, header)
if err != nil {
return logs, err
}
@@ -216,7 +210,7 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e
var logs []*types.Log
for ; f.begin <= int64(end); f.begin++ {
- header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin))
+ header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin))
if header == nil || err != nil {
return logs, err
}
@@ -230,45 +224,58 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e
}
// blockLogs returns the logs matching the filter criteria within a single block.
-func (f *Filter) blockLogs(ctx context.Context, header *types.Header) (logs []*types.Log, err error) {
+func (f *Filter) blockLogs(ctx context.Context, header *types.Header) ([]*types.Log, error) {
if bloomFilter(header.Bloom, f.addresses, f.topics) {
- found, err := f.checkMatches(ctx, header)
- if err != nil {
- return logs, err
- }
- logs = append(logs, found...)
+ return f.checkMatches(ctx, header)
}
- return logs, nil
+ return nil, nil
}
// checkMatches checks if the receipts belonging to the given header contain any log events that
// match the filter criteria. This function is called when the bloom filter signals a potential match.
-func (f *Filter) checkMatches(ctx context.Context, header *types.Header) (logs []*types.Log, err error) {
- // Get the logs of the block
- logsList, err := f.backend.GetLogs(ctx, header.Hash())
+// Matched logs have their transaction hash resolved from the block body when needed.
+func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*types.Log, error) {
+ hash := header.Hash()
+ // Logs in cache are partially filled with context data
+ // such as tx index, block hash, etc.
+ // Notably tx hash is NOT filled in because it needs
+ // access to block body data.
+ cached, err := f.sys.cachedLogElem(ctx, hash, header.Number.Uint64())
if err != nil {
return nil, err
}
- var unfiltered []*types.Log
- for _, logs := range logsList {
- unfiltered = append(unfiltered, logs...)
+ logs := filterLogs(cached.logs, nil, nil, f.addresses, f.topics)
+ if len(logs) == 0 {
+ return nil, nil
}
- logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
- if len(logs) > 0 {
- // We have matching logs, check if we need to resolve full logs via the light client
- if logs[0].TxHash == (common.Hash{}) {
- receipts, err := f.backend.GetReceipts(ctx, header.Hash())
- if err != nil {
- return nil, err
- }
- unfiltered = unfiltered[:0]
- for _, receipt := range receipts {
- unfiltered = append(unfiltered, receipt.Logs...)
- }
- logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
- }
+ // Most backends will deliver un-derived logs, but check nevertheless.
+ if len(logs) > 0 && logs[0].TxHash != (common.Hash{}) {
return logs, nil
}
+
+ body, err := f.sys.cachedGetBody(ctx, cached, hash, header.Number.Uint64())
+ if err != nil {
+ return nil, err
+ }
+ for i, log := range logs {
+ // Copy log not to modify cache elements
+ logcopy := *log
+ logcopy.TxHash = body.Transactions[logcopy.TxIndex].Hash()
+ logs[i] = &logcopy
+ }
+ return logs, nil
+}
+
+// pendingLogs returns the logs matching the filter criteria within the pending block.
+func (f *Filter) pendingLogs() ([]*types.Log, error) {
+ block, receipts := f.sys.backend.PendingBlockAndReceipts()
+ if bloomFilter(block.Bloom(), f.addresses, f.topics) {
+ var unfiltered []*types.Log
+ for _, r := range receipts {
+ unfiltered = append(unfiltered, r.Logs...)
+ }
+ return filterLogs(unfiltered, nil, nil, f.addresses, f.topics), nil
+ }
return nil, nil
}
diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go
index 7bf7db629711..ad592154efa9 100644
--- a/eth/filters/filter_system.go
+++ b/eth/filters/filter_system.go
@@ -21,18 +21,125 @@ package filters
import (
"context"
"errors"
+ "fmt"
"sync"
+ "sync/atomic"
"time"
ethereum "github.com/XinFinOrg/XDPoSChain"
"github.com/XinFinOrg/XDPoSChain/common"
+ "github.com/XinFinOrg/XDPoSChain/common/lru"
"github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/bloombits"
"github.com/XinFinOrg/XDPoSChain/core/types"
+ "github.com/XinFinOrg/XDPoSChain/ethdb"
"github.com/XinFinOrg/XDPoSChain/event"
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/rpc"
)
+// Config represents the configuration of the filter system.
+type Config struct {
+ LogCacheSize int // maximum number of cached blocks (default: 32)
+ Timeout time.Duration // how long filters stay active (default: 5min)
+}
+
+func (cfg Config) withDefaults() Config {
+ if cfg.Timeout == 0 {
+ cfg.Timeout = 5 * time.Minute
+ }
+ if cfg.LogCacheSize == 0 {
+ cfg.LogCacheSize = 32
+ }
+ return cfg
+}
+
+type Backend interface {
+ ChainDb() ethdb.Database
+ HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)
+ HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error)
+ GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error)
+ GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
+ GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error)
+ PendingBlockAndReceipts() (*types.Block, types.Receipts)
+
+ SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
+ SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
+ SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
+ SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription
+ SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription
+
+ BloomStatus() (uint64, uint64)
+ ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)
+}
+
+// FilterSystem holds resources shared by all filters.
+type FilterSystem struct {
+ backend Backend
+ logsCache *lru.Cache[common.Hash, *logCacheElem]
+ cfg *Config
+}
+
+// NewFilterSystem creates a filter system.
+func NewFilterSystem(backend Backend, config Config) *FilterSystem {
+ config = config.withDefaults()
+ return &FilterSystem{
+ backend: backend,
+ logsCache: lru.NewCache[common.Hash, *logCacheElem](config.LogCacheSize),
+ cfg: &config,
+ }
+}
+
+type logCacheElem struct {
+ logs []*types.Log
+ body atomic.Pointer[types.Body]
+}
+
+// cachedLogElem loads block logs from the backend and caches the result.
+func (sys *FilterSystem) cachedLogElem(ctx context.Context, blockHash common.Hash, number uint64) (*logCacheElem, error) {
+ cached, ok := sys.logsCache.Get(blockHash)
+ if ok {
+ return cached, nil
+ }
+
+ logs, err := sys.backend.GetLogs(ctx, blockHash, number)
+ if err != nil {
+ return nil, err
+ }
+ if logs == nil {
+ return nil, fmt.Errorf("failed to get logs for block #%d (0x%s)", number, blockHash.TerminalString())
+ }
+ // Database logs are un-derived.
+ // Fill in whatever we can (txHash is inaccessible at this point).
+ flattened := make([]*types.Log, 0)
+ var logIdx uint
+ for i, txLogs := range logs {
+ for _, log := range txLogs {
+ log.BlockHash = blockHash
+ log.BlockNumber = number
+ log.TxIndex = uint(i)
+ log.Index = logIdx
+ logIdx++
+ flattened = append(flattened, log)
+ }
+ }
+ elem := &logCacheElem{logs: flattened}
+ sys.logsCache.Add(blockHash, elem)
+ return elem, nil
+}
+
+func (sys *FilterSystem) cachedGetBody(ctx context.Context, elem *logCacheElem, hash common.Hash, number uint64) (*types.Body, error) {
+ if body := elem.body.Load(); body != nil {
+ return body, nil
+ }
+ body, err := sys.backend.GetBody(ctx, hash, rpc.BlockNumber(number))
+ if err != nil {
+ return nil, err
+ }
+ elem.body.Store(body)
+ return body, nil
+}
+
// Type determines the kind of filter and is used to put the filter in to
// the correct bucket when added.
type Type byte
@@ -67,10 +174,6 @@ const (
chainEvChanSize = 10
)
-var (
- ErrInvalidSubscriptionID = errors.New("invalid id")
-)
-
type subscription struct {
id rpc.ID
typ Type
@@ -86,25 +189,26 @@ type subscription struct {
// EventSystem creates subscriptions, processes events and broadcasts them to the
// subscription which match the subscription criteria.
type EventSystem struct {
- mux *event.TypeMux
backend Backend
+ sys *FilterSystem
lightMode bool
lastHead *types.Header
// Subscriptions
- txsSub event.Subscription // Subscription for new transaction event
- logsSub event.Subscription // Subscription for new log event
- rmLogsSub event.Subscription // Subscription for removed log event
- chainSub event.Subscription // Subscription for new chain event
- pendingLogSub *event.TypeMuxSubscription // Subscription for pending log event
+ txsSub event.Subscription // Subscription for new transaction event
+ logsSub event.Subscription // Subscription for new log event
+ rmLogsSub event.Subscription // Subscription for removed log event
+ pendingLogsSub event.Subscription // Subscription for pending log event
+ chainSub event.Subscription // Subscription for new chain event
// Channels
- install chan *subscription // install filter for event notification
- uninstall chan *subscription // remove filter for event notification
- txsCh chan core.NewTxsEvent // Channel to receive new transactions event
- logsCh chan []*types.Log // Channel to receive new log event
- rmLogsCh chan core.RemovedLogsEvent // Channel to receive removed log event
- chainCh chan core.ChainEvent // Channel to receive new chain event
+ install chan *subscription // install filter for event notification
+ uninstall chan *subscription // remove filter for event notification
+ txsCh chan core.NewTxsEvent // Channel to receive new transactions event
+ logsCh chan []*types.Log // Channel to receive new log event
+ pendingLogsCh chan []*types.Log // Channel to receive new pending log event
+ rmLogsCh chan core.RemovedLogsEvent // Channel to receive removed log event
+ chainCh chan core.ChainEvent // Channel to receive new chain event
}
// NewEventSystem creates a new manager that listens for event on the given mux,
@@ -113,17 +217,18 @@ type EventSystem struct {
//
// The returned manager has a loop that needs to be stopped with the Stop function
// or by stopping the given mux.
-func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventSystem {
+func NewEventSystem(sys *FilterSystem, lightMode bool) *EventSystem {
m := &EventSystem{
- mux: mux,
- backend: backend,
- lightMode: lightMode,
- install: make(chan *subscription),
- uninstall: make(chan *subscription),
- txsCh: make(chan core.NewTxsEvent, txChanSize),
- logsCh: make(chan []*types.Log, logsChanSize),
- rmLogsCh: make(chan core.RemovedLogsEvent, rmLogsChanSize),
- chainCh: make(chan core.ChainEvent, chainEvChanSize),
+ sys: sys,
+ backend: sys.backend,
+ lightMode: lightMode,
+ install: make(chan *subscription),
+ uninstall: make(chan *subscription),
+ txsCh: make(chan core.NewTxsEvent, txChanSize),
+ logsCh: make(chan []*types.Log, logsChanSize),
+ rmLogsCh: make(chan core.RemovedLogsEvent, rmLogsChanSize),
+ pendingLogsCh: make(chan []*types.Log, logsChanSize),
+ chainCh: make(chan core.ChainEvent, chainEvChanSize),
}
// Subscribe events
@@ -131,12 +236,10 @@ func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventS
m.logsSub = m.backend.SubscribeLogsEvent(m.logsCh)
m.rmLogsSub = m.backend.SubscribeRemovedLogsEvent(m.rmLogsCh)
m.chainSub = m.backend.SubscribeChainEvent(m.chainCh)
- // TODO(rjl493456442): use feed to subscribe pending log event
- m.pendingLogSub = m.mux.Subscribe(core.PendingLogsEvent{})
+ m.pendingLogsSub = m.backend.SubscribePendingLogsEvent(m.pendingLogsCh)
// Make sure none of the subscriptions are empty
- if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil ||
- m.pendingLogSub.Closed() {
+ if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil || m.pendingLogsSub == nil {
log.Crit("Subscribe for event system failed")
}
@@ -314,58 +417,67 @@ func (es *EventSystem) SubscribePendingTxs(hashes chan []common.Hash) *Subscript
type filterIndex map[Type]map[rpc.ID]*subscription
-// broadcast event to filters that match criteria.
-func (es *EventSystem) broadcast(filters filterIndex, ev interface{}) {
- if ev == nil {
+func (es *EventSystem) handleLogs(filters filterIndex, ev []*types.Log) {
+ if len(ev) == 0 {
return
}
+ for _, f := range filters[LogsSubscription] {
+ matchedLogs := filterLogs(ev, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
+ if len(matchedLogs) > 0 {
+ f.logs <- matchedLogs
+ }
+ }
+}
- switch e := ev.(type) {
- case []*types.Log:
- if len(e) > 0 {
- for _, f := range filters[LogsSubscription] {
- if matchedLogs := filterLogs(e, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 {
- f.logs <- matchedLogs
- }
- }
+func (es *EventSystem) handlePendingLogs(filters filterIndex, ev []*types.Log) {
+ if len(ev) == 0 {
+ return
+ }
+ for _, f := range filters[PendingLogsSubscription] {
+ matchedLogs := filterLogs(ev, nil, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
+ if len(matchedLogs) > 0 {
+ f.logs <- matchedLogs
}
- case core.RemovedLogsEvent:
- for _, f := range filters[LogsSubscription] {
- if matchedLogs := filterLogs(e.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 {
- f.logs <- matchedLogs
- }
+ }
+}
+
+func (es *EventSystem) handleRemovedLogs(filters filterIndex, ev core.RemovedLogsEvent) {
+ for _, f := range filters[LogsSubscription] {
+ matchedLogs := filterLogs(ev.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
+ if len(matchedLogs) > 0 {
+ f.logs <- matchedLogs
}
- case *event.TypeMuxEvent:
- if muxe, ok := e.Data.(core.PendingLogsEvent); ok {
- for _, f := range filters[PendingLogsSubscription] {
- if e.Time.After(f.created) {
- if matchedLogs := filterLogs(muxe.Logs, nil, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 {
- f.logs <- matchedLogs
- }
+ }
+}
+
+func (es *EventSystem) handleTxsEvent(filters filterIndex, ev core.NewTxsEvent) {
+ hashes := make([]common.Hash, 0, len(ev.Txs))
+ for _, tx := range ev.Txs {
+ hashes = append(hashes, tx.Hash())
+ }
+ for _, f := range filters[PendingTransactionsSubscription] {
+ f.hashes <- hashes
+ }
+}
+
+func (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent) {
+ for _, f := range filters[BlocksSubscription] {
+ f.headers <- ev.Block.Header()
+ }
+ if es.lightMode && len(filters[LogsSubscription]) > 0 {
+ es.lightFilterNewHead(ev.Block.Header(), func(header *types.Header, remove bool) {
+ for _, f := range filters[LogsSubscription] {
+ if f.logsCrit.FromBlock != nil && header.Number.Cmp(f.logsCrit.FromBlock) < 0 {
+ continue
}
- }
- }
- case core.NewTxsEvent:
- hashes := make([]common.Hash, 0, len(e.Txs))
- for _, tx := range e.Txs {
- hashes = append(hashes, tx.Hash())
- }
- for _, f := range filters[PendingTransactionsSubscription] {
- f.hashes <- hashes
- }
- case core.ChainEvent:
- for _, f := range filters[BlocksSubscription] {
- f.headers <- e.Block.Header()
- }
- if es.lightMode && len(filters[LogsSubscription]) > 0 {
- es.lightFilterNewHead(e.Block.Header(), func(header *types.Header, remove bool) {
- for _, f := range filters[LogsSubscription] {
- if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 {
- f.logs <- matchedLogs
- }
+ if f.logsCrit.ToBlock != nil && header.Number.Cmp(f.logsCrit.ToBlock) > 0 {
+ continue
}
- })
- }
+ if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 {
+ f.logs <- matchedLogs
+ }
+ }
+ })
}
}
@@ -404,52 +516,49 @@ func (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func
// filter logs of a single header in light client mode
func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*types.Log {
- if bloomFilter(header.Bloom, addresses, topics) {
- // Get the logs of the block
- ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
- defer cancel()
- logsList, err := es.backend.GetLogs(ctx, header.Hash())
- if err != nil {
- return nil
- }
- var unfiltered []*types.Log
- for _, logs := range logsList {
- for _, log := range logs {
- logcopy := *log
- logcopy.Removed = remove
- unfiltered = append(unfiltered, &logcopy)
- }
- }
- logs := filterLogs(unfiltered, nil, nil, addresses, topics)
- if len(logs) > 0 && logs[0].TxHash == (common.Hash{}) {
- // We have matching but non-derived logs
- receipts, err := es.backend.GetReceipts(ctx, header.Hash())
- if err != nil {
- return nil
- }
- unfiltered = unfiltered[:0]
- for _, receipt := range receipts {
- for _, log := range receipt.Logs {
- logcopy := *log
- logcopy.Removed = remove
- unfiltered = append(unfiltered, &logcopy)
- }
- }
- logs = filterLogs(unfiltered, nil, nil, addresses, topics)
- }
+ if !bloomFilter(header.Bloom, addresses, topics) {
+ return nil
+ }
+ // Get the logs of the block
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+ cached, err := es.sys.cachedLogElem(ctx, header.Hash(), header.Number.Uint64())
+ if err != nil {
+ return nil
+ }
+ unfiltered := append([]*types.Log{}, cached.logs...)
+ for i, log := range unfiltered {
+ // Don't modify in-cache elements
+ logcopy := *log
+ logcopy.Removed = remove
+ // Swap copy in-place
+ unfiltered[i] = &logcopy
+ }
+ logs := filterLogs(unfiltered, nil, nil, addresses, topics)
+ // Txhash is already resolved
+ if len(logs) > 0 && logs[0].TxHash != (common.Hash{}) {
return logs
}
- return nil
+ // Resolve txhash
+ body, err := es.sys.cachedGetBody(ctx, cached, header.Hash(), header.Number.Uint64())
+ if err != nil {
+ return nil
+ }
+ for _, log := range logs {
+ // logs are already copied, safe to modify
+ log.TxHash = body.Transactions[log.TxIndex].Hash()
+ }
+ return logs
}
// eventLoop (un)installs filters and processes mux events.
func (es *EventSystem) eventLoop() {
// Ensure all subscriptions get cleaned up
defer func() {
- es.pendingLogSub.Unsubscribe()
es.txsSub.Unsubscribe()
es.logsSub.Unsubscribe()
es.rmLogsSub.Unsubscribe()
+ es.pendingLogsSub.Unsubscribe()
es.chainSub.Unsubscribe()
}()
@@ -460,20 +569,16 @@ func (es *EventSystem) eventLoop() {
for {
select {
- // Handle subscribed events
case ev := <-es.txsCh:
- es.broadcast(index, ev)
+ es.handleTxsEvent(index, ev)
case ev := <-es.logsCh:
- es.broadcast(index, ev)
+ es.handleLogs(index, ev)
case ev := <-es.rmLogsCh:
- es.broadcast(index, ev)
+ es.handleRemovedLogs(index, ev)
+ case ev := <-es.pendingLogsCh:
+ es.handlePendingLogs(index, ev)
case ev := <-es.chainCh:
- es.broadcast(index, ev)
- case ev, active := <-es.pendingLogSub.Chan():
- if !active { // system stopped
- return
- }
- es.broadcast(index, ev)
+ es.handleChainEvent(index, ev)
case f := <-es.install:
if f.typ == MinedAndPendingLogsSubscription {
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index fb4f7e7b8bc2..2610376d3bf3 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -18,10 +18,12 @@ package filters
import (
"context"
+ "errors"
"fmt"
"math/big"
"math/rand"
"reflect"
+ "runtime"
"testing"
"time"
@@ -32,6 +34,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/core/bloombits"
"github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
+ "github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/ethdb"
"github.com/XinFinOrg/XDPoSChain/event"
"github.com/XinFinOrg/XDPoSChain/params"
@@ -39,23 +42,20 @@ import (
)
type testBackend struct {
- mux *event.TypeMux
- db ethdb.Database
- sections uint64
- txFeed *event.Feed
- rmLogsFeed *event.Feed
- logsFeed *event.Feed
- chainFeed *event.Feed
+ mux *event.TypeMux
+ db ethdb.Database
+ sections uint64
+ txFeed event.Feed
+ logsFeed event.Feed
+ rmLogsFeed event.Feed
+ pendingLogsFeed event.Feed
+ chainFeed event.Feed
}
func (b *testBackend) ChainDb() ethdb.Database {
return b.db
}
-func (b *testBackend) EventMux() *event.TypeMux {
- return b.mux
-}
-
func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
var hash common.Hash
var num uint64
@@ -74,22 +74,27 @@ func (b *testBackend) HeaderByHash(ctx context.Context, blockHash common.Hash) (
return core.GetHeader(b.db, blockHash, num), nil
}
+func (b *testBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
+ if body := rawdb.ReadBody(b.db, hash, uint64(number)); body != nil {
+ return body, nil
+ }
+ return nil, errors.New("block body not found")
+}
+
func (b *testBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
number := core.GetBlockNumber(b.db, blockHash)
return core.GetBlockReceipts(b.db, blockHash, number), nil
}
-func (b *testBackend) GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) {
- number := core.GetBlockNumber(b.db, blockHash)
- receipts := core.GetBlockReceipts(b.db, blockHash, number)
-
- logs := make([][]*types.Log, len(receipts))
- for i, receipt := range receipts {
- logs[i] = receipt.Logs
- }
+func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
+ logs := rawdb.ReadLogs(b.db, hash, number)
return logs, nil
}
+func (b *testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
+ return nil, nil
+}
+
func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return b.txFeed.Subscribe(ch)
}
@@ -102,6 +107,10 @@ func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscript
return b.logsFeed.Subscribe(ch)
}
+func (b *testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
+ return b.pendingLogsFeed.Subscribe(ch)
+}
+
func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
return b.chainFeed.Subscribe(ch)
}
@@ -137,6 +146,12 @@ func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.Matc
}()
}
+func newTestFilterSystem(t testing.TB, db ethdb.Database, cfg Config) (*testBackend, *FilterSystem) {
+ backend := &testBackend{db: db}
+ sys := NewFilterSystem(backend, cfg)
+ return backend, sys
+}
+
// TestBlockSubscription tests if a block subscription returns block hashes for posted chain events.
// It creates multiple subscriptions:
// - one at the start and should receive all posted chain events and a second (blockHashes)
@@ -146,17 +161,12 @@ func TestBlockSubscription(t *testing.T) {
t.Parallel()
var (
- mux = new(event.TypeMux)
- db = rawdb.NewMemoryDatabase()
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
- genesis = new(core.Genesis).MustCommit(db)
- chain, _ = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {})
- chainEvents = []core.ChainEvent{}
+ db = rawdb.NewMemoryDatabase()
+ backend, sys = newTestFilterSystem(t, db, Config{})
+ api = NewFilterAPI(sys, false)
+ genesis = new(core.Genesis).MustCommit(db)
+ chain, _ = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {})
+ chainEvents = []core.ChainEvent{}
)
for _, blk := range chain {
@@ -191,7 +201,7 @@ func TestBlockSubscription(t *testing.T) {
time.Sleep(1 * time.Second)
for _, e := range chainEvents {
- chainFeed.Send(e)
+ backend.chainFeed.Send(e)
}
<-sub0.Err()
@@ -203,14 +213,9 @@ func TestPendingTxFilter(t *testing.T) {
t.Parallel()
var (
- mux = new(event.TypeMux)
- db = rawdb.NewMemoryDatabase()
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
+ db = rawdb.NewMemoryDatabase()
+ backend, sys = newTestFilterSystem(t, db, Config{})
+ api = NewFilterAPI(sys, false)
transactions = []*types.Transaction{
types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
@@ -226,7 +231,7 @@ func TestPendingTxFilter(t *testing.T) {
fid0 := api.NewPendingTransactionFilter()
time.Sleep(1 * time.Second)
- txFeed.Send(core.NewTxsEvent{Txs: transactions})
+ backend.txFeed.Send(core.NewTxsEvent{Txs: transactions})
timeout := time.Now().Add(1 * time.Second)
for {
@@ -263,14 +268,9 @@ func TestPendingTxFilter(t *testing.T) {
// If not it must return an error.
func TestLogFilterCreation(t *testing.T) {
var (
- mux = new(event.TypeMux)
- db = rawdb.NewMemoryDatabase()
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
+ db = rawdb.NewMemoryDatabase()
+ _, sys = newTestFilterSystem(t, db, Config{})
+ api = NewFilterAPI(sys, false)
testCases = []struct {
crit FilterCriteria
@@ -312,14 +312,9 @@ func TestInvalidLogFilterCreation(t *testing.T) {
t.Parallel()
var (
- mux = new(event.TypeMux)
- db = rawdb.NewMemoryDatabase()
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
+ db = rawdb.NewMemoryDatabase()
+ _, sys = newTestFilterSystem(t, db, Config{})
+ api = NewFilterAPI(sys, false)
)
// different situations where log filter creation should fail.
@@ -339,15 +334,10 @@ func TestInvalidLogFilterCreation(t *testing.T) {
func TestInvalidGetLogsRequest(t *testing.T) {
var (
- mux = new(event.TypeMux)
- db = rawdb.NewMemoryDatabase()
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
- blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
+ db = rawdb.NewMemoryDatabase()
+ _, sys = newTestFilterSystem(t, db, Config{})
+ api = NewFilterAPI(sys, false)
+ blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
)
// Reason: Cannot specify both BlockHash and FromBlock/ToBlock)
@@ -369,14 +359,9 @@ func TestLogFilter(t *testing.T) {
t.Parallel()
var (
- mux = new(event.TypeMux)
- db = rawdb.NewMemoryDatabase()
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
+ db = rawdb.NewMemoryDatabase()
+ backend, sys = newTestFilterSystem(t, db, Config{})
+ api = NewFilterAPI(sys, false)
firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111")
secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222")
@@ -386,7 +371,7 @@ func TestLogFilter(t *testing.T) {
secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
- // posted twice, once as vm.Logs and once as core.PendingLogsEvent
+ // posted twice, once as regular logs and once as pending logs.
allLogs = []*types.Log{
{Address: firstAddr},
{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
@@ -439,11 +424,11 @@ func TestLogFilter(t *testing.T) {
// raise events
time.Sleep(1 * time.Second)
- if nsend := logsFeed.Send(allLogs); nsend == 0 {
- t.Fatal("Shoud have at least one subscription")
+ if nsend := backend.logsFeed.Send(allLogs); nsend == 0 {
+ t.Fatal("Logs event not delivered")
}
- if err := mux.Post(core.PendingLogsEvent{Logs: allLogs}); err != nil {
- t.Fatal(err)
+ if nsend := backend.pendingLogsFeed.Send(allLogs); nsend == 0 {
+ t.Fatal("Pending logs event not delivered")
}
for i, tt := range testCases {
@@ -488,14 +473,9 @@ func TestPendingLogsSubscription(t *testing.T) {
t.Parallel()
var (
- mux = new(event.TypeMux)
- db = rawdb.NewMemoryDatabase()
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
+ db = rawdb.NewMemoryDatabase()
+ backend, sys = newTestFilterSystem(t, db, Config{})
+ api = NewFilterAPI(sys, false)
firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111")
secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222")
@@ -507,26 +487,18 @@ func TestPendingLogsSubscription(t *testing.T) {
fourthTopic = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444")
notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
- allLogs = []core.PendingLogsEvent{
- {Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}},
- {Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}},
- {Logs: []*types.Log{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}},
- {Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}},
- {Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}},
- {Logs: []*types.Log{
+ allLogs = [][]*types.Log{
+ {{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}},
+ {{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}},
+ {{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}},
+ {{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}},
+ {{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}},
+ {
{Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
{Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
{Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5},
{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
- }},
- }
-
- convertLogs = func(pl []core.PendingLogsEvent) []*types.Log {
- var logs []*types.Log
- for _, l := range pl {
- logs = append(logs, l.Logs...)
- }
- return logs
+ },
}
testCases = []struct {
@@ -536,21 +508,52 @@ func TestPendingLogsSubscription(t *testing.T) {
sub *Subscription
}{
// match all
- {ethereum.FilterQuery{}, convertLogs(allLogs), nil, nil},
+ {
+ ethereum.FilterQuery{}, flattenLogs(allLogs),
+ nil, nil,
+ },
// match none due to no matching addresses
- {ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, nil, nil},
+ {
+ ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}},
+ nil,
+ nil, nil,
+ },
// match logs based on addresses, ignore topics
- {ethereum.FilterQuery{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
+ {
+ ethereum.FilterQuery{Addresses: []common.Address{firstAddr}},
+ append(flattenLogs(allLogs[:2]), allLogs[5][3]),
+ nil, nil,
+ },
// match none due to no matching topics (match with address)
- {ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, nil, nil},
+ {
+ ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}},
+ nil, nil, nil,
+ },
// match logs based on addresses and topics
- {ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil},
+ {
+ ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}},
+ append(flattenLogs(allLogs[3:5]), allLogs[5][0]),
+ nil, nil,
+ },
// match logs based on multiple addresses and "or" topics
- {ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil},
+ {
+ ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}},
+ append(flattenLogs(allLogs[2:5]), allLogs[5][0]),
+ nil,
+ nil,
+ },
// block numbers are ignored for filters created with New***Filter, these return all logs that match the given criteria when the state changes
- {ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
+ {
+ ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)},
+ append(flattenLogs(allLogs[:2]), allLogs[5][3]),
+ nil, nil,
+ },
// multiple pending logs, should match only 2 topics from the logs in block 5
- {ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil},
+ {
+ ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}},
+ []*types.Log{allLogs[5][0], allLogs[5][2]},
+ nil, nil,
+ },
}
)
@@ -593,10 +596,219 @@ func TestPendingLogsSubscription(t *testing.T) {
// raise events
time.Sleep(1 * time.Second)
- // allLogs are type of core.PendingLogsEvent
- for _, l := range allLogs {
- if err := mux.Post(l); err != nil {
+ for _, ev := range allLogs {
+ backend.pendingLogsFeed.Send(ev)
+ }
+}
+
+func TestLightFilterLogs(t *testing.T) {
+ t.Parallel()
+
+ var (
+ db = rawdb.NewMemoryDatabase()
+ backend, sys = newTestFilterSystem(t, db, Config{})
+ api = NewFilterAPI(sys, true)
+ signer = types.HomesteadSigner{}
+
+ firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111")
+ secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222")
+ thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333")
+ notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
+ firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
+ secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
+
+ // posted twice, once as regular logs and once as pending logs.
+ allLogs = []*types.Log{
+ // Block 1
+ {Address: firstAddr, Topics: []common.Hash{}, Data: []byte{}, BlockNumber: 2, Index: 0},
+ // Block 2
+ {Address: firstAddr, Topics: []common.Hash{firstTopic}, Data: []byte{}, BlockNumber: 3, Index: 0},
+ {Address: secondAddr, Topics: []common.Hash{firstTopic}, Data: []byte{}, BlockNumber: 3, Index: 1},
+ {Address: thirdAddress, Topics: []common.Hash{secondTopic}, Data: []byte{}, BlockNumber: 3, Index: 2},
+ // Block 3
+ {Address: thirdAddress, Topics: []common.Hash{secondTopic}, Data: []byte{}, BlockNumber: 4, Index: 0},
+ }
+
+ testCases = []struct {
+ crit FilterCriteria
+ expected []*types.Log
+ id rpc.ID
+ }{
+ // match all
+ 0: {FilterCriteria{}, allLogs, ""},
+ // match none due to no matching addresses
+ 1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""},
+ // match logs based on addresses, ignore topics
+ 2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
+ // match logs based on addresses and topics
+ 3: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""},
+ // all logs with block num >= 3
+ 4: {FilterCriteria{FromBlock: big.NewInt(3), ToBlock: big.NewInt(5)}, allLogs[1:], ""},
+ // all logs
+ 5: {FilterCriteria{FromBlock: big.NewInt(0), ToBlock: big.NewInt(5)}, allLogs, ""},
+ // all logs with 2 <= block num <= 3 and topic secondTopic
+ 6: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(3), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""},
+ }
+
+ key, _ = crypto.GenerateKey()
+ addr = crypto.PubkeyToAddress(key.PublicKey)
+ genesis = &core.Genesis{Config: params.TestChainConfig,
+ Alloc: core.GenesisAlloc{
+ addr: {Balance: big.NewInt(params.Ether)},
+ },
+ }
+ receipts = []*types.Receipt{{
+ Logs: []*types.Log{allLogs[0]},
+ }, {
+ Logs: []*types.Log{allLogs[1], allLogs[2], allLogs[3]},
+ }, {
+ Logs: []*types.Log{allLogs[4]},
+ }}
+ )
+
+ _, blocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 4, func(i int, b *core.BlockGen) {
+ if i == 0 {
+ return
+ }
+ receipts[i-1].Bloom = types.CreateBloom(types.Receipts{receipts[i-1]})
+ b.AddUncheckedReceipt(receipts[i-1])
+ tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i - 1), To: &common.Address{}, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: big.NewInt(2100), Data: nil}), signer, key)
+ b.AddTx(tx)
+ })
+ for i, block := range blocks {
+ rawdb.WriteBlock(db, block)
+ rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
+ rawdb.WriteHeadBlockHash(db, block.Hash())
+ if i > 0 {
+ rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), []*types.Receipt{receipts[i-1]})
+ }
+ }
+ // create all filters
+ for i := range testCases {
+ id, err := api.NewFilter(testCases[i].crit)
+ if err != nil {
t.Fatal(err)
}
+ testCases[i].id = id
+ }
+
+ // raise events
+ time.Sleep(1 * time.Second)
+ for _, block := range blocks {
+ backend.chainFeed.Send(core.ChainEvent{Block: block, Hash: common.Hash{}, Logs: allLogs})
+ }
+
+ for i, tt := range testCases {
+ var fetched []*types.Log
+ timeout := time.Now().Add(1 * time.Second)
+ for { // fetch all expected logs
+ results, err := api.GetFilterChanges(tt.id)
+ if err != nil {
+ t.Fatalf("Unable to fetch logs: %v", err)
+ }
+ fetched = append(fetched, results.([]*types.Log)...)
+ if len(fetched) >= len(tt.expected) {
+ break
+ }
+ // check timeout
+ if time.Now().After(timeout) {
+ break
+ }
+
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ if len(fetched) != len(tt.expected) {
+ t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
+ return
+ }
+
+ for l := range fetched {
+ if fetched[l].Removed {
+ t.Errorf("expected log not to be removed for log %d in case %d", l, i)
+ }
+ expected := *tt.expected[l]
+ blockNum := expected.BlockNumber - 1
+ expected.BlockHash = blocks[blockNum].Hash()
+ expected.TxHash = blocks[blockNum].Transactions()[0].Hash()
+ if !reflect.DeepEqual(fetched[l], &expected) {
+ t.Errorf("invalid log on index %d for case %d", l, i)
+ }
+ }
+ }
+}
+
+// TestPendingTxFilterDeadlock tests if the event loop hangs when pending
+// txes arrive at the same time that one of multiple filters is timing out.
+// Please refer to #22131 for more details.
+func TestPendingTxFilterDeadlock(t *testing.T) {
+ t.Parallel()
+ timeout := 100 * time.Millisecond
+
+ var (
+ db = rawdb.NewMemoryDatabase()
+ backend, sys = newTestFilterSystem(t, db, Config{Timeout: timeout})
+ api = NewFilterAPI(sys, false)
+ done = make(chan struct{})
+ )
+
+ go func() {
+ // Bombard feed with txes until signal was received to stop
+ i := uint64(0)
+ for {
+ select {
+ case <-done:
+ return
+ default:
+ }
+
+ tx := types.NewTransaction(i, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil)
+ backend.txFeed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx}})
+ i++
+ }
+ }()
+
+ // Create a bunch of filters that will
+ // timeout either in 100ms or 200ms
+ fids := make([]rpc.ID, 20)
+ for i := 0; i < len(fids); i++ {
+ fid := api.NewPendingTransactionFilter()
+ fids[i] = fid
+ // Wait for at least one tx to arrive in filter
+ for {
+ hashes, err := api.GetFilterChanges(fid)
+ if err != nil {
+ t.Fatalf("Filter should exist: %v\n", err)
+ }
+ if len(hashes.([]common.Hash)) > 0 {
+ break
+ }
+ runtime.Gosched()
+ }
+ }
+
+ // Wait until filters have timed out
+ time.Sleep(3 * timeout)
+
+ // If tx loop doesn't consume `done` after a second
+ // it's hanging.
+ select {
+ case done <- struct{}{}:
+ // Check that all filters have been uninstalled
+ for _, fid := range fids {
+ if _, err := api.GetFilterChanges(fid); err == nil {
+ t.Errorf("Filter %s should have been uninstalled\n", fid)
+ }
+ }
+ case <-time.After(1 * time.Second):
+ t.Error("Tx sending loop hangs")
+ }
+}
+
+func flattenLogs(pl [][]*types.Log) []*types.Log {
+ var logs []*types.Log
+ for _, l := range pl {
+ logs = append(logs, l...)
}
+ return logs
}
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
index 1b74d21df7e0..489917d17c2b 100644
--- a/eth/filters/filter_test.go
+++ b/eth/filters/filter_test.go
@@ -29,7 +29,6 @@ import (
"github.com/XinFinOrg/XDPoSChain/core"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/crypto"
- "github.com/XinFinOrg/XDPoSChain/event"
"github.com/XinFinOrg/XDPoSChain/params"
)
@@ -50,18 +49,13 @@ func BenchmarkFilters(b *testing.B) {
defer os.RemoveAll(dir)
var (
- db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "")
- mux = new(event.TypeMux)
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- addr2 = common.BytesToAddress([]byte("jeff"))
- addr3 = common.BytesToAddress([]byte("ethereum"))
- addr4 = common.BytesToAddress([]byte("random addresses please"))
+ db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "")
+ _, sys = newTestFilterSystem(b, db, Config{})
+ key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ addr2 = common.BytesToAddress([]byte("jeff"))
+ addr3 = common.BytesToAddress([]byte("ethereum"))
+ addr4 = common.BytesToAddress([]byte("random addresses please"))
)
defer db.Close()
@@ -84,20 +78,16 @@ func BenchmarkFilters(b *testing.B) {
}
})
for i, block := range chain {
- core.WriteBlock(db, block)
- if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
- b.Fatalf("failed to insert block number: %v", err)
- }
- if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
- b.Fatalf("failed to insert block number: %v", err)
- }
+ rawdb.WriteBlock(db, block)
+ rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
+ rawdb.WriteHeadBlockHash(db, block.Hash())
if err := core.WriteBlockReceipts(db, block.Hash(), block.NumberU64(), receipts[i]); err != nil {
b.Fatal("error writing block receipts:", err)
}
}
b.ResetTimer()
- filter := NewRangeFilter(backend, 0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil)
+ filter := sys.NewRangeFilter(0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil)
for i := 0; i < b.N; i++ {
logs, _ := filter.Logs(context.Background())
@@ -115,15 +105,10 @@ func TestFilters(t *testing.T) {
defer os.RemoveAll(dir)
var (
- db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "")
- mux = new(event.TypeMux)
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr = crypto.PubkeyToAddress(key1.PublicKey)
+ db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "")
+ _, sys = newTestFilterSystem(t, db, Config{})
+ key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ addr = crypto.PubkeyToAddress(key1.PublicKey)
hash1 = common.BytesToHash([]byte("topic1"))
hash2 = common.BytesToHash([]byte("topic2"))
@@ -144,6 +129,7 @@ func TestFilters(t *testing.T) {
},
}
gen.AddUncheckedReceipt(receipt)
+ gen.AddUncheckedTx(types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(2100), nil))
case 2:
receipt := types.NewReceipt(nil, false, 0)
receipt.Logs = []*types.Log{
@@ -153,6 +139,7 @@ func TestFilters(t *testing.T) {
},
}
gen.AddUncheckedReceipt(receipt)
+ gen.AddUncheckedTx(types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2100), nil))
case 998:
receipt := types.NewReceipt(nil, false, 0)
receipt.Logs = []*types.Log{
@@ -162,6 +149,7 @@ func TestFilters(t *testing.T) {
},
}
gen.AddUncheckedReceipt(receipt)
+ gen.AddUncheckedTx(types.NewTransaction(998, common.HexToAddress("0x998"), big.NewInt(998), 998, big.NewInt(2100), nil))
case 999:
receipt := types.NewReceipt(nil, false, 0)
receipt.Logs = []*types.Log{
@@ -171,29 +159,26 @@ func TestFilters(t *testing.T) {
},
}
gen.AddUncheckedReceipt(receipt)
+ gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, big.NewInt(2100), nil))
}
})
for i, block := range chain {
- core.WriteBlock(db, block)
- if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
- t.Fatalf("failed to insert block number: %v", err)
- }
- if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
- t.Fatalf("failed to insert block number: %v", err)
- }
+ rawdb.WriteBlock(db, block)
+ rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
+ rawdb.WriteHeadBlockHash(db, block.Hash())
if err := core.WriteBlockReceipts(db, block.Hash(), block.NumberU64(), receipts[i]); err != nil {
t.Fatal("error writing block receipts:", err)
}
}
- filter := NewRangeFilter(backend, 0, -1, []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}})
+ filter := sys.NewRangeFilter(0, -1, []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}})
logs, _ := filter.Logs(context.Background())
if len(logs) != 4 {
t.Error("expected 4 log, got", len(logs))
}
- filter = NewRangeFilter(backend, 900, 999, []common.Address{addr}, [][]common.Hash{{hash3}})
+ filter = sys.NewRangeFilter(900, 999, []common.Address{addr}, [][]common.Hash{{hash3}})
logs, _ = filter.Logs(context.Background())
if len(logs) != 1 {
t.Error("expected 1 log, got", len(logs))
@@ -202,7 +187,7 @@ func TestFilters(t *testing.T) {
t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0])
}
- filter = NewRangeFilter(backend, 990, -1, []common.Address{addr}, [][]common.Hash{{hash3}})
+ filter = sys.NewRangeFilter(990, -1, []common.Address{addr}, [][]common.Hash{{hash3}})
logs, _ = filter.Logs(context.Background())
if len(logs) != 1 {
t.Error("expected 1 log, got", len(logs))
@@ -211,7 +196,7 @@ func TestFilters(t *testing.T) {
t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0])
}
- filter = NewRangeFilter(backend, 1, 10, nil, [][]common.Hash{{hash1, hash2}})
+ filter = sys.NewRangeFilter(1, 10, nil, [][]common.Hash{{hash1, hash2}})
logs, _ = filter.Logs(context.Background())
if len(logs) != 2 {
@@ -219,7 +204,7 @@ func TestFilters(t *testing.T) {
}
failHash := common.BytesToHash([]byte("fail"))
- filter = NewRangeFilter(backend, 0, -1, nil, [][]common.Hash{{failHash}})
+ filter = sys.NewRangeFilter(0, -1, nil, [][]common.Hash{{failHash}})
logs, _ = filter.Logs(context.Background())
if len(logs) != 0 {
@@ -227,14 +212,14 @@ func TestFilters(t *testing.T) {
}
failAddr := common.BytesToAddress([]byte("failmenow"))
- filter = NewRangeFilter(backend, 0, -1, []common.Address{failAddr}, nil)
+ filter = sys.NewRangeFilter(0, -1, []common.Address{failAddr}, nil)
logs, _ = filter.Logs(context.Background())
if len(logs) != 0 {
t.Error("expected 0 log, got", len(logs))
}
- filter = NewRangeFilter(backend, 0, -1, nil, [][]common.Hash{{failHash}, {hash1}})
+ filter = sys.NewRangeFilter(0, -1, nil, [][]common.Hash{{failHash}, {hash1}})
logs, _ = filter.Logs(context.Background())
if len(logs) != 0 {
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index d862977d2c5f..a365e5278020 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -35,6 +35,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/core/vm"
"github.com/XinFinOrg/XDPoSChain/eth/downloader"
+ "github.com/XinFinOrg/XDPoSChain/eth/filters"
"github.com/XinFinOrg/XDPoSChain/ethdb"
"github.com/XinFinOrg/XDPoSChain/event"
"github.com/XinFinOrg/XDPoSChain/params"
@@ -49,9 +50,8 @@ type Backend interface {
ProtocolVersion() int
SuggestPrice(ctx context.Context) (*big.Int, error)
ChainDb() ethdb.Database
- EventMux() *event.TypeMux
AccountManager() *accounts.Manager
- RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection
+ RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection
RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs
XDCxService() *XDCx.XDCX
LendingService() *XDCxlending.Lending
@@ -67,6 +67,7 @@ type Backend interface {
StateAndHeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*state.StateDB, *types.Header, error)
StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error)
GetBlock(ctx context.Context, blockHash common.Hash) (*types.Block, error)
+ PendingBlockAndReceipts() (*types.Block, types.Receipts)
GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
GetTd(blockHash common.Hash) *big.Int
GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, XDCxState *tradingstate.TradingStateDB, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error)
@@ -88,6 +89,7 @@ type Backend interface {
OrderTxPoolContent() (map[common.Address]types.OrderTransactions, map[common.Address]types.OrderTransactions)
OrderStats() (pending int, queued int)
SendLendingTx(ctx context.Context, signedTx *types.LendingTransaction) error
+ SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription
ChainConfig() *params.ChainConfig
CurrentBlock() *types.Block
@@ -102,6 +104,11 @@ type Backend interface {
GetBlocksHashCache(blockNr uint64) []common.Hash
AreTwoBlockSamePath(newBlock common.Hash, oldBlock common.Hash) bool
GetOrderNonce(address common.Hash) (uint64, error)
+
+ // eth/filters needs to be initialized from this backend type, so methods needed by
+ // it must also be included here.
+ GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error)
+ filters.Backend
}
func GetAPIs(apiBackend Backend, chainReader consensus.ChainReader) []rpc.API {
diff --git a/les/api_backend.go b/les/api_backend.go
index 3e93efe9601f..876a1dcbe9f8 100644
--- a/les/api_backend.go
+++ b/les/api_backend.go
@@ -130,6 +130,14 @@ func (b *LesApiBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash r
return nil, errors.New("invalid arguments; neither block nor hash specified")
}
+func (b *LesApiBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
+ return light.GetBody(ctx, b.eth.odr, hash, uint64(number))
+}
+
+func (b *LesApiBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
+ return nil, nil
+}
+
func (b *LesApiBackend) StateAndHeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*state.StateDB, *types.Header, error) {
header, err := b.HeaderByNumber(ctx, blockNr)
if header == nil || err != nil {
@@ -163,8 +171,8 @@ func (b *LesApiBackend) GetReceipts(ctx context.Context, blockHash common.Hash)
return light.GetBlockReceipts(ctx, b.eth.odr, blockHash, core.GetBlockNumber(b.eth.chainDb, blockHash))
}
-func (b *LesApiBackend) GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) {
- return light.GetBlockLogs(ctx, b.eth.odr, blockHash, core.GetBlockNumber(b.eth.chainDb, blockHash))
+func (b *LesApiBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
+ return light.GetBlockLogs(ctx, b.eth.odr, hash, number)
}
func (b *LesApiBackend) GetTd(blockHash common.Hash) *big.Int {
@@ -241,6 +249,13 @@ func (b *LesApiBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscri
return b.eth.blockchain.SubscribeLogsEvent(ch)
}
+func (b *LesApiBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
+ return event.NewSubscription(func(quit <-chan struct{}) error {
+ <-quit
+ return nil
+ })
+}
+
func (b *LesApiBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
return b.eth.blockchain.SubscribeRemovedLogsEvent(ch)
}
@@ -261,10 +276,6 @@ func (b *LesApiBackend) ChainDb() ethdb.Database {
return b.eth.chainDb
}
-func (b *LesApiBackend) EventMux() *event.TypeMux {
- return b.eth.eventMux
-}
-
func (b *LesApiBackend) AccountManager() *accounts.Manager {
return b.eth.accountManager
}
diff --git a/les/backend.go b/les/backend.go
index 304dfc7bd331..fe3ffa80b5cb 100644
--- a/les/backend.go
+++ b/les/backend.go
@@ -190,7 +190,7 @@ func (s *LightEthereum) APIs() []rpc.API {
}, {
Namespace: "eth",
Version: "1.0",
- Service: filters.NewPublicFilterAPI(s.ApiBackend, true),
+ Service: filters.NewFilterAPI(filters.NewFilterSystem(s.ApiBackend, filters.Config{LogCacheSize: s.config.FilterLogCacheSize}), true),
Public: true,
}, {
Namespace: "net",
diff --git a/light/lightchain.go b/light/lightchain.go
index 72873a57c2fd..dd3e57f624dd 100644
--- a/light/lightchain.go
+++ b/light/lightchain.go
@@ -27,6 +27,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/consensus"
"github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/state"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/ethdb"
@@ -34,7 +35,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/params"
"github.com/XinFinOrg/XDPoSChain/rlp"
- "github.com/hashicorp/golang-lru"
+ lru "github.com/hashicorp/golang-lru"
)
var (
@@ -192,9 +193,7 @@ func (bc *LightChain) ResetWithGenesisBlock(genesis *types.Block) {
if err := core.WriteTd(bc.chainDb, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
log.Crit("Failed to write genesis block TD", "err", err)
}
- if err := core.WriteBlock(bc.chainDb, genesis); err != nil {
- log.Crit("Failed to write genesis block", "err", err)
- }
+ rawdb.WriteBlock(bc.chainDb, genesis)
bc.genesisBlock = genesis
bc.hc.SetGenesis(bc.genesisBlock.Header())
bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
diff --git a/light/lightchain_test.go b/light/lightchain_test.go
index 98e10904949c..0de612e3b9e6 100644
--- a/light/lightchain_test.go
+++ b/light/lightchain_test.go
@@ -124,7 +124,7 @@ func testHeaderChainImport(chain []*types.Header, lightchain *LightChain) error
// Manually insert the header into the database, but don't reorganize (allows subsequent testing)
lightchain.mu.Lock()
core.WriteTd(lightchain.chainDb, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, lightchain.GetTdByHash(header.ParentHash)))
- core.WriteHeader(lightchain.chainDb, header)
+ rawdb.WriteHeader(lightchain.chainDb, header)
lightchain.mu.Unlock()
}
return nil
diff --git a/light/odr.go b/light/odr.go
index eb039cfe3030..ee9aa9b352ab 100644
--- a/light/odr.go
+++ b/light/odr.go
@@ -24,6 +24,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/ethdb"
)
@@ -112,7 +113,7 @@ type BlockRequest struct {
// StoreResult stores the retrieved data in local database
func (req *BlockRequest) StoreResult(db ethdb.Database) {
- core.WriteBodyRLP(db, req.Hash, req.Number, req.Rlp)
+ rawdb.WriteBodyRLP(db, req.Hash, req.Number, req.Rlp)
}
// ReceiptsRequest is the ODR request type for retrieving block bodies
@@ -141,10 +142,10 @@ type ChtRequest struct {
// StoreResult stores the retrieved data in local database
func (req *ChtRequest) StoreResult(db ethdb.Database) {
// if there is a canonical hash, there is a header too
- core.WriteHeader(db, req.Header)
+ rawdb.WriteHeader(db, req.Header)
hash, num := req.Header.Hash(), req.Header.Number.Uint64()
core.WriteTd(db, hash, num, req.Td)
- core.WriteCanonicalHash(db, hash, num)
+ rawdb.WriteCanonicalHash(db, hash, num)
}
// BloomRequest is the ODR request type for retrieving bloom filters from a CHT structure
diff --git a/light/odr_util.go b/light/odr_util.go
index 236f5c238220..d7ebd6739f75 100644
--- a/light/odr_util.go
+++ b/light/odr_util.go
@@ -124,7 +124,7 @@ func GetBlock(ctx context.Context, odr OdrBackend, hash common.Hash, number uint
}
// GetBlockReceipts retrieves the receipts generated by the transactions included
-// in a block given by its hash.
+// in a block given by its hash. Receipts will be filled in with context data.
func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (types.Receipts, error) {
// Retrieve the potentially incomplete receipts from disk or network
receipts := core.GetBlockReceipts(odr.Database(), hash, number)
@@ -153,9 +153,8 @@ func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, num
}
// GetBlockLogs retrieves the logs generated by the transactions included in a
-// block given by its hash.
+// block given by its hash. Logs will be filled in with context data.
func GetBlockLogs(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) ([][]*types.Log, error) {
- // Retrieve the potentially incomplete receipts from disk or network
receipts := core.GetBlockReceipts(odr.Database(), hash, number)
if receipts == nil {
r := &ReceiptsRequest{Hash: hash, Number: number}
diff --git a/miner/miner.go b/miner/miner.go
index 4a9d34b9f7fa..835f0f014b74 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -178,7 +178,18 @@ func (self *Miner) PendingBlock() *types.Block {
return self.worker.pendingBlock()
}
+// PendingBlockAndReceipts returns the currently pending block and corresponding receipts.
+func (miner *Miner) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
+ return miner.worker.pendingBlockAndReceipts()
+}
+
func (self *Miner) SetEtherbase(addr common.Address) {
self.coinbase = addr
self.worker.setEtherbase(addr)
}
+
+// SubscribePendingLogs starts delivering logs from pending transactions
+// to the given channel.
+func (self *Miner) SubscribePendingLogs(ch chan<- []*types.Log) event.Subscription {
+ return self.worker.pendingLogsFeed.Subscribe(ch)
+}
diff --git a/miner/worker.go b/miner/worker.go
index a97f55ceabbb..64b195b30fa1 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -104,6 +104,9 @@ type worker struct {
mu sync.Mutex
+ // Feeds
+ pendingLogsFeed event.Feed
+
// update loop
mux *event.TypeMux
txsCh chan core.NewTxsEvent
@@ -125,6 +128,10 @@ type worker struct {
coinbase common.Address
extra []byte
+ snapshotMu sync.RWMutex // The lock used to protect the block snapshot and state snapshot
+ snapshotBlock *types.Block
+ snapshotReceipts types.Receipts
+
currentMu sync.Mutex
current *Work
@@ -216,6 +223,14 @@ func (self *worker) pendingBlock() *types.Block {
return self.current.Block
}
+// pendingBlockAndReceipts returns pending block and corresponding receipts.
+func (w *worker) pendingBlockAndReceipts() (*types.Block, types.Receipts) {
+ // return a snapshot to avoid contention on currentMu mutex
+ w.snapshotMu.RLock()
+ defer w.snapshotMu.RUnlock()
+ return w.snapshotBlock, w.snapshotReceipts
+}
+
func (self *worker) start() {
self.mu.Lock()
defer self.mu.Unlock()
@@ -322,7 +337,7 @@ func (self *worker) update() {
}
feeCapacity := state.GetTRC21FeeCapacityFromState(self.current.state)
txset, specialTxs := types.NewTransactionsByPriceAndNonce(self.current.signer, txs, nil, feeCapacity)
- self.current.commitTransactions(self.mux, feeCapacity, txset, specialTxs, self.chain, self.coinbase)
+ self.current.commitTransactions(self.mux, feeCapacity, txset, specialTxs, self.chain, self.coinbase, &self.pendingLogsFeed)
self.currentMu.Unlock()
} else {
// If we're mining, but nothing is being processed, wake on new transactions
@@ -781,7 +796,7 @@ func (self *worker) commitNewWork() {
specialTxs = append(specialTxs, txStateRoot)
}
}
- work.commitTransactions(self.mux, feeCapacity, txs, specialTxs, self.chain, self.coinbase)
+ work.commitTransactions(self.mux, feeCapacity, txs, specialTxs, self.chain, self.coinbase, &self.pendingLogsFeed)
// compute uncles for the new block.
var (
uncles []*types.Header
@@ -801,7 +816,7 @@ func (self *worker) commitNewWork() {
self.push(work)
}
-func (env *Work) commitTransactions(mux *event.TypeMux, balanceFee map[common.Address]*big.Int, txs *types.TransactionsByPriceAndNonce, specialTxs types.Transactions, bc *core.BlockChain, coinbase common.Address) {
+func (env *Work) commitTransactions(mux *event.TypeMux, balanceFee map[common.Address]*big.Int, txs *types.TransactionsByPriceAndNonce, specialTxs types.Transactions, bc *core.BlockChain, coinbase common.Address, pendingLogsFeed *event.Feed) {
gp := new(core.GasPool).AddGas(env.header.GasLimit)
balanceUpdated := map[common.Address]*big.Int{}
totalFeeUsed := big.NewInt(0)
@@ -1028,29 +1043,25 @@ func (env *Work) commitTransactions(mux *event.TypeMux, balanceFee map[common.Ad
}
}
state.UpdateTRC21Fee(env.state, balanceUpdated, totalFeeUsed)
- if len(coalescedLogs) > 0 || env.tcount > 0 {
- // make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
- // logs by filling in the block hash when the block was mined by the local miner. This can
- // cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
+ // make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
+ // logs by filling in the block hash when the block was mined by the local miner. This can
+ // cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
+ if len(coalescedLogs) > 0 {
cpy := make([]*types.Log, len(coalescedLogs))
for i, l := range coalescedLogs {
cpy[i] = new(types.Log)
*cpy[i] = *l
}
- go func(logs []*types.Log, tcount int) {
- if len(logs) > 0 {
- err := mux.Post(core.PendingLogsEvent{Logs: logs})
- if err != nil {
- log.Warn("[commitTransactions] Error when sending PendingLogsEvent", "LogLength", len(logs))
- }
- }
- if tcount > 0 {
- err := mux.Post(core.PendingStateEvent{})
- if err != nil {
- log.Warn("[commitTransactions] Error when sending PendingStateEvent", "tcount", tcount)
- }
+ pendingLogsFeed.Send(cpy)
+ }
+ if env.tcount > 0 {
+ go func(tcount int) {
+ err := mux.Post(core.PendingStateEvent{})
+ if err != nil {
+ log.Warn("[commitTransactions] Error when sending PendingStateEvent", "tcount", tcount)
}
- }(cpy, env.tcount)
+ }(env.tcount)
+
}
}
diff --git a/node/node.go b/node/node.go
index c54328913742..a9f767bf28f1 100644
--- a/node/node.go
+++ b/node/node.go
@@ -48,6 +48,7 @@ type Node struct {
serverConfig p2p.Config
server *p2p.Server // Currently running P2P networking layer
+ state int // Tracks state of node lifecycle
serviceFuncs []ServiceConstructor // Service constructors (in dependency order)
services map[reflect.Type]Service // Currently running services
@@ -74,6 +75,10 @@ type Node struct {
log log.Logger
}
+const (
+ initializingState = iota
+)
+
// New creates a new P2P node, ready for protocol registration.
func New(conf *Config) (*Node, error) {
// Copy config and resolve the datadir so future changes to the current
@@ -302,6 +307,17 @@ func (n *Node) stopInProc() {
}
}
+// RegisterAPIs registers the APIs a service provides on the node.
+func (n *Node) RegisterAPIs(apis []rpc.API) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+
+ if n.state != initializingState {
+ panic("can't register APIs on running/stopped node")
+ }
+ n.rpcAPIs = append(n.rpcAPIs, apis...)
+}
+
// startIPC initializes and starts the IPC RPC endpoint.
func (n *Node) startIPC(apis []rpc.API) error {
// Short circuit if the IPC endpoint isn't being exposed