Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

bugfixes and using entropy window in append queue for filtering #2213

Closed
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion core/chain_indexer.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ import (
"github.com/dominant-strategies/go-quai/params"
)

var PruneDepth = uint64(1000000) // Number of blocks behind in which we begin pruning old block data
var PruneDepth = uint64(100000000) // Number of blocks behind in which we begin pruning old block data

// ChainIndexerBackend defines the methods needed to process chain segments in
// the background and write the segment results into the database. These can be
Expand Down
77 changes: 46 additions & 31 deletions core/core.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,31 +32,30 @@ import (
)

const (
c_maxAppendQueue = 3000 // Maximum number of future headers we can store in cache
c_maxFutureTime = 30 // Max time into the future (in seconds) we will accept a block
c_appendQueueRetryPeriod = 1 // Time (in seconds) before retrying to append from AppendQueue
c_appendQueueThreshold = 200 // Number of blocks to load from the disk to ram on every proc of append queue
c_processingCache = 10 // Number of block hashes held to prevent multi simultaneous appends on a single block hash
c_primeRetryThreshold = 1800 // Number of times a block is retry to be appended before eviction from append queue in Prime
c_regionRetryThreshold = 1200 // Number of times a block is retry to be appended before eviction from append queue in Region
c_zoneRetryThreshold = 600 // Number of times a block is retry to be appended before eviction from append queue in Zone
c_maxFutureBlocksPrime uint64 = 3 // Number of blocks ahead of the current block to be put in the hashNumberList
c_maxFutureBlocksRegion uint64 = 150
c_maxFutureBlocksZone uint64 = 2000
c_appendQueueRetryPriorityThreshold = 5 // If retry counter for a block is less than this number, then its put in the special list that is tried first to be appended
c_appendQueueRemoveThreshold = 10 // Number of blocks behind the block should be from the current header to be eligble for removal from the append queue
c_normalListProcCounter = 1 // Ratio of Number of times the PriorityList is serviced over the NormalList
c_statsPrintPeriod = 60 // Time between stats prints
c_appendQueuePrintSize = 10
c_normalListBackoffThreshold = 5 // Max multiple on the c_normalListProcCounter
c_maxRemoteTxQueue = 50000
c_remoteTxProcPeriod = 2 // Time between remote tx pool processing
c_asyncWorkShareTimer = 1 * time.Second
c_maxAppendQueue = 100000 // Maximum number of future headers we can store in cache
c_maxFutureTime = 30 // Max time into the future (in seconds) we will accept a block
c_appendQueueRetryPeriod = 1 // Time (in seconds) before retrying to append from AppendQueue
c_appendQueueThreshold = 200 // Number of blocks to load from the disk to ram on every proc of append queue
c_processingCache = 10 // Number of block hashes held to prevent multi simultaneous appends on a single block hash
c_primeRetryThreshold = 1800 // Number of times a block is retry to be appended before eviction from append queue in Prime
c_regionRetryThreshold = 1200 // Number of times a block is retry to be appended before eviction from append queue in Region
c_zoneRetryThreshold = 600 // Number of times a block is retry to be appended before eviction from append queue in Zone
c_appendQueueRetryPriorityThreshold = 5 // If retry counter for a block is less than this number, then its put in the special list that is tried first to be appended
c_appendQueueRemoveThreshold = 10 // Number of blocks behind the block should be from the current header to be eligble for removal from the append queue
c_normalListProcCounter = 1 // Ratio of Number of times the PriorityList is serviced over the NormalList
c_statsPrintPeriod = 60 // Time between stats prints
c_appendQueuePrintSize = 10
c_normalListBackoffThreshold = 5 // Max multiple on the c_normalListProcCounter
c_maxRemoteTxQueue = 50000
c_remoteTxProcPeriod = 2 // Time between remote tx pool processing
c_asyncWorkShareTimer = 1 * time.Second
c_maxFutureEntropyMultiple = 500
)

type blockNumberAndRetryCounter struct {
number uint64
retry uint64
number uint64
entropy *big.Int
retry uint64
}

type Core struct {
Expand Down Expand Up @@ -219,13 +218,11 @@ func (c *Core) InsertChain(blocks types.WorkObjects) (int, error) {

// procAppendQueue sorts the append queue and attempts to append
func (c *Core) procAppendQueue() {
nodeCtx := c.NodeLocation().Context()

maxFutureBlocks := c_maxFutureBlocksPrime
if nodeCtx == common.REGION_CTX {
maxFutureBlocks = c_maxFutureBlocksRegion
} else if nodeCtx == common.ZONE_CTX {
maxFutureBlocks = c_maxFutureBlocksZone
var genesis bool
entropyWindow := c.EntropyWindow()
if entropyWindow == nil {
genesis = true
}

// Sort the blocks by number and retry attempts and try to insert them
Expand All @@ -234,8 +231,8 @@ func (c *Core) procAppendQueue() {
var hashNumberPriorityList []types.HashAndNumber
for _, hash := range c.appendQueue.Keys() {
if value, exist := c.appendQueue.Peek(hash); exist {
hashNumber := types.HashAndNumber{Hash: hash, Number: value.number}
if hashNumber.Number < c.CurrentHeader().NumberU64(nodeCtx)+maxFutureBlocks {
hashNumber := types.HashAndNumber{Hash: hash, Number: value.number, Entropy: value.entropy}
if genesis || hashNumber.Entropy.Cmp(entropyWindow) < 0 {
if value.retry < c_appendQueueRetryPriorityThreshold {
hashNumberPriorityList = append(hashNumberPriorityList, hashNumber)
} else {
Expand Down Expand Up @@ -269,6 +266,24 @@ func (c *Core) procAppendQueue() {
c.procCounter++
}

// EntropyWindow returns the upper bound of the entropy window used to filter
// the append queue: the current head's parent entropy plus a fixed multiple
// (c_maxFutureEntropyMultiple) of the head block's intrinsic log entropy.
// It returns nil when the head's seal cannot be verified (e.g. at genesis),
// in which case callers skip entropy-based filtering.
func (c *Core) EntropyWindow() *big.Int {
	currentHeader := c.CurrentHeader()
	// Use the cached pow hash when available to avoid re-verifying the seal.
	powhash, exists := c.sl.hc.powHashCache.Peek(currentHeader.Hash())
	if !exists {
		var err error
		powhash, err = c.engine.VerifySeal(currentHeader.WorkObjectHeader())
		if err != nil {
			return nil
		}
		c.sl.hc.powHashCache.Add(currentHeader.Hash(), powhash)
	}
	currentBlockIntrinsic := c.engine.IntrinsicLogEntropy(powhash)
	maxAllowableEntropyDist := new(big.Int).Mul(currentBlockIntrinsic, big.NewInt(c_maxFutureEntropyMultiple))
	// Reuse the header fetched above so the window is computed against one
	// consistent head; CurrentHeader() may return a newer head if called again.
	currentHeaderEntropy := currentHeader.ParentEntropy(common.ZONE_CTX)
	return new(big.Int).Add(currentHeaderEntropy, maxAllowableEntropyDist)
}

func (c *Core) serviceBlocks(hashNumberList []types.HashAndNumber) {
sort.Slice(hashNumberList, func(i, j int) bool {
return hashNumberList[i].Number < hashNumberList[j].Number
Expand Down Expand Up @@ -407,7 +422,7 @@ func (c *Core) addToAppendQueue(block *types.WorkObject) error {
return err
}
if order == nodeCtx {
c.appendQueue.ContainsOrAdd(block.Hash(), blockNumberAndRetryCounter{block.NumberU64(c.NodeCtx()), 0})
c.appendQueue.ContainsOrAdd(block.Hash(), blockNumberAndRetryCounter{block.NumberU64(c.NodeCtx()), block.ParentEntropy(c.NodeCtx()), 0})
}
return nil
}
Expand Down
8 changes: 7 additions & 1 deletion core/slice.go
Original file line number Diff line number Diff line change
Expand Up @@ -473,6 +473,9 @@ func (sl *Slice) asyncPendingHeaderLoop() {
}

// WriteBestPh stores a defensive copy of bestPh as the slice's best pending
// header. A nil input is ignored, so callers may pass through a
// possibly-missing database read without checking first.
func (sl *Slice) WriteBestPh(bestPh *types.WorkObject) {
	if bestPh == nil {
		return
	}
	sl.bestPh.Store(types.CopyWorkObject(bestPh))
}
Expand Down Expand Up @@ -1169,7 +1172,10 @@ func (sl *Slice) loadLastState() error {
if sl.ProcessingState() {
sl.miner.worker.LoadPendingBlockBody()
}
sl.WriteBestPh(rawdb.ReadBestPendingHeader(sl.sliceDb))
bestPh := rawdb.ReadBestPendingHeader(sl.sliceDb)
if bestPh != nil {
sl.WriteBestPh(bestPh)
Djadih marked this conversation as resolved.
Show resolved Hide resolved
}
return nil
}

Expand Down
5 changes: 3 additions & 2 deletions core/types/block.go
Original file line number Diff line number Diff line change
Expand Up @@ -1239,8 +1239,9 @@ func (m *BlockManifest) ProtoDecode(protoManifest *ProtoManifest) error {
}

type HashAndNumber struct {
Hash common.Hash
Number uint64
Hash common.Hash
Number uint64
Entropy *big.Int
}

type HashAndLocation struct {
Expand Down
4 changes: 3 additions & 1 deletion core/types/transaction_marshalling.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,9 @@ func (t *Transaction) MarshalJSON() ([]byte, error) {
enc.GasPrice = (*hexutil.Big)(tx.GasPrice)
enc.Value = (*hexutil.Big)(tx.Value)
enc.Data = (*hexutil.Bytes)(&tx.Data)
enc.To = t.To().MixedcaseAddressPtr()
if t.To() != nil {
enc.To = t.To().MixedcaseAddressPtr()
}
enc.V = (*hexutil.Big)(tx.V)
enc.R = (*hexutil.Big)(tx.R)
enc.S = (*hexutil.Big)(tx.S)
Expand Down
5 changes: 5 additions & 0 deletions internal/quaiapi/api.go
Original file line number Diff line number Diff line change
Expand Up @@ -1043,6 +1043,11 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
for _, txout := range tx.TxOut() {
result.TxOut = append(result.TxOut, RPCTxOut{Denomination: hexutil.Uint(txout.Denomination), Address: hexutil.Bytes(txout.Address), Lock: (*hexutil.Big)(txout.Lock)})
}
if blockHash != (common.Hash{}) {
result.BlockHash = &blockHash
result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber))
result.TransactionIndex = (*hexutil.Uint64)(&index)
}
return result
}

Expand Down
1 change: 0 additions & 1 deletion quai/handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -200,7 +200,6 @@ func (h *handler) checkNextPrimeBlock() {
// downloaded otherwise, download next 2 *
// protocol.C_NumPrimeBlocksToDownload
if i < 2*protocol.C_NumPrimeBlocksToDownload {
h.GetNextPrimeBlock(syncHeight)
h.GetNextPrimeBlock(syncHeight.Add(syncHeight, big.NewInt(protocol.C_NumPrimeBlocksToDownload)))
}
break
Expand Down
Loading