diff --git a/Makefile b/Makefile
index 52c955b38f..a418c7e35e 100644
--- a/Makefile
+++ b/Makefile
@@ -85,7 +85,7 @@ GOLDFLAGS := $(GOLDFLAGS_BASE) \
UNIT_TEST_SOURCES := $(sort $(shell GOPATH=$(GOPATH) && GO111MODULE=off && go list ./... | grep -v /go-algorand/test/ ))
ALGOD_API_PACKAGES := $(sort $(shell GOPATH=$(GOPATH) && GO111MODULE=off && cd daemon/algod/api; go list ./... ))
-MSGP_GENERATE := ./protocol ./protocol/test ./crypto ./crypto/merklearray ./crypto/merklesignature ./crypto/stateproof ./data/basics ./data/transactions ./data/stateproofmsg ./data/committee ./data/bookkeeping ./data/hashable ./agreement ./rpcs ./node ./ledger ./ledger/ledgercore ./ledger/store ./stateproof ./data/account ./daemon/algod/api/spec/v2
+MSGP_GENERATE := ./protocol ./protocol/test ./crypto ./crypto/merklearray ./crypto/merklesignature ./crypto/stateproof ./data/basics ./data/transactions ./data/stateproofmsg ./data/committee ./data/bookkeeping ./data/hashable ./agreement ./rpcs ./node ./ledger ./ledger/ledgercore ./ledger/store ./ledger/encoded ./stateproof ./data/account ./daemon/algod/api/spec/v2
default: build
diff --git a/catchup/ledgerFetcher.go b/catchup/ledgerFetcher.go
index 30c5ccb3b8..1294d8cd6e 100644
--- a/catchup/ledgerFetcher.go
+++ b/catchup/ledgerFetcher.go
@@ -30,6 +30,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/ledger/encoded"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/rpcs"
@@ -40,7 +41,7 @@ var errNoLedgerForRound = errors.New("no ledger available for given round")
const (
// maxCatchpointFileChunkSize is a rough estimate for the worst-case scenario we're going to have of all the accounts data per a single catchpoint file chunk and one account with max resources.
- maxCatchpointFileChunkSize = ledger.BalancesPerCatchpointFileChunk*(ledger.MaxEncodedBaseAccountDataSize+ledger.MaxEncodedKVDataSize) + ledger.ResourcesPerCatchpointFileChunk*ledger.MaxEncodedBaseResourceDataSize
+ maxCatchpointFileChunkSize = ledger.BalancesPerCatchpointFileChunk*(ledger.MaxEncodedBaseAccountDataSize+encoded.MaxEncodedKVDataSize) + ledger.ResourcesPerCatchpointFileChunk*ledger.MaxEncodedBaseResourceDataSize
// defaultMinCatchpointFileDownloadBytesPerSecond defines the worst-case scenario download speed we expect to get while downloading a catchpoint file
defaultMinCatchpointFileDownloadBytesPerSecond = 20 * 1024
// catchpointFileStreamReadSize defines the number of bytes we would attempt to read at each iteration from the incoming http data stream
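For orientation, here is a back-of-the-envelope sketch (not part of the patch) of the worst-case bound computed above. The per-record limits mirror constants visible elsewhere in this change; the value assumed for BalancesPerCatchpointFileChunk is illustrative only, since that constant is defined outside this diff.

package main

import "fmt"

func main() {
	const (
		balancesPerChunk        = 512     // assumption for illustration; BalancesPerCatchpointFileChunk is defined outside this diff
		maxBaseAccountDataSize  = 350     // ledger.MaxEncodedBaseAccountDataSize (see ledger/acctdeltas.go below)
		maxKVDataSize           = 33000   // encoded.MaxEncodedKVDataSize
		resourcesPerChunk       = 100_000 // ledger.ResourcesPerCatchpointFileChunk
		maxBaseResourceDataSize = 20000   // ledger.MaxEncodedBaseResourceDataSize
	)
	// Mirrors the maxCatchpointFileChunkSize formula above.
	bound := balancesPerChunk*(maxBaseAccountDataSize+maxKVDataSize) + resourcesPerChunk*maxBaseResourceDataSize
	fmt.Printf("worst-case chunk: %d bytes (~%.2f GiB)\n", bound, float64(bound)/(1<<30))
}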
diff --git a/ledger/accountdb.go b/ledger/acctdeltas.go
similarity index 67%
rename from ledger/accountdb.go
rename to ledger/acctdeltas.go
index c82a4e5ee8..c6e742d1fc 100644
--- a/ledger/accountdb.go
+++ b/ledger/acctdeltas.go
@@ -20,14 +20,11 @@ import (
"bytes"
"context"
"database/sql"
- "errors"
"fmt"
- "math"
-
- "github.com/algorand/msgp/msgp"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/ledger/encoded"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/ledger/store"
"github.com/algorand/go-algorand/protocol"
@@ -103,7 +100,7 @@ const MaxEncodedBaseAccountDataSize = 350
const MaxEncodedBaseResourceDataSize = 20000
// prepareNormalizedBalancesV5 converts an array of encodedBalanceRecordV5 into an equal size array of normalizedAccountBalances.
-func prepareNormalizedBalancesV5(bals []encodedBalanceRecordV5, proto config.ConsensusParams) (normalizedAccountBalances []store.NormalizedAccountBalance, err error) {
+func prepareNormalizedBalancesV5(bals []encoded.BalanceRecordV5, proto config.ConsensusParams) (normalizedAccountBalances []store.NormalizedAccountBalance, err error) {
normalizedAccountBalances = make([]store.NormalizedAccountBalance, len(bals))
for i, balance := range bals {
normalizedAccountBalances[i].Address = balance.Address
@@ -141,8 +138,8 @@ func prepareNormalizedBalancesV5(bals []encodedBalanceRecordV5, proto config.Con
return
}
-// prepareNormalizedBalancesV6 converts an array of encodedBalanceRecordV6 into an equal size array of normalizedAccountBalances.
-func prepareNormalizedBalancesV6(bals []encodedBalanceRecordV6, proto config.ConsensusParams) (normalizedAccountBalances []store.NormalizedAccountBalance, err error) {
+// prepareNormalizedBalancesV6 converts an array of encoded.BalanceRecordV6 into an equal size array of normalizedAccountBalances.
+func prepareNormalizedBalancesV6(bals []encoded.BalanceRecordV6, proto config.ConsensusParams) (normalizedAccountBalances []store.NormalizedAccountBalance, err error) {
normalizedAccountBalances = make([]store.NormalizedAccountBalance, len(bals))
for i, balance := range bals {
normalizedAccountBalances[i].Address = balance.Address
@@ -488,7 +485,8 @@ func (a *compactAccountDeltas) accountsLoadOld(tx *sql.Tx) (err error) {
case sql.ErrNoRows:
// we don't have that account, just return an empty record.
a.updateOld(idx, store.PersistedAccountData{Addr: addr})
- err = nil
+ // Note: `err` is shadowed in this case, so its value is simply ignored.
+ // This behaviour is equivalent to the previous explicit `err = nil`.
default:
// unexpected error - let the caller know that we couldn't complete the operation.
return err
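A minimal standalone sketch (names are illustrative, not the actual accountsLoadOld code) of the shadowing behaviour the new comment documents: assigning nil to a shadowed err cannot affect the function's named return value, which is why the explicit reset could be dropped.

package main

import (
	"database/sql"
	"fmt"
)

func load() (err error) { // named return value, stays nil unless explicitly set
	for i := 0; i < 1; i++ {
		err := sql.ErrNoRows // := declares a new err that shadows the named return
		switch err {
		case sql.ErrNoRows:
			// Nothing to do: the outer err is still nil, so clearing the inner
			// err (the old `err = nil`) had no observable effect.
		default:
			return err // unexpected errors are propagated explicitly
		}
	}
	return // returns the outer, untouched err
}

func main() {
	fmt.Println(load()) // <nil>
}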
@@ -1062,601 +1060,3 @@ func onlineAccountsNewRoundImpl(
return
}
-
-// catchpointAccountResourceCounter keeps track of the resources processed for the current account
-type catchpointAccountResourceCounter struct {
- totalAppParams uint64
- totalAppLocalStates uint64
- totalAssetParams uint64
- totalAssets uint64
-}
-
-// encodedAccountsBatchIter allows us to iterate over the accounts data stored in the accountbase table.
-type encodedAccountsBatchIter struct {
- accountsRows *sql.Rows
- resourcesRows *sql.Rows
- nextBaseRow pendingBaseRow
- nextResourceRow pendingResourceRow
- acctResCnt catchpointAccountResourceCounter
-}
-
-// Next returns an array containing the account data, in the same way it appear in the database
-// returning accountCount accounts data at a time.
-func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, tx *sql.Tx, accountCount int, resourceCount int) (bals []encodedBalanceRecordV6, numAccountsProcessed uint64, err error) {
- if iterator.accountsRows == nil {
- iterator.accountsRows, err = tx.QueryContext(ctx, "SELECT rowid, address, data FROM accountbase ORDER BY rowid")
- if err != nil {
- return
- }
- }
- if iterator.resourcesRows == nil {
- iterator.resourcesRows, err = tx.QueryContext(ctx, "SELECT addrid, aidx, data FROM resources ORDER BY addrid, aidx")
- if err != nil {
- return
- }
- }
-
- // gather up to accountCount encoded accounts.
- bals = make([]encodedBalanceRecordV6, 0, accountCount)
- var encodedRecord encodedBalanceRecordV6
- var baseAcct store.BaseAccountData
- var numAcct int
- baseCb := func(addr basics.Address, rowid int64, accountData *store.BaseAccountData, encodedAccountData []byte) (err error) {
- encodedRecord = encodedBalanceRecordV6{Address: addr, AccountData: encodedAccountData}
- baseAcct = *accountData
- numAcct++
- return nil
- }
-
- var totalResources int
-
- // emptyCount := 0
- resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *store.ResourcesData, encodedResourceData []byte, lastResource bool) error {
-
- emptyBaseAcct := baseAcct.TotalAppParams == 0 && baseAcct.TotalAppLocalStates == 0 && baseAcct.TotalAssetParams == 0 && baseAcct.TotalAssets == 0
- if !emptyBaseAcct && resData != nil {
- if encodedRecord.Resources == nil {
- encodedRecord.Resources = make(map[uint64]msgp.Raw)
- }
- encodedRecord.Resources[uint64(cidx)] = encodedResourceData
- if resData.IsApp() && resData.IsOwning() {
- iterator.acctResCnt.totalAppParams++
- }
- if resData.IsApp() && resData.IsHolding() {
- iterator.acctResCnt.totalAppLocalStates++
- }
-
- if resData.IsAsset() && resData.IsOwning() {
- iterator.acctResCnt.totalAssetParams++
- }
- if resData.IsAsset() && resData.IsHolding() {
- iterator.acctResCnt.totalAssets++
- }
- totalResources++
- }
-
- if baseAcct.TotalAppParams == iterator.acctResCnt.totalAppParams &&
- baseAcct.TotalAppLocalStates == iterator.acctResCnt.totalAppLocalStates &&
- baseAcct.TotalAssetParams == iterator.acctResCnt.totalAssetParams &&
- baseAcct.TotalAssets == iterator.acctResCnt.totalAssets {
-
- encodedRecord.ExpectingMoreEntries = false
- bals = append(bals, encodedRecord)
- numAccountsProcessed++
-
- iterator.acctResCnt = catchpointAccountResourceCounter{}
-
- return nil
- }
-
- // max resources per chunk reached, stop iterating.
- if lastResource {
- encodedRecord.ExpectingMoreEntries = true
- bals = append(bals, encodedRecord)
- encodedRecord.Resources = nil
- }
-
- return nil
- }
-
- _, iterator.nextBaseRow, iterator.nextResourceRow, err = processAllBaseAccountRecords(
- iterator.accountsRows, iterator.resourcesRows,
- baseCb, resCb,
- iterator.nextBaseRow, iterator.nextResourceRow, accountCount, resourceCount,
- )
- if err != nil {
- iterator.Close()
- return
- }
-
- if len(bals) == accountCount || totalResources == resourceCount {
- // we're done with this iteration.
- return
- }
-
- err = iterator.accountsRows.Err()
- if err != nil {
- iterator.Close()
- return
- }
- // Do not Close() the iterator here. It is the caller's responsibility to
- // do so, signalled by the return of an empty chunk. If we Close() here, the
- // next call to Next() will start all over!
- return
-}
-
-// Close shuts down the encodedAccountsBatchIter, releasing database resources.
-func (iterator *encodedAccountsBatchIter) Close() {
- if iterator.accountsRows != nil {
- iterator.accountsRows.Close()
- iterator.accountsRows = nil
- }
- if iterator.resourcesRows != nil {
- iterator.resourcesRows.Close()
- iterator.resourcesRows = nil
- }
-}
-
-// orderedAccountsIterStep is used by orderedAccountsIter to define the current step
-//
-//msgp:ignore orderedAccountsIterStep
-type orderedAccountsIterStep int
-
-const (
- // startup step
- oaiStepStartup = orderedAccountsIterStep(0)
- // delete old ordering table if we have any leftover from previous invocation
- oaiStepDeleteOldOrderingTable = orderedAccountsIterStep(0)
- // create new ordering table
- oaiStepCreateOrderingTable = orderedAccountsIterStep(1)
- // query the existing accounts
- oaiStepQueryAccounts = orderedAccountsIterStep(2)
- // iterate over the existing accounts and insert their hash & address into the staging ordering table
- oaiStepInsertAccountData = orderedAccountsIterStep(3)
- // create an index on the ordering table so that we can efficiently scan it.
- oaiStepCreateOrderingAccountIndex = orderedAccountsIterStep(4)
- // query the ordering table
- oaiStepSelectFromOrderedTable = orderedAccountsIterStep(5)
- // iterate over the ordering table
- oaiStepIterateOverOrderedTable = orderedAccountsIterStep(6)
- // cleanup and delete ordering table
- oaiStepShutdown = orderedAccountsIterStep(7)
- // do nothing as we're done.
- oaiStepDone = orderedAccountsIterStep(8)
-)
-
-// orderedAccountsIter allows us to iterate over the accounts addresses in the order of the account hashes.
-type orderedAccountsIter struct {
- step orderedAccountsIterStep
- accountBaseRows *sql.Rows
- hashesRows *sql.Rows
- resourcesRows *sql.Rows
- tx *sql.Tx
- pendingBaseRow pendingBaseRow
- pendingResourceRow pendingResourceRow
- accountCount int
- insertStmt *sql.Stmt
-}
-
-// makeOrderedAccountsIter creates an ordered account iterator. Note that due to implementation reasons,
-// only a single iterator can be active at a time.
-func makeOrderedAccountsIter(tx *sql.Tx, accountCount int) *orderedAccountsIter {
- return &orderedAccountsIter{
- tx: tx,
- accountCount: accountCount,
- step: oaiStepStartup,
- }
-}
-
-type pendingBaseRow struct {
- addr basics.Address
- rowid int64
- accountData *store.BaseAccountData
- encodedAccountData []byte
-}
-
-type pendingResourceRow struct {
- addrid int64
- aidx basics.CreatableIndex
- buf []byte
-}
-
-func processAllResources(
- resRows *sql.Rows,
- addr basics.Address, accountData *store.BaseAccountData, acctRowid int64, pr pendingResourceRow, resourceCount int,
- callback func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *store.ResourcesData, encodedResourceData []byte, lastResource bool) error,
-) (pendingResourceRow, int, error) {
- var err error
- count := 0
-
- // Declare variabled outside of the loop to prevent allocations per iteration.
- // At least resData is resolved as "escaped" because of passing it by a pointer to protocol.Decode()
- var buf []byte
- var addrid int64
- var aidx basics.CreatableIndex
- var resData store.ResourcesData
- for {
- if pr.addrid != 0 {
- // some accounts may not have resources, consider the following case:
- // acct 1 and 3 has resources, account 2 does not
- // in this case addrid = 3 after processing resources from 1, but acctRowid = 2
- // and we need to skip accounts without resources
- if pr.addrid > acctRowid {
- err = callback(addr, 0, nil, nil, false)
- return pr, count, err
- }
- if pr.addrid < acctRowid {
- err = fmt.Errorf("resource table entries mismatches accountbase table entries : reached addrid %d while expecting resource for %d", pr.addrid, acctRowid)
- return pendingResourceRow{}, count, err
- }
- addrid = pr.addrid
- buf = pr.buf
- aidx = pr.aidx
- pr = pendingResourceRow{}
- } else {
- if !resRows.Next() {
- err = callback(addr, 0, nil, nil, false)
- if err != nil {
- return pendingResourceRow{}, count, err
- }
- break
- }
- err = resRows.Scan(&addrid, &aidx, &buf)
- if err != nil {
- return pendingResourceRow{}, count, err
- }
- if addrid < acctRowid {
- err = fmt.Errorf("resource table entries mismatches accountbase table entries : reached addrid %d while expecting resource for %d", addrid, acctRowid)
- return pendingResourceRow{}, count, err
- } else if addrid > acctRowid {
- err = callback(addr, 0, nil, nil, false)
- return pendingResourceRow{addrid, aidx, buf}, count, err
- }
- }
- resData = store.ResourcesData{}
- err = protocol.Decode(buf, &resData)
- if err != nil {
- return pendingResourceRow{}, count, err
- }
- count++
- if resourceCount > 0 && count == resourceCount {
- // last resource to be included in chunk
- err := callback(addr, aidx, &resData, buf, true)
- return pendingResourceRow{}, count, err
- }
- err = callback(addr, aidx, &resData, buf, false)
- if err != nil {
- return pendingResourceRow{}, count, err
- }
- }
- return pendingResourceRow{}, count, nil
-}
-
-func processAllBaseAccountRecords(
- baseRows *sql.Rows,
- resRows *sql.Rows,
- baseCb func(addr basics.Address, rowid int64, accountData *store.BaseAccountData, encodedAccountData []byte) error,
- resCb func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *store.ResourcesData, encodedResourceData []byte, lastResource bool) error,
- pendingBase pendingBaseRow, pendingResource pendingResourceRow, accountCount int, resourceCount int,
-) (int, pendingBaseRow, pendingResourceRow, error) {
- var addr basics.Address
- var prevAddr basics.Address
- var err error
- count := 0
-
- var accountData store.BaseAccountData
- var addrbuf []byte
- var buf []byte
- var rowid int64
- for {
- if pendingBase.rowid != 0 {
- addr = pendingBase.addr
- rowid = pendingBase.rowid
- accountData = *pendingBase.accountData
- buf = pendingBase.encodedAccountData
- pendingBase = pendingBaseRow{}
- } else {
- if !baseRows.Next() {
- break
- }
-
- err = baseRows.Scan(&rowid, &addrbuf, &buf)
- if err != nil {
- return 0, pendingBaseRow{}, pendingResourceRow{}, err
- }
-
- if len(addrbuf) != len(addr) {
- err = fmt.Errorf("account DB address length mismatch: %d != %d", len(addrbuf), len(addr))
- return 0, pendingBaseRow{}, pendingResourceRow{}, err
- }
-
- copy(addr[:], addrbuf)
-
- accountData = store.BaseAccountData{}
- err = protocol.Decode(buf, &accountData)
- if err != nil {
- return 0, pendingBaseRow{}, pendingResourceRow{}, err
- }
- }
-
- err = baseCb(addr, rowid, &accountData, buf)
- if err != nil {
- return 0, pendingBaseRow{}, pendingResourceRow{}, err
- }
-
- var resourcesProcessed int
- pendingResource, resourcesProcessed, err = processAllResources(resRows, addr, &accountData, rowid, pendingResource, resourceCount, resCb)
- if err != nil {
- err = fmt.Errorf("failed to gather resources for account %v, addrid %d, prev address %v : %w", addr, rowid, prevAddr, err)
- return 0, pendingBaseRow{}, pendingResourceRow{}, err
- }
-
- if resourcesProcessed == resourceCount {
- // we're done with this iteration.
- pendingBase := pendingBaseRow{
- addr: addr,
- rowid: rowid,
- accountData: &accountData,
- encodedAccountData: buf,
- }
- return count, pendingBase, pendingResource, nil
- }
- resourceCount -= resourcesProcessed
-
- count++
- if accountCount > 0 && count == accountCount {
- // we're done with this iteration.
- return count, pendingBaseRow{}, pendingResource, nil
- }
- prevAddr = addr
- }
-
- return count, pendingBaseRow{}, pendingResource, nil
-}
-
-// accountAddressHash is used by Next to return a single account address and the associated hash.
-type accountAddressHash struct {
- addrid int64
- digest []byte
-}
-
-// Next returns an array containing the account address and hash
-// the Next function works in multiple processing stages, where it first processes the current accounts and order them
-// followed by returning the ordered accounts. In the first phase, it would return empty accountAddressHash array
-// and sets the processedRecords to the number of accounts that were processed. On the second phase, the acct
-// would contain valid data ( and optionally the account data as well, if was asked in makeOrderedAccountsIter) and
-// the processedRecords would be zero. If err is sql.ErrNoRows it means that the iterator have completed it's work and no further
-// accounts exists. Otherwise, the caller is expected to keep calling "Next" to retrieve the next set of accounts
-// ( or let the Next function make some progress toward that goal )
-func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []accountAddressHash, processedRecords int, err error) {
- if iterator.step == oaiStepDeleteOldOrderingTable {
- // although we're going to delete this table anyway when completing the iterator execution, we'll try to
- // clean up any intermediate table.
- _, err = iterator.tx.ExecContext(ctx, "DROP TABLE IF EXISTS accountsiteratorhashes")
- if err != nil {
- return
- }
- iterator.step = oaiStepCreateOrderingTable
- return
- }
- if iterator.step == oaiStepCreateOrderingTable {
- // create the temporary table
- _, err = iterator.tx.ExecContext(ctx, "CREATE TABLE accountsiteratorhashes(addrid INTEGER, hash blob)")
- if err != nil {
- return
- }
- iterator.step = oaiStepQueryAccounts
- return
- }
- if iterator.step == oaiStepQueryAccounts {
- // iterate over the existing accounts
- iterator.accountBaseRows, err = iterator.tx.QueryContext(ctx, "SELECT rowid, address, data FROM accountbase ORDER BY rowid")
- if err != nil {
- return
- }
- // iterate over the existing resources
- iterator.resourcesRows, err = iterator.tx.QueryContext(ctx, "SELECT addrid, aidx, data FROM resources ORDER BY addrid, aidx")
- if err != nil {
- return
- }
- // prepare the insert statement into the temporary table
- iterator.insertStmt, err = iterator.tx.PrepareContext(ctx, "INSERT INTO accountsiteratorhashes(addrid, hash) VALUES(?, ?)")
- if err != nil {
- return
- }
- iterator.step = oaiStepInsertAccountData
- return
- }
- if iterator.step == oaiStepInsertAccountData {
- var lastAddrID int64
- baseCb := func(addr basics.Address, rowid int64, accountData *store.BaseAccountData, encodedAccountData []byte) (err error) {
- hash := store.AccountHashBuilderV6(addr, accountData, encodedAccountData)
- _, err = iterator.insertStmt.ExecContext(ctx, rowid, hash)
- if err != nil {
- return
- }
- lastAddrID = rowid
- return nil
- }
-
- resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *store.ResourcesData, encodedResourceData []byte, lastResource bool) error {
- if resData != nil {
- hash, err := store.ResourcesHashBuilderV6(resData, addr, cidx, resData.UpdateRound, encodedResourceData)
- if err != nil {
- return err
- }
- _, err = iterator.insertStmt.ExecContext(ctx, lastAddrID, hash)
- return err
- }
- return nil
- }
-
- count := 0
- count, iterator.pendingBaseRow, iterator.pendingResourceRow, err = processAllBaseAccountRecords(
- iterator.accountBaseRows, iterator.resourcesRows,
- baseCb, resCb,
- iterator.pendingBaseRow, iterator.pendingResourceRow, iterator.accountCount, math.MaxInt,
- )
- if err != nil {
- iterator.Close(ctx)
- return
- }
-
- if count == iterator.accountCount {
- // we're done with this iteration.
- processedRecords = count
- return
- }
-
- // make sure the resource iterator has no more entries.
- if iterator.resourcesRows.Next() {
- iterator.Close(ctx)
- err = errors.New("resource table entries exceed the ones specified in the accountbase table")
- return
- }
-
- processedRecords = count
- iterator.accountBaseRows.Close()
- iterator.accountBaseRows = nil
- iterator.resourcesRows.Close()
- iterator.resourcesRows = nil
- iterator.insertStmt.Close()
- iterator.insertStmt = nil
- iterator.step = oaiStepCreateOrderingAccountIndex
- return
- }
- if iterator.step == oaiStepCreateOrderingAccountIndex {
- // create an index. It shown that even when we're making a single select statement in step 5, it would be better to have this index vs. not having it at all.
- // note that this index is using the rowid of the accountsiteratorhashes table.
- _, err = iterator.tx.ExecContext(ctx, "CREATE INDEX accountsiteratorhashesidx ON accountsiteratorhashes(hash)")
- if err != nil {
- iterator.Close(ctx)
- return
- }
- iterator.step = oaiStepSelectFromOrderedTable
- return
- }
- if iterator.step == oaiStepSelectFromOrderedTable {
- // select the data from the ordered table
- iterator.hashesRows, err = iterator.tx.QueryContext(ctx, "SELECT addrid, hash FROM accountsiteratorhashes ORDER BY hash")
-
- if err != nil {
- iterator.Close(ctx)
- return
- }
- iterator.step = oaiStepIterateOverOrderedTable
- return
- }
-
- if iterator.step == oaiStepIterateOverOrderedTable {
- acct = make([]accountAddressHash, iterator.accountCount)
- acctIdx := 0
- for iterator.hashesRows.Next() {
- err = iterator.hashesRows.Scan(&(acct[acctIdx].addrid), &(acct[acctIdx].digest))
- if err != nil {
- iterator.Close(ctx)
- return
- }
- acctIdx++
- if acctIdx == iterator.accountCount {
- // we're done with this iteration.
- return
- }
- }
- acct = acct[:acctIdx]
- iterator.step = oaiStepShutdown
- iterator.hashesRows.Close()
- iterator.hashesRows = nil
- return
- }
- if iterator.step == oaiStepShutdown {
- err = iterator.Close(ctx)
- if err != nil {
- return
- }
- iterator.step = oaiStepDone
- // fallthrough
- }
- return nil, 0, sql.ErrNoRows
-}
-
-// Close shuts down the orderedAccountsBuilderIter, releasing database resources.
-func (iterator *orderedAccountsIter) Close(ctx context.Context) (err error) {
- if iterator.accountBaseRows != nil {
- iterator.accountBaseRows.Close()
- iterator.accountBaseRows = nil
- }
- if iterator.resourcesRows != nil {
- iterator.resourcesRows.Close()
- iterator.resourcesRows = nil
- }
- if iterator.hashesRows != nil {
- iterator.hashesRows.Close()
- iterator.hashesRows = nil
- }
- if iterator.insertStmt != nil {
- iterator.insertStmt.Close()
- iterator.insertStmt = nil
- }
- _, err = iterator.tx.ExecContext(ctx, "DROP TABLE IF EXISTS accountsiteratorhashes")
- return
-}
-
-// catchpointPendingHashesIterator allows us to iterate over the hashes in the catchpointpendinghashes table in their order.
-type catchpointPendingHashesIterator struct {
- hashCount int
- tx *sql.Tx
- rows *sql.Rows
-}
-
-// makeCatchpointPendingHashesIterator create a pending hashes iterator that retrieves the hashes in the catchpointpendinghashes table.
-func makeCatchpointPendingHashesIterator(hashCount int, tx *sql.Tx) *catchpointPendingHashesIterator {
- return &catchpointPendingHashesIterator{
- hashCount: hashCount,
- tx: tx,
- }
-}
-
-// Next returns an array containing the hashes, returning HashCount hashes at a time.
-func (iterator *catchpointPendingHashesIterator) Next(ctx context.Context) (hashes [][]byte, err error) {
- if iterator.rows == nil {
- iterator.rows, err = iterator.tx.QueryContext(ctx, "SELECT data FROM catchpointpendinghashes ORDER BY data")
- if err != nil {
- return
- }
- }
-
- // gather up to accountCount encoded accounts.
- hashes = make([][]byte, iterator.hashCount)
- hashIdx := 0
- for iterator.rows.Next() {
- err = iterator.rows.Scan(&hashes[hashIdx])
- if err != nil {
- iterator.Close()
- return
- }
-
- hashIdx++
- if hashIdx == iterator.hashCount {
- // we're done with this iteration.
- return
- }
- }
- hashes = hashes[:hashIdx]
- err = iterator.rows.Err()
- if err != nil {
- iterator.Close()
- return
- }
- // we just finished reading the table.
- iterator.Close()
- return
-}
-
-// Close shuts down the catchpointPendingHashesIterator, releasing database resources.
-func (iterator *catchpointPendingHashesIterator) Close() {
- if iterator.rows != nil {
- iterator.rows.Close()
- iterator.rows = nil
- }
-}
diff --git a/ledger/accountdb_test.go b/ledger/acctdeltas_test.go
similarity index 99%
rename from ledger/accountdb_test.go
rename to ledger/acctdeltas_test.go
index c290ed827c..6bba1a9ff5 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/acctdeltas_test.go
@@ -33,6 +33,7 @@ import (
"time"
"github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/ledger/encoded"
"github.com/stretchr/testify/require"
@@ -905,9 +906,9 @@ func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder boo
last64KAccountCreationTime = time.Duration(0)
}
var chunk catchpointFileChunkV6
- chunk.Balances = make([]encodedBalanceRecordV6, chunkSize)
+ chunk.Balances = make([]encoded.BalanceRecordV6, chunkSize)
for i := uint64(0); i < chunkSize; i++ {
- var randomAccount encodedBalanceRecordV6
+ var randomAccount encoded.BalanceRecordV6
accountData := store.BaseAccountData{RewardsBase: accountsLoaded + i}
accountData.MicroAlgos.Raw = crypto.RandUint63()
randomAccount.AccountData = protocol.Encode(&accountData)
diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go
index 2867b285c7..bfc7d2e43f 100644
--- a/ledger/catchpointtracker.go
+++ b/ledger/catchpointtracker.go
@@ -1360,7 +1360,7 @@ func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx *sql.Tx, r
if rootHash.IsZero() {
ct.log.Infof("initializeHashes rebuilding merkle trie for round %d", rnd)
- accountBuilderIt := makeOrderedAccountsIter(tx, trieRebuildAccountChunkSize)
+ accountBuilderIt := store.MakeOrderedAccountsIter(tx, trieRebuildAccountChunkSize)
defer accountBuilderIt.Close(ctx)
startTrieBuildTime := time.Now()
trieHashCount := 0
@@ -1380,18 +1380,18 @@ func (ct *catchpointTracker) initializeHashes(ctx context.Context, tx *sql.Tx, r
trieHashCount += len(accts)
pendingTrieHashes += len(accts)
for _, acct := range accts {
- added, err := trie.Add(acct.digest)
+ added, err := trie.Add(acct.Digest)
if err != nil {
return fmt.Errorf("initializeHashes was unable to add acct to trie: %v", err)
}
if !added {
// we need to translate the "addrid" into actual account address so that
// we can report the failure.
- addr, err := arw.LookupAccountAddressFromAddressID(ctx, acct.addrid)
+ addr, err := arw.LookupAccountAddressFromAddressID(ctx, acct.Addrid)
if err != nil {
- ct.log.Warnf("initializeHashes attempted to add duplicate acct hash '%s' to merkle trie for account id %d : %v", hex.EncodeToString(acct.digest), acct.addrid, err)
+ ct.log.Warnf("initializeHashes attempted to add duplicate acct hash '%s' to merkle trie for account id %d : %v", hex.EncodeToString(acct.Digest), acct.Addrid, err)
} else {
- ct.log.Warnf("initializeHashes attempted to add duplicate acct hash '%s' to merkle trie for account %v", hex.EncodeToString(acct.digest), addr)
+ ct.log.Warnf("initializeHashes attempted to add duplicate acct hash '%s' to merkle trie for account %v", hex.EncodeToString(acct.Digest), addr)
}
}
}
diff --git a/ledger/catchpointwriter.go b/ledger/catchpointwriter.go
index e204a8ae74..d120395361 100644
--- a/ledger/catchpointwriter.go
+++ b/ledger/catchpointwriter.go
@@ -25,9 +25,7 @@ import (
"os"
"path/filepath"
- "github.com/algorand/msgp/msgp"
-
- "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/ledger/encoded"
"github.com/algorand/go-algorand/ledger/store"
"github.com/algorand/go-algorand/protocol"
)
@@ -41,11 +39,6 @@ const (
// 100,000 resources * 20KB/resource => roughly max 2GB per chunk if all of them are max'ed out apps.
// In reality most entries are asset holdings, and they are very small.
ResourcesPerCatchpointFileChunk = 100_000
-
- // resourcesPerCatchpointFileChunkBackwardCompatible is the old value for ResourcesPerCatchpointFileChunk.
- // Size of a single resource entry was underestimated to 300 bytes that holds only for assets and not for apps.
- // It is safe to remove after April, 2023 since we are only supporting catchpoint that are 6 months old.
- resourcesPerCatchpointFileChunkBackwardCompatible = 300_000
)
// catchpointWriter is the struct managing the persistence of accounts data into the catchpoint file.
@@ -65,65 +58,28 @@ type catchpointWriter struct {
chunkNum uint64
writtenBytes int64
biggestChunkLen uint64
- accountsIterator encodedAccountsBatchIter
+ accountsIterator accountsBatchIter
maxResourcesPerChunk int
accountsDone bool
kvRows *sql.Rows
}
-type encodedBalanceRecordV5 struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
-
- Address basics.Address `codec:"pk,allocbound=crypto.DigestSize"`
- AccountData msgp.Raw `codec:"ad"` // encoding of basics.AccountData
+type accountsBatchIter interface {
+ Next(ctx context.Context, tx *sql.Tx, accountCount int, resourceCount int) ([]encoded.BalanceRecordV6, uint64, error)
+ Close()
}
type catchpointFileBalancesChunkV5 struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
- Balances []encodedBalanceRecordV5 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"`
-}
-
-// SortUint64 re-export this sort, which is implemented in basics, and being used by the msgp when
-// encoding the resources map below.
-type SortUint64 = basics.SortUint64
-
-type encodedBalanceRecordV6 struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
-
- Address basics.Address `codec:"a,allocbound=crypto.DigestSize"`
- AccountData msgp.Raw `codec:"b"` // encoding of baseAccountData
- Resources map[uint64]msgp.Raw `codec:"c,allocbound=resourcesPerCatchpointFileChunkBackwardCompatible"` // map of resourcesData
-
- // flag indicating whether there are more records for the same account coming up
- ExpectingMoreEntries bool `codec:"e"`
-}
-
-// Adjust these to be big enough for boxes, but not directly tied to box values.
-const (
- // For boxes: "bx:<8 bytes><64 byte name>"
- encodedKVRecordV6MaxKeyLength = 128
-
- // For boxes: MaxBoxSize
- encodedKVRecordV6MaxValueLength = 32768
-
- // MaxEncodedKVDataSize is the max size of serialized KV entry, checked with TestEncodedKVDataSize.
- // Exact value is 32906 that is 10 bytes more than 32768 + 128
- MaxEncodedKVDataSize = 33000
-)
-
-type encodedKVRecordV6 struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
-
- Key []byte `codec:"k,allocbound=encodedKVRecordV6MaxKeyLength"`
- Value []byte `codec:"v,allocbound=encodedKVRecordV6MaxValueLength"`
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+ Balances []encoded.BalanceRecordV5 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"`
}
type catchpointFileChunkV6 struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
- Balances []encodedBalanceRecordV6 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"`
+ Balances []encoded.BalanceRecordV6 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"`
numAccounts uint64
- KVs []encodedKVRecordV6 `codec:"kv,allocbound=BalancesPerCatchpointFileChunk"`
+ KVs []encoded.KVRecordV6 `codec:"kv,allocbound=BalancesPerCatchpointFileChunk"`
}
func (chunk catchpointFileChunkV6) empty() bool {
@@ -166,6 +122,7 @@ func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx, maxR
file: file,
compressor: compressor,
tar: tar,
+ accountsIterator: store.MakeEncodedAccoutsBatchIter(),
maxResourcesPerChunk: maxResourcesPerChunk,
}
return res, nil
@@ -330,7 +287,7 @@ func (cw *catchpointWriter) readDatabaseStep(ctx context.Context, tx *sql.Tx) er
cw.kvRows = rows
}
- kvrs := make([]encodedKVRecordV6, 0, BalancesPerCatchpointFileChunk)
+ kvrs := make([]encoded.KVRecordV6, 0, BalancesPerCatchpointFileChunk)
for cw.kvRows.Next() {
var k []byte
var v []byte
@@ -338,7 +295,7 @@ func (cw *catchpointWriter) readDatabaseStep(ctx context.Context, tx *sql.Tx) er
if err != nil {
return err
}
- kvrs = append(kvrs, encodedKVRecordV6{Key: k, Value: v})
+ kvrs = append(kvrs, encoded.KVRecordV6{Key: k, Value: v})
if len(kvrs) == BalancesPerCatchpointFileChunk {
break
}
diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go
index 0bdac4a2a1..0daedd0e37 100644
--- a/ledger/catchpointwriter_test.go
+++ b/ledger/catchpointwriter_test.go
@@ -24,7 +24,6 @@ import (
"database/sql"
"fmt"
"io"
- "math"
"os"
"path/filepath"
"strconv"
@@ -40,6 +39,7 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/txntest"
+ "github.com/algorand/go-algorand/ledger/encoded"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/ledger/store"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
@@ -71,19 +71,19 @@ func TestCatchpointFileBalancesChunkEncoding(t *testing.T) {
for i := uint64(0); i < numResources; i++ {
resources[i] = encodedResourceData
}
- balance := encodedBalanceRecordV6{
+ balance := encoded.BalanceRecordV6{
Address: ledgertesting.RandomAddress(),
AccountData: encodedBaseAD,
Resources: resources,
}
- balances := make([]encodedBalanceRecordV6, numChunkEntries)
- kv := encodedKVRecordV6{
- Key: make([]byte, encodedKVRecordV6MaxKeyLength),
- Value: make([]byte, encodedKVRecordV6MaxValueLength),
+ balances := make([]encoded.BalanceRecordV6, numChunkEntries)
+ kv := encoded.KVRecordV6{
+ Key: make([]byte, encoded.KVRecordV6MaxKeyLength),
+ Value: make([]byte, encoded.KVRecordV6MaxValueLength),
}
crypto.RandBytes(kv.Key[:])
crypto.RandBytes(kv.Value[:])
- kvs := make([]encodedKVRecordV6, numChunkEntries)
+ kvs := make([]encoded.KVRecordV6, numChunkEntries)
for i := 0; i < numChunkEntries; i++ {
balances[i] = balance
@@ -493,7 +493,7 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) {
require.NoError(t, err)
require.Zero(t, h)
- iter := makeOrderedAccountsIter(tx, trieRebuildAccountChunkSize)
+ iter := store.MakeOrderedAccountsIter(tx, trieRebuildAccountChunkSize)
defer iter.Close(ctx)
for {
accts, _, err := iter.Next(ctx)
@@ -507,7 +507,7 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) {
if len(accts) > 0 {
for _, acct := range accts {
- added, err := trie.Add(acct.digest)
+ added, err := trie.Add(acct.Digest)
require.NoError(t, err)
require.True(t, added)
}
@@ -873,37 +873,3 @@ func TestCatchpointAfterBoxTxns(t *testing.T) {
require.NoError(t, err)
require.Equal(t, strings.Repeat("f", 24), string(v))
}
-
-func TestEncodedKVRecordV6Allocbounds(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- for version, params := range config.Consensus {
- require.GreaterOrEqualf(t, uint64(encodedKVRecordV6MaxValueLength), params.MaxBoxSize, "Allocbound constant no longer valid as of consensus version %s", version)
- longestPossibleBoxName := string(make([]byte, params.MaxAppKeyLen))
- longestPossibleKey := logic.MakeBoxKey(basics.AppIndex(math.MaxUint64), longestPossibleBoxName)
- require.GreaterOrEqualf(t, encodedKVRecordV6MaxValueLength, len(longestPossibleKey), "Allocbound constant no longer valid as of consensus version %s", version)
- }
-}
-
-func TestEncodedKVDataSize(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- currentConsensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
-
- require.GreaterOrEqual(t, encodedKVRecordV6MaxKeyLength, currentConsensusParams.MaxAppKeyLen)
- require.GreaterOrEqual(t, uint64(encodedKVRecordV6MaxValueLength), currentConsensusParams.MaxBoxSize)
-
- kvEntry := encodedKVRecordV6{
- Key: make([]byte, encodedKVRecordV6MaxKeyLength),
- Value: make([]byte, encodedKVRecordV6MaxValueLength),
- }
-
- crypto.RandBytes(kvEntry.Key[:])
- crypto.RandBytes(kvEntry.Value[:])
-
- encoded := kvEntry.MarshalMsg(nil)
- require.GreaterOrEqual(t, MaxEncodedKVDataSize, len(encoded))
-
-}
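The two removed tests are not losing coverage so much as following the constants they exercise, which now live in the new encoded package (KVRecordV6MaxKeyLength, KVRecordV6MaxValueLength, MaxEncodedKVDataSize). A small hedged sketch of the same measurement against the relocated types; the 32906-byte figure is quoted from the removed comment (a 128-byte key plus a 32768-byte value plus msgpack framing), and the relocation of the tests themselves is assumed rather than shown in this hunk.

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/ledger/encoded"
)

func main() {
	// Build a maximal KV entry, mirroring the removed TestEncodedKVDataSize.
	kv := encoded.KVRecordV6{
		Key:   make([]byte, encoded.KVRecordV6MaxKeyLength),   // 128 bytes
		Value: make([]byte, encoded.KVRecordV6MaxValueLength), // 32768 bytes
	}
	enc := kv.MarshalMsg(nil)
	// The removed comment quotes 32906 bytes, which stays below MaxEncodedKVDataSize (33000).
	fmt.Println(len(enc), len(enc) <= encoded.MaxEncodedKVDataSize)
}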
diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go
index cbe18ba243..479a29a47c 100644
--- a/ledger/catchupaccessor.go
+++ b/ledger/catchupaccessor.go
@@ -31,6 +31,7 @@ import (
"github.com/algorand/go-algorand/crypto/merkletrie"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/encoded"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/ledger/store"
"github.com/algorand/go-algorand/ledger/store/blockdb"
@@ -97,7 +98,7 @@ type stagingWriter interface {
writeBalances(context.Context, []store.NormalizedAccountBalance) error
writeCreatables(context.Context, []store.NormalizedAccountBalance) error
writeHashes(context.Context, []store.NormalizedAccountBalance) error
- writeKVs(context.Context, []encodedKVRecordV6) error
+ writeKVs(context.Context, []encoded.KVRecordV6) error
isShared() bool
}
@@ -112,7 +113,7 @@ func (w *stagingWriterImpl) writeBalances(ctx context.Context, balances []store.
})
}
-func (w *stagingWriterImpl) writeKVs(ctx context.Context, kvrs []encodedKVRecordV6) error {
+func (w *stagingWriterImpl) writeKVs(ctx context.Context, kvrs []encoded.KVRecordV6) error {
return w.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
crw := store.NewCatchpointSQLReaderWriter(tx)
@@ -166,6 +167,14 @@ type catchpointCatchupAccessorImpl struct {
nextExpectedAccount basics.Address
}
+// catchpointAccountResourceCounter keeps track of the resources processed for the current account
+type catchpointAccountResourceCounter struct {
+ totalAppParams uint64
+ totalAppLocalStates uint64
+ totalAssetParams uint64
+ totalAssets uint64
+}
+
// CatchpointCatchupState is the state of the current catchpoint catchup process
type CatchpointCatchupState int32
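For context on how the relocated counter is consumed: an account whose resources spill across chunk boundaries is considered complete once these per-account counters match the totals recorded in its BaseAccountData, mirroring the check in the removed encodedAccountsBatchIter code above. A hedged in-package sketch (the helper name is illustrative and not part of this patch):

// accountComplete is an illustrative helper assumed to sit alongside the
// counter in package ledger; it restates the completeness check only.
func accountComplete(base store.BaseAccountData, cnt catchpointAccountResourceCounter) bool {
	return base.TotalAppParams == cnt.totalAppParams &&
		base.TotalAppLocalStates == cnt.totalAppLocalStates &&
		base.TotalAssetParams == cnt.totalAssetParams &&
		base.TotalAssets == cnt.totalAssets
}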
@@ -388,7 +397,7 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
var normalizedAccountBalances []store.NormalizedAccountBalance
var expectingMoreEntries []bool
- var chunkKVs []encodedKVRecordV6
+ var chunkKVs []encoded.KVRecordV6
switch progress.Version {
default:
@@ -659,7 +668,7 @@ func (c *catchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro
defer close(writerQueue)
err := rdb.Atomic(func(transactionCtx context.Context, tx *sql.Tx) (err error) {
- it := makeCatchpointPendingHashesIterator(trieRebuildAccountChunkSize, tx)
+ it := store.MakeCatchpointPendingHashesIterator(trieRebuildAccountChunkSize, tx)
var hashes [][]byte
for {
hashes, err = it.Next(transactionCtx)
diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go
index d79d636e3c..1d2b536139 100644
--- a/ledger/catchupaccessor_test.go
+++ b/ledger/catchupaccessor_test.go
@@ -32,6 +32,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger/encoded"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/ledger/store"
ledgertesting "github.com/algorand/go-algorand/ledger/testing"
@@ -57,9 +58,9 @@ func createTestingEncodedChunks(accountsCount uint64) (encodedAccountChunks [][]
last64KIndex = len(encodedAccountChunks)
}
var chunk catchpointFileChunkV6
- chunk.Balances = make([]encodedBalanceRecordV6, chunkSize)
+ chunk.Balances = make([]encoded.BalanceRecordV6, chunkSize)
for i := uint64(0); i < chunkSize; i++ {
- var randomAccount encodedBalanceRecordV6
+ var randomAccount encoded.BalanceRecordV6
accountData := store.BaseAccountData{}
accountData.MicroAlgos.Raw = crypto.RandUint63()
randomAccount.AccountData = protocol.Encode(&accountData)
@@ -406,8 +407,8 @@ func TestCatchupAccessorResourceCountMismatch(t *testing.T) {
require.NoError(t, err)
var balances catchpointFileChunkV6
- balances.Balances = make([]encodedBalanceRecordV6, 1)
- var randomAccount encodedBalanceRecordV6
+ balances.Balances = make([]encoded.BalanceRecordV6, 1)
+ var randomAccount encoded.BalanceRecordV6
accountData := store.BaseAccountData{}
accountData.MicroAlgos.Raw = crypto.RandUint63()
accountData.TotalAppParams = 1
@@ -435,7 +436,7 @@ func (w *testStagingWriter) writeCreatables(ctx context.Context, balances []stor
return nil
}
-func (w *testStagingWriter) writeKVs(ctx context.Context, kvrs []encodedKVRecordV6) error {
+func (w *testStagingWriter) writeKVs(ctx context.Context, kvrs []encoded.KVRecordV6) error {
return nil
}
@@ -485,8 +486,8 @@ func TestCatchupAccessorProcessStagingBalances(t *testing.T) {
return accountData
}
- encodedBalanceRecordFromBase := func(addr basics.Address, base store.BaseAccountData, resources map[uint64]msgp.Raw, more bool) encodedBalanceRecordV6 {
- ebr := encodedBalanceRecordV6{
+ encodedBalanceRecordFromBase := func(addr basics.Address, base store.BaseAccountData, resources map[uint64]msgp.Raw, more bool) encoded.BalanceRecordV6 {
+ ebr := encoded.BalanceRecordV6{
Address: addr,
AccountData: protocol.Encode(&base),
Resources: resources,
@@ -530,14 +531,14 @@ func TestCatchupAccessorProcessStagingBalances(t *testing.T) {
// make chunks
chunks := []catchpointFileChunkV6{
{
- Balances: []encodedBalanceRecordV6{
+ Balances: []encoded.BalanceRecordV6{
encodedBalanceRecordFromBase(ledgertesting.RandomAddress(), acctA, nil, false),
encodedBalanceRecordFromBase(ledgertesting.RandomAddress(), acctB, nil, false),
encodedBalanceRecordFromBase(addrX, acctX, acctXRes1, true),
},
},
{
- Balances: []encodedBalanceRecordV6{
+ Balances: []encoded.BalanceRecordV6{
encodedBalanceRecordFromBase(addrX, acctX, acctXRes2, false),
encodedBalanceRecordFromBase(ledgertesting.RandomAddress(), acctC, nil, false),
encodedBalanceRecordFromBase(ledgertesting.RandomAddress(), acctD, nil, false),
diff --git a/ledger/encoded/msgp_gen.go b/ledger/encoded/msgp_gen.go
new file mode 100644
index 0000000000..189a6b73bf
--- /dev/null
+++ b/ledger/encoded/msgp_gen.go
@@ -0,0 +1,587 @@
+package encoded
+
+// Code generated by github.com/algorand/msgp DO NOT EDIT.
+
+import (
+ "sort"
+
+ "github.com/algorand/msgp/msgp"
+)
+
+// The following msgp objects are implemented in this file:
+// BalanceRecordV5
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// BalanceRecordV6
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// KVRecordV6
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+
+// MarshalMsg implements msgp.Marshaler
+func (z *BalanceRecordV5) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(2)
+ var zb0001Mask uint8 /* 3 bits */
+ if (*z).AccountData.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if (*z).Address.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "ad"
+ o = append(o, 0xa2, 0x61, 0x64)
+ o = (*z).AccountData.MarshalMsg(o)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "pk"
+ o = append(o, 0xa2, 0x70, 0x6b)
+ o = (*z).Address.MarshalMsg(o)
+ }
+ }
+ return
+}
+
+func (_ *BalanceRecordV5) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*BalanceRecordV5)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *BalanceRecordV5) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Address.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Address")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).AccountData.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "AccountData")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = BalanceRecordV5{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "pk":
+ bts, err = (*z).Address.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Address")
+ return
+ }
+ case "ad":
+ bts, err = (*z).AccountData.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AccountData")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *BalanceRecordV5) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*BalanceRecordV5)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *BalanceRecordV5) Msgsize() (s int) {
+ s = 1 + 3 + (*z).Address.Msgsize() + 3 + (*z).AccountData.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *BalanceRecordV5) MsgIsZero() bool {
+ return ((*z).Address.MsgIsZero()) && ((*z).AccountData.MsgIsZero())
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *BalanceRecordV6) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0003Len := uint32(4)
+ var zb0003Mask uint8 /* 5 bits */
+ if (*z).Address.MsgIsZero() {
+ zb0003Len--
+ zb0003Mask |= 0x2
+ }
+ if (*z).AccountData.MsgIsZero() {
+ zb0003Len--
+ zb0003Mask |= 0x4
+ }
+ if len((*z).Resources) == 0 {
+ zb0003Len--
+ zb0003Mask |= 0x8
+ }
+ if (*z).ExpectingMoreEntries == false {
+ zb0003Len--
+ zb0003Mask |= 0x10
+ }
+ // variable map header, size zb0003Len
+ o = append(o, 0x80|uint8(zb0003Len))
+ if zb0003Len != 0 {
+ if (zb0003Mask & 0x2) == 0 { // if not empty
+ // string "a"
+ o = append(o, 0xa1, 0x61)
+ o = (*z).Address.MarshalMsg(o)
+ }
+ if (zb0003Mask & 0x4) == 0 { // if not empty
+ // string "b"
+ o = append(o, 0xa1, 0x62)
+ o = (*z).AccountData.MarshalMsg(o)
+ }
+ if (zb0003Mask & 0x8) == 0 { // if not empty
+ // string "c"
+ o = append(o, 0xa1, 0x63)
+ if (*z).Resources == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendMapHeader(o, uint32(len((*z).Resources)))
+ }
+ zb0001_keys := make([]uint64, 0, len((*z).Resources))
+ for zb0001 := range (*z).Resources {
+ zb0001_keys = append(zb0001_keys, zb0001)
+ }
+ sort.Sort(SortUint64(zb0001_keys))
+ for _, zb0001 := range zb0001_keys {
+ zb0002 := (*z).Resources[zb0001]
+ _ = zb0002
+ o = msgp.AppendUint64(o, zb0001)
+ o = zb0002.MarshalMsg(o)
+ }
+ }
+ if (zb0003Mask & 0x10) == 0 { // if not empty
+ // string "e"
+ o = append(o, 0xa1, 0x65)
+ o = msgp.AppendBool(o, (*z).ExpectingMoreEntries)
+ }
+ }
+ return
+}
+
+func (_ *BalanceRecordV6) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*BalanceRecordV6)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *BalanceRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).Address.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Address")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = (*z).AccountData.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "AccountData")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Resources")
+ return
+ }
+ if zb0005 > resourcesPerCatchpointFileChunkBackwardCompatible {
+ err = msgp.ErrOverflow(uint64(zb0005), uint64(resourcesPerCatchpointFileChunkBackwardCompatible))
+ err = msgp.WrapError(err, "struct-from-array", "Resources")
+ return
+ }
+ if zb0006 {
+ (*z).Resources = nil
+ } else if (*z).Resources == nil {
+ (*z).Resources = make(map[uint64]msgp.Raw, zb0005)
+ }
+ for zb0005 > 0 {
+ var zb0001 uint64
+ var zb0002 msgp.Raw
+ zb0005--
+ zb0001, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Resources")
+ return
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Resources", zb0001)
+ return
+ }
+ (*z).Resources[zb0001] = zb0002
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).ExpectingMoreEntries, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpectingMoreEntries")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = BalanceRecordV6{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "a":
+ bts, err = (*z).Address.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Address")
+ return
+ }
+ case "b":
+ bts, err = (*z).AccountData.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AccountData")
+ return
+ }
+ case "c":
+ var zb0007 int
+ var zb0008 bool
+ zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Resources")
+ return
+ }
+ if zb0007 > resourcesPerCatchpointFileChunkBackwardCompatible {
+ err = msgp.ErrOverflow(uint64(zb0007), uint64(resourcesPerCatchpointFileChunkBackwardCompatible))
+ err = msgp.WrapError(err, "Resources")
+ return
+ }
+ if zb0008 {
+ (*z).Resources = nil
+ } else if (*z).Resources == nil {
+ (*z).Resources = make(map[uint64]msgp.Raw, zb0007)
+ }
+ for zb0007 > 0 {
+ var zb0001 uint64
+ var zb0002 msgp.Raw
+ zb0007--
+ zb0001, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Resources")
+ return
+ }
+ bts, err = zb0002.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Resources", zb0001)
+ return
+ }
+ (*z).Resources[zb0001] = zb0002
+ }
+ case "e":
+ (*z).ExpectingMoreEntries, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpectingMoreEntries")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *BalanceRecordV6) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*BalanceRecordV6)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *BalanceRecordV6) Msgsize() (s int) {
+ s = 1 + 2 + (*z).Address.Msgsize() + 2 + (*z).AccountData.Msgsize() + 2 + msgp.MapHeaderSize
+ if (*z).Resources != nil {
+ for zb0001, zb0002 := range (*z).Resources {
+ _ = zb0001
+ _ = zb0002
+ s += 0 + msgp.Uint64Size + zb0002.Msgsize()
+ }
+ }
+ s += 2 + msgp.BoolSize
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *BalanceRecordV6) MsgIsZero() bool {
+ return ((*z).Address.MsgIsZero()) && ((*z).AccountData.MsgIsZero()) && (len((*z).Resources) == 0) && ((*z).ExpectingMoreEntries == false)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *KVRecordV6) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(2)
+ var zb0001Mask uint8 /* 3 bits */
+ if len((*z).Key) == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if len((*z).Value) == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "k"
+ o = append(o, 0xa1, 0x6b)
+ o = msgp.AppendBytes(o, (*z).Key)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "v"
+ o = append(o, 0xa1, 0x76)
+ o = msgp.AppendBytes(o, (*z).Value)
+ }
+ }
+ return
+}
+
+func (_ *KVRecordV6) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*KVRecordV6)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *KVRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ var zb0003 int
+ zb0003, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Key")
+ return
+ }
+ if zb0003 > KVRecordV6MaxKeyLength {
+ err = msgp.ErrOverflow(uint64(zb0003), uint64(KVRecordV6MaxKeyLength))
+ return
+ }
+ (*z).Key, bts, err = msgp.ReadBytesBytes(bts, (*z).Key)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Key")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ var zb0004 int
+ zb0004, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Value")
+ return
+ }
+ if zb0004 > KVRecordV6MaxValueLength {
+ err = msgp.ErrOverflow(uint64(zb0004), uint64(KVRecordV6MaxValueLength))
+ return
+ }
+ (*z).Value, bts, err = msgp.ReadBytesBytes(bts, (*z).Value)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Value")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = KVRecordV6{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "k":
+ var zb0005 int
+ zb0005, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Key")
+ return
+ }
+ if zb0005 > KVRecordV6MaxKeyLength {
+ err = msgp.ErrOverflow(uint64(zb0005), uint64(KVRecordV6MaxKeyLength))
+ return
+ }
+ (*z).Key, bts, err = msgp.ReadBytesBytes(bts, (*z).Key)
+ if err != nil {
+ err = msgp.WrapError(err, "Key")
+ return
+ }
+ case "v":
+ var zb0006 int
+ zb0006, err = msgp.ReadBytesBytesHeader(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Value")
+ return
+ }
+ if zb0006 > KVRecordV6MaxValueLength {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(KVRecordV6MaxValueLength))
+ return
+ }
+ (*z).Value, bts, err = msgp.ReadBytesBytes(bts, (*z).Value)
+ if err != nil {
+ err = msgp.WrapError(err, "Value")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *KVRecordV6) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*KVRecordV6)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *KVRecordV6) Msgsize() (s int) {
+ s = 1 + 2 + msgp.BytesPrefixSize + len((*z).Key) + 2 + msgp.BytesPrefixSize + len((*z).Value)
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *KVRecordV6) MsgIsZero() bool {
+ return (len((*z).Key) == 0) && (len((*z).Value) == 0)
+}
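
The generated decoder above enforces the allocbound tags at read time: UnmarshalMsg checks each incoming byte-string header against KVRecordV6MaxKeyLength and KVRecordV6MaxValueLength before allocating. A minimal sketch of that behavior, assuming only that the package is importable as github.com/algorand/go-algorand/ledger/encoded (illustrative, not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/algorand/go-algorand/ledger/encoded"
)

func main() {
	// A key one byte over the 128-byte allocbound still marshals (MarshalMsg has no bound check)...
	oversized := encoded.KVRecordV6{Key: make([]byte, encoded.KVRecordV6MaxKeyLength+1)}
	buf := oversized.MarshalMsg(nil)

	// ...but the generated UnmarshalMsg reads the bytes header first and rejects
	// the record with an overflow error before allocating the oversized key.
	var decoded encoded.KVRecordV6
	if _, err := decoded.UnmarshalMsg(buf); err != nil {
		fmt.Println("rejected:", err)
	}
}
```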
diff --git a/ledger/encoded/msgp_gen_test.go b/ledger/encoded/msgp_gen_test.go
new file mode 100644
index 0000000000..415339c728
--- /dev/null
+++ b/ledger/encoded/msgp_gen_test.go
@@ -0,0 +1,195 @@
+//go:build !skip_msgp_testing
+// +build !skip_msgp_testing
+
+package encoded
+
+// Code generated by github.com/algorand/msgp DO NOT EDIT.
+
+import (
+ "testing"
+
+ "github.com/algorand/msgp/msgp"
+
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+func TestMarshalUnmarshalBalanceRecordV5(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := BalanceRecordV5{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingBalanceRecordV5(t *testing.T) {
+ protocol.RunEncodingTest(t, &BalanceRecordV5{})
+}
+
+func BenchmarkMarshalMsgBalanceRecordV5(b *testing.B) {
+ v := BalanceRecordV5{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgBalanceRecordV5(b *testing.B) {
+ v := BalanceRecordV5{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalBalanceRecordV5(b *testing.B) {
+ v := BalanceRecordV5{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalBalanceRecordV6(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := BalanceRecordV6{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingBalanceRecordV6(t *testing.T) {
+ protocol.RunEncodingTest(t, &BalanceRecordV6{})
+}
+
+func BenchmarkMarshalMsgBalanceRecordV6(b *testing.B) {
+ v := BalanceRecordV6{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgBalanceRecordV6(b *testing.B) {
+ v := BalanceRecordV6{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalBalanceRecordV6(b *testing.B) {
+ v := BalanceRecordV6{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalKVRecordV6(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ v := KVRecordV6{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingKVRecordV6(t *testing.T) {
+ protocol.RunEncodingTest(t, &KVRecordV6{})
+}
+
+func BenchmarkMarshalMsgKVRecordV6(b *testing.B) {
+ v := KVRecordV6{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgKVRecordV6(b *testing.B) {
+ v := KVRecordV6{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalKVRecordV6(b *testing.B) {
+ v := KVRecordV6{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/ledger/encoded/recordsV5.go b/ledger/encoded/recordsV5.go
new file mode 100644
index 0000000000..acf7e8d514
--- /dev/null
+++ b/ledger/encoded/recordsV5.go
@@ -0,0 +1,30 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand.  If not, see <https://www.gnu.org/licenses/>.
+
+package encoded
+
+import (
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/msgp/msgp"
+)
+
+// BalanceRecordV5 is the encoded account balance record.
+type BalanceRecordV5 struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Address basics.Address `codec:"pk,allocbound=crypto.DigestSize"`
+ AccountData msgp.Raw `codec:"ad"` // encoding of basics.AccountData
+}
diff --git a/ledger/encoded/recordsV6.go b/ledger/encoded/recordsV6.go
new file mode 100644
index 0000000000..e0712eeb0b
--- /dev/null
+++ b/ledger/encoded/recordsV6.go
@@ -0,0 +1,64 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand.  If not, see <https://www.gnu.org/licenses/>.
+
+package encoded
+
+import (
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/msgp/msgp"
+)
+
+// Adjust these to be big enough for boxes, but not directly tied to box values.
+const (
+ // KVRecordV6MaxKeyLength is the allocbound on KV keys. For boxes the key is "bx:<8-byte app index><64-byte name>".
+ KVRecordV6MaxKeyLength = 128
+
+ // KVRecordV6MaxValueLength is the allocbound on KV values. For boxes it must be at least MaxBoxSize.
+ KVRecordV6MaxValueLength = 32768
+
+ // MaxEncodedKVDataSize is the maximum size of a serialized KV entry, checked by TestEncodedKVDataSize.
+ // The exact value is 32906, which is 10 bytes more than 32768 + 128.
+ MaxEncodedKVDataSize = 33000
+
+ // resourcesPerCatchpointFileChunkBackwardCompatible is the old value for ResourcesPerCatchpointFileChunk.
+ // The size of a single resource entry was underestimated at 300 bytes, which holds only for assets and not for apps.
+ // It is safe to remove after April 2023, since we only support catchpoints that are at most 6 months old.
+ resourcesPerCatchpointFileChunkBackwardCompatible = 300_000
+)
+
+// SortUint64 re-exports the sort implemented in basics; it is used by msgp when
+// encoding the resources map below.
+type SortUint64 = basics.SortUint64
+
+// BalanceRecordV6 is the encoded account balance record.
+type BalanceRecordV6 struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Address basics.Address `codec:"a,allocbound=crypto.DigestSize"`
+ AccountData msgp.Raw `codec:"b"` // encoding of baseAccountData
+ Resources map[uint64]msgp.Raw `codec:"c,allocbound=resourcesPerCatchpointFileChunkBackwardCompatible"` // map of resourcesData
+
+ // flag indicating whether there are more records for the same account coming up
+ ExpectingMoreEntries bool `codec:"e"`
+}
+
+// KVRecordV6 is the encoded KV record.
+type KVRecordV6 struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Key []byte `codec:"k,allocbound=KVRecordV6MaxKeyLength"`
+ Value []byte `codec:"v,allocbound=KVRecordV6MaxValueLength"`
+}
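
To make the key bound concrete: the largest box key described above is "bx:" plus an 8-byte app index plus a 64-byte name, i.e. 75 bytes, comfortably under KVRecordV6MaxKeyLength. A hedged sketch using only the layout and constants from the comments above (illustrative, not part of the patch):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/algorand/go-algorand/ledger/encoded"
)

func main() {
	// "bx:" + 8-byte app index + 64-byte box name = 75 bytes, the largest box key shape.
	key := "bx:" + strings.Repeat("\x00", 8) + strings.Repeat("n", 64)

	rec := encoded.KVRecordV6{
		Key:   []byte(key),
		Value: make([]byte, encoded.KVRecordV6MaxValueLength), // worst-case box value
	}

	fmt.Println(len(key) <= encoded.KVRecordV6MaxKeyLength)               // true: 75 <= 128
	fmt.Println(len(rec.MarshalMsg(nil)) <= encoded.MaxEncodedKVDataSize) // true: worst case stays under 33000
}
```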
diff --git a/ledger/encoded/recordsV6_test.go b/ledger/encoded/recordsV6_test.go
new file mode 100644
index 0000000000..75bdac5137
--- /dev/null
+++ b/ledger/encoded/recordsV6_test.go
@@ -0,0 +1,64 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand.  If not, see <https://www.gnu.org/licenses/>.
+
+package encoded
+
+import (
+ "math"
+ "testing"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestEncodedKVRecordV6Allocbounds(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ for version, params := range config.Consensus {
+ require.GreaterOrEqualf(t, uint64(KVRecordV6MaxValueLength), params.MaxBoxSize, "Allocbound constant no longer valid as of consensus version %s", version)
+ longestPossibleBoxName := string(make([]byte, params.MaxAppKeyLen))
+ longestPossibleKey := logic.MakeBoxKey(basics.AppIndex(math.MaxUint64), longestPossibleBoxName)
+ require.GreaterOrEqualf(t, KVRecordV6MaxKeyLength, len(longestPossibleKey), "Allocbound constant no longer valid as of consensus version %s", version)
+ }
+}
+
+func TestEncodedKVDataSize(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ currentConsensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ require.GreaterOrEqual(t, KVRecordV6MaxKeyLength, currentConsensusParams.MaxAppKeyLen)
+ require.GreaterOrEqual(t, uint64(KVRecordV6MaxValueLength), currentConsensusParams.MaxBoxSize)
+
+ kvEntry := KVRecordV6{
+ Key: make([]byte, KVRecordV6MaxKeyLength),
+ Value: make([]byte, KVRecordV6MaxValueLength),
+ }
+
+ crypto.RandBytes(kvEntry.Key[:])
+ crypto.RandBytes(kvEntry.Value[:])
+
+ encoded := kvEntry.MarshalMsg(nil)
+ require.GreaterOrEqual(t, MaxEncodedKVDataSize, len(encoded))
+
+}
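
The 32906-byte figure verified by TestEncodedKVDataSize can be reproduced by hand, assuming msgp emits a fixmap header, fixstr field names, a bin8 header for the 128-byte key and a bin16 header for the 32768-byte value (header sizes per the MessagePack spec):

```go
package sketch

// 1 + 2 + 2 + 128 + 2 + 3 + 32768 = 32906, safely below the rounded-up MaxEncodedKVDataSize of 33000.
const worstCaseKVSize = 1 /* fixmap header */ +
	2 /* "k" */ + 2 /* bin8 header */ + 128 /* max key */ +
	2 /* "v" */ + 3 /* bin16 header */ + 32768 /* max value */
```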
diff --git a/ledger/msgp_gen.go b/ledger/msgp_gen.go
index ddd01a87f5..39c489e281 100644
--- a/ledger/msgp_gen.go
+++ b/ledger/msgp_gen.go
@@ -3,9 +3,9 @@ package ledger
// Code generated by github.com/algorand/msgp DO NOT EDIT.
import (
- "sort"
-
"github.com/algorand/msgp/msgp"
+
+ "github.com/algorand/go-algorand/ledger/encoded"
)
// The following msgp objects are implemented in this file:
@@ -41,30 +41,6 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
-// encodedBalanceRecordV5
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// encodedBalanceRecordV6
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
-// encodedKVRecordV6
-// |-----> (*) MarshalMsg
-// |-----> (*) CanMarshalMsg
-// |-----> (*) UnmarshalMsg
-// |-----> (*) CanUnmarshalMsg
-// |-----> (*) Msgsize
-// |-----> (*) MsgIsZero
-//
// MarshalMsg implements msgp.Marshaler
func (z CatchpointCatchupState) MarshalMsg(b []byte) (o []byte) {
@@ -424,29 +400,7 @@ func (z *catchpointFileBalancesChunkV5) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendArrayHeader(o, uint32(len((*z).Balances)))
}
for zb0001 := range (*z).Balances {
- // omitempty: check for empty values
- zb0003Len := uint32(2)
- var zb0003Mask uint8 /* 3 bits */
- if (*z).Balances[zb0001].AccountData.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x2
- }
- if (*z).Balances[zb0001].Address.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x4
- }
- // variable map header, size zb0003Len
- o = append(o, 0x80|uint8(zb0003Len))
- if (zb0003Mask & 0x2) == 0 { // if not empty
- // string "ad"
- o = append(o, 0xa2, 0x61, 0x64)
- o = (*z).Balances[zb0001].AccountData.MarshalMsg(o)
- }
- if (zb0003Mask & 0x4) == 0 { // if not empty
- // string "pk"
- o = append(o, 0xa2, 0x70, 0x6b)
- o = (*z).Balances[zb0001].Address.MarshalMsg(o)
- }
+ o = (*z).Balances[zb0001].MarshalMsg(o)
}
}
}
@@ -490,77 +444,13 @@ func (z *catchpointFileBalancesChunkV5) UnmarshalMsg(bts []byte) (o []byte, err
} else if (*z).Balances != nil && cap((*z).Balances) >= zb0004 {
(*z).Balances = ((*z).Balances)[:zb0004]
} else {
- (*z).Balances = make([]encodedBalanceRecordV5, zb0004)
+ (*z).Balances = make([]encoded.BalanceRecordV5, zb0004)
}
for zb0001 := range (*z).Balances {
- var zb0006 int
- var zb0007 bool
- zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Balances", zb0001)
- return
- }
- if zb0006 > 0 {
- zb0006--
- bts, err = (*z).Balances[zb0001].Address.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Balances", zb0001, "struct-from-array", "Address")
- return
- }
- }
- if zb0006 > 0 {
- zb0006--
- bts, err = (*z).Balances[zb0001].AccountData.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Balances", zb0001, "struct-from-array", "AccountData")
- return
- }
- }
- if zb0006 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0006)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Balances", zb0001, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Balances", zb0001)
- return
- }
- if zb0007 {
- (*z).Balances[zb0001] = encodedBalanceRecordV5{}
- }
- for zb0006 > 0 {
- zb0006--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Balances", zb0001)
- return
- }
- switch string(field) {
- case "pk":
- bts, err = (*z).Balances[zb0001].Address.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Balances", zb0001, "Address")
- return
- }
- case "ad":
- bts, err = (*z).Balances[zb0001].AccountData.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Balances", zb0001, "AccountData")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Balances", zb0001)
- return
- }
- }
- }
+ bts, err = (*z).Balances[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Balances", zb0001)
+ return
}
}
}
@@ -588,94 +478,30 @@ func (z *catchpointFileBalancesChunkV5) UnmarshalMsg(bts []byte) (o []byte, err
}
switch string(field) {
case "bl":
- var zb0008 int
- var zb0009 bool
- zb0008, zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Balances")
return
}
- if zb0008 > BalancesPerCatchpointFileChunk {
- err = msgp.ErrOverflow(uint64(zb0008), uint64(BalancesPerCatchpointFileChunk))
+ if zb0006 > BalancesPerCatchpointFileChunk {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(BalancesPerCatchpointFileChunk))
err = msgp.WrapError(err, "Balances")
return
}
- if zb0009 {
+ if zb0007 {
(*z).Balances = nil
- } else if (*z).Balances != nil && cap((*z).Balances) >= zb0008 {
- (*z).Balances = ((*z).Balances)[:zb0008]
+ } else if (*z).Balances != nil && cap((*z).Balances) >= zb0006 {
+ (*z).Balances = ((*z).Balances)[:zb0006]
} else {
- (*z).Balances = make([]encodedBalanceRecordV5, zb0008)
+ (*z).Balances = make([]encoded.BalanceRecordV5, zb0006)
}
for zb0001 := range (*z).Balances {
- var zb0010 int
- var zb0011 bool
- zb0010, zb0011, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0010, zb0011, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Balances", zb0001)
- return
- }
- if zb0010 > 0 {
- zb0010--
- bts, err = (*z).Balances[zb0001].Address.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Balances", zb0001, "struct-from-array", "Address")
- return
- }
- }
- if zb0010 > 0 {
- zb0010--
- bts, err = (*z).Balances[zb0001].AccountData.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Balances", zb0001, "struct-from-array", "AccountData")
- return
- }
- }
- if zb0010 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0010)
- if err != nil {
- err = msgp.WrapError(err, "Balances", zb0001, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err, "Balances", zb0001)
- return
- }
- if zb0011 {
- (*z).Balances[zb0001] = encodedBalanceRecordV5{}
- }
- for zb0010 > 0 {
- zb0010--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err, "Balances", zb0001)
- return
- }
- switch string(field) {
- case "pk":
- bts, err = (*z).Balances[zb0001].Address.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Balances", zb0001, "Address")
- return
- }
- case "ad":
- bts, err = (*z).Balances[zb0001].AccountData.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Balances", zb0001, "AccountData")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err, "Balances", zb0001)
- return
- }
- }
- }
+ bts, err = (*z).Balances[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Balances", zb0001)
+ return
}
}
default:
@@ -700,7 +526,7 @@ func (_ *catchpointFileBalancesChunkV5) CanUnmarshalMsg(z interface{}) bool {
func (z *catchpointFileBalancesChunkV5) Msgsize() (s int) {
s = 1 + 3 + msgp.ArrayHeaderSize
for zb0001 := range (*z).Balances {
- s += 1 + 3 + (*z).Balances[zb0001].Address.Msgsize() + 3 + (*z).Balances[zb0001].AccountData.Msgsize()
+ s += (*z).Balances[zb0001].Msgsize()
}
return
}
@@ -748,29 +574,7 @@ func (z *catchpointFileChunkV6) MarshalMsg(b []byte) (o []byte) {
o = msgp.AppendArrayHeader(o, uint32(len((*z).KVs)))
}
for zb0002 := range (*z).KVs {
- // omitempty: check for empty values
- zb0004Len := uint32(2)
- var zb0004Mask uint8 /* 3 bits */
- if len((*z).KVs[zb0002].Key) == 0 {
- zb0004Len--
- zb0004Mask |= 0x2
- }
- if len((*z).KVs[zb0002].Value) == 0 {
- zb0004Len--
- zb0004Mask |= 0x4
- }
- // variable map header, size zb0004Len
- o = append(o, 0x80|uint8(zb0004Len))
- if (zb0004Mask & 0x2) == 0 { // if not empty
- // string "k"
- o = append(o, 0xa1, 0x6b)
- o = msgp.AppendBytes(o, (*z).KVs[zb0002].Key)
- }
- if (zb0004Mask & 0x4) == 0 { // if not empty
- // string "v"
- o = append(o, 0xa1, 0x76)
- o = msgp.AppendBytes(o, (*z).KVs[zb0002].Value)
- }
+ o = (*z).KVs[zb0002].MarshalMsg(o)
}
}
}
@@ -814,7 +618,7 @@ func (z *catchpointFileChunkV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
} else if (*z).Balances != nil && cap((*z).Balances) >= zb0005 {
(*z).Balances = ((*z).Balances)[:zb0005]
} else {
- (*z).Balances = make([]encodedBalanceRecordV6, zb0005)
+ (*z).Balances = make([]encoded.BalanceRecordV6, zb0005)
}
for zb0001 := range (*z).Balances {
bts, err = (*z).Balances[zb0001].UnmarshalMsg(bts)
@@ -843,117 +647,13 @@ func (z *catchpointFileChunkV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
} else if (*z).KVs != nil && cap((*z).KVs) >= zb0007 {
(*z).KVs = ((*z).KVs)[:zb0007]
} else {
- (*z).KVs = make([]encodedKVRecordV6, zb0007)
+ (*z).KVs = make([]encoded.KVRecordV6, zb0007)
}
for zb0002 := range (*z).KVs {
- var zb0009 int
- var zb0010 bool
- zb0009, zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002)
- return
- }
- if zb0009 > 0 {
- zb0009--
- var zb0011 int
- zb0011, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "struct-from-array", "Key")
- return
- }
- if zb0011 > encodedKVRecordV6MaxKeyLength {
- err = msgp.ErrOverflow(uint64(zb0011), uint64(encodedKVRecordV6MaxKeyLength))
- return
- }
- (*z).KVs[zb0002].Key, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Key)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "struct-from-array", "Key")
- return
- }
- }
- if zb0009 > 0 {
- zb0009--
- var zb0012 int
- zb0012, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "struct-from-array", "Value")
- return
- }
- if zb0012 > encodedKVRecordV6MaxValueLength {
- err = msgp.ErrOverflow(uint64(zb0012), uint64(encodedKVRecordV6MaxValueLength))
- return
- }
- (*z).KVs[zb0002].Value, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Value)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "struct-from-array", "Value")
- return
- }
- }
- if zb0009 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0009)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002)
- return
- }
- if zb0010 {
- (*z).KVs[zb0002] = encodedKVRecordV6{}
- }
- for zb0009 > 0 {
- zb0009--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002)
- return
- }
- switch string(field) {
- case "k":
- var zb0013 int
- zb0013, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "Key")
- return
- }
- if zb0013 > encodedKVRecordV6MaxKeyLength {
- err = msgp.ErrOverflow(uint64(zb0013), uint64(encodedKVRecordV6MaxKeyLength))
- return
- }
- (*z).KVs[zb0002].Key, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Key)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "Key")
- return
- }
- case "v":
- var zb0014 int
- zb0014, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "Value")
- return
- }
- if zb0014 > encodedKVRecordV6MaxValueLength {
- err = msgp.ErrOverflow(uint64(zb0014), uint64(encodedKVRecordV6MaxValueLength))
- return
- }
- (*z).KVs[zb0002].Value, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Value)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002, "Value")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002)
- return
- }
- }
- }
+ bts, err = (*z).KVs[zb0002].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "KVs", zb0002)
+ return
}
}
}
@@ -981,24 +681,24 @@ func (z *catchpointFileChunkV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
switch string(field) {
case "bl":
- var zb0015 int
- var zb0016 bool
- zb0015, zb0016, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0009 int
+ var zb0010 bool
+ zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Balances")
return
}
- if zb0015 > BalancesPerCatchpointFileChunk {
- err = msgp.ErrOverflow(uint64(zb0015), uint64(BalancesPerCatchpointFileChunk))
+ if zb0009 > BalancesPerCatchpointFileChunk {
+ err = msgp.ErrOverflow(uint64(zb0009), uint64(BalancesPerCatchpointFileChunk))
err = msgp.WrapError(err, "Balances")
return
}
- if zb0016 {
+ if zb0010 {
(*z).Balances = nil
- } else if (*z).Balances != nil && cap((*z).Balances) >= zb0015 {
- (*z).Balances = ((*z).Balances)[:zb0015]
+ } else if (*z).Balances != nil && cap((*z).Balances) >= zb0009 {
+ (*z).Balances = ((*z).Balances)[:zb0009]
} else {
- (*z).Balances = make([]encodedBalanceRecordV6, zb0015)
+ (*z).Balances = make([]encoded.BalanceRecordV6, zb0009)
}
for zb0001 := range (*z).Balances {
bts, err = (*z).Balances[zb0001].UnmarshalMsg(bts)
@@ -1008,134 +708,30 @@ func (z *catchpointFileChunkV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
case "kv":
- var zb0017 int
- var zb0018 bool
- zb0017, zb0018, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zb0011 int
+ var zb0012 bool
+ zb0011, zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "KVs")
return
}
- if zb0017 > BalancesPerCatchpointFileChunk {
- err = msgp.ErrOverflow(uint64(zb0017), uint64(BalancesPerCatchpointFileChunk))
+ if zb0011 > BalancesPerCatchpointFileChunk {
+ err = msgp.ErrOverflow(uint64(zb0011), uint64(BalancesPerCatchpointFileChunk))
err = msgp.WrapError(err, "KVs")
return
}
- if zb0018 {
+ if zb0012 {
(*z).KVs = nil
- } else if (*z).KVs != nil && cap((*z).KVs) >= zb0017 {
- (*z).KVs = ((*z).KVs)[:zb0017]
+ } else if (*z).KVs != nil && cap((*z).KVs) >= zb0011 {
+ (*z).KVs = ((*z).KVs)[:zb0011]
} else {
- (*z).KVs = make([]encodedKVRecordV6, zb0017)
+ (*z).KVs = make([]encoded.KVRecordV6, zb0011)
}
for zb0002 := range (*z).KVs {
- var zb0019 int
- var zb0020 bool
- zb0019, zb0020, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0019, zb0020, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "KVs", zb0002)
- return
- }
- if zb0019 > 0 {
- zb0019--
- var zb0021 int
- zb0021, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "KVs", zb0002, "struct-from-array", "Key")
- return
- }
- if zb0021 > encodedKVRecordV6MaxKeyLength {
- err = msgp.ErrOverflow(uint64(zb0021), uint64(encodedKVRecordV6MaxKeyLength))
- return
- }
- (*z).KVs[zb0002].Key, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Key)
- if err != nil {
- err = msgp.WrapError(err, "KVs", zb0002, "struct-from-array", "Key")
- return
- }
- }
- if zb0019 > 0 {
- zb0019--
- var zb0022 int
- zb0022, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "KVs", zb0002, "struct-from-array", "Value")
- return
- }
- if zb0022 > encodedKVRecordV6MaxValueLength {
- err = msgp.ErrOverflow(uint64(zb0022), uint64(encodedKVRecordV6MaxValueLength))
- return
- }
- (*z).KVs[zb0002].Value, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Value)
- if err != nil {
- err = msgp.WrapError(err, "KVs", zb0002, "struct-from-array", "Value")
- return
- }
- }
- if zb0019 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0019)
- if err != nil {
- err = msgp.WrapError(err, "KVs", zb0002, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err, "KVs", zb0002)
- return
- }
- if zb0020 {
- (*z).KVs[zb0002] = encodedKVRecordV6{}
- }
- for zb0019 > 0 {
- zb0019--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err, "KVs", zb0002)
- return
- }
- switch string(field) {
- case "k":
- var zb0023 int
- zb0023, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "KVs", zb0002, "Key")
- return
- }
- if zb0023 > encodedKVRecordV6MaxKeyLength {
- err = msgp.ErrOverflow(uint64(zb0023), uint64(encodedKVRecordV6MaxKeyLength))
- return
- }
- (*z).KVs[zb0002].Key, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Key)
- if err != nil {
- err = msgp.WrapError(err, "KVs", zb0002, "Key")
- return
- }
- case "v":
- var zb0024 int
- zb0024, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "KVs", zb0002, "Value")
- return
- }
- if zb0024 > encodedKVRecordV6MaxValueLength {
- err = msgp.ErrOverflow(uint64(zb0024), uint64(encodedKVRecordV6MaxValueLength))
- return
- }
- (*z).KVs[zb0002].Value, bts, err = msgp.ReadBytesBytes(bts, (*z).KVs[zb0002].Value)
- if err != nil {
- err = msgp.WrapError(err, "KVs", zb0002, "Value")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err, "KVs", zb0002)
- return
- }
- }
- }
+ bts, err = (*z).KVs[zb0002].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "KVs", zb0002)
+ return
}
}
default:
@@ -1164,7 +760,7 @@ func (z *catchpointFileChunkV6) Msgsize() (s int) {
}
s += 3 + msgp.ArrayHeaderSize
for zb0002 := range (*z).KVs {
- s += 1 + 2 + msgp.BytesPrefixSize + len((*z).KVs[zb0002].Key) + 2 + msgp.BytesPrefixSize + len((*z).KVs[zb0002].Value)
+ s += (*z).KVs[zb0002].Msgsize()
}
return
}
@@ -1173,555 +769,3 @@ func (z *catchpointFileChunkV6) Msgsize() (s int) {
func (z *catchpointFileChunkV6) MsgIsZero() bool {
return (len((*z).Balances) == 0) && (len((*z).KVs) == 0)
}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedBalanceRecordV5) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0001Len := uint32(2)
- var zb0001Mask uint8 /* 3 bits */
- if (*z).AccountData.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x2
- }
- if (*z).Address.MsgIsZero() {
- zb0001Len--
- zb0001Mask |= 0x4
- }
- // variable map header, size zb0001Len
- o = append(o, 0x80|uint8(zb0001Len))
- if zb0001Len != 0 {
- if (zb0001Mask & 0x2) == 0 { // if not empty
- // string "ad"
- o = append(o, 0xa2, 0x61, 0x64)
- o = (*z).AccountData.MarshalMsg(o)
- }
- if (zb0001Mask & 0x4) == 0 { // if not empty
- // string "pk"
- o = append(o, 0xa2, 0x70, 0x6b)
- o = (*z).Address.MarshalMsg(o)
- }
- }
- return
-}
-
-func (_ *encodedBalanceRecordV5) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedBalanceRecordV5)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedBalanceRecordV5) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0001 int
- var zb0002 bool
- zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).Address.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Address")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- bts, err = (*z).AccountData.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AccountData")
- return
- }
- }
- if zb0001 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0001)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 {
- (*z) = encodedBalanceRecordV5{}
- }
- for zb0001 > 0 {
- zb0001--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "pk":
- bts, err = (*z).Address.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Address")
- return
- }
- case "ad":
- bts, err = (*z).AccountData.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "AccountData")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedBalanceRecordV5) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedBalanceRecordV5)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedBalanceRecordV5) Msgsize() (s int) {
- s = 1 + 3 + (*z).Address.Msgsize() + 3 + (*z).AccountData.Msgsize()
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedBalanceRecordV5) MsgIsZero() bool {
- return ((*z).Address.MsgIsZero()) && ((*z).AccountData.MsgIsZero())
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedBalanceRecordV6) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0003Len := uint32(4)
- var zb0003Mask uint8 /* 5 bits */
- if (*z).Address.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x2
- }
- if (*z).AccountData.MsgIsZero() {
- zb0003Len--
- zb0003Mask |= 0x4
- }
- if len((*z).Resources) == 0 {
- zb0003Len--
- zb0003Mask |= 0x8
- }
- if (*z).ExpectingMoreEntries == false {
- zb0003Len--
- zb0003Mask |= 0x10
- }
- // variable map header, size zb0003Len
- o = append(o, 0x80|uint8(zb0003Len))
- if zb0003Len != 0 {
- if (zb0003Mask & 0x2) == 0 { // if not empty
- // string "a"
- o = append(o, 0xa1, 0x61)
- o = (*z).Address.MarshalMsg(o)
- }
- if (zb0003Mask & 0x4) == 0 { // if not empty
- // string "b"
- o = append(o, 0xa1, 0x62)
- o = (*z).AccountData.MarshalMsg(o)
- }
- if (zb0003Mask & 0x8) == 0 { // if not empty
- // string "c"
- o = append(o, 0xa1, 0x63)
- if (*z).Resources == nil {
- o = msgp.AppendNil(o)
- } else {
- o = msgp.AppendMapHeader(o, uint32(len((*z).Resources)))
- }
- zb0001_keys := make([]uint64, 0, len((*z).Resources))
- for zb0001 := range (*z).Resources {
- zb0001_keys = append(zb0001_keys, zb0001)
- }
- sort.Sort(SortUint64(zb0001_keys))
- for _, zb0001 := range zb0001_keys {
- zb0002 := (*z).Resources[zb0001]
- _ = zb0002
- o = msgp.AppendUint64(o, zb0001)
- o = zb0002.MarshalMsg(o)
- }
- }
- if (zb0003Mask & 0x10) == 0 { // if not empty
- // string "e"
- o = append(o, 0xa1, 0x65)
- o = msgp.AppendBool(o, (*z).ExpectingMoreEntries)
- }
- }
- return
-}
-
-func (_ *encodedBalanceRecordV6) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedBalanceRecordV6)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedBalanceRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0003 int
- var zb0004 bool
- zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0003 > 0 {
- zb0003--
- bts, err = (*z).Address.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Address")
- return
- }
- }
- if zb0003 > 0 {
- zb0003--
- bts, err = (*z).AccountData.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "AccountData")
- return
- }
- }
- if zb0003 > 0 {
- zb0003--
- var zb0005 int
- var zb0006 bool
- zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Resources")
- return
- }
- if zb0005 > resourcesPerCatchpointFileChunkBackwardCompatible {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(resourcesPerCatchpointFileChunkBackwardCompatible))
- err = msgp.WrapError(err, "struct-from-array", "Resources")
- return
- }
- if zb0006 {
- (*z).Resources = nil
- } else if (*z).Resources == nil {
- (*z).Resources = make(map[uint64]msgp.Raw, zb0005)
- }
- for zb0005 > 0 {
- var zb0001 uint64
- var zb0002 msgp.Raw
- zb0005--
- zb0001, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Resources")
- return
- }
- bts, err = zb0002.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Resources", zb0001)
- return
- }
- (*z).Resources[zb0001] = zb0002
- }
- }
- if zb0003 > 0 {
- zb0003--
- (*z).ExpectingMoreEntries, bts, err = msgp.ReadBoolBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "ExpectingMoreEntries")
- return
- }
- }
- if zb0003 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0003)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0004 {
- (*z) = encodedBalanceRecordV6{}
- }
- for zb0003 > 0 {
- zb0003--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "a":
- bts, err = (*z).Address.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Address")
- return
- }
- case "b":
- bts, err = (*z).AccountData.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "AccountData")
- return
- }
- case "c":
- var zb0007 int
- var zb0008 bool
- zb0007, zb0008, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Resources")
- return
- }
- if zb0007 > resourcesPerCatchpointFileChunkBackwardCompatible {
- err = msgp.ErrOverflow(uint64(zb0007), uint64(resourcesPerCatchpointFileChunkBackwardCompatible))
- err = msgp.WrapError(err, "Resources")
- return
- }
- if zb0008 {
- (*z).Resources = nil
- } else if (*z).Resources == nil {
- (*z).Resources = make(map[uint64]msgp.Raw, zb0007)
- }
- for zb0007 > 0 {
- var zb0001 uint64
- var zb0002 msgp.Raw
- zb0007--
- zb0001, bts, err = msgp.ReadUint64Bytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Resources")
- return
- }
- bts, err = zb0002.UnmarshalMsg(bts)
- if err != nil {
- err = msgp.WrapError(err, "Resources", zb0001)
- return
- }
- (*z).Resources[zb0001] = zb0002
- }
- case "e":
- (*z).ExpectingMoreEntries, bts, err = msgp.ReadBoolBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "ExpectingMoreEntries")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedBalanceRecordV6) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedBalanceRecordV6)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedBalanceRecordV6) Msgsize() (s int) {
- s = 1 + 2 + (*z).Address.Msgsize() + 2 + (*z).AccountData.Msgsize() + 2 + msgp.MapHeaderSize
- if (*z).Resources != nil {
- for zb0001, zb0002 := range (*z).Resources {
- _ = zb0001
- _ = zb0002
- s += 0 + msgp.Uint64Size + zb0002.Msgsize()
- }
- }
- s += 2 + msgp.BoolSize
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedBalanceRecordV6) MsgIsZero() bool {
- return ((*z).Address.MsgIsZero()) && ((*z).AccountData.MsgIsZero()) && (len((*z).Resources) == 0) && ((*z).ExpectingMoreEntries == false)
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *encodedKVRecordV6) MarshalMsg(b []byte) (o []byte) {
- o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0001Len := uint32(2)
- var zb0001Mask uint8 /* 3 bits */
- if len((*z).Key) == 0 {
- zb0001Len--
- zb0001Mask |= 0x2
- }
- if len((*z).Value) == 0 {
- zb0001Len--
- zb0001Mask |= 0x4
- }
- // variable map header, size zb0001Len
- o = append(o, 0x80|uint8(zb0001Len))
- if zb0001Len != 0 {
- if (zb0001Mask & 0x2) == 0 { // if not empty
- // string "k"
- o = append(o, 0xa1, 0x6b)
- o = msgp.AppendBytes(o, (*z).Key)
- }
- if (zb0001Mask & 0x4) == 0 { // if not empty
- // string "v"
- o = append(o, 0xa1, 0x76)
- o = msgp.AppendBytes(o, (*z).Value)
- }
- }
- return
-}
-
-func (_ *encodedKVRecordV6) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedKVRecordV6)
- return ok
-}
-
-// UnmarshalMsg implements msgp.Unmarshaler
-func (z *encodedKVRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
- var field []byte
- _ = field
- var zb0001 int
- var zb0002 bool
- zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
- if _, ok := err.(msgp.TypeError); ok {
- zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0001 > 0 {
- zb0001--
- var zb0003 int
- zb0003, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Key")
- return
- }
- if zb0003 > encodedKVRecordV6MaxKeyLength {
- err = msgp.ErrOverflow(uint64(zb0003), uint64(encodedKVRecordV6MaxKeyLength))
- return
- }
- (*z).Key, bts, err = msgp.ReadBytesBytes(bts, (*z).Key)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Key")
- return
- }
- }
- if zb0001 > 0 {
- zb0001--
- var zb0004 int
- zb0004, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Value")
- return
- }
- if zb0004 > encodedKVRecordV6MaxValueLength {
- err = msgp.ErrOverflow(uint64(zb0004), uint64(encodedKVRecordV6MaxValueLength))
- return
- }
- (*z).Value, bts, err = msgp.ReadBytesBytes(bts, (*z).Value)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array", "Value")
- return
- }
- }
- if zb0001 > 0 {
- err = msgp.ErrTooManyArrayFields(zb0001)
- if err != nil {
- err = msgp.WrapError(err, "struct-from-array")
- return
- }
- }
- } else {
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- if zb0002 {
- (*z) = encodedKVRecordV6{}
- }
- for zb0001 > 0 {
- zb0001--
- field, bts, err = msgp.ReadMapKeyZC(bts)
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- switch string(field) {
- case "k":
- var zb0005 int
- zb0005, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Key")
- return
- }
- if zb0005 > encodedKVRecordV6MaxKeyLength {
- err = msgp.ErrOverflow(uint64(zb0005), uint64(encodedKVRecordV6MaxKeyLength))
- return
- }
- (*z).Key, bts, err = msgp.ReadBytesBytes(bts, (*z).Key)
- if err != nil {
- err = msgp.WrapError(err, "Key")
- return
- }
- case "v":
- var zb0006 int
- zb0006, err = msgp.ReadBytesBytesHeader(bts)
- if err != nil {
- err = msgp.WrapError(err, "Value")
- return
- }
- if zb0006 > encodedKVRecordV6MaxValueLength {
- err = msgp.ErrOverflow(uint64(zb0006), uint64(encodedKVRecordV6MaxValueLength))
- return
- }
- (*z).Value, bts, err = msgp.ReadBytesBytes(bts, (*z).Value)
- if err != nil {
- err = msgp.WrapError(err, "Value")
- return
- }
- default:
- err = msgp.ErrNoField(string(field))
- if err != nil {
- err = msgp.WrapError(err)
- return
- }
- }
- }
- }
- o = bts
- return
-}
-
-func (_ *encodedKVRecordV6) CanUnmarshalMsg(z interface{}) bool {
- _, ok := (z).(*encodedKVRecordV6)
- return ok
-}
-
-// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *encodedKVRecordV6) Msgsize() (s int) {
- s = 1 + 2 + msgp.BytesPrefixSize + len((*z).Key) + 2 + msgp.BytesPrefixSize + len((*z).Value)
- return
-}
-
-// MsgIsZero returns whether this is a zero value
-func (z *encodedKVRecordV6) MsgIsZero() bool {
- return (len((*z).Key) == 0) && (len((*z).Value) == 0)
-}
diff --git a/ledger/msgp_gen_test.go b/ledger/msgp_gen_test.go
index e7739ad972..de29b5f11d 100644
--- a/ledger/msgp_gen_test.go
+++ b/ledger/msgp_gen_test.go
@@ -193,183 +193,3 @@ func BenchmarkUnmarshalcatchpointFileChunkV6(b *testing.B) {
}
}
}
-
-func TestMarshalUnmarshalencodedBalanceRecordV5(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedBalanceRecordV5{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedBalanceRecordV5(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedBalanceRecordV5{})
-}
-
-func BenchmarkMarshalMsgencodedBalanceRecordV5(b *testing.B) {
- v := encodedBalanceRecordV5{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedBalanceRecordV5(b *testing.B) {
- v := encodedBalanceRecordV5{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedBalanceRecordV5(b *testing.B) {
- v := encodedBalanceRecordV5{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedBalanceRecordV6(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedBalanceRecordV6{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedBalanceRecordV6(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedBalanceRecordV6{})
-}
-
-func BenchmarkMarshalMsgencodedBalanceRecordV6(b *testing.B) {
- v := encodedBalanceRecordV6{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedBalanceRecordV6(b *testing.B) {
- v := encodedBalanceRecordV6{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedBalanceRecordV6(b *testing.B) {
- v := encodedBalanceRecordV6{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func TestMarshalUnmarshalencodedKVRecordV6(t *testing.T) {
- partitiontest.PartitionTest(t)
- v := encodedKVRecordV6{}
- bts := v.MarshalMsg(nil)
- left, err := v.UnmarshalMsg(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
- }
-
- left, err = msgp.Skip(bts)
- if err != nil {
- t.Fatal(err)
- }
- if len(left) > 0 {
- t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
- }
-}
-
-func TestRandomizedEncodingencodedKVRecordV6(t *testing.T) {
- protocol.RunEncodingTest(t, &encodedKVRecordV6{})
-}
-
-func BenchmarkMarshalMsgencodedKVRecordV6(b *testing.B) {
- v := encodedKVRecordV6{}
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.MarshalMsg(nil)
- }
-}
-
-func BenchmarkAppendMsgencodedKVRecordV6(b *testing.B) {
- v := encodedKVRecordV6{}
- bts := make([]byte, 0, v.Msgsize())
- bts = v.MarshalMsg(bts[0:0])
- b.SetBytes(int64(len(bts)))
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- bts = v.MarshalMsg(bts[0:0])
- }
-}
-
-func BenchmarkUnmarshalencodedKVRecordV6(b *testing.B) {
- v := encodedKVRecordV6{}
- bts := v.MarshalMsg(nil)
- b.ReportAllocs()
- b.SetBytes(int64(len(bts)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := v.UnmarshalMsg(bts)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
diff --git a/ledger/store/catchpointPendingHashesIter.go b/ledger/store/catchpointPendingHashesIter.go
new file mode 100644
index 0000000000..39dc112e8b
--- /dev/null
+++ b/ledger/store/catchpointPendingHashesIter.go
@@ -0,0 +1,81 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand.  If not, see <https://www.gnu.org/licenses/>.
+
+package store
+
+import (
+ "context"
+ "database/sql"
+)
+
+// catchpointPendingHashesIterator allows us to iterate over the hashes in the catchpointpendinghashes table in hash order.
+type catchpointPendingHashesIterator struct {
+ hashCount int
+ tx *sql.Tx
+ rows *sql.Rows
+}
+
+// MakeCatchpointPendingHashesIterator creates a pending hashes iterator that retrieves the hashes in the catchpointpendinghashes table.
+func MakeCatchpointPendingHashesIterator(hashCount int, tx *sql.Tx) *catchpointPendingHashesIterator {
+ return &catchpointPendingHashesIterator{
+ hashCount: hashCount,
+ tx: tx,
+ }
+}
+
+// Next returns an array containing the hashes, returning up to hashCount hashes at a time.
+func (iterator *catchpointPendingHashesIterator) Next(ctx context.Context) (hashes [][]byte, err error) {
+ if iterator.rows == nil {
+ iterator.rows, err = iterator.tx.QueryContext(ctx, "SELECT data FROM catchpointpendinghashes ORDER BY data")
+ if err != nil {
+ return
+ }
+ }
+
+ // gather up to hashCount hashes.
+ hashes = make([][]byte, iterator.hashCount)
+ hashIdx := 0
+ for iterator.rows.Next() {
+ err = iterator.rows.Scan(&hashes[hashIdx])
+ if err != nil {
+ iterator.Close()
+ return
+ }
+
+ hashIdx++
+ if hashIdx == iterator.hashCount {
+ // we're done with this iteration.
+ return
+ }
+ }
+ hashes = hashes[:hashIdx]
+ err = iterator.rows.Err()
+ if err != nil {
+ iterator.Close()
+ return
+ }
+ // we just finished reading the table.
+ iterator.Close()
+ return
+}
+
+// Close shuts down the catchpointPendingHashesIterator, releasing database resources.
+func (iterator *catchpointPendingHashesIterator) Close() {
+ if iterator.rows != nil {
+ iterator.rows.Close()
+ iterator.rows = nil
+ }
+}
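
A usage sketch for the pending hashes iterator above (illustrative, not part of the patch): ctx, tx and the add callback are placeholders, and the batch size of 512 is arbitrary. Since Next closes and re-opens its query once the table is exhausted, the caller stops as soon as a batch comes back short rather than waiting for an empty one.

```go
package sketch

import (
	"context"
	"database/sql"

	"github.com/algorand/go-algorand/ledger/store"
)

func drainPendingHashes(ctx context.Context, tx *sql.Tx, add func([]byte) error) error {
	const batch = 512
	it := store.MakeCatchpointPendingHashesIterator(batch, tx)
	defer it.Close()
	for {
		hashes, err := it.Next(ctx)
		if err != nil {
			return err
		}
		for _, h := range hashes {
			if err := add(h); err != nil { // e.g. feed each hash into the merkle trie
				return err
			}
		}
		if len(hashes) < batch { // a short (or empty) batch means the table is fully read
			return nil
		}
	}
}
```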
diff --git a/ledger/store/encodedAccountsIter.go b/ledger/store/encodedAccountsIter.go
new file mode 100644
index 0000000000..8a2311c87c
--- /dev/null
+++ b/ledger/store/encodedAccountsIter.go
@@ -0,0 +1,165 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand.  If not, see <https://www.gnu.org/licenses/>.
+
+package store
+
+import (
+ "context"
+ "database/sql"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/ledger/encoded"
+ "github.com/algorand/msgp/msgp"
+)
+
+// encodedAccountsBatchIter allows us to iterate over the accounts data stored in the accountbase table.
+type encodedAccountsBatchIter struct {
+ accountsRows *sql.Rows
+ resourcesRows *sql.Rows
+ nextBaseRow pendingBaseRow
+ nextResourceRow pendingResourceRow
+ acctResCnt catchpointAccountResourceCounter
+}
+
+// catchpointAccountResourceCounter keeps track of the resources processed for the current account
+type catchpointAccountResourceCounter struct {
+ totalAppParams uint64
+ totalAppLocalStates uint64
+ totalAssetParams uint64
+ totalAssets uint64
+}
+
+// MakeEncodedAccoutsBatchIter creates an empty accounts batch iterator.
+func MakeEncodedAccoutsBatchIter() *encodedAccountsBatchIter {
+ return &encodedAccountsBatchIter{}
+}
+
+// Next returns an array containing the account data, in the same way they appear in the database,
+// returning up to accountCount accounts at a time.
+func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, tx *sql.Tx, accountCount int, resourceCount int) (bals []encoded.BalanceRecordV6, numAccountsProcessed uint64, err error) {
+ if iterator.accountsRows == nil {
+ iterator.accountsRows, err = tx.QueryContext(ctx, "SELECT rowid, address, data FROM accountbase ORDER BY rowid")
+ if err != nil {
+ return
+ }
+ }
+ if iterator.resourcesRows == nil {
+ iterator.resourcesRows, err = tx.QueryContext(ctx, "SELECT addrid, aidx, data FROM resources ORDER BY addrid, aidx")
+ if err != nil {
+ return
+ }
+ }
+
+ // gather up to accountCount encoded accounts.
+ bals = make([]encoded.BalanceRecordV6, 0, accountCount)
+ var encodedRecord encoded.BalanceRecordV6
+ var baseAcct BaseAccountData
+ var numAcct int
+ baseCb := func(addr basics.Address, rowid int64, accountData *BaseAccountData, encodedAccountData []byte) (err error) {
+ encodedRecord = encoded.BalanceRecordV6{Address: addr, AccountData: encodedAccountData}
+ baseAcct = *accountData
+ numAcct++
+ return nil
+ }
+
+ var totalResources int
+
+ // emptyCount := 0
+ resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *ResourcesData, encodedResourceData []byte, lastResource bool) error {
+
+ emptyBaseAcct := baseAcct.TotalAppParams == 0 && baseAcct.TotalAppLocalStates == 0 && baseAcct.TotalAssetParams == 0 && baseAcct.TotalAssets == 0
+ if !emptyBaseAcct && resData != nil {
+ if encodedRecord.Resources == nil {
+ encodedRecord.Resources = make(map[uint64]msgp.Raw)
+ }
+ encodedRecord.Resources[uint64(cidx)] = encodedResourceData
+ if resData.IsApp() && resData.IsOwning() {
+ iterator.acctResCnt.totalAppParams++
+ }
+ if resData.IsApp() && resData.IsHolding() {
+ iterator.acctResCnt.totalAppLocalStates++
+ }
+
+ if resData.IsAsset() && resData.IsOwning() {
+ iterator.acctResCnt.totalAssetParams++
+ }
+ if resData.IsAsset() && resData.IsHolding() {
+ iterator.acctResCnt.totalAssets++
+ }
+ totalResources++
+ }
+
+ if baseAcct.TotalAppParams == iterator.acctResCnt.totalAppParams &&
+ baseAcct.TotalAppLocalStates == iterator.acctResCnt.totalAppLocalStates &&
+ baseAcct.TotalAssetParams == iterator.acctResCnt.totalAssetParams &&
+ baseAcct.TotalAssets == iterator.acctResCnt.totalAssets {
+
+ encodedRecord.ExpectingMoreEntries = false
+ bals = append(bals, encodedRecord)
+ numAccountsProcessed++
+
+ iterator.acctResCnt = catchpointAccountResourceCounter{}
+
+ return nil
+ }
+
+ // max resources per chunk reached, stop iterating.
+ if lastResource {
+ encodedRecord.ExpectingMoreEntries = true
+ bals = append(bals, encodedRecord)
+ encodedRecord.Resources = nil
+ }
+
+ return nil
+ }
+
+ _, iterator.nextBaseRow, iterator.nextResourceRow, err = processAllBaseAccountRecords(
+ iterator.accountsRows, iterator.resourcesRows,
+ baseCb, resCb,
+ iterator.nextBaseRow, iterator.nextResourceRow, accountCount, resourceCount,
+ )
+ if err != nil {
+ iterator.Close()
+ return
+ }
+
+ if len(bals) == accountCount || totalResources == resourceCount {
+ // we're done with this iteration.
+ return
+ }
+
+ err = iterator.accountsRows.Err()
+ if err != nil {
+ iterator.Close()
+ return
+ }
+ // Do not Close() the iterator here. It is the caller's responsibility to
+ // do so, signalled by the return of an empty chunk. If we Close() here, the
+ // next call to Next() will start all over!
+ return
+}
+
+// Close shuts down the encodedAccountsBatchIter, releasing database resources.
+func (iterator *encodedAccountsBatchIter) Close() {
+ if iterator.accountsRows != nil {
+ iterator.accountsRows.Close()
+ iterator.accountsRows = nil
+ }
+ if iterator.resourcesRows != nil {
+ iterator.resourcesRows.Close()
+ iterator.resourcesRows = nil
+ }
+}
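
A usage sketch for the accounts batch iterator above (illustrative, not part of the patch): the chunk sizes and the emit callback are placeholders. As the comment in Next notes, an empty chunk signals completion, and closing the iterator is the caller's job.

```go
package sketch

import (
	"context"
	"database/sql"

	"github.com/algorand/go-algorand/ledger/encoded"
	"github.com/algorand/go-algorand/ledger/store"
)

func writeAccountChunks(ctx context.Context, tx *sql.Tx, emit func([]encoded.BalanceRecordV6) error) error {
	it := store.MakeEncodedAccoutsBatchIter() // name as defined in the patch
	defer it.Close()
	for {
		chunk, _, err := it.Next(ctx, tx, 512, 1024) // up to 512 accounts / 1024 resources per chunk
		if err != nil {
			return err
		}
		if len(chunk) == 0 {
			return nil // empty chunk: the account table has been fully read
		}
		if err := emit(chunk); err != nil { // e.g. marshal the chunk into a catchpoint file chunk
			return err
		}
	}
}
```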
diff --git a/ledger/store/orderedAccountsIter.go b/ledger/store/orderedAccountsIter.go
new file mode 100644
index 0000000000..afc737d250
--- /dev/null
+++ b/ledger/store/orderedAccountsIter.go
@@ -0,0 +1,433 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand.  If not, see <https://www.gnu.org/licenses/>.
+
+package store
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+ "math"
+
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+// orderedAccountsIter allows us to iterate over the accounts addresses in the order of the account hashes.
+type orderedAccountsIter struct {
+ step orderedAccountsIterStep
+ accountBaseRows *sql.Rows
+ hashesRows *sql.Rows
+ resourcesRows *sql.Rows
+ tx *sql.Tx
+ pendingBaseRow pendingBaseRow
+ pendingResourceRow pendingResourceRow
+ accountCount int
+ insertStmt *sql.Stmt
+}
+
+// orderedAccountsIterStep is used by orderedAccountsIter to define the current step
+//
+//msgp:ignore orderedAccountsIterStep
+type orderedAccountsIterStep int
+
+const (
+ // startup step
+ oaiStepStartup = orderedAccountsIterStep(0)
+ // delete old ordering table if we have any leftover from previous invocation
+ oaiStepDeleteOldOrderingTable = orderedAccountsIterStep(0)
+ // create new ordering table
+ oaiStepCreateOrderingTable = orderedAccountsIterStep(1)
+ // query the existing accounts
+ oaiStepQueryAccounts = orderedAccountsIterStep(2)
+ // iterate over the existing accounts and insert their hash & address into the staging ordering table
+ oaiStepInsertAccountData = orderedAccountsIterStep(3)
+ // create an index on the ordering table so that we can efficiently scan it.
+ oaiStepCreateOrderingAccountIndex = orderedAccountsIterStep(4)
+ // query the ordering table
+ oaiStepSelectFromOrderedTable = orderedAccountsIterStep(5)
+ // iterate over the ordering table
+ oaiStepIterateOverOrderedTable = orderedAccountsIterStep(6)
+ // cleanup and delete ordering table
+ oaiStepShutdown = orderedAccountsIterStep(7)
+ // do nothing as we're done.
+ oaiStepDone = orderedAccountsIterStep(8)
+)
+
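+// pendingBaseRow holds a base account row that was read but whose processing was cut
+// short because the current chunk ran out of resource budget; it is replayed on the
+// next call to processAllBaseAccountRecords.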
+type pendingBaseRow struct {
+ addr basics.Address
+ rowid int64
+ accountData *BaseAccountData
+ encodedAccountData []byte
+}
+
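+// pendingResourceRow holds a resources row that was read ahead of the account currently
+// being processed; it is replayed once that account is reached.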
+type pendingResourceRow struct {
+ addrid int64
+ aidx basics.CreatableIndex
+ buf []byte
+}
+
+// MakeOrderedAccountsIter creates an ordered account iterator. Note that due to implementation reasons,
+// only a single iterator can be active at a time.
+func MakeOrderedAccountsIter(tx *sql.Tx, accountCount int) *orderedAccountsIter {
+ return &orderedAccountsIter{
+ tx: tx,
+ accountCount: accountCount,
+ step: oaiStepStartup,
+ }
+}
+
+// accountAddressHash is used by Next to return a single account address and the associated hash.
+type accountAddressHash struct {
+ Addrid int64
+ Digest []byte
+}
+
+// Next returns an array containing the account addresses and hashes.
+// The Next function works in multiple processing stages: it first processes the current accounts and orders them,
+// and then returns the ordered accounts. In the first phase it returns an empty accountAddressHash array
+// and sets processedRecords to the number of accounts that were processed. In the second phase, acct
+// contains valid data (and optionally the account data as well, if requested in MakeOrderedAccountsIter) and
+// processedRecords is zero. If err is sql.ErrNoRows, the iterator has completed its work and no further
+// accounts exist. Otherwise, the caller is expected to keep calling Next to retrieve the next set of accounts
+// (or to let Next make some progress toward that goal).
+func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []accountAddressHash, processedRecords int, err error) {
+ if iterator.step == oaiStepDeleteOldOrderingTable {
+ // although we're going to delete this table anyway when completing the iterator execution, we'll try to
+ // clean up any intermediate table.
+ _, err = iterator.tx.ExecContext(ctx, "DROP TABLE IF EXISTS accountsiteratorhashes")
+ if err != nil {
+ return
+ }
+ iterator.step = oaiStepCreateOrderingTable
+ return
+ }
+ if iterator.step == oaiStepCreateOrderingTable {
+ // create the temporary table
+ _, err = iterator.tx.ExecContext(ctx, "CREATE TABLE accountsiteratorhashes(addrid INTEGER, hash blob)")
+ if err != nil {
+ return
+ }
+ iterator.step = oaiStepQueryAccounts
+ return
+ }
+ if iterator.step == oaiStepQueryAccounts {
+ // iterate over the existing accounts
+ iterator.accountBaseRows, err = iterator.tx.QueryContext(ctx, "SELECT rowid, address, data FROM accountbase ORDER BY rowid")
+ if err != nil {
+ return
+ }
+ // iterate over the existing resources
+ iterator.resourcesRows, err = iterator.tx.QueryContext(ctx, "SELECT addrid, aidx, data FROM resources ORDER BY addrid, aidx")
+ if err != nil {
+ return
+ }
+ // prepare the insert statement into the temporary table
+ iterator.insertStmt, err = iterator.tx.PrepareContext(ctx, "INSERT INTO accountsiteratorhashes(addrid, hash) VALUES(?, ?)")
+ if err != nil {
+ return
+ }
+ iterator.step = oaiStepInsertAccountData
+ return
+ }
+ if iterator.step == oaiStepInsertAccountData {
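+ // lastAddrID tracks the rowid of the base account most recently handed to baseCb,
+ // so that resCb can insert resource hashes under that account's row.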
+ var lastAddrID int64
+ baseCb := func(addr basics.Address, rowid int64, accountData *BaseAccountData, encodedAccountData []byte) (err error) {
+ hash := AccountHashBuilderV6(addr, accountData, encodedAccountData)
+ _, err = iterator.insertStmt.ExecContext(ctx, rowid, hash)
+ if err != nil {
+ return
+ }
+ lastAddrID = rowid
+ return nil
+ }
+
+ resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *ResourcesData, encodedResourceData []byte, lastResource bool) error {
+ if resData != nil {
+ hash, err := ResourcesHashBuilderV6(resData, addr, cidx, resData.UpdateRound, encodedResourceData)
+ if err != nil {
+ return err
+ }
+ _, err = iterator.insertStmt.ExecContext(ctx, lastAddrID, hash)
+ return err
+ }
+ return nil
+ }
+
+ count := 0
+ count, iterator.pendingBaseRow, iterator.pendingResourceRow, err = processAllBaseAccountRecords(
+ iterator.accountBaseRows, iterator.resourcesRows,
+ baseCb, resCb,
+ iterator.pendingBaseRow, iterator.pendingResourceRow, iterator.accountCount, math.MaxInt,
+ )
+ if err != nil {
+ iterator.Close(ctx)
+ return
+ }
+
+ if count == iterator.accountCount {
+ // we're done with this iteration.
+ processedRecords = count
+ return
+ }
+
+ // make sure the resource iterator has no more entries.
+ if iterator.resourcesRows.Next() {
+ iterator.Close(ctx)
+ err = errors.New("resource table entries exceed the ones specified in the accountbase table")
+ return
+ }
+
+ processedRecords = count
+ iterator.accountBaseRows.Close()
+ iterator.accountBaseRows = nil
+ iterator.resourcesRows.Close()
+ iterator.resourcesRows = nil
+ iterator.insertStmt.Close()
+ iterator.insertStmt = nil
+ iterator.step = oaiStepCreateOrderingAccountIndex
+ return
+ }
+ if iterator.step == oaiStepCreateOrderingAccountIndex {
+ // create an index. It has been shown that even when we're making a single select statement in step 5, it is better to have this index than not to have it at all.
+ // note that this index is using the rowid of the accountsiteratorhashes table.
+ _, err = iterator.tx.ExecContext(ctx, "CREATE INDEX accountsiteratorhashesidx ON accountsiteratorhashes(hash)")
+ if err != nil {
+ iterator.Close(ctx)
+ return
+ }
+ iterator.step = oaiStepSelectFromOrderedTable
+ return
+ }
+ if iterator.step == oaiStepSelectFromOrderedTable {
+ // select the data from the ordered table
+ iterator.hashesRows, err = iterator.tx.QueryContext(ctx, "SELECT addrid, hash FROM accountsiteratorhashes ORDER BY hash")
+
+ if err != nil {
+ iterator.Close(ctx)
+ return
+ }
+ iterator.step = oaiStepIterateOverOrderedTable
+ return
+ }
+
+ if iterator.step == oaiStepIterateOverOrderedTable {
+ acct = make([]accountAddressHash, iterator.accountCount)
+ acctIdx := 0
+ for iterator.hashesRows.Next() {
+ err = iterator.hashesRows.Scan(&(acct[acctIdx].Addrid), &(acct[acctIdx].Digest))
+ if err != nil {
+ iterator.Close(ctx)
+ return
+ }
+ acctIdx++
+ if acctIdx == iterator.accountCount {
+ // we're done with this iteration.
+ return
+ }
+ }
+ acct = acct[:acctIdx]
+ iterator.step = oaiStepShutdown
+ iterator.hashesRows.Close()
+ iterator.hashesRows = nil
+ return
+ }
+ if iterator.step == oaiStepShutdown {
+ err = iterator.Close(ctx)
+ if err != nil {
+ return
+ }
+ iterator.step = oaiStepDone
+ // fallthrough
+ }
+ return nil, 0, sql.ErrNoRows
+}
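+
+// A minimal usage sketch (illustrative only, assuming the caller owns tx and ctx):
+// keep calling Next until it returns sql.ErrNoRows, then Close the iterator.
+//
+//	iter := MakeOrderedAccountsIter(tx, accountCount)
+//	defer iter.Close(ctx)
+//	for {
+//		accts, processed, err := iter.Next(ctx)
+//		if err == sql.ErrNoRows {
+//			break // all ordered accounts have been returned
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		_ = accts     // ordered (addrid, hash) pairs, empty during the preparation steps
+//		_ = processed // number of accounts hashed during the insert step
+//	}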
+
+// Close shuts down the orderedAccountsIter, releasing database resources.
+func (iterator *orderedAccountsIter) Close(ctx context.Context) (err error) {
+ if iterator.accountBaseRows != nil {
+ iterator.accountBaseRows.Close()
+ iterator.accountBaseRows = nil
+ }
+ if iterator.resourcesRows != nil {
+ iterator.resourcesRows.Close()
+ iterator.resourcesRows = nil
+ }
+ if iterator.hashesRows != nil {
+ iterator.hashesRows.Close()
+ iterator.hashesRows = nil
+ }
+ if iterator.insertStmt != nil {
+ iterator.insertStmt.Close()
+ iterator.insertStmt = nil
+ }
+ _, err = iterator.tx.ExecContext(ctx, "DROP TABLE IF EXISTS accountsiteratorhashes")
+ return
+}
+
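+// processAllBaseAccountRecords iterates over the accountbase rows and their matching
+// resources rows, invoking baseCb for each account and resCb for its resources, until
+// accountCount accounts or resourceCount resources have been processed. A partially
+// processed account, or a resources row that was read ahead, is returned via
+// pendingBaseRow / pendingResourceRow so the next call can resume where this one stopped.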
+func processAllBaseAccountRecords(
+ baseRows *sql.Rows,
+ resRows *sql.Rows,
+ baseCb func(addr basics.Address, rowid int64, accountData *BaseAccountData, encodedAccountData []byte) error,
+ resCb func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *ResourcesData, encodedResourceData []byte, lastResource bool) error,
+ pendingBase pendingBaseRow, pendingResource pendingResourceRow, accountCount int, resourceCount int,
+) (int, pendingBaseRow, pendingResourceRow, error) {
+ var addr basics.Address
+ var prevAddr basics.Address
+ var err error
+ count := 0
+
+ var accountData BaseAccountData
+ var addrbuf []byte
+ var buf []byte
+ var rowid int64
+ for {
+ if pendingBase.rowid != 0 {
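+ // resume the base account carried over from the previous call instead of
+ // advancing the accountbase rows cursor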
+ addr = pendingBase.addr
+ rowid = pendingBase.rowid
+ accountData = *pendingBase.accountData
+ buf = pendingBase.encodedAccountData
+ pendingBase = pendingBaseRow{}
+ } else {
+ if !baseRows.Next() {
+ break
+ }
+
+ err = baseRows.Scan(&rowid, &addrbuf, &buf)
+ if err != nil {
+ return 0, pendingBaseRow{}, pendingResourceRow{}, err
+ }
+
+ if len(addrbuf) != len(addr) {
+ err = fmt.Errorf("account DB address length mismatch: %d != %d", len(addrbuf), len(addr))
+ return 0, pendingBaseRow{}, pendingResourceRow{}, err
+ }
+
+ copy(addr[:], addrbuf)
+
+ accountData = BaseAccountData{}
+ err = protocol.Decode(buf, &accountData)
+ if err != nil {
+ return 0, pendingBaseRow{}, pendingResourceRow{}, err
+ }
+ }
+
+ err = baseCb(addr, rowid, &accountData, buf)
+ if err != nil {
+ return 0, pendingBaseRow{}, pendingResourceRow{}, err
+ }
+
+ var resourcesProcessed int
+ pendingResource, resourcesProcessed, err = processAllResources(resRows, addr, &accountData, rowid, pendingResource, resourceCount, resCb)
+ if err != nil {
+ err = fmt.Errorf("failed to gather resources for account %v, addrid %d, prev address %v : %w", addr, rowid, prevAddr, err)
+ return 0, pendingBaseRow{}, pendingResourceRow{}, err
+ }
+
+ if resourcesProcessed == resourceCount {
+ // we're done with this iteration.
+ pendingBase := pendingBaseRow{
+ addr: addr,
+ rowid: rowid,
+ accountData: &accountData,
+ encodedAccountData: buf,
+ }
+ return count, pendingBase, pendingResource, nil
+ }
+ resourceCount -= resourcesProcessed
+
+ count++
+ if accountCount > 0 && count == accountCount {
+ // we're done with this iteration.
+ return count, pendingBaseRow{}, pendingResource, nil
+ }
+ prevAddr = addr
+ }
+
+ return count, pendingBaseRow{}, pendingResource, nil
+}
+
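+// processAllResources decodes the resources rows belonging to acctRowid and passes each
+// one to the callback, flagging the last resource once resourceCount is reached. A row
+// that belongs to a later account is returned as a pendingResourceRow so the caller can
+// replay it when processing that account.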
+func processAllResources(
+ resRows *sql.Rows,
+ addr basics.Address, accountData *BaseAccountData, acctRowid int64, pr pendingResourceRow, resourceCount int,
+ callback func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *ResourcesData, encodedResourceData []byte, lastResource bool) error,
+) (pendingResourceRow, int, error) {
+ var err error
+ count := 0
+
+ // Declare variables outside of the loop to prevent per-iteration allocations.
+ // At least resData escapes to the heap because it is passed by pointer to protocol.Decode().
+ var buf []byte
+ var addrid int64
+ var aidx basics.CreatableIndex
+ var resData ResourcesData
+ for {
+ if pr.addrid != 0 {
+ // some accounts may not have resources; consider the following case:
+ // accounts 1 and 3 have resources, account 2 does not.
+ // in this case addrid = 3 after processing resources from 1, but acctRowid = 2,
+ // and we need to skip accounts without resources
+ if pr.addrid > acctRowid {
+ err = callback(addr, 0, nil, nil, false)
+ return pr, count, err
+ }
+ if pr.addrid < acctRowid {
+ err = fmt.Errorf("resource table entries mismatches accountbase table entries : reached addrid %d while expecting resource for %d", pr.addrid, acctRowid)
+ return pendingResourceRow{}, count, err
+ }
+ addrid = pr.addrid
+ buf = pr.buf
+ aidx = pr.aidx
+ pr = pendingResourceRow{}
+ } else {
+ if !resRows.Next() {
+ err = callback(addr, 0, nil, nil, false)
+ if err != nil {
+ return pendingResourceRow{}, count, err
+ }
+ break
+ }
+ err = resRows.Scan(&addrid, &aidx, &buf)
+ if err != nil {
+ return pendingResourceRow{}, count, err
+ }
+ if addrid < acctRowid {
+ err = fmt.Errorf("resource table entries mismatches accountbase table entries : reached addrid %d while expecting resource for %d", addrid, acctRowid)
+ return pendingResourceRow{}, count, err
+ } else if addrid > acctRowid {
+ err = callback(addr, 0, nil, nil, false)
+ return pendingResourceRow{addrid, aidx, buf}, count, err
+ }
+ }
+ resData = ResourcesData{}
+ err = protocol.Decode(buf, &resData)
+ if err != nil {
+ return pendingResourceRow{}, count, err
+ }
+ count++
+ if resourceCount > 0 && count == resourceCount {
+ // last resource to be included in chunk
+ err := callback(addr, aidx, &resData, buf, true)
+ return pendingResourceRow{}, count, err
+ }
+ err = callback(addr, aidx, &resData, buf, false)
+ if err != nil {
+ return pendingResourceRow{}, count, err
+ }
+ }
+ return pendingResourceRow{}, count, nil
+}