catchpoints: small tweaks, mostly to comments (#5195)
jannotti authored Mar 13, 2023
1 parent 9e121f8 commit c556cfc
Showing 8 changed files with 217 additions and 70 deletions.
26 changes: 13 additions & 13 deletions crypto/merkletrie/cache.go
@@ -39,13 +39,13 @@ const (
// be found neither in the in-memory cache nor on the persistent storage.
var ErrLoadedPageMissingNode = errors.New("loaded page is missing a node")

-// ErrPageDecodingFailuire is returned if the decoding of a page has failed.
-var ErrPageDecodingFailuire = errors.New("error encountered while decoding page")
+// ErrPageDecodingFailure is returned if the decoding of a page has failed.
+var ErrPageDecodingFailure = errors.New("error encountered while decoding page")

type merkleTrieCache struct {
-// mt is a point to the originating trie
+// mt is a pointer to the originating trie
mt *Trie
-// committer is the backing up storage for the cache. ( memory, database, etc. )
+// committer is the backing store for the cache. ( memory, database, etc. )
committer Committer
// cachedNodeCount is the number of currently cached, in-memory, nodes stored in the pageToNIDsPtr structure.
cachedNodeCount int
@@ -293,7 +293,7 @@ func (mtc *merkleTrieCache) beginTransaction() {
mtc.txNextNodeID = mtc.mt.nextNodeID
}

-// commitTransaction - used internaly by the Trie
+// commitTransaction - used internally by the Trie
func (mtc *merkleTrieCache) commitTransaction() {
// the created nodes are already on the list.
for nodeID := range mtc.txCreatedNodeIDs {
@@ -426,7 +426,7 @@ func (mtc *merkleTrieCache) commit() (CommitStats, error) {

// reallocatePendingPages is called by the commit() function, and is responsible for performing two tasks -
// 1. calculate the hashes of all the newly created nodes
-// 2. reornigize the pending flush nodes into an optimal page list, and construct a list of pages that need to be created, deleted and updated.
+// 2. reorganize the pending flush nodes into an optimal page list, and construct a list of pages that need to be created, deleted and updated.
func (mtc *merkleTrieCache) reallocatePendingPages(stats *CommitStats) (pagesToCreate []uint64, pagesToDelete map[uint64]bool, pagesToUpdate map[uint64]map[storedNodeIdentifier]*node, err error) {
// newPageThreshold is the threshold at which all the pages are newly created pages that were never committed.
newPageThreshold := uint64(mtc.mt.lastCommittedNodeID) / uint64(mtc.nodesPerPage)
@@ -669,26 +669,26 @@ func (mtc *merkleTrieCache) reallocateNode(nid storedNodeIdentifier) storedNodeI
func decodePage(bytes []byte) (nodesMap map[storedNodeIdentifier]*node, err error) {
version, versionLength := binary.Uvarint(bytes[:])
if versionLength <= 0 {
-return nil, ErrPageDecodingFailuire
+return nil, ErrPageDecodingFailure
}
-if version != NodePageVersion {
-return nil, ErrPageDecodingFailuire
+if version != nodePageVersion {
+return nil, ErrPageDecodingFailure
}
nodesCount, nodesCountLength := binary.Varint(bytes[versionLength:])
if nodesCountLength <= 0 {
-return nil, ErrPageDecodingFailuire
+return nil, ErrPageDecodingFailure
}
nodesMap = make(map[storedNodeIdentifier]*node)
walk := nodesCountLength + versionLength
for i := int64(0); i < nodesCount; i++ {
nodeID, nodesIDLength := binary.Uvarint(bytes[walk:])
if nodesIDLength <= 0 {
-return nil, ErrPageDecodingFailuire
+return nil, ErrPageDecodingFailure
}
walk += nodesIDLength
pnode, nodeLength := deserializeNode(bytes[walk:])
if nodeLength <= 0 {
-return nil, ErrPageDecodingFailuire
+return nil, ErrPageDecodingFailure
}
walk += nodeLength
nodesMap[storedNodeIdentifier(nodeID)] = pnode
Expand All @@ -699,7 +699,7 @@ func decodePage(bytes []byte) (nodesMap map[storedNodeIdentifier]*node, err erro

// encodePage encodes a page's contents into a byte array
func (mtc *merkleTrieCache) encodePage(nodeIDs map[storedNodeIdentifier]*node, serializedBuffer []byte) []byte {
-version := binary.PutUvarint(serializedBuffer[:], NodePageVersion)
+version := binary.PutUvarint(serializedBuffer[:], nodePageVersion)
length := binary.PutVarint(serializedBuffer[version:], int64(len(nodeIDs)))
walk := version + length
for nodeID, pnode := range nodeIDs {
4 changes: 2 additions & 2 deletions crypto/merkletrie/cache_test.go
@@ -298,7 +298,7 @@ func (mt *Trie) TestDeleteRollback(d []byte) (bool, error) {
if err != nil {
return false, err
}
-found, err := pnode.find(mt.cache, d[:])
+found, err := pnode.find(&mt.cache, d[:])
if !found || err != nil {
return false, err
}
Expand All @@ -311,7 +311,7 @@ func (mt *Trie) TestDeleteRollback(d []byte) (bool, error) {
mt.elementLength = 0
return true, nil
}
-_, err = pnode.remove(mt.cache, d[:], make([]byte, 0, len(d)))
+_, err = pnode.remove(&mt.cache, d[:], make([]byte, 0, len(d)))
// unlike the "real" function, we always want to fail here to test the rollbackTransaction() functionality.
mt.cache.rollbackTransaction()
return false, fmt.Errorf("this is a test for failing a Delete request")
2 changes: 1 addition & 1 deletion crypto/merkletrie/committer.go
@@ -26,7 +26,7 @@ const (
inMemoryCommitterPageSize = int64(512)
)

-// InMemoryCommitter is a fully function in-memory committer, supporting
+// InMemoryCommitter is a fully functional in-memory committer, supporting
// persistence of pages.
type InMemoryCommitter struct {
memStore map[uint64][]byte
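InMemoryCommitter backs the cache with the memStore map shown above, keyed by page number. A rough sketch of the store/load pair such a committer provides — the method names StorePage and LoadPage match this package, but treat the exact signatures and nil-handling here as assumptions:

func (mc *InMemoryCommitter) StorePage(page uint64, content []byte) error {
	if mc.memStore == nil {
		mc.memStore = make(map[uint64][]byte)
	}
	mc.memStore[page] = content
	return nil
}

func (mc *InMemoryCommitter) LoadPage(page uint64) (content []byte, err error) {
	return mc.memStore[page], nil // a nil result means the page was never stored
}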
10 changes: 5 additions & 5 deletions crypto/merkletrie/committer_test.go
@@ -140,18 +140,18 @@ func TestNoRedundentPages(t *testing.T) {
require.Equal(t, nodesCount, mt1.cache.cachedNodeCount)
}

-// decodePage decodes a byte array into a page content
+// decodePageHeaderSize decodes a page header at the start of a byte array
func decodePageHeaderSize(bytes []byte) (headerSize int, err error) {
version, versionLength := binary.Uvarint(bytes[:])
if versionLength <= 0 {
-return 0, ErrPageDecodingFailuire
+return 0, ErrPageDecodingFailure
}
-if version != NodePageVersion {
-return 0, ErrPageDecodingFailuire
+if version != nodePageVersion {
+return 0, ErrPageDecodingFailure
}
_, nodesCountLength := binary.Varint(bytes[versionLength:])
if nodesCountLength <= 0 {
-return 0, ErrPageDecodingFailuire
+return 0, ErrPageDecodingFailure
}
return nodesCountLength + versionLength, nil
}
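The header that decodePageHeaderSize measures is just two varints: a uvarint page version followed by a varint node count (decodePage, earlier in this diff, continues past the header with one uvarint node ID plus one serialized node per entry). A minimal round-trip sketch of the header alone, with an illustrative version value rather than the package's real nodePageVersion:

buf := make([]byte, 2*binary.MaxVarintLen64)
n := binary.PutUvarint(buf, 1)           // illustrative page version
n += binary.PutVarint(buf[n:], 3)        // node count
version, vLen := binary.Uvarint(buf)     // version == 1
count, cLen := binary.Varint(buf[vLen:]) // count == 3
headerSize := vLen + cLen                // == n, what decodePageHeaderSize returns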
20 changes: 9 additions & 11 deletions crypto/merkletrie/node.go
@@ -140,8 +140,9 @@ func (n *node) add(cache *merkleTrieCache, d []byte, path []byte) (nodeID stored
}
pnode.hash = append(path, d[:idiff]...)

+// create ancestors from pnode up to the new split
for i := idiff - 1; i >= 0; i-- {
-// create a parent node for pnode.
+// create a parent node for pnode, and move up
pnode2, nodeID2 := cache.allocateNewNode()
pnode2.childrenMask.SetBit(d[i])
pnode2.children = []childEntry{
Expand All @@ -152,24 +153,21 @@ func (n *node) add(cache *merkleTrieCache, d []byte, path []byte) (nodeID stored
}
pnode2.hash = append(path, d[:i]...)

pnode = pnode2
nodeID = nodeID2
}
return nodeID, nil
}

if n.childrenMask.Bit(d[0]) == false {
// no such child.
-var childNode *node
-var childNodeID storedNodeIdentifier
-childNode, childNodeID = cache.allocateNewNode()
+childNode, childNodeID := cache.allocateNewNode()
childNode.hash = d[1:]

pnode, nodeID = cache.allocateNewNode()
pnode.childrenMask = n.childrenMask
pnode.childrenMask.SetBit(d[0])

-pnode.children = make([]childEntry, len(n.children)+1, len(n.children)+1)
+pnode.children = make([]childEntry, len(n.children)+1)
if d[0] > n.children[len(n.children)-1].hashIndex {
// the new entry comes after all the existing ones.
for i, child := range n.children {
Expand All @@ -183,8 +181,8 @@ func (n *node) add(cache *merkleTrieCache, d []byte, path []byte) (nodeID stored
for i, child := range n.children {
if d[0] < child.hashIndex {
pnode.children[i] = childEntry{
-hashIndex: d[0],
id: childNodeID,
+hashIndex: d[0],
}
// copy the rest of the items.
for ; i < len(n.children); i++ {
Expand All @@ -211,7 +209,7 @@ func (n *node) add(cache *merkleTrieCache, d []byte, path []byte) (nodeID stored
pnode, nodeID = childNode, cache.refurbishNode(curNodeID)
pnode.childrenMask = n.childrenMask
if len(pnode.children) < len(n.children) {
-pnode.children = make([]childEntry, len(n.children), len(n.children))
+pnode.children = make([]childEntry, len(n.children))
} else {
pnode.children = pnode.children[:len(n.children)]
}
@@ -270,7 +268,7 @@ func (n *node) remove(cache *merkleTrieCache, key []byte, path []byte) (nodeID s
pnode, nodeID = childNode, cache.refurbishNode(childNodeID)
pnode.childrenMask = n.childrenMask
// we are guaranteed to have other children, because our tree forbids nodes that have exactly one leaf child and no other children.
-pnode.children = make([]childEntry, len(n.children)-1, len(n.children)-1)
+pnode.children = make([]childEntry, len(n.children)-1)
copy(pnode.children, append(n.children[:childIndex], n.children[childIndex+1:]...))
pnode.childrenMask.ClearBit(key[0])
} else {
Expand All @@ -283,7 +281,7 @@ func (n *node) remove(cache *merkleTrieCache, key []byte, path []byte) (nodeID s
pnode, nodeID = childNode, cache.refurbishNode(childNodeID)
pnode.childrenMask = n.childrenMask
if len(pnode.children) < len(n.children) {
-pnode.children = make([]childEntry, len(n.children), len(n.children))
+pnode.children = make([]childEntry, len(n.children))
} else {
pnode.children = pnode.children[:len(n.children)]
}
@@ -371,7 +369,7 @@ func deserializeNode(buf []byte) (n *node, s int) {
prevChildIndex = childIndex
i++
}
-n.children = make([]childEntry, i, i)
+n.children = make([]childEntry, i)
copy(n.children, childEntries[:i])
return
}
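A note on the repeated make simplifications above: Go guarantees that make([]T, n) yields a slice whose capacity equals its length, so the explicit second argument in make([]childEntry, n, n) was redundant. For instance:

a := make([]childEntry, 5)
b := make([]childEntry, 5, 5)
// len(a) == len(b) and cap(a) == cap(b): the two forms are equivalent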
100 changes: 88 additions & 12 deletions crypto/merkletrie/node_test.go
@@ -17,6 +17,8 @@
package merkletrie

import (
"crypto/sha512"
"encoding/binary"
"testing"

"github.com/stretchr/testify/require"
@@ -67,17 +69,24 @@ func (n *node) leafUsingChildrenLength() bool {
return len(n.children) == 0
}

+func makeHashes(n int) [][]byte {
+hashes := make([][]byte, n)
+for i := 0; i < len(hashes); i++ {
+buf := make([]byte, 32)
+binary.BigEndian.PutUint64(buf, uint64(i))
+h := crypto.Hash(buf)
+hashes[i] = h[:]
+}
+return hashes
+}

func BenchmarkNodeLeafImplementation(b *testing.B) {
+hashes := makeHashes(100000)

b.Run("leaf-ChildrenMask", func(b *testing.B) {
var memoryCommitter InMemoryCommitter
memConfig := defaultTestMemoryConfig
mt1, _ := MakeTrie(&memoryCommitter, memConfig)
-// create 100000 hashes.
-leafsCount := 100000
-hashes := make([]crypto.Digest, leafsCount)
-for i := 0; i < len(hashes); i++ {
-hashes[i] = crypto.Hash([]byte{byte(i % 256), byte((i / 256) % 256), byte(i / 65536)})
-}

for i := 0; i < len(hashes); i++ {
mt1.Add(hashes[i][:])
Expand All @@ -100,12 +109,6 @@ func BenchmarkNodeLeafImplementation(b *testing.B) {
var memoryCommitter InMemoryCommitter
memConfig := defaultTestMemoryConfig
mt1, _ := MakeTrie(&memoryCommitter, memConfig)
-// create 100000 hashes.
-leafsCount := 100000
-hashes := make([]crypto.Digest, leafsCount)
-for i := 0; i < len(hashes); i++ {
-hashes[i] = crypto.Hash([]byte{byte(i % 256), byte((i / 256) % 256), byte(i / 65536)})
-}

for i := 0; i < len(hashes); i++ {
mt1.Add(hashes[i][:])
Expand All @@ -125,3 +128,76 @@ func BenchmarkNodeLeafImplementation(b *testing.B) {
}
})
}

+// calculateHashIncrementally uses the Writer interface to the crypto digest to
+// avoid accumulating in a buffer. Yet it's slower! I don't know why, but
+// leaving it here to benchmark more carefully later. (The final use of
+// d.Sum(nil) instead of d.Sum(n.hash[:0]) is needed because we share the
+// backing array for the slices in node hashes. But that is not the cause of the
+// slowdown.)
+func (n *node) calculateHashIncrementally(cache *merkleTrieCache) error {
+if n.leaf() {
+return nil
+}
+path := n.hash
+
+d := sha512.New512_256()
+
+// we add this string length before the actual string so it could get "decoded"; in practice, it makes a good domain separator.
+d.Write([]byte{byte(len(path))})
+d.Write(path)
+for _, child := range n.children {
+childNode, err := cache.getNode(child.id)
+if err != nil {
+return err
+}
+if childNode.leaf() {
+d.Write([]byte{0})
+} else {
+d.Write([]byte{1})
+}
+// we add this string length before the actual string so it could get "decoded"; in practice, it makes a good domain separator.
+d.Write([]byte{byte(len(childNode.hash))})
+d.Write([]byte{child.hashIndex}) // adding the first byte of the child
+d.Write(childNode.hash) // adding the remainder of the child
+}
+n.hash = d.Sum(nil)
+return nil
+}
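An aside on the d.Sum(nil) remark in the comment above: hash.Hash.Sum appends the digest to the slice it is given, so d.Sum(n.hash[:0]) would write into the array backing n.hash, which this trie shares between node hash slices. A standalone sketch of the aliasing hazard (illustration only, not trie code):

backing := make([]byte, 4, 64)
copy(backing, []byte{1, 2, 3, 4})
d := sha512.New512_256()
d.Write([]byte("data"))
sum := d.Sum(backing[:0]) // spare capacity suffices, so the digest is appended in place
// &sum[0] == &backing[0]: same array — backing's first bytes were overwritten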

+func BenchmarkAdd(b *testing.B) {
+b.ReportAllocs()
+
+memConfig := defaultTestMemoryConfig
+mt, _ := MakeTrie(&InMemoryCommitter{}, memConfig)
+hashes := makeHashes(b.N)
+
+b.ResetTimer()
+
+for i := 0; i < b.N; i++ {
+mt.Add(hashes[i])
+if i%1000 == 999 {
+mt.Commit() // not sure how often we should Commit for a nice benchmark
+}
+}
+}
+
+func BenchmarkDelete(b *testing.B) {
+b.ReportAllocs()
+
+memConfig := defaultTestMemoryConfig
+mt, _ := MakeTrie(&InMemoryCommitter{}, memConfig)
+hashes := makeHashes(b.N)
+for i := 0; i < b.N; i++ {
+mt.Add(hashes[i])
+}
+
+b.ResetTimer()
+
+for i := 0; i < b.N; i++ {
+mt.Delete(hashes[i])
+if i%1000 == 999 { // not sure how often we should Commit for a nice benchmark
+mt.Commit()
+}
+}
+}
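Both benchmarks size their input with makeHashes(b.N), so they run under the standard tooling as-is, e.g. go test -run='^$' -bench='BenchmarkAdd|BenchmarkDelete' ./crypto/merkletrie; b.ReportAllocs already turns on per-op allocation reporting.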
