Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: replace the node key #597

Closed
wants to merge 15 commits into from
10 changes: 5 additions & 5 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -55,18 +55,18 @@ lint-fix:
# bench is the basic tests that shouldn't crash an aws instance
bench:
cd benchmarks && \
go test $(LDFLAGS) -tags cleveldb,rocksdb,pebbledb -run=NOTEST -bench=Small . && \
go test $(LDFLAGS) -tags cleveldb,rocksdb,pebbledb -run=NOTEST -bench=Medium . && \
go test $(LDFLAGS) -tags pebbledb -run=NOTEST -bench=Small . && \
go test $(LDFLAGS) -tags pebbledb -run=NOTEST -bench=Medium . && \
go test $(LDFLAGS) -run=NOTEST -bench=RandomBytes .
.PHONY: bench

# fullbench is extra tests needing lots of memory and to run locally
fullbench:
cd benchmarks && \
go test $(LDFLAGS) -run=NOTEST -bench=RandomBytes . && \
go test $(LDFLAGS) -tags cleveldb,rocksdb,pebbledb -run=NOTEST -bench=Small . && \
go test $(LDFLAGS) -tags cleveldb,rocksdb,pebbledb -run=NOTEST -bench=Medium . && \
go test $(LDFLAGS) -tags cleveldb,rocksdb,pebbledb -run=NOTEST -timeout=30m -bench=Large . && \
go test $(LDFLAGS) -tags pebbledb -run=NOTEST -bench=Small . && \
go test $(LDFLAGS) -tags pebbledb -run=NOTEST -bench=Medium . && \
go test $(LDFLAGS) -tags pebbledb -run=NOTEST -timeout=30m -bench=Large . && \
go test $(LDFLAGS) -run=NOTEST -bench=Mem . && \
go test $(LDFLAGS) -run=NOTEST -timeout=60m -bench=LevelDB .
.PHONY: fullbench
Expand Down
16 changes: 8 additions & 8 deletions basic_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -192,31 +192,31 @@ func TestUnit(t *testing.T) {
}

expectSet := func(tree *MutableTree, i int, repr string, hashCount int64) {
origNode := tree.root
tree.SaveVersion()
updated, err := tree.Set(i2b(i), []byte{})
require.NoError(t, err)
// ensure node was added & structure is as expected.
if updated || P(tree.root) != repr {
if updated || P(tree.root, tree.ImmutableTree) != repr {
t.Fatalf("Adding %v to %v:\nExpected %v\nUnexpectedly got %v updated:%v",
i, P(origNode), repr, P(tree.root), updated)
i, P(tree.lastSaved.root, tree.lastSaved), repr, P(tree.root, tree.ImmutableTree), updated)
}
// ensure hash calculation requirements
expectHash(tree.ImmutableTree, hashCount)
tree.root = origNode
tree.ImmutableTree = tree.lastSaved.clone()
}

expectRemove := func(tree *MutableTree, i int, repr string, hashCount int64) {
origNode := tree.root
tree.SaveVersion()
value, removed, err := tree.Remove(i2b(i))
require.NoError(t, err)
// ensure node was added & structure is as expected.
if len(value) != 0 || !removed || P(tree.root) != repr {
if len(value) != 0 || !removed || P(tree.root, tree.ImmutableTree) != repr {
t.Fatalf("Removing %v from %v:\nExpected %v\nUnexpectedly got %v value:%v removed:%v",
i, P(origNode), repr, P(tree.root), value, removed)
i, P(tree.lastSaved.root, tree.lastSaved), repr, P(tree.root, tree.ImmutableTree), value, removed)
}
// ensure hash calculation requirements
expectHash(tree.ImmutableTree, hashCount)
tree.root = origNode
tree.ImmutableTree = tree.lastSaved.clone()
}

// Test Set cases:
Expand Down
2 changes: 1 addition & 1 deletion benchmarks/bench_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -308,7 +308,7 @@ func BenchmarkLevelDBLargeData(b *testing.B) {
{"goleveldb", 50000, 100, 32, 100},
{"goleveldb", 50000, 100, 32, 1000},
{"goleveldb", 50000, 100, 32, 10000},
{"goleveldb", 50000, 100, 32, 100000},
// {"goleveldb", 50000, 100, 32, 100000},
}
runBenchmarks(b, benchmarks)
}
Expand Down
75 changes: 42 additions & 33 deletions cache/cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,29 +6,29 @@ import (
ibytes "github.com/cosmos/iavl/internal/bytes"
)

// Node represents a node eligible for caching.
type Node interface {
GetKey() []byte
// Node[T] represents a node eligible for caching.
type Node[T any] interface {
GetKey() T
}

// Cache is an in-memory structure to persist nodes for quick access.
// Please see lruCache for more details about why we need a custom
// cache implementation.
type Cache interface {
type Cache[T any] interface {
// Adds node to cache. If full and had to remove the oldest element,
// returns the oldest, otherwise nil.
// CONTRACT: node can never be nil. Otherwise, cache panics.
Add(node Node) Node
Add(node Node[T]) Node[T]

// Returns Node for the key, if exists. nil otherwise.
Get(key []byte) Node
// Returns Node[T] for the key, if exists. nil otherwise.
Get(key T) Node[T]

// Has returns true if node with key exists in cache, false otherwise.
Has(key []byte) bool
Has(key T) bool

// Remove removes node with key from cache. The removed node is returned.
// if not in cache, return nil.
Remove(key []byte) Node
Remove(key T) Node[T]

// Len returns the cache length.
Len() int
Expand All @@ -44,33 +44,33 @@ type Cache interface {
// The alternative implementations do not allow for
// customization and the ability to estimate the byte
// size of the cache.
type lruCache struct {
dict map[string]*list.Element // FastNode cache.
maxElementCount int // FastNode the maximum number of nodes in the cache.
ll *list.List // LRU queue of cache elements. Used for deletion.
type lruCache[K comparable, T any] struct {
dict map[K]*list.Element // FastNode cache.
maxElementCount int // FastNode the maximum number of nodes in the cache.
ll *list.List // LRU queue of cache elements. Used for deletion.
}

var _ Cache = (*lruCache)(nil)
var _ Cache[int] = (*lruCache[string, int])(nil)

func New(maxElementCount int) Cache {
return &lruCache{
dict: make(map[string]*list.Element),
func New[K comparable, T any](maxElementCount int) Cache[T] {
return &lruCache[K, T]{
dict: make(map[K]*list.Element),
maxElementCount: maxElementCount,
ll: list.New(),
}
}

func (c *lruCache) Add(node Node) Node {
keyStr := ibytes.UnsafeBytesToStr(node.GetKey())
if e, exists := c.dict[keyStr]; exists {
func (c *lruCache[K, T]) Add(node Node[T]) Node[T] {
key := c.getKey(node.GetKey())
if e, exists := c.dict[key]; exists {
c.ll.MoveToFront(e)
old := e.Value
e.Value = node
return old.(Node)
return old.(Node[T])
}

elem := c.ll.PushFront(node)
c.dict[keyStr] = elem
c.dict[key] = elem

if c.ll.Len() > c.maxElementCount {
oldest := c.ll.Back()
Expand All @@ -79,32 +79,41 @@ func (c *lruCache) Add(node Node) Node {
return nil
}

func (c *lruCache) Get(key []byte) Node {
if ele, hit := c.dict[ibytes.UnsafeBytesToStr(key)]; hit {
func (c *lruCache[K, T]) Get(key T) Node[T] {
if ele, hit := c.dict[c.getKey(key)]; hit {
c.ll.MoveToFront(ele)
return ele.Value.(Node)
return ele.Value.(Node[T])
}
return nil
}

func (c *lruCache) Has(key []byte) bool {
_, exists := c.dict[ibytes.UnsafeBytesToStr(key)]
func (c *lruCache[K, T]) Has(key T) bool {
_, exists := c.dict[c.getKey(key)]
return exists
}

func (c *lruCache) Len() int {
func (c *lruCache[K, T]) Len() int {
return c.ll.Len()
}

func (c *lruCache) Remove(key []byte) Node {
if elem, exists := c.dict[ibytes.UnsafeBytesToStr(key)]; exists {
func (c *lruCache[K, T]) Remove(key T) Node[T] {
if elem, exists := c.dict[c.getKey(key)]; exists {
return c.remove(elem)
}
return nil
}

func (c *lruCache) remove(e *list.Element) Node {
removed := c.ll.Remove(e).(Node)
delete(c.dict, ibytes.UnsafeBytesToStr(removed.GetKey()))
// remove unlinks e from the LRU list and deletes its entry from the
// lookup dict, returning the evicted node.
// CONTRACT: e must be an element currently stored in c.ll whose Value
// is a Node[T]; the type assertion panics otherwise.
func (c *lruCache[K, T]) remove(e *list.Element) Node[T] {
removed := c.ll.Remove(e).(Node[T])
// Re-derive the map key from the node so dict and ll stay in sync.
delete(c.dict, c.getKey(removed.GetKey()))
return removed
}

// getKey converts a node key of type T into the comparable map key type K
// used by c.dict.
// NOTE(review): the final .(K) assertions assume K is string whenever T is
// []byte, and K == T for every other instantiation — they panic if the
// cache is constructed with any other K/T pairing. TODO confirm all New
// call sites satisfy this.
func (c *lruCache[K, T]) getKey(key T) K {
switch any(key).(type) {
case []byte:
// Zero-copy []byte -> string conversion so the bytes can serve as a
// map key; safe only if callers never mutate the key after insertion.
return any(ibytes.UnsafeBytesToStr(any(key).([]byte))).(K)
default:
return any(key).(K)
}
}
4 changes: 2 additions & 2 deletions cache/cache_bench_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ func BenchmarkAdd(b *testing.B) {
}

for name, tc := range testcases {
cache := cache.New(tc.cacheMax)
cache := cache.New[string, []byte](tc.cacheMax)
b.Run(name, func(b *testing.B) {
for i := 0; i < b.N; i++ {
b.StopTimer()
Expand All @@ -46,7 +46,7 @@ func BenchmarkAdd(b *testing.B) {
func BenchmarkRemove(b *testing.B) {
b.ReportAllocs()

cache := cache.New(1000)
cache := cache.New[string, []byte](1000)
existentKeyMirror := [][]byte{}
// Populate cache
for i := 0; i < 50; i++ {
Expand Down
20 changes: 10 additions & 10 deletions cache/cache_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ type cacheOp struct {
}

type testcase struct {
setup func(cache.Cache)
setup func(cache.Cache[[]byte])
cacheMax int
cacheOps []cacheOp
expectedNodeIndexes []int // contents of the cache once test case completes represent by indexes in testNodes
Expand All @@ -43,9 +43,9 @@ const (
testKey = "key"
)

var _ cache.Node = (*testNode)(nil)
var _ cache.Node[[]byte] = (*testNode)(nil)

var testNodes = []cache.Node{
var testNodes = []cache.Node[[]byte]{
&testNode{
key: []byte(fmt.Sprintf("%s%d", testKey, 1)),
},
Expand Down Expand Up @@ -150,7 +150,7 @@ func Test_Cache_Add(t *testing.T) {

for name, tc := range testcases {
t.Run(name, func(t *testing.T) {
cache := cache.New(tc.cacheMax)
cache := cache.New[string, []byte](tc.cacheMax)

expectedCurSize := 0

Expand Down Expand Up @@ -189,7 +189,7 @@ func Test_Cache_Remove(t *testing.T) {
},
},
"remove non-existent key, cache max 1 - nil returned": {
setup: func(c cache.Cache) {
setup: func(c cache.Cache[[]byte]) {
require.Nil(t, c.Add(testNodes[1]))
require.Equal(t, 1, c.Len())
},
Expand All @@ -203,7 +203,7 @@ func Test_Cache_Remove(t *testing.T) {
expectedNodeIndexes: []int{1},
},
"remove existent key, cache max 1 - removed": {
setup: func(c cache.Cache) {
setup: func(c cache.Cache[[]byte]) {
require.Nil(t, c.Add(testNodes[0]))
require.Equal(t, 1, c.Len())
},
Expand All @@ -216,7 +216,7 @@ func Test_Cache_Remove(t *testing.T) {
},
},
"remove twice, cache max 1 - removed first time, then nil": {
setup: func(c cache.Cache) {
setup: func(c cache.Cache[[]byte]) {
require.Nil(t, c.Add(testNodes[0]))
require.Equal(t, 1, c.Len())
},
Expand All @@ -233,7 +233,7 @@ func Test_Cache_Remove(t *testing.T) {
},
},
"remove all, cache max 3": {
setup: func(c cache.Cache) {
setup: func(c cache.Cache[[]byte]) {
require.Nil(t, c.Add(testNodes[0]))
require.Nil(t, c.Add(testNodes[1]))
require.Nil(t, c.Add(testNodes[2]))
Expand All @@ -259,7 +259,7 @@ func Test_Cache_Remove(t *testing.T) {

for name, tc := range testcases {
t.Run(name, func(t *testing.T) {
cache := cache.New(tc.cacheMax)
cache := cache.New[string, []byte](tc.cacheMax)

if tc.setup != nil {
tc.setup(cache)
Expand Down Expand Up @@ -290,7 +290,7 @@ func Test_Cache_Remove(t *testing.T) {
}
}

func validateCacheContentsAfterTest(t *testing.T, tc testcase, cache cache.Cache) {
func validateCacheContentsAfterTest(t *testing.T, tc testcase, cache cache.Cache[[]byte]) {
require.Equal(t, len(tc.expectedNodeIndexes), cache.Len())
for _, idx := range tc.expectedNodeIndexes {
expectedNode := testNodes[idx]
Expand Down
2 changes: 1 addition & 1 deletion fastnode/fast_node.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ type Node struct {
value []byte
}

var _ cache.Node = (*Node)(nil)
var _ cache.Node[[]byte] = (*Node)(nil)

// NewNode returns a new fast node from a value and version.
func NewNode(key []byte, value []byte, version int64) *Node {
Expand Down
1 change: 0 additions & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ go 1.18
require (
github.com/confio/ics23/go v0.7.0
github.com/cosmos/cosmos-db v0.0.0-20220822060143-23a8145386c0
github.com/cosmos/gogoproto v1.4.2
github.com/golang/mock v1.6.0
github.com/golangci/golangci-lint v1.50.0
github.com/stretchr/testify v1.8.0
Expand Down
2 changes: 0 additions & 2 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -136,8 +136,6 @@ github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8Nz
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/cosmos/cosmos-db v0.0.0-20220822060143-23a8145386c0 h1:OMu+dCsWVVsHodR4ykMKEj0VtwkNL+xOtyv0vmCmZVQ=
github.com/cosmos/cosmos-db v0.0.0-20220822060143-23a8145386c0/go.mod h1:n5af5ISKZ7tP0q9hP1TW6MnWh7GrVrNfCLZhg+22gzg=
github.com/cosmos/gogoproto v1.4.2 h1:UeGRcmFW41l0G0MiefWhkPEVEwvu78SZsHBvI78dAYw=
github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
Expand Down
20 changes: 15 additions & 5 deletions import.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ import (
"fmt"

db "github.com/cosmos/cosmos-db"
"github.com/cosmos/iavl/internal/encoding"
)

// maxBatchSize is the maximum size of the import batch before flushing it to the database
Expand Down Expand Up @@ -137,7 +138,7 @@ func (i *Importer) Add(exportNode *ExportNode) error {
bytesCopy := make([]byte, buf.Len())
copy(bytesCopy, buf.Bytes())

if err = i.batch.Set(i.tree.ndb.nodeKey(node.hash), bytesCopy); err != nil {
if err = i.batch.Set(i.tree.ndb.nodeKey(node.nodeKey), bytesCopy); err != nil {
return err
}

Expand All @@ -154,9 +155,9 @@ func (i *Importer) Add(exportNode *ExportNode) error {

// Update the stack now that we know there were no errors
switch {
case node.leftHash != nil && node.rightHash != nil:
case node.leftNode != nil && node.rightNode != nil:
i.stack = i.stack[:stackSize-2]
case node.leftHash != nil || node.rightHash != nil:
case node.leftNode != nil || node.rightNode != nil:
i.stack = i.stack[:stackSize-1]
}
i.stack = append(i.stack, node)
Expand All @@ -174,11 +175,20 @@ func (i *Importer) Commit() error {

switch len(i.stack) {
case 0:
if err := i.batch.Set(i.tree.ndb.rootKey(i.version), []byte{}); err != nil {
if err := i.batch.Set(i.tree.ndb.rootKey(i.version), []byte{0, 0}); err != nil {
return err
}
case 1:
if err := i.batch.Set(i.tree.ndb.rootKey(i.version), i.stack[0].hash); err != nil {
buf := new(bytes.Buffer)
err := encoding.EncodeVarint(buf, i.stack[0].nodeKey)
if err != nil {
return err
}
err = encoding.EncodeVarint(buf, i.tree.nonce)
if err != nil {
return err
}
if err := i.batch.Set(i.tree.ndb.rootKey(i.version), buf.Bytes()); err != nil {
return err
}
default:
Expand Down
Loading