snapshot_test.go

// Copyright 2012 The LevelDB-Go and Pebble and Bitalostored Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package bitalostable

import (
	"bytes"
	"fmt"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/cockroachdb/errors"
	"github.com/stretchr/testify/require"
	"github.com/zuoyebang/bitalostable/internal/datadriven"
	"github.com/zuoyebang/bitalostable/vfs"
)

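// TestSnapshotListToSlice checks that snapshotList.toSlice returns the
// pushed snapshots' sequence numbers in insertion order.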
func TestSnapshotListToSlice(t *testing.T) {
	testCases := []struct {
		vals []uint64
	}{
		{nil},
		{[]uint64{1}},
		{[]uint64{1, 2, 3}},
		{[]uint64{3, 2, 1}},
	}
	for _, c := range testCases {
		t.Run("", func(t *testing.T) {
			var l snapshotList
			l.init()
			for _, v := range c.vals {
				l.pushBack(&Snapshot{seqNum: v})
			}
			slice := l.toSlice()
			if !reflect.DeepEqual(c.vals, slice) {
				t.Fatalf("expected %d, but got %d", c.vals, slice)
			}
		})
	}
}

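// TestSnapshot runs the data-driven cases in testdata/snapshot. The "define"
// command builds a fresh in-memory DB and applies set, del, merge, snapshot,
// and compact operations; the "iter" command reads either through the DB or
// through a named snapshot.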
func TestSnapshot(t *testing.T) {
	var d *DB
	var snapshots map[string]*Snapshot

	close := func() {
		for _, s := range snapshots {
			require.NoError(t, s.Close())
		}
		snapshots = nil
		if d != nil {
			require.NoError(t, d.Close())
			d = nil
		}
	}
	defer close()

	datadriven.RunTest(t, "testdata/snapshot", func(td *datadriven.TestData) string {
		switch td.Cmd {
		case "define":
			close()

			var err error
			d, err = Open("", &Options{
				FS: vfs.NewMem(),
			})
			if err != nil {
				return err.Error()
			}
			snapshots = make(map[string]*Snapshot)

			for _, line := range strings.Split(td.Input, "\n") {
				parts := strings.Fields(line)
				if len(parts) == 0 {
					continue
				}
				var err error
				switch parts[0] {
				case "set":
					if len(parts) != 3 {
						return fmt.Sprintf("%s expects 2 arguments", parts[0])
					}
					err = d.Set([]byte(parts[1]), []byte(parts[2]), nil)
				case "del":
					if len(parts) != 2 {
						return fmt.Sprintf("%s expects 1 argument", parts[0])
					}
					err = d.Delete([]byte(parts[1]), nil)
				case "merge":
					if len(parts) != 3 {
						return fmt.Sprintf("%s expects 2 arguments", parts[0])
					}
					err = d.Merge([]byte(parts[1]), []byte(parts[2]), nil)
				case "snapshot":
					if len(parts) != 2 {
						return fmt.Sprintf("%s expects 1 argument", parts[0])
					}
					snapshots[parts[1]] = d.NewSnapshot()
				case "compact":
					if len(parts) != 2 {
						return fmt.Sprintf("%s expects 1 argument", parts[0])
					}
					keys := strings.Split(parts[1], "-")
					if len(keys) != 2 {
						return fmt.Sprintf("malformed key range: %s", parts[1])
					}
					err = d.Compact([]byte(keys[0]), []byte(keys[1]), false)
				default:
					return fmt.Sprintf("unknown op: %s", parts[0])
				}
				if err != nil {
					return err.Error()
				}
			}
			return ""

		case "iter":
			var iter *Iterator
			if len(td.CmdArgs) == 1 {
				if td.CmdArgs[0].Key != "snapshot" {
					return fmt.Sprintf("unknown argument: %s", td.CmdArgs[0])
				}
				if len(td.CmdArgs[0].Vals) != 1 {
					return fmt.Sprintf("%s expects 1 value: %s", td.CmdArgs[0].Key, td.CmdArgs[0])
				}
				name := td.CmdArgs[0].Vals[0]
				snapshot := snapshots[name]
				if snapshot == nil {
					return fmt.Sprintf("unable to find snapshot \"%s\"", name)
				}
				iter = snapshot.NewIter(nil)
			} else {
				iter = d.NewIter(nil)
			}
			defer iter.Close()

			var b bytes.Buffer
			for _, line := range strings.Split(td.Input, "\n") {
				parts := strings.Fields(line)
				if len(parts) == 0 {
					continue
				}
				switch parts[0] {
				case "first":
					iter.First()
				case "last":
					iter.Last()
				case "seek-ge":
					if len(parts) != 2 {
						return "seek-ge <key>\n"
					}
					iter.SeekGE([]byte(strings.TrimSpace(parts[1])))
				case "seek-lt":
					if len(parts) != 2 {
						return "seek-lt <key>\n"
					}
					iter.SeekLT([]byte(strings.TrimSpace(parts[1])))
				case "next":
					iter.Next()
				case "prev":
					iter.Prev()
				default:
					return fmt.Sprintf("unknown op: %s", parts[0])
				}
				if iter.Valid() {
					fmt.Fprintf(&b, "%s:%s\n", iter.Key(), iter.Value())
				} else if err := iter.Error(); err != nil {
					fmt.Fprintf(&b, "err=%v\n", err)
				} else {
					fmt.Fprintf(&b, ".\n")
				}
			}
			return b.String()

		default:
			return fmt.Sprintf("unknown command: %s", td.Cmd)
		}
	})
}

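// TestSnapshotClosed verifies that using a snapshot after Close panics with
// ErrClosed. The catch helper converts those panics into errors so they can
// be asserted on.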
func TestSnapshotClosed(t *testing.T) {
	d, err := Open("", &Options{
		FS: vfs.NewMem(),
	})
	require.NoError(t, err)

	catch := func(f func()) (err error) {
		defer func() {
			if r := recover(); r != nil {
				err = r.(error)
			}
		}()
		f()
		return nil
	}

	snap := d.NewSnapshot()
	require.NoError(t, snap.Close())
	require.True(t, errors.Is(catch(func() { _ = snap.Close() }), ErrClosed))
	require.True(t, errors.Is(catch(func() { _, _, _ = snap.Get(nil) }), ErrClosed))
	require.True(t, errors.Is(catch(func() { snap.NewIter(nil) }), ErrClosed))

	require.NoError(t, d.Close())
}

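// TestSnapshotRangeDeletionStress interleaves Sets and an ever-widening
// DeleteRange across many runs, taking a snapshot after each run, and then
// verifies in parallel that every snapshot observes exactly the number of
// keys that were live when it was taken.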
func TestSnapshotRangeDeletionStress(t *testing.T) {
	const runs = 200
	const middleKey = runs * runs

	d, err := Open("", &Options{
		FS: vfs.NewMem(),
	})
	require.NoError(t, err)

	mkkey := func(k int) []byte {
		return []byte(fmt.Sprintf("%08d", k))
	}
	v := []byte("hello world")

	snapshots := make([]*Snapshot, 0, runs)
	for r := 0; r < runs; r++ {
		// We use a keyspace that is 2*runs*runs wide. In other words there are
		// 2*runs sections of the keyspace, each with runs elements. On every
		// run, we write to the r-th element of each section of the keyspace.
		for i := 0; i < 2*runs; i++ {
			err := d.Set(mkkey(runs*i+r), v, nil)
			require.NoError(t, err)
		}
		// Now we delete some of the keyspace through a DeleteRange. We delete from
		// the middle of the keyspace outwards. The keyspace is made of 2*runs
		// sections, and we delete an additional two of these sections per run.
		err := d.DeleteRange(mkkey(middleKey-runs*r), mkkey(middleKey+runs*r), nil)
		require.NoError(t, err)

		snapshots = append(snapshots, d.NewSnapshot())
	}

	// Check that all the snapshots contain the expected number of keys.
	// Iterating over so many keys is slow, so do it in parallel.
	var wg sync.WaitGroup
	sem := make(chan struct{}, runtime.GOMAXPROCS(0))
	for r := range snapshots {
		wg.Add(1)
		sem <- struct{}{}
		go func(r int) {
			defer func() {
				<-sem
				wg.Done()
			}()

			// Count the keys at this snapshot.
			iter := snapshots[r].NewIter(nil)
			var keysFound int
			for iter.First(); iter.Valid(); iter.Next() {
				keysFound++
			}
			err := firstError(iter.Error(), iter.Close())
			if err != nil {
				t.Error(err)
				return
			}

			// At the time that this snapshot was taken, (r+1)*2*runs unique keys
			// were Set (one in each of the 2*runs sections per run). But this
			// run also deleted the 2*r middlemost sections. When this snapshot
			// was taken, a Set to each of those sections had been made (r+1)
			// times, so 2*r*(r+1) previously-set keys are now deleted.
			keysExpected := (r+1)*2*runs - 2*r*(r+1)
			if keysFound != keysExpected {
				t.Errorf("%d: found %d keys, want %d", r, keysFound, keysExpected)
			}
			if err := snapshots[r].Close(); err != nil {
				t.Error(err)
			}
		}(r)
	}
	wg.Wait()
	require.NoError(t, d.Close())
}

// TestNewSnapshotRace tests atomicity of NewSnapshot.
//
// It tests for a regression of a previous race condition in which NewSnapshot
// would retrieve the visible sequence number for a new snapshot before
// locking the database mutex to add the snapshot. A write and flush that
// occurred between the reading of the sequence number and appending the
// snapshot could drop keys required by the snapshot.
func TestNewSnapshotRace(t *testing.T) {
	const runs = 10
	d, err := Open("", &Options{FS: vfs.NewMem()})
	require.NoError(t, err)
	v := []byte(`foo`)
	ch := make(chan string)

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for k := range ch {
			if err := d.Set([]byte(k), v, nil); err != nil {
				t.Error(err)
				return
			}
			if err := d.Flush(); err != nil {
				t.Error(err)
				return
			}
		}
	}()

	for i := 0; i < runs; i++ {
		// This main test goroutine sets `k` before creating a new snapshot.
		// The key `k` should always be present within the snapshot.
		k := fmt.Sprintf("key%06d", i)
		require.NoError(t, d.Set([]byte(k), v, nil))

		// Lock d.mu in another goroutine so that our call to NewSnapshot
		// will need to contend for d.mu.
		wg.Add(1)
		locked := make(chan struct{})
		go func() {
			defer wg.Done()
			d.mu.Lock()
			close(locked)
			time.Sleep(20 * time.Millisecond)
			d.mu.Unlock()
		}()
		<-locked

		// Tell the other goroutine to overwrite `k` with a later sequence
		// number. It's indeterminate which key we'll read, but we should
		// always read one of them.
		ch <- k
		s := d.NewSnapshot()
		_, c, err := s.Get([]byte(k))
		require.NoError(t, err)
		require.NoError(t, c.Close())
		require.NoError(t, s.Close())
	}
	close(ch)
	wg.Wait()
	require.NoError(t, d.Close())
}
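
// ExampleDB_NewSnapshot is a minimal usage sketch, not part of the original
// test suite. It uses only API calls that already appear in the tests above
// (Open, Set, NewSnapshot, Get, Close) and assumes the (value, closer, error)
// Get signature seen in TestNewSnapshotRace, with a []byte-printable value.
// It shows the property these tests exercise: a snapshot keeps observing the
// state that was visible when NewSnapshot was called, even after later writes.
func ExampleDB_NewSnapshot() {
	d, err := Open("", &Options{FS: vfs.NewMem()})
	if err != nil {
		panic(err)
	}

	// Write a key, capture a snapshot, then overwrite the key. A read through
	// the snapshot is expected to still return the older value.
	if err := d.Set([]byte("k"), []byte("old"), nil); err != nil {
		panic(err)
	}
	s := d.NewSnapshot()
	if err := d.Set([]byte("k"), []byte("new"), nil); err != nil {
		panic(err)
	}

	v, closer, err := s.Get([]byte("k"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", v) // expected to print "old" under snapshot isolation
	if err := closer.Close(); err != nil {
		panic(err)
	}
	if err := s.Close(); err != nil {
		panic(err)
	}
	if err := d.Close(); err != nil {
		panic(err)
	}
}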