package gorocksdb

// #include "rocksdb/c.h"
import "C"

import "io"

// WriteBatch is a batching of Puts, Merges and Deletes.
type WriteBatch struct {
	c *C.rocksdb_writebatch_t
}

// NewWriteBatch creates a new WriteBatch object.
func NewWriteBatch() *WriteBatch {
	return NewNativeWriteBatch(C.rocksdb_writebatch_create())
}

// NewNativeWriteBatch creates a WriteBatch object from an existing C write batch handle.
func NewNativeWriteBatch(c *C.rocksdb_writebatch_t) *WriteBatch {
	return &WriteBatch{c}
}

// WriteBatchFrom creates a write batch from a serialized WriteBatch.
func WriteBatchFrom(data []byte) *WriteBatch {
	return NewNativeWriteBatch(C.rocksdb_writebatch_create_from(byteToChar(data), C.size_t(len(data))))
}
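
// Typical usage is to queue several operations on one batch and apply them
// atomically with DB.Write. A minimal sketch, assuming a *DB named db opened
// elsewhere and gorocksdb's WriteOptions helpers:
//
//	wb := NewWriteBatch()
//	defer wb.Destroy()
//	wb.Put([]byte("key1"), []byte("value1"))
//	wb.Delete([]byte("key2"))
//	wo := NewDefaultWriteOptions()
//	defer wo.Destroy()
//	if err := db.Write(wo, wb); err != nil {
//		// handle the failed batch write
//	}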

// Put queues a key-value pair.
func (wb *WriteBatch) Put(key, value []byte) {
	cKey := byteToChar(key)
	cValue := byteToChar(value)
	C.rocksdb_writebatch_put(wb.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
}

// PutCF queues a key-value pair in a column family.
func (wb *WriteBatch) PutCF(cf *ColumnFamilyHandle, key, value []byte) {
	cKey := byteToChar(key)
	cValue := byteToChar(value)
	C.rocksdb_writebatch_put_cf(wb.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
}

// Merge queues a merge of "value" with the existing value of "key".
func (wb *WriteBatch) Merge(key, value []byte) {
	cKey := byteToChar(key)
	cValue := byteToChar(value)
	C.rocksdb_writebatch_merge(wb.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
}

// MergeCF queues a merge of "value" with the existing value of "key" in a
// column family.
func (wb *WriteBatch) MergeCF(cf *ColumnFamilyHandle, key, value []byte) {
	cKey := byteToChar(key)
	cValue := byteToChar(value)
	C.rocksdb_writebatch_merge_cf(wb.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
}

// Delete queues a deletion of the data at key.
func (wb *WriteBatch) Delete(key []byte) {
	cKey := byteToChar(key)
	C.rocksdb_writebatch_delete(wb.c, cKey, C.size_t(len(key)))
}

// DeleteCF queues a deletion of the data at key in a column family.
func (wb *WriteBatch) DeleteCF(cf *ColumnFamilyHandle, key []byte) {
	cKey := byteToChar(key)
	C.rocksdb_writebatch_delete_cf(wb.c, cf.c, cKey, C.size_t(len(key)))
}
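
// The *CF variants target a specific column family. A hedged sketch, assuming
// a *ColumnFamilyHandle named cf obtained from OpenDbColumnFamilies or
// DB.CreateColumnFamily:
//
//	wb.PutCF(cf, []byte("key"), []byte("value"))
//	wb.MergeCF(cf, []byte("counter"), []byte("1"))
//	wb.DeleteCF(cf, []byte("stale"))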

// Data returns the serialized version of this batch.
func (wb *WriteBatch) Data() []byte {
	var cSize C.size_t
	cValue := C.rocksdb_writebatch_data(wb.c, &cSize)
	return charToByte(cValue, cSize)
}

// Count returns the number of updates in the batch.
func (wb *WriteBatch) Count() int {
	return int(C.rocksdb_writebatch_count(wb.c))
}
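
// Data pairs with WriteBatchFrom for persisting and replaying a batch. A
// hedged sketch, assuming the returned slice may be backed by C memory and
// should therefore be copied before the batch is destroyed:
//
//	raw := append([]byte(nil), wb.Data()...) // copy out of C-owned memory
//	restored := WriteBatchFrom(raw)
//	defer restored.Destroy()
//	_ = restored.Count() // same number of updates as the original batch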

// NewIterator returns an iterator to iterate over the records in the batch.
func (wb *WriteBatch) NewIterator() *WriteBatchIterator {
	data := wb.Data()
	if len(data) < 8+4 {
		return &WriteBatchIterator{}
	}
	// Skip the 12-byte batch header: an 8-byte sequence number followed by a
	// 4-byte record count.
	return &WriteBatchIterator{data: data[12:]}
}
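
// Iterating the queued records is a read-only walk over the serialized batch.
// A minimal sketch:
//
//	iter := wb.NewIterator()
//	for iter.Next() {
//		rec := iter.Record()
//		// rec.Type, rec.Key and rec.Value describe one queued operation
//	}
//	if err := iter.Error(); err != nil {
//		// the batch data could not be fully parsed
//	}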

// Clear removes all the enqueued Puts, Merges and Deletes.
func (wb *WriteBatch) Clear() {
	C.rocksdb_writebatch_clear(wb.c)
}

// Destroy deallocates the WriteBatch object.
func (wb *WriteBatch) Destroy() {
	C.rocksdb_writebatch_destroy(wb.c)
	wb.c = nil
}

// WriteBatchRecordType describes the type of a batch record.
type WriteBatchRecordType byte

// Types of batch records.
const (
	WriteBatchRecordTypeDeletion WriteBatchRecordType = 0x0
	WriteBatchRecordTypeValue    WriteBatchRecordType = 0x1
	WriteBatchRecordTypeMerge    WriteBatchRecordType = 0x2
	WriteBatchRecordTypeLogData  WriteBatchRecordType = 0x3
)
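
// Put and PutCF produce Value records, Merge and MergeCF produce Merge
// records, and Delete and DeleteCF produce Deletion records. LogData records
// carry auxiliary blobs written through RocksDB's PutLogData mechanism, which
// this file does not wrap.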

// WriteBatchRecord represents a record inside a WriteBatch.
type WriteBatchRecord struct {
	Key   []byte
	Value []byte
	Type  WriteBatchRecordType
}

// WriteBatchIterator represents an iterator to iterate over the records in a
// WriteBatch.
type WriteBatchIterator struct {
	data   []byte
	record WriteBatchRecord
	err    error
}

// Next advances the iterator to the next record and returns true, or returns
// false if no further record exists or the batch data cannot be parsed. Each
// record consists of a one-byte record type, a varint-encoded key length and
// the key bytes, followed for Value and Merge records by a varint-encoded
// value length and the value bytes.
func (iter *WriteBatchIterator) Next() bool {
	if iter.err != nil || len(iter.data) == 0 {
		return false
	}
	// reset the current record
	iter.record.Key = nil
	iter.record.Value = nil
	// parse the record type
	recordType := WriteBatchRecordType(iter.data[0])
	iter.record.Type = recordType
	iter.data = iter.data[1:]
	// parse the key
	x, n := iter.decodeVarint(iter.data)
	if n == 0 {
		iter.err = io.ErrShortBuffer
		return false
	}
	k := n + int(x)
	iter.record.Key = iter.data[n:k]
	iter.data = iter.data[k:]
	// parse the value for record types that carry one
	if recordType == WriteBatchRecordTypeValue || recordType == WriteBatchRecordTypeMerge {
		x, n := iter.decodeVarint(iter.data)
		if n == 0 {
			iter.err = io.ErrShortBuffer
			return false
		}
		k := n + int(x)
		iter.record.Value = iter.data[n:k]
		iter.data = iter.data[k:]
	}
	return true
}

// Record returns the current record.
func (iter *WriteBatchIterator) Record() *WriteBatchRecord {
	return &iter.record
}

// Error returns the error, if any, that occurred while iterating.
func (iter *WriteBatchIterator) Error() error {
	return iter.err
}
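
// decodeVarint reads an unsigned base-128 varint from buf: each byte
// contributes its low 7 bits, least-significant group first, and a set high
// bit marks continuation. For example, the bytes 0x96 0x01 decode to
// (0x96 & 0x7F) | 0x01<<7 = 22 + 128 = 150. It returns n == 0 if the buffer
// ends before the varint terminates or the encoded value does not fit in
// 64 bits.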
func (iter *WriteBatchIterator) decodeVarint(buf []byte) (x uint64, n int) {
	// x, n already 0
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0
		}
		b := uint64(buf[n])
		n++
		x |= (b & 0x7F) << shift
		if (b & 0x80) == 0 {
			return x, n
		}
	}
	// The number is too large to represent in a 64-bit value.
	return 0, 0
}