cache.go
package cache

import (
"context"
"sync"
"time"
"github.com/Code-Hex/go-generics-cache/policy/clock"
"github.com/Code-Hex/go-generics-cache/policy/fifo"
"github.com/Code-Hex/go-generics-cache/policy/lfu"
"github.com/Code-Hex/go-generics-cache/policy/lru"
"github.com/Code-Hex/go-generics-cache/policy/mru"
"github.com/Code-Hex/go-generics-cache/policy/simple"
)
// Interface is the common cache interface implemented by every replacement policy.
type Interface[K comparable, V any] interface {
// Get looks up a key's value from the cache.
Get(key K) (value V, ok bool)
// Set sets a value to the cache with key, replacing any existing value.
Set(key K, val V)
// Keys returns the keys of the cache. The order depends on the replacement algorithm.
Keys() []K
// Delete deletes the item with provided key from the cache.
Delete(key K)
// Len returns the number of items in the cache.
Len() int
}
var (
_ = []Interface[struct{}, any]{
(*simple.Cache[struct{}, any])(nil),
(*lru.Cache[struct{}, any])(nil),
(*lfu.Cache[struct{}, any])(nil),
(*fifo.Cache[struct{}, any])(nil),
(*mru.Cache[struct{}, any])(nil),
(*clock.Cache[struct{}, any])(nil),
}
)
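
// exampleInterfaceUsage is a minimal usage sketch (for illustration only, not
// part of the package API): any policy package, here the simple policy, can be
// driven directly through Interface.
func exampleInterfaceUsage() {
	var c Interface[string, int] = simple.NewCache[string, int]()
	c.Set("a", 1)
	if v, ok := c.Get("a"); ok {
		_ = v // 1
	}
}
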
// Item is a cache item holding a key, a value, and optional expiration and reference-count metadata.
type Item[K comparable, V any] struct {
Key K
Value V
Expiration time.Time
InitialReferenceCount int
}
func (item *Item[K, V]) hasExpiration() bool {
return !item.Expiration.IsZero()
}
// Expired returns true if the item has expired.
func (item *Item[K, V]) Expired() bool {
if !item.hasExpiration() {
return false
}
return nowFunc().After(item.Expiration)
}
// GetReferenceCount returns the reference count to be used when the cache
// item is set for the first time.
func (item *Item[K, V]) GetReferenceCount() int {
return item.InitialReferenceCount
}
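
// exampleItemExpiration is a minimal sketch (for illustration only): an Item
// with a zero Expiration never expires, while an Item whose Expiration lies in
// the past reports Expired() == true.
func exampleItemExpiration() {
	never := &Item[string, int]{Key: "a", Value: 1}
	_ = never.Expired() // false: no expiration was set

	past := &Item[string, int]{Key: "b", Value: 2, Expiration: time.Now().Add(-time.Second)}
	_ = past.Expired() // true: the expiration has already passed
}
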
var nowFunc = time.Now
// ItemOption is an option for a cache item.
type ItemOption func(*itemOptions)
type itemOptions struct {
expiration time.Time // default none
referenceCount int
}
// WithExpiration is an option to set an expiration time for an item.
// If the expiration is zero or a negative value, it is treated as having no expiration.
func WithExpiration(exp time.Duration) ItemOption {
return func(o *itemOptions) {
o.expiration = nowFunc().Add(exp)
}
}
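
// exampleWithExpiration is a minimal usage sketch (for illustration only),
// assuming a string/int cache: the item is visible to Get until five minutes
// have passed, after which Get reports ok == false.
func exampleWithExpiration() {
	c := New[string, int]()
	c.Set("token", 42, WithExpiration(5*time.Minute))
	_, ok := c.Get("token")
	_ = ok // true before the expiration, false afterwards
}
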
// WithReferenceCount is an option to set the reference count for an item.
// This option is only applicable to cache policies that have a reference count (e.g., Clock, LFU).
// referenceCount specifies the reference count value to set for the cache item.
//
// The default is 1.
func WithReferenceCount(referenceCount int) ItemOption {
return func(o *itemOptions) {
o.referenceCount = referenceCount
}
}
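
// exampleWithReferenceCount is a minimal usage sketch (for illustration only),
// assuming the Clock policy with its default capacity: a higher initial
// reference count lets the item survive more eviction sweeps.
func exampleWithReferenceCount() {
	c := New[string, int](AsClock[string, int]())
	c.Set("pinned", 1, WithReferenceCount(5))
}
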
// newItem creates a new item with the specified options.
func newItem[K comparable, V any](key K, val V, opts ...ItemOption) *Item[K, V] {
o := new(itemOptions)
for _, optFunc := range opts {
optFunc(o)
}
return &Item[K, V]{
Key: key,
Value: val,
Expiration: o.expiration,
InitialReferenceCount: o.referenceCount,
}
}
// Cache is a thread-safe cache.
type Cache[K comparable, V any] struct {
cache Interface[K, *Item[K, V]]
// mu guards access to the underlying cache and the expiration manager.
mu sync.Mutex
janitor *janitor
expManager *expirationManager[K]
}
// Option is an option for the Cache.
type Option[K comparable, V any] func(*options[K, V])
type options[K comparable, V any] struct {
cache Interface[K, *Item[K, V]]
janitorInterval time.Duration
}
func newOptions[K comparable, V any]() *options[K, V] {
return &options[K, V]{
cache: simple.NewCache[K, *Item[K, V]](),
janitorInterval: time.Minute,
}
}
// AsLRU is an option to create a new Cache that uses the LRU replacement algorithm.
func AsLRU[K comparable, V any](opts ...lru.Option) Option[K, V] {
return func(o *options[K, V]) {
o.cache = lru.NewCache[K, *Item[K, V]](opts...)
}
}
// AsLFU is an option to create a new Cache that uses the LFU replacement algorithm.
func AsLFU[K comparable, V any](opts ...lfu.Option) Option[K, V] {
return func(o *options[K, V]) {
o.cache = lfu.NewCache[K, *Item[K, V]](opts...)
}
}
// AsFIFO is an option to create a new Cache that uses the FIFO replacement algorithm.
func AsFIFO[K comparable, V any](opts ...fifo.Option) Option[K, V] {
return func(o *options[K, V]) {
o.cache = fifo.NewCache[K, *Item[K, V]](opts...)
}
}
// AsMRU is an option to create a new Cache that uses the MRU replacement algorithm.
func AsMRU[K comparable, V any](opts ...mru.Option) Option[K, V] {
return func(o *options[K, V]) {
o.cache = mru.NewCache[K, *Item[K, V]](opts...)
}
}
// AsClock is an option to create a new Cache that uses the Clock replacement algorithm.
func AsClock[K comparable, V any](opts ...clock.Option) Option[K, V] {
return func(o *options[K, V]) {
o.cache = clock.NewCache[K, *Item[K, V]](opts...)
}
}
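
// examplePolicySelection is a minimal usage sketch (for illustration only). It
// assumes the lru policy package exposes a WithCapacity option; with it, the
// cache evicts the least recently used entry once 128 items are stored.
func examplePolicySelection() {
	c := New[string, int](AsLRU[string, int](lru.WithCapacity(128)))
	c.Set("a", 1)
	c.Set("b", 2)
	_ = c.Len() // 2
}
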
// WithJanitorInterval is an option to specify how often the cache should delete expired items.
//
// Default is 1 minute.
func WithJanitorInterval[K comparable, V any](d time.Duration) Option[K, V] {
return func(o *options[K, V]) {
o.janitorInterval = d
}
}
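
// exampleJanitorInterval is a minimal usage sketch (for illustration only):
// expired items are swept every 10 seconds instead of the default minute.
func exampleJanitorInterval() {
	c := New[string, int](WithJanitorInterval[string, int](10 * time.Second))
	c.Set("a", 1, WithExpiration(time.Second))
}
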
// New creates a new thread-safe Cache.
// The janitor created by this function cannot be stopped. If you want to
// stop the janitor gracefully, use the NewContext function instead.
//
// Several cache replacement policies are available; specify one with the As* options.
func New[K comparable, V any](opts ...Option[K, V]) *Cache[K, V] {
return NewContext(context.Background(), opts...)
}
// NewContext creates a new thread-safe Cache bound to ctx.
// The internal janitor is stopped when the context is cancelled.
//
// Several cache replacement policies are available; specify one with the As* options.
func NewContext[K comparable, V any](ctx context.Context, opts ...Option[K, V]) *Cache[K, V] {
o := newOptions[K, V]()
for _, optFunc := range opts {
optFunc(o)
}
cache := &Cache[K, V]{
cache: o.cache,
janitor: newJanitor(ctx, o.janitorInterval),
expManager: newExpirationManager[K](),
}
cache.janitor.run(cache.DeleteExpired)
return cache
}
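
// exampleNewContext is a minimal usage sketch (for illustration only):
// cancelling the context stops the internal janitor, so no goroutine is
// leaked once the cache is no longer needed.
func exampleNewContext() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // stops the janitor when this function returns

	c := NewContext[string, int](ctx)
	c.Set("a", 1, WithExpiration(time.Minute))
}
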
// Get looks up a key's value from the cache.
func (c *Cache[K, V]) Get(key K) (zero V, ok bool) {
c.mu.Lock()
defer c.mu.Unlock()
item, ok := c.cache.Get(key)
if !ok {
return
}
// Return the zero value if the item has expired.
// Do not delete it here; leave removal to an external process such as the janitor.
if item.Expired() {
return zero, false
}
return item.Value, true
}
// GetOrSet atomically gets a key's value from the cache, or if the
// key is not present, sets the given value.
// The loaded result is true if the value was loaded, false if stored.
func (c *Cache[K, V]) GetOrSet(key K, val V, opts ...ItemOption) (actual V, loaded bool) {
c.mu.Lock()
defer c.mu.Unlock()
item, ok := c.cache.Get(key)
if !ok || item.Expired() {
item := newItem(key, val, opts...)
c.cache.Set(key, item)
return val, false
}
return item.Value, true
}
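
// exampleGetOrSet is a minimal usage sketch (for illustration only) of the
// load-or-store semantics.
func exampleGetOrSet() {
	c := New[string, int]()
	actual, loaded := c.GetOrSet("visits", 1) // stores 1, loaded == false
	actual, loaded = c.GetOrSet("visits", 99) // returns 1, loaded == true
	_, _ = actual, loaded
}
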
// DeleteExpired deletes all expired items from the cache.
func (c *Cache[K, V]) DeleteExpired() {
c.mu.Lock()
l := c.expManager.len()
c.mu.Unlock()
evict := func() bool {
key := c.expManager.pop()
// if the popped item has expired, delete it and keep sweeping; otherwise stop early
// (keys are popped in expiration order, so the remaining items have not expired yet)
item, ok := c.cache.Get(key)
if ok {
if item.Expired() {
c.cache.Delete(key)
return false
}
c.expManager.update(key, item.Expiration)
}
return true
}
for i := 0; i < l; i++ {
c.mu.Lock()
shouldBreak := evict()
c.mu.Unlock()
if shouldBreak {
break
}
}
}
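
// exampleDeleteExpired is a minimal usage sketch (for illustration only):
// callers may sweep expired items manually instead of waiting for the janitor.
func exampleDeleteExpired() {
	c := New[string, int]()
	c.Set("a", 1, WithExpiration(time.Nanosecond))
	time.Sleep(time.Millisecond) // let the item expire
	c.DeleteExpired()            // "a" is removed immediately
}
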
// Set sets a value to the cache with key, replacing any existing value.
func (c *Cache[K, V]) Set(key K, val V, opts ...ItemOption) {
c.mu.Lock()
defer c.mu.Unlock()
item := newItem(key, val, opts...)
if item.hasExpiration() {
c.expManager.update(key, item.Expiration)
}
c.cache.Set(key, item)
}
// Keys returns the keys of the cache. The order depends on the replacement algorithm.
func (c *Cache[K, V]) Keys() []K {
c.mu.Lock()
defer c.mu.Unlock()
return c.cache.Keys()
}
// Delete deletes the item with provided key from the cache.
func (c *Cache[K, V]) Delete(key K) {
c.mu.Lock()
defer c.mu.Unlock()
c.cache.Delete(key)
c.expManager.remove(key)
}
// Len returns the number of items in the cache.
func (c *Cache[K, V]) Len() int {
c.mu.Lock()
defer c.mu.Unlock()
return c.cache.Len()
}
// Contains reports whether the key is in the cache.
func (c *Cache[K, V]) Contains(key K) bool {
c.mu.Lock()
defer c.mu.Unlock()
_, ok := c.cache.Get(key)
return ok
}
// NumberCache is an in-memory cache that can store only values satisfying the Number constraint.
type NumberCache[K comparable, V Number] struct {
*Cache[K, V]
// nmu is used to lock the whole Increment/Decrement process.
// Note that this must be a separate mutex: mu in the Cache struct is locked inside Get and Set,
// so calling mu.Lock again in Increment/Decrement would cause a deadlock.
nmu sync.Mutex
}
// NewNumber creates a new cache for values satisfying the Number constraint.
func NewNumber[K comparable, V Number](opts ...Option[K, V]) *NumberCache[K, V] {
return &NumberCache[K, V]{
Cache: New(opts...),
}
}
// Increment increments an item of a Number-constrained type by n.
// Returns the incremented value.
func (nc *NumberCache[K, V]) Increment(key K, n V) V {
// To avoid lost updates, we must lock the whole Increment/Decrement process.
nc.nmu.Lock()
defer nc.nmu.Unlock()
got, _ := nc.Cache.Get(key)
nv := got + n
nc.Cache.Set(key, nv)
return nv
}
// Decrement decrements an item of a Number-constrained type by n.
// Returns the decremented value.
func (nc *NumberCache[K, V]) Decrement(key K, n V) V {
nc.nmu.Lock()
defer nc.nmu.Unlock()
got, _ := nc.Cache.Get(key)
nv := got - n
nc.Cache.Set(key, nv)
return nv
}
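
// exampleNumberCache is a minimal usage sketch (for illustration only) of the
// Increment/Decrement helpers.
func exampleNumberCache() {
	nc := NewNumber[string, int]()
	_ = nc.Increment("hits", 1) // 1
	_ = nc.Increment("hits", 2) // 3
	_ = nc.Decrement("hits", 1) // 2
}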