Documentation
¶
Overview ¶
Package cachego provides an easy-to-use foundation for your caching operations.
1. basic:
// Use NewCache function to create a cache.
// By default, it creates a standard cache which evicts entries randomly.
// Use WithShardings to shard cache to several parts for higher performance.
// Use WithGC to clean expired entries every 10 minutes.
cache := cachego.NewCache(cachego.WithGC(10*time.Minute), cachego.WithShardings(64))
// Set an entry to cache with ttl.
cache.Set("key", 123, time.Second)
// Get an entry from cache.
value, ok := cache.Get("key")
fmt.Println(value, ok) // 123 true
// Check how many entries are stored in the cache.
size := cache.Size()
fmt.Println(size) // 1
// Clean expired entries.
cleans := cache.GC()
fmt.Println(cleans) // 1
// Set an entry which doesn't have ttl.
cache.Set("key", 123, cachego.NoTTL)
// Remove an entry.
removedValue := cache.Remove("key")
fmt.Println(removedValue) // 123
// Reset resets cache to initial status.
cache.Reset()
// Get value from cache and load it to cache if not found.
value, ok = cache.Get("key")
if !ok {
// Loaded entry will be set to cache and returned.
// By default, it will use singleflight.
value, _ = cache.Load("key", time.Second, func() (value interface{}, err error) {
return 666, nil
})
}
fmt.Println(value) // 666
// You can use WithLRU to specify the type of cache to lru.
// Also, try WithLFU if you want to use lfu to evict data.
cache = cachego.NewCache(cachego.WithLRU(100))
cache = cachego.NewCache(cachego.WithLFU(100))
// Use NewCacheWithReport to create a cache with report.
cache, reporter := cachego.NewCacheWithReport(cachego.WithCacheName("test"))
fmt.Println(reporter.CacheName())
fmt.Println(reporter.CacheType())
2. ttl:
cache := cachego.NewCache()
// We think most of the entries in cache should have their ttl.
// So setting an entry to cache should specify a ttl.
cache.Set("key", 666, time.Second)
value, ok := cache.Get("key")
fmt.Println(value, ok) // 666 true
time.Sleep(2 * time.Second)
// The entry is expired after ttl.
value, ok = cache.Get("key")
fmt.Println(value, ok) // <nil> false
// Notice that the entry is still stored in the cache even if it's expired.
// This is because we think you will reset the entry to cache after a cache miss in most situations.
// So we can reuse this entry and just reset its value and ttl.
size := cache.Size()
fmt.Println(size) // 1
// What should I do if I want an expired entry to never stay in the cache? Try GC:
cleans := cache.GC()
fmt.Println(cleans) // 1
size = cache.Size()
fmt.Println(size) // 0
// However, not all entries have ttl, and you can specify a NoTTL constant to do so.
// In fact, the entry won't expire as long as its ttl is <= 0.
// So you may have known NoTTL is a "readable" value of "<= 0".
cache.Set("key", 666, cachego.NoTTL)
3. lru:
// By default, NewCache() returns a standard cache which evicts entries randomly.
cache := cachego.NewCache(cachego.WithMaxEntries(10))
for i := 0; i < 20; i++ {
key := strconv.Itoa(i)
cache.Set(key, i, cachego.NoTTL)
}
// Since we set 20 entries to cache, the size won't be 20 because we limit the max entries to 10.
size := cache.Size()
fmt.Println(size) // 10
// We don't know which entries will be evicted and which will stay.
for i := 0; i < 20; i++ {
key := strconv.Itoa(i)
value, ok := cache.Get(key)
fmt.Println(key, value, ok)
}
fmt.Println()
// Sometimes we want it evicts entries by lru, try WithLRU.
// You need to specify the max entries storing in lru cache.
// More details see https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU).
cache = cachego.NewCache(cachego.WithLRU(10))
for i := 0; i < 20; i++ {
key := strconv.Itoa(i)
cache.Set(key, i, cachego.NoTTL)
}
// Only the most recently used entries can still be got in a lru cache (the least recently used ones are evicted).
for i := 0; i < 20; i++ {
key := strconv.Itoa(i)
value, ok := cache.Get(key)
fmt.Println(key, value, ok)
}
// By default, lru will share one lock to do all operations.
// You can shard the cache to several parts for higher performance.
// Notice that max entries only affects one part in sharding mode.
// For example, the total max entries will be 2*10 if shardings is 2 and max entries is 10 in WithLRU or WithMaxEntries.
// In some cache libraries, they calculate the max entries of each part of the shardings, like 10/2.
// However, the result of dividing max entries by shardings may not be an integer, which will make the total max entries incorrect.
// So we let users decide the exact max entries in each part of the shardings.
cache = cachego.NewCache(cachego.WithShardings(2), cachego.WithLRU(10))
4. lfu:
// By default, NewCache() returns a standard cache which evicts entries randomly.
cache := cachego.NewCache(cachego.WithMaxEntries(10))
for i := 0; i < 20; i++ {
key := strconv.Itoa(i)
cache.Set(key, i, cachego.NoTTL)
}
// Since we set 20 entries to cache, the size won't be 20 because we limit the max entries to 10.
size := cache.Size()
fmt.Println(size) // 10
// We don't know which entries will be evicted and which will stay.
for i := 0; i < 20; i++ {
key := strconv.Itoa(i)
value, ok := cache.Get(key)
fmt.Println(key, value, ok)
}
fmt.Println()
// Sometimes we want it evicts entries by lfu, try WithLFU.
// You need to specify the max entries storing in lfu cache.
// More details see https://en.wikipedia.org/wiki/Cache_replacement_policies#Least-frequently_used_(LFU).
cache = cachego.NewCache(cachego.WithLFU(10))
for i := 0; i < 20; i++ {
key := strconv.Itoa(i)
// Let entries have some frequently used operations.
for j := 0; j < i; j++ {
cache.Set(key, i, cachego.NoTTL)
}
}
for i := 0; i < 20; i++ {
key := strconv.Itoa(i)
value, ok := cache.Get(key)
fmt.Println(key, value, ok)
}
// By default, lfu will share one lock to do all operations.
// You can shard the cache to several parts for higher performance.
// Notice that max entries only affects one part in sharding mode.
// For example, the total max entries will be 2*10 if shardings is 2 and max entries is 10 in WithLFU or WithMaxEntries.
// In some cache libraries, they calculate the max entries of each part of the shardings, like 10/2.
// However, the result of dividing max entries by shardings may not be an integer, which will make the total max entries incorrect.
// So we let users decide the exact max entries in each part of the shardings.
cache = cachego.NewCache(cachego.WithShardings(2), cachego.WithLFU(10))
5. sharding:
// All operations in cache share one lock for concurrency.
// Whether a read lock or a write lock is used depends on the cache implementation.
// Get will use read lock in standard cache, but lru and lfu don't.
// This may be a serious performance problem in high qps.
cache := cachego.NewCache()
// We provide a sharding cache wrapper to shard one cache to several parts with hash.
// Every part stores its own entries, and all operations on one entry work on one part.
// This means there are more than one lock when you operate entries.
// The performance will be better in high qps.
cache = cachego.NewCache(cachego.WithShardings(64))
cache.Set("key", 666, cachego.NoTTL)
value, ok := cache.Get("key")
fmt.Println(value, ok) // 666 true
// Notice that max entries will be the sum of shards.
// For example, we set WithShardings(4) and WithMaxEntries(100), and the max entries in whole cache will be 4 * 100.
cache = cachego.NewCache(cachego.WithShardings(4), cachego.WithMaxEntries(100))
for i := 0; i < 1000; i++ {
key := strconv.Itoa(i)
cache.Set(key, i, cachego.NoTTL)
}
size := cache.Size()
fmt.Println(size) // 400
6. gc:
cache := cachego.NewCache()
cache.Set("key", 666, time.Second)
time.Sleep(2 * time.Second)
// The entry is expired after ttl.
value, ok := cache.Get("key")
fmt.Println(value, ok) // <nil> false
// As you know, the entry is still stored in the cache even if it's expired.
// This is because we think you will reset the entry to cache after a cache miss in most situations.
// So we can reuse this entry and just reset its value and ttl.
size := cache.Size()
fmt.Println(size) // 1
// What should I do if I want an expired entry to never stay in the cache? Try GC:
cleans := cache.GC()
fmt.Println(cleans) // 1
// Is there a smart way to do that? Try WithGC:
// For testing, we set a small duration of gc.
// You should set at least 3 minutes in production for performance.
cache = cachego.NewCache(cachego.WithGC(2 * time.Second))
cache.Set("key", 666, time.Second)
size = cache.Size()
fmt.Println(size) // 1
time.Sleep(3 * time.Second)
size = cache.Size()
fmt.Println(size) // 0
// Or do you want a cancelable gc task? Try RunGCTask:
cache = cachego.NewCache()
cancel := cachego.RunGCTask(cache, 2*time.Second)
cache.Set("key", 666, time.Second)
size = cache.Size()
fmt.Println(size) // 1
time.Sleep(3 * time.Second)
size = cache.Size()
fmt.Println(size) // 0
cancel()
cache.Set("key", 666, time.Second)
size = cache.Size()
fmt.Println(size) // 1
time.Sleep(3 * time.Second)
size = cache.Size()
fmt.Println(size) // 1
// By default, gc only scans at most maxScans entries at a time to remove expired entries.
// This is because scanning all entries may cost much time if there are so many entries in the cache, and a "stw" will happen.
// This can be a serious problem in some situations.
// Use WithMaxScans to set this value, remember, a value <= 0 means no scan limit.
cache = cachego.NewCache(cachego.WithGC(10*time.Minute), cachego.WithMaxScans(0))
7. load:
// By default, singleflight is enabled in cache.
// Use WithDisableSingleflight to disable if you want.
cache := cachego.NewCache(cachego.WithDisableSingleflight())
// We recommend you to use singleflight.
cache = cachego.NewCache()
value, ok := cache.Get("key")
fmt.Println(value, ok) // <nil> false
if !ok {
// Load loads a value of key to cache with ttl.
// Use cachego.NoTTL if you want this value is no ttl.
// After loading value to cache, it returns the loaded value and error if failed.
value, _ = cache.Load("key", time.Second, func() (value interface{}, err error) {
return 666, nil
})
}
fmt.Println(value) // 666
value, ok = cache.Get("key")
fmt.Println(value, ok) // 666, true
time.Sleep(2 * time.Second)
value, ok = cache.Get("key")
fmt.Println(value, ok) // <nil>, false
8. report:
func reportMissed(reporter *cachego.Reporter, key string) {
fmt.Printf("report: missed key %s, missed rate %.3f\n", key, reporter.MissedRate())
}
func reportHit(reporter *cachego.Reporter, key string, value interface{}) {
fmt.Printf("report: hit key %s value %+v, hit rate %.3f\n", key, value, reporter.HitRate())
}
func reportGC(reporter *cachego.Reporter, cost time.Duration, cleans int) {
fmt.Printf("report: gc cost %s cleans %d, gc count %d, cache size %d\n", cost, cleans, reporter.CountGC(), reporter.CacheSize())
}
func reportLoad(reporter *cachego.Reporter, key string, value interface{}, ttl time.Duration, err error) {
fmt.Printf("report: load key %s value %+v ttl %s, err %+v, load count %d\n", key, value, ttl, err, reporter.CountLoad())
}
// We provide some ways to report the status of cache.
// Use NewCacheWithReport to create a cache with reporting features.
cache, reporter := cachego.NewCacheWithReport(
// Sometimes you may have several caches in one service.
// You can set each name by WithCacheName and get the name from reporter.
cachego.WithCacheName("test"),
// For testing...
cachego.WithMaxEntries(3),
cachego.WithGC(100*time.Millisecond),
// ReportMissed reports the missed key getting from cache.
// ReportHit reports the hit entry getting from cache.
// ReportGC reports the status of cache gc.
// ReportLoad reports the result of loading.
cachego.WithReportMissed(reportMissed),
cachego.WithReportHit(reportHit),
cachego.WithReportGC(reportGC),
cachego.WithReportLoad(reportLoad),
)
for i := 0; i < 5; i++ {
key := strconv.Itoa(i)
evictedValue := cache.Set(key, key, 10*time.Millisecond)
fmt.Println(evictedValue)
}
for i := 0; i < 5; i++ {
key := strconv.Itoa(i)
value, ok := cache.Get(key)
fmt.Println(value, ok)
}
time.Sleep(200 * time.Millisecond)
value, err := cache.Load("key", time.Second, func() (value interface{}, err error) {
return 666, io.EOF
})
fmt.Println(value, err)
// These are some useful methods of reporter.
fmt.Println("CacheName:", reporter.CacheName())
fmt.Println("CacheType:", reporter.CacheType())
fmt.Println("CountMissed:", reporter.CountMissed())
fmt.Println("CountHit:", reporter.CountHit())
fmt.Println("CountGC:", reporter.CountGC())
fmt.Println("CountLoad:", reporter.CountLoad())
fmt.Println("CacheSize:", reporter.CacheSize())
fmt.Println("MissedRate:", reporter.MissedRate())
fmt.Println("HitRate:", reporter.HitRate())
9. task:
var (
contextKey = struct{}{}
)
func beforePrint(ctx context.Context) {
fmt.Println("before:", ctx.Value(contextKey))
}
func afterPrint(ctx context.Context) {
fmt.Println("after:", ctx.Value(contextKey))
}
func printContextValue(ctx context.Context) {
fmt.Println("context value:", ctx.Value(contextKey))
}
// Create a context to stop the task.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
// Wrap context with key and value
ctx = context.WithValue(ctx, contextKey, "hello")
// Use New to create a task and run it.
// You can use it to load some hot data to cache at fixed duration.
// Before is called before the task loop, optional.
// After is called after the task loop, optional.
// Context is passed to the functions (fn/before/after), which can stop the task by Done(), optional.
// Duration is the duration between two loop of fn, optional.
// Run will start a new goroutine and run the task loop.
// The task will stop if context is done.
task.New(printContextValue).Before(beforePrint).After(afterPrint).Context(ctx).Duration(time.Second).Run()
10. clock:
// Create a fast clock and get current time in nanosecond by Now.
c := clock.New()
c.Now()
// Fast clock may return an "incorrect" time compared with time.Now.
// The gap will be smaller than about 100 ms.
for i := 0; i < 10; i++ {
time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
timeNow := time.Now().UnixNano()
clockNow := c.Now()
fmt.Println(timeNow)
fmt.Println(clockNow)
fmt.Println("gap:", time.Duration(timeNow-clockNow))
fmt.Println()
}
// You can specify the fast clock to cache by WithNow.
// All getting current time operations in this cache will use fast clock.
cache := cachego.NewCache(cachego.WithNow(clock.New().Now))
cache.Set("key", 666, 100*time.Millisecond)
value, ok := cache.Get("key")
fmt.Println(value, ok) // 666, true
time.Sleep(200 * time.Millisecond)
value, ok = cache.Get("key")
fmt.Println(value, ok) // <nil>, false
Index ¶
- Constants
- func NewCacheWithReport(opts ...Option) (cache Cache, reporter *Reporter)
- func RunGCTask(cache Cache, duration time.Duration) (cancel func())
- func SetMapInitialCap(initialCap int)
- func SetSliceInitialCap(initialCap int)
- type Cache
- type CacheType
- type Option
- func WithCacheName(cacheName string) Option
- func WithDisableSingleflight() Option
- func WithGC(gcDuration time.Duration) Option
- func WithHash(hash func(key string) int) Option
- func WithLFU(maxEntries int) Option
- func WithLRU(maxEntries int) Option
- func WithMaxEntries(maxEntries int) Option
- func WithMaxScans(maxScans int) Option
- func WithNow(now func() int64) Option
- func WithRecordGC(recordGC bool) Option
- func WithRecordHit(recordHit bool) Option
- func WithRecordLoad(recordLoad bool) Option
- func WithRecordMissed(recordMissed bool) Option
- func WithReportGC(reportGC func(reporter *Reporter, cost time.Duration, cleans int)) Option
- func WithReportHit(reportHit func(reporter *Reporter, key string, value interface{})) Option
- func WithReportLoad(...) Option
- func WithReportMissed(reportMissed func(reporter *Reporter, key string)) Option
- func WithShardings(shardings int) Option
- type Reporter
- func (r *Reporter) CacheGC() time.Duration
- func (r *Reporter) CacheName() string
- func (r *Reporter) CacheShardings() int
- func (r *Reporter) CacheSize() int
- func (r *Reporter) CacheType() CacheType
- func (r *Reporter) CountGC() uint64
- func (r *Reporter) CountHit() uint64
- func (r *Reporter) CountLoad() uint64
- func (r *Reporter) CountMissed() uint64
- func (r *Reporter) HitRate() float64
- func (r *Reporter) MissedRate() float64
Constants ¶
const (
// NoTTL means a key is never expired.
NoTTL = 0
)
const Version = "v0.6.1"
Version is the version string representation of cachego.
Variables ¶
This section is empty.
Functions ¶
func NewCacheWithReport ¶ added in v0.4.6
NewCacheWithReport creates a cache and a reporter with options. By default, it will create a standard cache which uses one lock to solve data race. It may cause a big performance problem in high concurrency. You can use WithShardings to create a sharding cache which is good for concurrency. Also, you can use options to specify the type of cache to others, such as lru.
func RunGCTask ¶ added in v0.4.6
RunGCTask runs a gc task in a new goroutine and returns a cancel function to cancel the task. However, you don't need to call it manually most of the time; instead, using options is a better choice. Making it a public function allows more customization in some situations. For example, a gc task started via options is un-cancelable, so you can use this function to run a gc task on your own and get a cancel function to cancel it.
func SetMapInitialCap ¶ added in v0.4.6
func SetMapInitialCap(initialCap int)
SetMapInitialCap sets the initial capacity of map.
func SetSliceInitialCap ¶ added in v0.4.6
func SetSliceInitialCap(initialCap int)
SetSliceInitialCap sets the initial capacity of slice.
Types ¶
type Cache ¶
type Cache interface {
// Get gets the value of key from cache and returns value if found.
// A nil value will be returned if key doesn't exist in cache.
// Notice that we won't remove expired keys in get method, so you should remove them manually or set a limit of keys.
// The reason why we won't remove expired keys in get method is for higher re-usability, because we often set a new value
// of expired key after getting it (so we can reuse the memory of entry).
Get(key string) (value interface{}, found bool)
// Set sets key and value to cache with ttl and returns evicted value if exists.
// See NoTTL if you want your key is never expired.
Set(key string, value interface{}, ttl time.Duration) (evictedValue interface{})
// Remove removes key and returns the removed value of key.
// A nil value will be returned if key doesn't exist in cache.
Remove(key string) (removedValue interface{})
// Size returns the count of keys in cache.
// The result may be different in different implementations.
Size() (size int)
// GC cleans the expired keys in cache and returns the exact count cleaned.
// The exact cleans depend on the implementation; however, all implementations should have a limit of scanning.
GC() (cleans int)
// Reset resets cache to initial status which is like a new cache.
Reset()
// Load loads a key with ttl to cache and returns an error if failed.
// We recommend you use this method to load missed keys to cache,
// because it may use singleflight to reduce the times calling load function.
Load(key string, ttl time.Duration, load func() (value interface{}, err error)) (value interface{}, err error)
}
Cache is the core interface of cachego. We provide some implements including standard cache and sharding cache.
func NewCache ¶
NewCache creates a cache with options. By default, it will create a standard cache which uses one lock to solve data race. It may cause a big performance problem in high concurrency. You can use WithShardings to create a sharding cache which is good for concurrency. Also, you can use options to specify the type of cache to others, such as lru. Use NewCacheWithReport to get a reporter for use if you want.
type CacheType ¶ added in v0.4.10
type CacheType string
CacheType is the type of cache.
func (CacheType) IsStandard ¶ added in v0.4.10
IsStandard returns if cache type is standard.
type Option ¶ added in v0.2.1
type Option func(conf *config)
Option applies to config and sets some values to config.
func WithCacheName ¶ added in v0.4.8
WithCacheName returns an option setting the cacheName of config.
func WithDisableSingleflight ¶ added in v0.3.2
func WithDisableSingleflight() Option
WithDisableSingleflight returns an option turning off singleflight mode of cache.
func WithGC ¶ added in v0.4.6
WithGC returns an option setting the duration of cache gc. Negative value means no gc.
func WithHash ¶ added in v0.4.6
WithHash returns an option setting the hash function of cache. A hash function should return the hash code of key.
func WithLFU ¶ added in v0.4.6
WithLFU returns an option setting the type of cache to lfu. Notice that lfu cache must have max entries limit, so you have to specify a maxEntries.
func WithLRU ¶ added in v0.4.6
WithLRU returns an option setting the type of cache to lru. Notice that lru cache must have max entries limit, so you have to specify a maxEntries.
func WithMaxEntries ¶ added in v0.4.6
WithMaxEntries returns an option setting the max entries of cache. Negative value means no limit.
func WithMaxScans ¶ added in v0.4.6
WithMaxScans returns an option setting the max scans of cache. Negative value means no limit.
func WithNow ¶ added in v0.4.6
WithNow returns an option setting the now function of cache. A now function should return a nanosecond unix time.
func WithRecordGC ¶ added in v0.4.6
WithRecordGC returns an option setting the recordGC of config.
func WithRecordHit ¶ added in v0.4.6
WithRecordHit returns an option setting the recordHit of config.
func WithRecordLoad ¶ added in v0.4.6
WithRecordLoad returns an option setting the recordLoad of config.
func WithRecordMissed ¶ added in v0.4.6
WithRecordMissed returns an option setting the recordMissed of config.
func WithReportGC ¶ added in v0.4.6
WithReportGC returns an option setting the reportGC of config.
func WithReportHit ¶ added in v0.4.6
WithReportHit returns an option setting the reportHit of config.
func WithReportLoad ¶ added in v0.4.6
func WithReportLoad(reportLoad func(reporter *Reporter, key string, value interface{}, ttl time.Duration, err error)) Option
WithReportLoad returns an option setting the reportLoad of config.
func WithReportMissed ¶ added in v0.4.6
WithReportMissed returns an option setting the reportMissed of config.
func WithShardings ¶ added in v0.4.6
WithShardings returns an option setting the sharding count of cache. Negative value means no sharding.
type Reporter ¶ added in v0.4.6
type Reporter struct {
// contains filtered or unexported fields
}
Reporter stores some values for reporting.
func (*Reporter) CacheGC ¶ added in v0.4.12
CacheGC returns the gc duration of cache. You can use WithGC to set cache's gc duration. Zero duration means cache disables gc.
func (*Reporter) CacheName ¶ added in v0.4.8
CacheName returns the name of cache. You can use WithCacheName to set cache's name.
func (*Reporter) CacheShardings ¶ added in v0.4.9
CacheShardings returns the shardings of cache. You can use WithShardings to set cache's shardings. Zero shardings means cache is non-sharding.
func (*Reporter) CountMissed ¶ added in v0.4.6
CountMissed returns the missed count.
func (*Reporter) MissedRate ¶ added in v0.4.6
MissedRate returns the missed rate.