init commit

This commit is contained in:
2024-03-19 01:05:51 +08:00
commit 199bbf2628
393 changed files with 34883 additions and 0 deletions

278
archive/go/cache/cache.go vendored Normal file
View File

@@ -0,0 +1,278 @@
package main
import (
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
// Cache is a byte-limited, TTL-aware key/value store.
type Cache interface {
	// SetMaxMemory parses a size string such as "100KB", "10MB" or "1GB"
	// and sets the memory ceiling; it reports whether the string was valid.
	SetMaxMemory(size string) bool
	// Set stores val under key; expire == 0 means "effectively never expires".
	Set(key string, val interface{}, expire time.Duration)
	// Get returns the value and whether the key exists and has not expired.
	Get(key string) (interface{}, bool)
	// Del removes key, reporting whether it was present.
	Del(key string) bool
	// Exists reports whether key is present and unexpired.
	Exists(key string) bool
	// Flush removes all entries.
	Flush() bool
	// Keys returns the number of stored keys (approximate: expiry is lazy).
	Keys() int64
}
// cache implements Cache with a two-table incremental-rehash layout
// (similar to Redis): hashtable[0] is the live table, and hashtable[1]
// is the destination table while a rehash is in progress.
type cache struct {
	maxMemory int64 // memory ceiling in bytes, set via SetMaxMemory
	memory    int64 // approximate bytes currently stored (updated atomically)
	// Expired keys are removed lazily, so num may be inaccurate.
	num      int64
	capacity int64 // current slot capacity; doubles on expansion
	// rehashs holds incremental-rehash state; the mutex serializes writers
	// (and readers while a rehash is active).
	rehashs struct {
		sync.Mutex
		enable int32 // 1 while a rehash is in progress
		index  int   // next slot of hashtable[0] to migrate
	}
	hashtable [2]*hashtable
	// evictionPool is the approximate-LRU sample pool used when the
	// memory ceiling is exceeded.
	evictionPool struct {
		sync.Mutex
		e []*entry
	}
}
// NewCache builds a Cache with the default slot capacity of 1024.
func NewCache() Cache {
	const defaultCapacity = 1024
	return newCache(defaultCapacity)
}
// newCache builds a *cache with the given slot capacity and a default
// memory ceiling of 1GB.
func newCache(cap int64) *cache {
	c := new(cache)
	c.capacity = cap
	c.hashtable[0] = newHashtable(cap)
	// Pre-size the eviction sample pool; rehash state starts at its zero value.
	c.evictionPool.e = make([]*entry, 0, 20)
	c.SetMaxMemory("1GB")
	return c
}
// SetMaxMemory parses strings like "100KB", "10MB" or "1GB" and sets the
// cache's memory ceiling in bytes. It reports whether the input was a
// valid, positive size; on failure the previous ceiling is kept.
func (c *cache) SetMaxMemory(size string) bool {
	if len(size) < 3 {
		return false
	}
	num := size[:len(size)-2]
	unit := size[len(size)-2:]
	maxMemory, err := strconv.ParseInt(num, 10, 64)
	if err != nil {
		return false
	}
	// Reject zero AND negative sizes: the original accepted e.g. "-1KB",
	// and a non-positive ceiling would make every Set evict immediately.
	if maxMemory <= 0 {
		return false
	}
	switch strings.ToUpper(unit) {
	case "KB":
		c.maxMemory = maxMemory << 10
	case "MB":
		c.maxMemory = maxMemory << 20
	case "GB":
		c.maxMemory = maxMemory << 30
	default:
		return false
	}
	return true
}
// Set stores val under key, expiring after expire (0 = effectively never).
// Writers are serialized by the rehashs mutex; if the load factor exceeds
// 0.75 an incremental rehash into hashtable[1] begins, and each later Set
// migrates one more slot via c.rehash().
func (c *cache) Set(key string, val interface{}, expire time.Duration) {
	c.rehashs.Lock()
	defer c.rehashs.Unlock()
	if c.rehashs.enable == 0 {
		if float64(c.num)/float64(c.capacity) > 0.75 {
			// Double the capacity and start an incremental rehash.
			atomic.AddInt64(&c.capacity, c.capacity)
			c.hashtable[1] = newHashtable(c.capacity)
			atomic.StoreInt32(&c.rehashs.enable, 1)
			c.hashtable[1].Set(key, val, expire)
		} else {
			c.hashtable[0].Set(key, val, expire)
		}
	} else {
		// NOTE(review): this re-check can never be true — enable was observed
		// non-zero above while holding the same lock, and only lock holders
		// clear it, so the recursive call looks like dead code. Confirm.
		if c.rehashs.enable == 0 {
			c.Set(key, val, expire)
			return
		}
		c.hashtable[1].Set(key, val, expire)
		c.rehash()
	}
	// NOTE(review): num/memory are incremented even when an existing key is
	// overwritten, so both counters can drift upward — confirm intended.
	atomic.AddInt64(&c.num, 1)
	atomic.AddInt64(&c.memory, int64(sizeof(val)))
	// Evict (approximate LRU) until we fit under the ceiling again.
	for c.memory > c.maxMemory && c.num > 1 {
		// Reclaim memory.
		c.lru()
		if len(c.evictionPool.e) == 0 {
			break
		}
	}
}
// rehash migrates one slot (chain) of hashtable[0] into hashtable[1].
// Callers must hold c.rehashs.Mutex; once the whole table has been
// visited the tables are swapped and the rehash state is reset.
func (c *cache) rehash() {
	// The rehashs lock is held by the caller for the whole migration step.
	e, comple := c.hashtable[0].index(c.rehashs.index)
	if comple {
		// Migration finished: promote table 1 and clear the rehash state.
		c.hashtable[0], c.hashtable[1] = c.hashtable[1], nil
		c.rehashs.enable = 0
		c.rehashs.index = 0
		return
	}
	c.rehashs.index++
	if e == nil {
		// Empty slot: advance to the next one immediately.
		c.rehash()
		return
	}
	// Walk the chain, dropping expired entries and re-inserting the rest.
	for e != nil {
		if e.expired() {
			// Expired: only the global counters are adjusted; the entry is
			// simply not carried over to the new table.
			atomic.AddInt64(&c.num, -1)
			atomic.AddInt64(&c.memory, -int64(sizeof(e.value)))
		} else {
			c.hashtable[1].SetExpireTime(e.key, e.value, e.expireTime)
		}
		e, e.next = e.next, nil
	}
}
// lru evicts one entry using approximate LRU, Redis-style: keep a small
// sample pool of entries sorted by last operateTime and evict the oldest.
// Eviction only runs against hashtable[0]; during a rehash it is skipped.
func (c *cache) lru() {
	c.evictionPool.Lock()
	defer c.evictionPool.Unlock()
	if c.rehashs.enable == 0 {
		// Top the pool up to ~20 candidates with a random sample.
		if len(c.evictionPool.e) <= 15 {
			m := make(map[string]struct{})
			for _, v := range c.evictionPool.e {
				m[v.key] = struct{}{}
			}
			e := c.hashtable[0].random(5, -1)
			for _, v := range e {
				if _, ok := m[v.key]; ok {
					continue
				}
				c.evictionPool.e = append(c.evictionPool.e, v)
			}
			sort.Slice(c.evictionPool.e, func(i, j int) bool {
				return c.evictionPool.e[i].operateTime.Before(c.evictionPool.e[j].operateTime)
			})
		}
		// Sampling can come up empty (random() scans a limited window of a
		// sparse table); without this guard the index below panics.
		if len(c.evictionPool.e) == 0 {
			return
		}
		e := c.evictionPool.e[0]
		c.Del(e.key)
		c.evictionPool.e = c.evictionPool.e[1:]
	} else {
		// TODO: evict while a rehash is running. That needs sampling from the
		// not-yet-migrated region, which would require extra locking; for now
		// eviction only happens via Set (which holds the rehash lock).
	}
}
// Get returns the value for key and whether it exists and is unexpired.
// Outside a rehash it reads hashtable[0] without taking the rehash lock;
// during a rehash it locks, checks hashtable[1] first, then hashtable[0],
// and advances the rehash by one step.
func (c *cache) Get(key string) (interface{}, bool) {
	if c.rehashs.enable == 0 {
		val, err := c.hashtable[0].Get(key)
		if err != nil {
			if err == ExpiredErr {
				// Lazily expired: adjust the global counters.
				atomic.AddInt64(&c.num, -1)
				atomic.AddInt64(&c.memory, -int64(sizeof(val)))
			}
			return nil, false
		}
		return val, true
	} else {
		c.rehashs.Lock()
		defer c.rehashs.Unlock()
		// The rehash may have finished while we waited for the lock.
		if c.rehashs.enable == 0 {
			return c.Get(key)
		}
		// Rehash in progress: check table 1 first, then fall back to table 0.
		val, err := c.hashtable[1].Get(key)
		if err != nil {
			if err == ExpiredErr {
				// Lazily expired: adjust the global counters.
				atomic.AddInt64(&c.num, -1)
				atomic.AddInt64(&c.memory, -int64(sizeof(val)))
				return nil, false
			}
			val, err = c.hashtable[0].Get(key)
			if err != nil {
				if err == ExpiredErr {
					// Lazily expired: adjust the global counters.
					atomic.AddInt64(&c.num, -1)
					atomic.AddInt64(&c.memory, -int64(sizeof(val)))
				}
				return nil, false
			}
		}
		// Piggyback one rehash step on this read.
		c.rehash()
		return val, true
	}
}
// Del removes key and reports whether it was present. Outside a rehash it
// deletes from hashtable[0] directly; during a rehash it locks and tries
// table 0 first, then table 1.
func (c *cache) Del(key string) bool {
	if c.rehashs.enable == 0 {
		val, ok := c.hashtable[0].Del(key)
		if !ok {
			return false
		}
		atomic.AddInt64(&c.memory, -int64(sizeof(val)))
		atomic.AddInt64(&c.num, -1)
		return true
	} else {
		// Rehash in progress.
		c.rehashs.Lock()
		defer c.rehashs.Unlock()
		// The rehash may have finished while we waited for the lock.
		if c.rehashs.enable == 0 {
			return c.Del(key)
		}
		// NOTE(review): a key written during the rehash lives in table 1 while
		// a stale copy may remain in table 0; deleting from table 0 first can
		// leave the newer entry behind — confirm intended.
		val, ok := c.hashtable[0].Del(key)
		if !ok {
			val, ok = c.hashtable[1].Del(key)
			if !ok {
				return false
			}
		}
		atomic.AddInt64(&c.memory, -int64(sizeof(val)))
		atomic.AddInt64(&c.num, -1)
		return true
	}
}
// Exists reports whether key is present and has not expired.
func (c *cache) Exists(key string) bool {
	if _, ok := c.Get(key); ok {
		return true
	}
	return false
}
// Flush removes every entry and returns the counters toward zero. During
// a rehash both tables are flushed under the rehash lock.
// NOTE(review): the fast path reads rehashs.enable without the lock, and
// a rehash that is in flight has its enable/index state left untouched
// here — confirm behavior under concurrent use.
func (c *cache) Flush() bool {
	if c.rehashs.enable == 0 {
		c.hashtable[0].flush(&c.memory, &c.num)
	} else {
		// Rehash in progress: clear both tables.
		c.rehashs.Lock()
		defer c.rehashs.Unlock()
		c.hashtable[0].flush(&c.memory, &c.num)
		c.hashtable[1].flush(&c.memory, &c.num)
	}
	return true
}
// Keys returns the approximate number of stored keys. Expiry is lazy, so
// the count may include entries that have expired but not yet been
// touched. The counter is written with atomics elsewhere, so read it
// atomically too (the original's plain read was a data race).
func (c *cache) Keys() int64 {
	return atomic.LoadInt64(&c.num)
}

60
archive/go/cache/cache_bench_test.go vendored Normal file
View File

@@ -0,0 +1,60 @@
package main
import (
"strconv"
"testing"
"time"
"github.com/allegro/bigcache/v3"
)
// BenchmarkMapSet baselines raw map insertion for comparison with Cache.
func BenchmarkMapSet(b *testing.B) {
	m := make(map[string]int, b.N)
	for i := 0; i < b.N; i++ {
		m[strconv.Itoa(i)] = i
	}
}
// BenchmarkMapGetSet baselines map lookups over a 1000-key map.
func BenchmarkMapGetSet(b *testing.B) {
	m := make(map[string]int, b.N)
	for i := 0; i < 1000; i++ {
		m[strconv.Itoa(i)] = i
	}
	for i := 0; i < b.N; i++ {
		_, _ = m[strconv.Itoa(i%1000)]
	}
}
// BenchmarkCacheSet measures Cache.Set throughput.
func BenchmarkCacheSet(b *testing.B) {
	cache := NewCache()
	for i := 0; i < b.N; i++ {
		cache.Set(strconv.Itoa(i), i, 0)
	}
}
// BenchmarkCacheSetGet measures Cache.Get over a 1000-key cache.
func BenchmarkCacheSetGet(b *testing.B) {
	cache := NewCache()
	for i := 0; i < 1000; i++ {
		cache.Set(strconv.Itoa(i), i, 0)
	}
	for i := 0; i < b.N; i++ {
		cache.Get(strconv.Itoa(i % 1000))
	}
}
// BenchmarkBigCacheSet measures bigcache writes for comparison.
func BenchmarkBigCacheSet(b *testing.B) {
	cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
	for i := 0; i < b.N; i++ {
		cache.Set(strconv.Itoa(i), []byte(strconv.Itoa(i)))
	}
}
// BenchmarkBigCacheSetGet measures bigcache reads over 1000 keys.
func BenchmarkBigCacheSetGet(b *testing.B) {
	cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
	for i := 0; i < 1000; i++ {
		cache.Set(strconv.Itoa(i), []byte(strconv.Itoa(i)))
	}
	for i := 0; i < b.N; i++ {
		cache.Get(strconv.Itoa(i % 1000))
	}
}

217
archive/go/cache/cache_test.go vendored Normal file
View File

@@ -0,0 +1,217 @@
package main
import (
"fmt"
"sync"
"sync/atomic"
"testing"
"time"
)
import "github.com/stretchr/testify/assert"
// Test_cache_SetMaxMemory table-tests size-string parsing and the
// resulting byte ceiling.
func Test_cache_SetMaxMemory(t *testing.T) {
	tests := []struct {
		name      string
		args      string
		maxMemory int64
		want      bool
	}{
		{"case1", "1KB", 1024, true},
		{"case2", "1MB", 1024 * 1024, true},
		{"case3", "1GB", 1024 * 1024 * 1024, true},
		{"case4", "10G", 0, false},
		{"case5", "0", 0, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := &cache{}
			if got := c.SetMaxMemory(tt.args); got != tt.want || c.maxMemory != tt.maxMemory {
				t.Errorf("SetMaxMemory() = %v,maxMemory = %v, want %v,%v", got, c.maxMemory, tt.want, tt.maxMemory)
			}
		})
	}
}
// TestCache walks the basic API: set/get/del, TTL expiry with lazy
// counter correction, and flush.
func TestCache(t *testing.T) {
	cache := newCache(1024)
	cache.SetMaxMemory("100MB")
	assert.Equal(t, int64(1024*1024*100), cache.maxMemory)
	cache.Set("int", 1, 0)
	assert.Equal(t, int64(8), cache.memory)
	v, ok := cache.Get("int")
	assert.True(t, ok)
	assert.Equal(t, 1, v)
	cache.Del("int")
	_, ok = cache.Get("int")
	assert.False(t, ok)
	cache.Set("str", "ok", time.Microsecond)
	v, ok = cache.Get("str")
	assert.True(t, ok)
	assert.Equal(t, "ok", v)
	assert.Equal(t, int64(1), cache.Keys())
	time.Sleep(time.Microsecond)
	// Expiry is lazy: the key still counts until the next Get touches it.
	assert.Equal(t, int64(1), cache.Keys())
	assert.Equal(t, int64(16), cache.memory)
	v, ok = cache.Get("str")
	assert.False(t, ok)
	assert.Equal(t, int64(0), cache.Keys())
	assert.Equal(t, int64(0), cache.memory)
	cache.Flush()
	cache.Keys()
}
// TestClean verifies approximate-LRU eviction when a large value pushes
// memory past the 1KB ceiling.
func TestClean(t *testing.T) {
	cache := newCache(1024)
	cache.SetMaxMemory("1KB")
	cache.Set("test1", 1, 0)
	cache.Set("test2", 10, 0)
	cache.Set("test3", 100, 0)
	cache.Set("test4", 1000, 0)
	time.Sleep(time.Microsecond)
	// The 8KB array forces everything else out.
	cache.Set("bigkey", [1024]int{0}, 0)
	assert.Equal(t, int64(1), cache.Keys())
	_, ok := cache.Get("test1")
	assert.False(t, ok)
	v, ok := cache.Get("bigkey")
	assert.Equal(t, [1024]int{0}, v)
	assert.True(t, ok)
	time.Sleep(time.Microsecond)
	cache.Set("test1", 1, 0)
	cache.Set("test2", 10, 0)
	cache.Set("test3", 100, 0)
	// bigkey is now the least recently used entry and gets evicted in turn.
	assert.Equal(t, int64(3), cache.Keys())
	v, ok = cache.Get("test3")
	assert.True(t, ok)
	assert.Equal(t, 100, v)
	v, ok = cache.Get("bigkey")
	assert.False(t, ok)
	assert.Nil(t, v)
}
// TestExpansion grows the table well past its initial capacity (forcing
// incremental rehashes) and checks the counters and Flush afterwards.
func TestExpansion(t *testing.T) {
	cache := newCache(1024)
	cache.SetMaxMemory("1GB")
	for i := 0; i < 10000; i++ {
		cache.Set(fmt.Sprintf("%d", i), i, 0)
	}
	for i := 0; i < 10000; i++ {
		v, ok := cache.Get(fmt.Sprintf("%d", i))
		assert.True(t, ok)
		assert.Equal(t, i, v)
	}
	assert.Equal(t, int64(80000), cache.memory)
	assert.Equal(t, int64(10000), cache.num)
	cache.Flush()
	assert.Equal(t, int64(0), cache.memory)
	assert.Equal(t, int64(0), cache.num)
}
// TestSetGet hammers the cache from 100 goroutines: first a write phase
// then a read phase, then simultaneous writers and readers linked by a
// channel.
func TestSetGet(t *testing.T) {
	var rn int64
	maxNum := int64(100000)
	cache := newCache(1024)
	cache.SetMaxMemory("1GB")
	var n int64 = -1
	wg := sync.WaitGroup{}
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				i := atomic.AddInt64(&n, 1)
				if i > maxNum {
					break
				}
				cache.Set(fmt.Sprintf("%d", i), i, 0)
			}
		}()
	}
	wg.Wait()
	n = -1
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				i := atomic.AddInt64(&n, 1)
				if i > maxNum {
					break
				}
				v, ok := cache.Get(fmt.Sprintf("%d", i))
				assert.True(t, ok)
				assert.Equal(t, i, v)
				atomic.AddInt64(&rn, 1)
			}
		}()
	}
	wg.Wait()
	assert.Equal(t, maxNum+1, rn)
	rn = 0
	cache.Flush()
	assert.Equal(t, int64(0), cache.memory)
	assert.Equal(t, int64(0), cache.num)
	// Flush does not shrink the table, so start over with a fresh cache.
	cache = newCache(1024)
	cache.SetMaxMemory("1GB")
	// Readers consume keys over a channel while the writers produce them.
	c := make(chan int64, 100)
	n = -1
	wwg := sync.WaitGroup{}
	for i := 0; i < 100; i++ {
		wwg.Add(1)
		go func() {
			defer wwg.Done()
			for {
				i := atomic.AddInt64(&n, 1)
				if i > maxNum {
					break
				}
				cache.Set(fmt.Sprintf("%d", i), i, 0)
				c <- i
			}
		}()
	}
	go func() {
		wwg.Wait()
		close(c)
	}()
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				i, ok := <-c
				if !ok {
					break
				}
				v, ok := cache.Get(fmt.Sprintf("%d", i))
				assert.True(t, ok)
				atomic.AddInt64(&rn, 1)
				assert.Equal(t, i, v)
			}
		}()
	}
	wg.Wait()
	assert.Equal(t, maxNum+1, rn)
}
// TestLru checks that eviction keeps memory at or under the ceiling while
// inserting 100 entries of 64 bytes each into a 1KB cache.
func TestLru(t *testing.T) {
	cache := newCache(2048)
	cache.SetMaxMemory("1KB")
	for i := 0; i < 100; i++ {
		cache.Set(fmt.Sprintf("%d", i), [...]int{1, 2, 3, 4, 5, 6, 7, 8}, 0)
		assert.True(t, cache.memory <= cache.maxMemory)
	}
}

283
archive/go/cache/hashtable.go vendored Normal file
View File

@@ -0,0 +1,283 @@
package main
import (
"errors"
"hash/fnv"
"math/rand"
"sync"
"sync/atomic"
"time"
)
// entry is a single key/value node in a bucket's separate-chaining list.
type entry struct {
	key         string
	value       interface{}
	expireTime  time.Time // absolute expiry instant
	operateTime time.Time // last access time, used by approximate LRU
	next        *entry    // next node in the chain
}
// ExpiredErr is returned by lookups that find the key but see it expired;
// callers compare against it by identity.
var ExpiredErr = errors.New("key过期")
// expired reports whether the entry's expiry instant has passed.
func (v *entry) expired() bool {
	now := time.Now()
	return now.After(v.expireTime)
}
// val returns the stored value; even an expired entry returns its value
// (alongside ExpiredErr) so callers can account for its memory.
func (v *entry) val() (interface{}, error) {
	if v.expired() {
		return v.value, ExpiredErr
	}
	return v.value, nil
}
// hashtable is a separate-chaining hash table split into 1024-slot
// buckets; per-bucket RWMutexes (segment locks) improve concurrency.
type hashtable struct {
	bucket []*bucket
}
// newHashtable builds a hashtable sized for cap entries, using one
// 1024-slot bucket per 1024 of capacity, with a minimum of one bucket.
func newHashtable(cap int64) *hashtable {
	n := cap / 1024
	if n < 1 {
		// Guard small capacities: with zero buckets every hash() would
		// panic with a modulo-by-zero on len(h.bucket).
		n = 1
	}
	ht := &hashtable{
		bucket: make([]*bucket, 0, n),
	}
	for i := int64(0); i < n; i++ {
		ht.bucket = append(ht.bucket, &bucket{})
	}
	return ht
}
// hash maps key to (bucket index, slot-in-bucket index) using FNV-64.
func (h *hashtable) hash(key string) (int, int) {
	hash := fnv.New64()
	hash.Write([]byte(key))
	// Reduce in uint64 space: the original converted Sum64 to int and
	// negated, which overflows at math.MinInt64 (negation is a no-op) and
	// can yield a negative index.
	sum := hash.Sum64()
	return int(sum % uint64(len(h.bucket))), int(sum % 1024)
}
// Get looks key up in its owning bucket; an expired hit yields ExpiredErr.
func (h *hashtable) Get(key string) (interface{}, error) {
	bucketIdx, slot := h.hash(key)
	return h.bucket[bucketIdx].get(key, slot)
}
// Set stores key/val with a relative TTL in the owning bucket.
func (h *hashtable) Set(key string, val interface{}, expire time.Duration) {
	bucketIdx, slot := h.hash(key)
	h.bucket[bucketIdx].set(key, slot, val, expire)
}
// SetExpireTime stores key/val with an absolute expiry instant; used by
// rehash to migrate entries without resetting their TTL.
func (h *hashtable) SetExpireTime(key string, val interface{}, expireTime time.Time) {
	bucketIdx, slot := h.hash(key)
	h.bucket[bucketIdx].setExpireTime(key, slot, val, expireTime)
}
// Del removes key, returning the removed value and whether it existed.
func (h *hashtable) Del(key string) (interface{}, bool) {
	bucketIdx, slot := h.hash(key)
	return h.bucket[bucketIdx].del(key, slot)
}
// flush clears every bucket, subtracting each bucket's tallies from the
// caller-supplied totals; no table-level lock is taken.
func (h *hashtable) flush(mem, num *int64) {
	for i := range h.bucket {
		h.bucket[i].flush(mem, num)
	}
}
// index removes and returns the chain at global slot index; the boolean
// reports that the whole table has been consumed.
func (h *hashtable) index(index int) (*entry, bool) {
	if index >= len(h.bucket)*1024 {
		return nil, true
	}
	return h.bucket[index/1024].getAndDel(index % 1024), false
}
// random samples up to count entries starting from a random slot at or
// after start; start == -1 samples from anywhere in the table.
func (h *hashtable) random(count, start int) []*entry {
	if start == -1 {
		start = rand.Intn(len(h.bucket) * 1024)
	}
	startBucket := start / 1024
	randomBucket := startBucket + rand.Intn(len(h.bucket)-startBucket)
	startIndex := 0
	if randomBucket == startBucket {
		// Staying in the start bucket: pick a slot at or after start.
		startIndex = start % 1024
		startIndex = startIndex + rand.Intn(1024-startIndex)
	} else {
		startBucket = randomBucket
		startIndex = rand.Intn(1024)
	}
	ret := make([]*entry, 0, count)
	// Scan at most 1024 consecutive slots from the chosen position.
	for i := 0; i < 1024; i++ {
		var e *entry
		if startIndex+i >= 1024 {
			// Walked past the end of this bucket: wrap into the next one.
			b := h.bucket[(startBucket+1)%len(h.bucket)]
			e = b.index((startIndex + i) % 1024)
		} else {
			e = h.bucket[startBucket].index(startIndex + i)
		}
		if e == nil {
			continue
		}
		// Collect the whole chain at this slot until count is reached.
		for {
			ret = append(ret, e)
			if e.next == nil || len(ret) >= count {
				break
			}
			e = e.next
		}
		if len(ret) >= count {
			break
		}
	}
	return ret
}
// bucket is one 1024-slot segment of the hashtable, guarded by its own
// RWMutex (segment locking).
type bucket struct {
	sync.RWMutex
	kv [1024]*entry
	// Per-bucket memory/count tallies; flush subtracts them from the cache
	// totals. (Possible optimization: tally periodically and reclaim/expand
	// based on the totals.)
	mem int64
	num int64
}
// flush empties the bucket and subtracts its tallies from the totals.
func (b *bucket) flush(mem, num *int64) {
	b.Lock()
	defer b.Unlock()
	var empty [1024]*entry
	b.kv = empty
	atomic.AddInt64(mem, -b.mem)
	atomic.AddInt64(num, -b.num)
	b.mem, b.num = 0, 0
}
// getAndDel removes and returns the whole chain stored at slot n.
func (b *bucket) getAndDel(n int) *entry {
	b.Lock()
	defer b.Unlock()
	chain := b.kv[n]
	b.kv[n] = nil
	return chain
}
// getP walks the chain at slot n looking for key. It returns the node and
// its predecessor; for a head match the predecessor equals the node
// itself (callers rely on prev == v to detect the head case). The caller
// must hold the bucket lock.
func (b *bucket) getP(key string, n int) (*entry, *entry) {
	v := b.kv[n]
	prev := v
	for v != nil {
		if v.key == key {
			return v, prev
		}
		prev = v
		v = v.next
	}
	return nil, nil
}
// index returns the chain head at slot n under a read lock.
func (b *bucket) index(n int) *entry {
	b.RLock()
	defer b.RUnlock()
	return b.kv[n]
}
// get returns the live value stored under key, refreshing its access
// time. An expired hit is unlinked here (lazy expiry) and its value is
// still returned alongside ExpiredErr so callers can account for memory.
func (b *bucket) get(key string, n int) (interface{}, error) {
	b.RLock()
	v, _ := b.getP(key, n)
	if v == nil {
		b.RUnlock()
		return nil, errors.New("key不存在")
	}
	val, err := v.val()
	if err == nil {
		v.operateTime = time.Now()
		b.RUnlock()
		return val, nil
	}
	// Expired: upgrade to the write lock to unlink the entry.
	b.RUnlock()
	b.Lock()
	defer b.Unlock()
	// Re-find under the write lock: another goroutine may have removed or
	// replaced the entry while we held no lock (the original reused the
	// stale pointers from the read-locked lookup).
	v, prev := b.getP(key, n)
	if v == nil || !v.expired() {
		return val, err
	}
	b.mem -= int64(sizeof(v.value))
	b.num--
	if prev == v {
		// Head of the chain: promote the next node. (The original set the
		// slot to nil here, silently dropping every sibling in the chain.)
		b.kv[n] = v.next
	} else {
		prev.next = v.next
	}
	return val, err
}
// setExpireTime inserts or updates key at slot n with an absolute expiry.
// NOTE(review): mem/num are incremented even when an existing key is
// overwritten, so the bucket tallies drift upward on updates — confirm
// this matches the cache-level accounting (which does the same).
func (b *bucket) setExpireTime(key string, n int, val interface{}, expireTime time.Time) {
	b.Lock()
	defer b.Unlock()
	v := b.kv[n]
	b.mem += int64(sizeof(val))
	b.num++
	if v == nil {
		// Empty slot: the new entry becomes the chain head.
		b.kv[n] = &entry{
			key:         key,
			value:       val,
			expireTime:  expireTime,
			operateTime: time.Now(),
			next:        nil,
		}
		return
	}
	// Walk the chain: update in place on a key match, otherwise stop at
	// the tail so the new entry can be appended.
	for v != nil {
		if v.key == key {
			v.value = val
			v.expireTime = expireTime
			v.operateTime = time.Now()
			return
		}
		if v.next == nil {
			break
		}
		v = v.next
	}
	v.next = &entry{
		key:         key,
		value:       val,
		expireTime:  expireTime,
		operateTime: time.Now(),
		next:        nil,
	}
}
// set stores key/val with a relative TTL; expire == 0 maps to a ten-year
// expiry, i.e. "effectively never".
func (b *bucket) set(key string, n int, val interface{}, expire time.Duration) {
	now := time.Now()
	var deadline time.Time
	if expire == 0 {
		deadline = now.Add(time.Hour * 24 * 365 * 10)
	} else {
		deadline = now.Add(expire)
	}
	b.setExpireTime(key, n, val, deadline)
}
// del removes key from slot n, returning the removed value and whether
// the key was present.
func (b *bucket) del(key string, n int) (interface{}, bool) {
	b.Lock()
	defer b.Unlock()
	v, prev := b.getP(key, n)
	if v == nil {
		return nil, false
	}
	b.mem -= int64(sizeof(v.value))
	b.num--
	if prev == v {
		// Head of the chain: promote the next node. (The original set the
		// slot to nil here, silently dropping every sibling in the chain.)
		b.kv[n] = v.next
	} else {
		prev.next = v.next
	}
	return v.value, true
}

13
archive/go/cache/main.go vendored Normal file
View File

@@ -0,0 +1,13 @@
package main
// main exercises the public Cache API end to end.
func main() {
	c := NewCache()
	c.SetMaxMemory("100MB")
	c.Set("int", 1, 0)
	c.Set("bool", false, 0)
	c.Set("data", map[string]interface{}{"a": 1}, 0)
	c.Get("int")
	c.Del("int")
	c.Flush()
	c.Keys()
}

310
archive/go/cache/simple.go vendored Normal file
View File

@@ -0,0 +1,310 @@
package main
import (
"hash/fnv"
"strconv"
"strings"
"sync"
"time"
)
// lru is a node of the doubly-linked recency list used by simpleCache;
// the most recently used key sits right after the head sentinel.
type lru struct {
	prev *lru
	next *lru
	key  string
}
// simpleCache is a single-lock Cache implementation: one chained hash
// table plus an exact LRU list (head = most recent, tail = oldest).
type simpleCache struct {
	sync.Mutex
	maxMemory int // memory ceiling in bytes
	memory    int // approximate bytes currently stored
	hashtable []*entry
	// Sentinel-terminated doubly-linked LRU list plus a key -> node index.
	lruhead, lrutail *lru
	lruHash          map[string]*lru
	num int // number of stored keys
	cap int // slot count of hashtable
}
// NewSimpleCache builds the single-lock Cache implementation.
func NewSimpleCache() Cache {
	var c Cache = newSimpleCache()
	return c
}
// newSimpleCache builds a simpleCache with 1024 slots and starts a
// background sweeper that removes expired entries once a minute.
func newSimpleCache() *simpleCache {
	c := &simpleCache{
		hashtable: make([]*entry, 1024),
		cap:       1024,
		lruhead:   &lru{},
		lrutail:   &lru{},
		lruHash:   map[string]*lru{},
	}
	c.lruhead.next = c.lrutail
	c.lrutail.prev = c.lruhead
	go func() {
		// Periodic sweep of expired entries.
		// NOTE(review): this goroutine has no stop signal, so every cache
		// instance leaks a goroutine; entries removed here are also never
		// taken out of the LRU list or lruHash — confirm intended.
		for {
			time.Sleep(time.Minute)
			c.Lock()
			for n, p := range c.hashtable {
				prev := p
				for p != nil {
					if p.expired() {
						if p == c.hashtable[n] {
							c.hashtable[n] = p.next
						} else {
							// NOTE(review): after a removal, prev advances onto
							// the removed node, so consecutive expired entries
							// may be unlinked incorrectly — confirm.
							prev.next = p.next
						}
						c.num--
						c.memory -= sizeof(p.value)
					}
					prev = p
					p = p.next
				}
			}
			c.Unlock()
		}
	}()
	return c
}
// SetMaxMemory parses strings like "100KB", "10MB" or "1GB" and sets the
// memory ceiling in bytes, reporting whether the input was a valid,
// positive size. On failure the previous ceiling is kept.
func (c *simpleCache) SetMaxMemory(size string) bool {
	if len(size) < 3 {
		return false
	}
	num := size[:len(size)-2]
	unit := size[len(size)-2:]
	maxMemory, err := strconv.Atoi(num)
	if err != nil {
		return false
	}
	// Reject zero AND negative sizes: the original accepted e.g. "-1KB",
	// and a non-positive ceiling would make every Set evict immediately.
	if maxMemory <= 0 {
		return false
	}
	switch strings.ToUpper(unit) {
	case "KB":
		c.maxMemory = maxMemory << 10
	case "MB":
		c.maxMemory = maxMemory << 20
	case "GB":
		c.maxMemory = maxMemory << 30
	default:
		return false
	}
	return true
}
// hash maps key to a slot index in the current table using FNV-64.
func (c *simpleCache) hash(key string) int {
	hash := fnv.New64()
	hash.Write([]byte(key))
	// Reduce in uint64 space: the original converted Sum64 to int and
	// negated, which overflows at math.MinInt64 (negation is a no-op) and
	// can yield a negative index.
	return int(hash.Sum64() % uint64(c.cap))
}
// rehashkey maps key to a slot index for a table of the given capacity;
// used while migrating entries to a grown table.
func (c *simpleCache) rehashkey(cap int, key string) int {
	hash := fnv.New64()
	hash.Write([]byte(key))
	// Reduce in uint64 space to avoid the int negation overflow at
	// math.MinInt64 (see hash).
	return int(hash.Sum64() % uint64(cap))
}
// rehash doubles the table and re-inserts every entry into the new one.
// Entries are copied node by node, appending at each chain's tail. The
// caller must hold c's mutex.
func (c *simpleCache) rehash() {
	// Grow to twice the current capacity.
	newCap := c.cap * 2
	tmpHashtable := make([]*entry, newCap)
	for _, p := range c.hashtable {
		for p != nil {
			n := c.rehashkey(newCap, p.key)
			nv := tmpHashtable[n]
			// Copy the node so old and new tables share no links.
			nk := &entry{
				key:         p.key,
				value:       p.value,
				expireTime:  p.expireTime,
				operateTime: p.operateTime,
				next:        nil,
			}
			if nv == nil {
				tmpHashtable[n] = nk
			} else {
				// Append at the chain's tail.
				for nv.next != nil {
					nv = nv.next
				}
				nv.next = nk
			}
			p = p.next
		}
	}
	c.cap = newCap
	c.hashtable = tmpHashtable
}
// Set stores val under key (expire == 0 means effectively never) and
// then runs LRU maintenance/eviction via the deferred c.lru call.
func (c *simpleCache) Set(key string, val interface{}, expire time.Duration) {
	c.Lock()
	defer c.Unlock()
	defer c.lru(key)
	// Grow before hashing so the slot is computed against the final table.
	if float32(c.num)/float32(c.cap) > 0.75 {
		c.rehash()
	}
	n := c.hash(key)
	expireTime := time.Now()
	if expire == 0 {
		// Effectively no expiry: ten years out.
		expireTime = expireTime.Add(time.Hour * 24 * 365 * 10)
	} else {
		expireTime = expireTime.Add(expire)
	}
	// Overwrite in place when the key already exists, charging only the
	// size difference (the original added the full value size on every
	// Set, inflating the memory counter on overwrites).
	for p := c.hashtable[n]; p != nil; p = p.next {
		if p.key == key {
			c.memory += sizeof(val) - sizeof(p.value)
			p.value = val
			p.expireTime = expireTime
			return
		}
	}
	c.memory += sizeof(val)
	e := &entry{
		key:        key,
		value:      val,
		expireTime: expireTime,
		next:       nil,
	}
	if c.hashtable[n] == nil {
		c.hashtable[n] = e
	} else {
		// Append at the chain's tail.
		p := c.hashtable[n]
		for p.next != nil {
			p = p.next
		}
		p.next = e
	}
	c.num++
}
// lru moves key to the front of the recency list (inserting it if new)
// and then evicts from the tail until memory fits under the ceiling.
// The caller must hold c's mutex.
func (c *simpleCache) lru(key string) {
	l := c.lruHash[key]
	if l == nil {
		// New key: splice a node in right after the head sentinel.
		nl := &lru{prev: c.lruhead, next: c.lruhead.next, key: key}
		c.lruhead.next.prev = nl
		c.lruhead.next = nl
		c.lruHash[key] = nl
	} else {
		// Known key: unlink it and move it to the front.
		l.prev.next = l.next
		l.next.prev = l.prev
		l.prev = c.lruhead
		l.next = c.lruhead.next
		c.lruhead.next.prev = l
		c.lruhead.next = l
	}
	// Evict least-recently-used entries while over the ceiling.
	for c.memory > c.maxMemory && c.num > 1 {
		if c.lrutail.prev == c.lruhead {
			// List empty: nothing left to evict.
			return
		}
		// Unlink the tail node, then del removes it from the table and
		// (via delLru) from lruHash.
		rmk := c.lrutail.prev
		c.lrutail.prev.next = c.lrutail
		c.lrutail.prev = c.lrutail.prev.prev
		c.del(rmk.key)
	}
}
// delLru unlinks key's node from the recency list and drops its index
// entry; a missing key is a no-op.
func (c *simpleCache) delLru(key string) {
	node := c.lruHash[key]
	if node == nil {
		return
	}
	node.prev.next = node.next
	node.next.prev = node.prev
	delete(c.lruHash, key)
}
// Get returns the value for key and whether it is present and unexpired.
// An expired hit is removed on the spot (lazy expiry); a live hit is
// promoted to the front of the LRU list.
func (c *simpleCache) Get(key string) (interface{}, bool) {
	c.Lock()
	defer c.Unlock()
	n := c.hash(key)
	p := c.hashtable[n]
	prev := p
	for p != nil {
		if p.key == key {
			// Expired: unlink from the chain and LRU, fix the counters.
			if p.expired() {
				c.delLru(key)
				if c.hashtable[n] == p {
					c.hashtable[n] = p.next
				} else {
					prev.next = p.next
				}
				c.num--
				c.memory -= sizeof(p.value)
				return nil, false
			}
			c.lru(key)
			return p.value, true
		}
		prev = p
		p = p.next
	}
	return nil, false
}
// Del removes key under the lock, reporting whether it was present.
func (c *simpleCache) Del(key string) bool {
	c.Lock()
	removed := c.del(key)
	c.Unlock()
	return removed
}
// del removes key from the table and the LRU structures, adjusting the
// counters; it reports whether the key existed. The caller must hold
// c's mutex.
func (c *simpleCache) del(key string) bool {
	slot := c.hash(key)
	// Walk via a pointer-to-link so head and interior removals unify.
	for pp := &c.hashtable[slot]; *pp != nil; pp = &(*pp).next {
		node := *pp
		if node.key != key {
			continue
		}
		c.delLru(key)
		*pp = node.next
		c.num--
		c.memory -= sizeof(node.value)
		return true
	}
	return false
}
// Exists reports whether key is present and has not expired.
func (c *simpleCache) Exists(key string) bool {
	if _, ok := c.Get(key); ok {
		return true
	}
	return false
}
// Flush drops every entry and resets the LRU structures; the table keeps
// its current capacity (no shrinking).
func (c *simpleCache) Flush() bool {
	c.Lock()
	defer c.Unlock()
	c.num = 0
	c.memory = 0
	c.hashtable = make([]*entry, c.cap)
	head, tail := &lru{}, &lru{}
	head.next, tail.prev = tail, head
	c.lruhead, c.lrutail = head, tail
	c.lruHash = make(map[string]*lru)
	return true
}
// Keys returns the current number of stored keys.
func (c *simpleCache) Keys() int64 {
	count := c.num
	return int64(count)
}

213
archive/go/cache/simple_test.go vendored Normal file
View File

@@ -0,0 +1,213 @@
package main
import (
"fmt"
"sync"
"sync/atomic"
"testing"
"time"
)
import "github.com/stretchr/testify/assert"
// TestSimple_cache_SetMaxMemory table-tests size-string parsing for the
// single-lock implementation.
func TestSimple_cache_SetMaxMemory(t *testing.T) {
	tests := []struct {
		name      string
		args      string
		maxMemory int
		want      bool
	}{
		{"case1", "1KB", 1024, true},
		{"case2", "1MB", 1024 * 1024, true},
		{"case3", "1GB", 1024 * 1024 * 1024, true},
		{"case4", "10G", 0, false},
		{"case5", "0", 0, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := newSimpleCache()
			if got := c.SetMaxMemory(tt.args); got != tt.want || c.maxMemory != tt.maxMemory {
				t.Errorf("SetMaxMemory() = %v,maxMemory = %v, want %v,%v", got, c.maxMemory, tt.want, tt.maxMemory)
			}
		})
	}
}
// TestSimpleCache walks the basic API of the single-lock implementation:
// set/get/del, TTL expiry with lazy counter correction, and flush.
func TestSimpleCache(t *testing.T) {
	cache := newSimpleCache()
	cache.SetMaxMemory("100MB")
	assert.Equal(t, 1024*1024*100, cache.maxMemory)
	cache.Set("int", 1, 0)
	assert.Equal(t, 8, cache.memory)
	v, ok := cache.Get("int")
	assert.True(t, ok)
	assert.Equal(t, 1, v)
	cache.Del("int")
	_, ok = cache.Get("int")
	assert.False(t, ok)
	cache.Set("str", "ok", time.Microsecond)
	v, ok = cache.Get("str")
	assert.True(t, ok)
	assert.Equal(t, "ok", v)
	assert.Equal(t, int64(1), cache.Keys())
	time.Sleep(time.Microsecond)
	// Expiry is lazy: the key still counts until the next Get touches it.
	assert.Equal(t, int64(1), cache.Keys())
	assert.Equal(t, 16, cache.memory)
	v, ok = cache.Get("str")
	assert.False(t, ok)
	assert.Equal(t, int64(0), cache.Keys())
	assert.Equal(t, 0, cache.memory)
	cache.Flush()
	cache.Keys()
}
// TestSimpleClean verifies exact-LRU eviction when a large value pushes
// memory past the 1KB ceiling.
func TestSimpleClean(t *testing.T) {
	cache := newSimpleCache()
	cache.SetMaxMemory("1KB")
	cache.Set("test1", 1, 0)
	cache.Set("test2", 10, 0)
	cache.Set("test3", 100, 0)
	cache.Set("test4", 1000, 0)
	// The 8KB array forces everything else out.
	cache.Set("bigkey", [1024]int{0}, 0)
	assert.Equal(t, int64(1), cache.Keys())
	_, ok := cache.Get("test1")
	assert.False(t, ok)
	v, ok := cache.Get("bigkey")
	assert.Equal(t, [1024]int{0}, v)
	assert.True(t, ok)
	cache.Set("test1", 1, 0)
	cache.Set("test2", 10, 0)
	cache.Set("test3", 100, 0)
	// bigkey is now the least recently used entry and gets evicted in turn.
	assert.Equal(t, int64(3), cache.Keys())
	v, ok = cache.Get("test3")
	assert.True(t, ok)
	assert.Equal(t, 100, v)
	v, ok = cache.Get("bigkey")
	assert.False(t, ok)
	assert.Nil(t, v)
}
// TestSimpleExpansion grows the table well past its initial capacity
// (forcing rehashes) and checks the counters and Flush afterwards.
func TestSimpleExpansion(t *testing.T) {
	cache := newSimpleCache()
	cache.SetMaxMemory("1GB")
	for i := 0; i < 10000; i++ {
		cache.Set(fmt.Sprintf("%d", i), i, 0)
	}
	for i := 0; i < 10000; i++ {
		v, ok := cache.Get(fmt.Sprintf("%d", i))
		assert.True(t, ok)
		assert.Equal(t, i, v)
	}
	assert.Equal(t, 80000, cache.memory)
	assert.Equal(t, 10000, cache.num)
	cache.Flush()
	assert.Equal(t, 0, cache.memory)
	assert.Equal(t, 0, cache.num)
}
// TestSimpleSetGet hammers the cache from 100 goroutines: first a write
// phase then a read phase, then simultaneous writers and readers linked
// by a channel.
func TestSimpleSetGet(t *testing.T) {
	var rn int64
	maxNum := int64(100000)
	cache := newSimpleCache()
	cache.SetMaxMemory("1GB")
	var n int64 = -1
	wg := sync.WaitGroup{}
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				i := atomic.AddInt64(&n, 1)
				if i > maxNum {
					break
				}
				cache.Set(fmt.Sprintf("%d", i), i, 0)
			}
		}()
	}
	wg.Wait()
	n = -1
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				i := atomic.AddInt64(&n, 1)
				if i > maxNum {
					break
				}
				v, ok := cache.Get(fmt.Sprintf("%d", i))
				assert.True(t, ok)
				assert.Equal(t, i, v)
				atomic.AddInt64(&rn, 1)
			}
		}()
	}
	wg.Wait()
	assert.Equal(t, maxNum+1, rn)
	rn = 0
	cache.Flush()
	assert.Equal(t, 0, cache.memory)
	assert.Equal(t, 0, cache.num)
	// Flush does not shrink the table, so start over with a fresh cache.
	cache = newSimpleCache()
	cache.SetMaxMemory("1GB")
	// Readers consume keys over a channel while the writers produce them.
	c := make(chan int64, 100)
	n = -1
	wwg := sync.WaitGroup{}
	for i := 0; i < 100; i++ {
		wwg.Add(1)
		go func() {
			defer wwg.Done()
			for {
				i := atomic.AddInt64(&n, 1)
				if i > maxNum {
					break
				}
				cache.Set(fmt.Sprintf("%d", i), i, 0)
				c <- i
			}
		}()
	}
	go func() {
		wwg.Wait()
		close(c)
	}()
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				i, ok := <-c
				if !ok {
					break
				}
				v, ok := cache.Get(fmt.Sprintf("%d", i))
				assert.True(t, ok)
				atomic.AddInt64(&rn, 1)
				assert.Equal(t, i, v)
			}
		}()
	}
	wg.Wait()
	assert.Equal(t, maxNum+1, rn)
}
// TestSimpleLru checks that eviction keeps memory at or under the ceiling
// while inserting 100 entries of 64 bytes each into a 1KB cache.
func TestSimpleLru(t *testing.T) {
	cache := newSimpleCache()
	cache.SetMaxMemory("1KB")
	for i := 0; i < 100; i++ {
		cache.Set(fmt.Sprintf("%d", i), [...]int{1, 2, 3, 4, 5, 6, 7, 8}, 0)
		assert.True(t, cache.memory <= cache.maxMemory)
	}
}

8
archive/go/cache/utils.go vendored Normal file
View File

@@ -0,0 +1,8 @@
package main
import "reflect"
// sizeof approximates the in-memory size of v in bytes using the size of
// its top-level type only; pointers, slices, maps and strings are counted
// as their header size, not the data they reference.
func sizeof(v interface{}) int {
	// reflect.TypeOf returns nil for a nil interface; the original would
	// panic on the Size() call in that case.
	if v == nil {
		return 0
	}
	return int(reflect.TypeOf(v).Size())
}