init commit

This commit is contained in:
2024-03-19 01:05:51 +08:00
commit 199bbf2628
393 changed files with 34883 additions and 0 deletions

View File

@@ -0,0 +1,39 @@
package main
import (
"sync"
"sync/atomic"
"time"
)
// main benchmarks three ways of summing 0..499,999,999 in a single
// goroutine — plain addition, mutex-guarded addition, and atomic
// addition — printing each sum and elapsed time so the overhead of the
// two synchronization primitives can be compared.
func main() {
	t := time.Now()
	sum := int64(0)
	for i := int64(0); i < 500000000; i++ {
		sum += i
	}
	// time.Since(t) is the idiomatic spelling of time.Now().Sub(t).
	t1 := time.Since(t)
	println(sum)
	println(t1.String())
	// Mutex-guarded addition.
	t = time.Now()
	sum = 0
	var mutex sync.Mutex
	for i := int64(0); i < 500000000; i++ {
		mutex.Lock()
		sum += i
		mutex.Unlock()
	}
	t1 = time.Since(t)
	println(sum)
	println(t1.String())
	// Atomic addition.
	t = time.Now()
	sum = 0
	for i := int64(0); i < 500000000; i++ {
		atomic.AddInt64(&sum, i)
	}
	t1 = time.Since(t)
	println(sum)
	println(t1.String())
}

View File

@@ -0,0 +1,129 @@
package main
import (
"bytes"
"crypto/md5"
"fmt"
"io"
"net/http"
"runtime"
"github.com/gin-gonic/gin"
)
// main compares three strategies for MD5-hashing a multipart upload with
// gin, reporting heap usage before/after via runtime.MemStats; serves on
// :8088. The measurement figures in the comments are the original
// author's recorded runs.
func main() {
	r := gin.Default()
	/*
	   hash1: tee the request body through an io.Pipe so the multipart
	   parser and the MD5 hash consume it as a stream (no full buffering).
	   1G file:   {"md5": "23023b24fc0ab2d03d5cd62a18bb4aa8", "mem": "96M", "startMem": "1M"}
	   100M file: {"md5": "6ba2b5a1b62f356fc86efad690613916", "mem": "96M", "startMem": "0M"}
	*/
	r.GET("/hash1", func(c *gin.Context) {
		fmt.Printf("%v", c.ContentType())
		m := runtime.MemStats{}
		runtime.ReadMemStats(&m)
		startM := m.Alloc / 1024 / 1024
		oldBody := c.Request.Body
		defer oldBody.Close()
		pr, pw := io.Pipe()
		defer pw.Close()
		defer pr.Close()
		// The multipart parser now reads from the pipe's read end; the
		// goroutine below feeds it while also updating the hash.
		c.Request.Body = pr
		hash := md5.New()
		go func() {
			// NOTE(review): pw is only closed by the handler's deferred
			// Close, so this relies on MultipartForm stopping at the
			// final multipart boundary rather than waiting for EOF from
			// the pipe — confirm.
			_, err := io.Copy(io.MultiWriter(hash, pw), oldBody)
			if err != nil {
				fmt.Printf("io copy: %v", err)
			}
		}()
		_, err := c.MultipartForm()
		if err != nil {
			c.AbortWithError(http.StatusInternalServerError, err)
			return
		}
		runtime.ReadMemStats(&m)
		c.JSON(http.StatusOK, gin.H{
			"md5":      fmt.Sprintf("%x", hash.Sum(nil)),
			"startMem": fmt.Sprintf("%dM", startM),
			"mem":      fmt.Sprintf("%dM", m.Alloc/1024/1024),
		})
	})
	/*
	   hash2: buffer the entire body in memory before parsing — simple,
	   but memory scales with upload size.
	   1G file:   {"md5": "62da2c499cdb3fad927f881c134684b0", "mem": "2922M", "startMem": "1M"}
	   100M file: {"md5": "55a6849293d0847a48f856254aa910e2", "mem": "341M", "startMem": "1M"}
	*/
	r.GET("/hash2", func(c *gin.Context) {
		m := runtime.MemStats{}
		runtime.ReadMemStats(&m)
		startM := m.Alloc / 1024 / 1024
		oldBody := c.Request.Body
		defer oldBody.Close()
		buffer := bytes.NewBuffer(nil)
		hash := md5.New()
		_, err := io.Copy(io.MultiWriter(buffer, hash), oldBody)
		if err != nil {
			fmt.Printf("io copy2: %v", err)
		}
		// Hand the fully-buffered body to gin's parser.
		c.Request.Body = io.NopCloser(buffer)
		_, err = c.MultipartForm()
		if err != nil {
			c.AbortWithError(http.StatusInternalServerError, err)
			return
		}
		runtime.ReadMemStats(&m)
		c.JSON(http.StatusOK, gin.H{
			"md5":      fmt.Sprintf("%x", hash.Sum(nil)),
			"startMem": fmt.Sprintf("%dM", startM),
			"mem":      fmt.Sprintf("%dM", m.Alloc/1024/1024),
		})
	})
	/*
	   hash3: baseline — let gin parse the form without hashing at all.
	   1G file:   {"mem": "96M", "startMem": "1M"}
	   100M file: {"mem": "96M", "startMem": "1M"}
	*/
	r.GET("/hash3", func(c *gin.Context) {
		m := runtime.MemStats{}
		runtime.ReadMemStats(&m)
		startM := m.Alloc / 1024 / 1024
		_, err := c.MultipartForm()
		if err != nil {
			c.AbortWithError(http.StatusInternalServerError, err)
			return
		}
		runtime.ReadMemStats(&m)
		c.JSON(http.StatusOK, gin.H{
			"startMem": fmt.Sprintf("%dM", startM),
			"mem":      fmt.Sprintf("%dM", m.Alloc/1024/1024),
		})
	})
	r.Run(":8088")
}

278
archive/go/cache/cache.go vendored Normal file
View File

@@ -0,0 +1,278 @@
package main
import (
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
// Cache is a byte-size-bounded, expiring key/value store.
type Cache interface {
	// SetMaxMemory parses a size string such as "100KB"/"10MB"/"1GB" and
	// sets the memory ceiling; it reports whether the string was accepted.
	SetMaxMemory(size string) bool
	// Set stores val under key; expire == 0 means "effectively never expires".
	Set(key string, val interface{}, expire time.Duration)
	// Get returns the value and whether it exists (and has not expired).
	Get(key string) (interface{}, bool)
	// Del removes key, reporting whether it was present.
	Del(key string) bool
	// Exists reports whether key is present.
	Exists(key string) bool
	// Flush empties the cache.
	Flush() bool
	// Keys returns the number of keys (approximate — expiry is lazy).
	Keys() int64
}
// cache is the incremental-rehash implementation of Cache.
type cache struct {
	maxMemory int64 // byte ceiling set via SetMaxMemory
	memory    int64 // current estimated usage in bytes (updated atomically)
	// Expiry uses lazy deletion, so num may overcount live keys.
	num      int64
	capacity int64 // current slot capacity of hashtable[0]
	// rehashs tracks an in-progress incremental rehash: enable != 0 while
	// rehashing, index is the next slot of the old table to migrate.
	rehashs struct {
		sync.Mutex
		enable int32
		index  int
	}
	// hashtable[0] is the live table; hashtable[1] is the destination
	// table while a rehash is in progress (nil otherwise).
	hashtable [2]*hashtable
	// evictionPool holds sampled eviction candidates (approximate LRU).
	evictionPool struct {
		sync.Mutex
		e []*entry
	}
}
// NewCache returns a Cache with the default initial capacity of 1024
// slots and the default 1GB memory ceiling (see newCache).
func NewCache() Cache {
	const defaultCapacity = 1024
	return newCache(defaultCapacity)
}
// newCache builds a cache with cap initial slots and a 1GB default
// memory ceiling.
func newCache(cap int64) *cache {
	c := &cache{
		capacity: cap,
		// Only table 0 exists until a rehash begins.
		hashtable: [2]*hashtable{newHashtable(cap)},
		evictionPool: struct {
			sync.Mutex
			e []*entry
		}{e: make([]*entry, 0, 20)},
		rehashs: struct {
			sync.Mutex
			enable int32
			index  int
		}{enable: 0, index: 0},
	}
	c.SetMaxMemory("1GB")
	return c
}
// SetMaxMemory parses strings such as "1KB", "512MB", "2GB" (unit is
// case-insensitive) and sets the byte ceiling. It reports false for
// malformed input, unknown units, or non-positive sizes.
func (c *cache) SetMaxMemory(size string) bool {
	if len(size) < 3 {
		return false
	}
	num := size[:len(size)-2]
	unit := size[len(size)-2:]
	maxMemory, err := strconv.ParseInt(num, 10, 64)
	if err != nil {
		return false
	}
	// Reject zero AND negative sizes. The original only rejected zero, so
	// "-1KB" silently produced a negative ceiling that made every Set
	// trigger the eviction loop.
	if maxMemory <= 0 {
		return false
	}
	switch strings.ToUpper(unit) {
	case "KB":
		c.maxMemory = maxMemory << 10
	case "MB":
		c.maxMemory = maxMemory << 20
	case "GB":
		c.maxMemory = maxMemory << 30
	default:
		return false
	}
	return true
}
// Set stores val under key. The whole operation runs under the rehash
// mutex, so Set calls are fully serialized with each other.
func (c *cache) Set(key string, val interface{}, expire time.Duration) {
	c.rehashs.Lock()
	defer c.rehashs.Unlock()
	if c.rehashs.enable == 0 {
		if float64(c.num)/float64(c.capacity) > 0.75 {
			// Load factor exceeded: double capacity and start an
			// incremental rehash into hashtable[1].
			atomic.AddInt64(&c.capacity, c.capacity)
			c.hashtable[1] = newHashtable(c.capacity)
			atomic.StoreInt32(&c.rehashs.enable, 1)
			c.hashtable[1].Set(key, val, expire)
		} else {
			c.hashtable[0].Set(key, val, expire)
		}
	} else {
		// NOTE(review): this re-check is dead code — enable was read as
		// non-zero above while the same mutex is still held, and the
		// recursive Set would deadlock on that mutex anyway.
		if c.rehashs.enable == 0 {
			c.Set(key, val, expire)
			return
		}
		c.hashtable[1].Set(key, val, expire)
		// Migrate one slot of the old table per write.
		c.rehash()
	}
	// NOTE(review): num/memory grow even when key already existed, so
	// overwrites are double-counted — verify against bucket-level counters.
	atomic.AddInt64(&c.num, 1)
	atomic.AddInt64(&c.memory, int64(sizeof(val)))
	for c.memory > c.maxMemory && c.num > 1 {
		// Over budget: evict approximate-LRU victims.
		c.lru()
		if len(c.evictionPool.e) == 0 {
			break
		}
	}
}
// rehash migrates one slot's chain from hashtable[0] to hashtable[1].
// Callers must hold c.rehashs (Set/Get/Del do).
func (c *cache) rehash() {
	e, comple := c.hashtable[0].index(c.rehashs.index)
	if comple {
		// Sweep finished: promote the new table and clear rehash state.
		c.hashtable[0], c.hashtable[1] = c.hashtable[1], nil
		c.rehashs.enable = 0
		c.rehashs.index = 0
		return
	}
	c.rehashs.index++
	if e == nil {
		// Empty slot: recurse so every call makes real progress.
		c.rehash()
		return
	}
	for e != nil {
		if e.expired() {
			// Expired during migration: drop it and release accounting.
			// (The bucket-level counters are not adjusted here.)
			atomic.AddInt64(&c.num, -1)
			atomic.AddInt64(&c.memory, -int64(sizeof(e.value)))
		} else {
			// Preserve the absolute deadline when re-inserting.
			c.hashtable[1].SetExpireTime(e.key, e.value, e.expireTime)
		}
		e, e.next = e.next, nil
	}
}
// lru evicts one victim using Redis-style sampled (approximate) LRU.
func (c *cache) lru() {
	c.evictionPool.Lock()
	defer c.evictionPool.Unlock()
	if c.rehashs.enable == 0 {
		// Keep a pool of up to ~20 candidates; refill by random sampling
		// when it runs low, then evict the least recently used.
		if len(c.evictionPool.e) <= 15 {
			m := make(map[string]struct{})
			for _, v := range c.evictionPool.e {
				m[v.key] = struct{}{}
			}
			e := c.hashtable[0].random(5, -1)
			for _, v := range e {
				if _, ok := m[v.key]; ok {
					continue
				}
				c.evictionPool.e = append(c.evictionPool.e, v)
			}
			sort.Slice(c.evictionPool.e, func(i, j int) bool {
				return c.evictionPool.e[i].operateTime.Before(c.evictionPool.e[j].operateTime)
			})
		}
		// NOTE(review): if sampling produced no candidates, e[0] below
		// indexes an empty slice — confirm random() cannot return zero
		// entries while num > 1.
		e := c.evictionPool.e[0]
		c.Del(e.key)
		c.evictionPool.e = c.evictionPool.e[1:]
		if len(c.evictionPool.e) == 0 {
			return
		}
	} else {
		// TODO: eviction during rehash is not implemented. It would need
		// to sample only the not-yet-migrated region under the rehash
		// lock, or restrict cleanup to Set (which already holds it).
	}
}
// Get returns the live value for key. Expired entries are deleted lazily
// on lookup, with the global counters adjusted here.
func (c *cache) Get(key string) (interface{}, bool) {
	if c.rehashs.enable == 0 {
		// Fast path: no rehash in progress, read table 0 without the
		// rehash mutex. NOTE(review): enable is read unsynchronized here,
		// racing with the transition in Set — confirm this is an accepted
		// trade-off.
		val, err := c.hashtable[0].Get(key)
		if err != nil {
			if err == ExpiredErr {
				// Lazy expiry: release accounting.
				atomic.AddInt64(&c.num, -1)
				atomic.AddInt64(&c.memory, -int64(sizeof(val)))
			}
			return nil, false
		}
		return val, true
	} else {
		c.rehashs.Lock()
		defer c.rehashs.Unlock()
		if c.rehashs.enable == 0 {
			// Rehash finished while waiting for the lock; retry fast path.
			return c.Get(key)
		}
		// During rehash, check the new table first, then the old one.
		val, err := c.hashtable[1].Get(key)
		if err != nil {
			if err == ExpiredErr {
				atomic.AddInt64(&c.num, -1)
				atomic.AddInt64(&c.memory, -int64(sizeof(val)))
				return nil, false
			}
			val, err = c.hashtable[0].Get(key)
			if err != nil {
				if err == ExpiredErr {
					atomic.AddInt64(&c.num, -1)
					atomic.AddInt64(&c.memory, -int64(sizeof(val)))
				}
				return nil, false
			}
		}
		// Each successful locked lookup also advances the migration.
		c.rehash()
		return val, true
	}
}
// Del removes key and releases its accounting; reports whether it existed.
func (c *cache) Del(key string) bool {
	if c.rehashs.enable == 0 {
		val, ok := c.hashtable[0].Del(key)
		if !ok {
			return false
		}
		atomic.AddInt64(&c.memory, -int64(sizeof(val)))
		atomic.AddInt64(&c.num, -1)
		return true
	} else {
		// Rehash in progress: serialize with migration, try both tables.
		c.rehashs.Lock()
		defer c.rehashs.Unlock()
		if c.rehashs.enable == 0 {
			// Rehash finished while acquiring the lock; retry fast path.
			return c.Del(key)
		}
		val, ok := c.hashtable[0].Del(key)
		if !ok {
			val, ok = c.hashtable[1].Del(key)
			if !ok {
				return false
			}
		}
		atomic.AddInt64(&c.memory, -int64(sizeof(val)))
		atomic.AddInt64(&c.num, -1)
		return true
	}
}
// Exists reports whether key is present (and not expired).
func (c *cache) Exists(key string) bool {
	if _, ok := c.Get(key); ok {
		return true
	}
	return false
}
// Flush empties the cache; capacity is retained (the table never shrinks).
func (c *cache) Flush() bool {
	if c.rehashs.enable == 0 {
		c.hashtable[0].flush(&c.memory, &c.num)
	} else {
		// Rehash in progress: clear both tables under the rehash lock.
		c.rehashs.Lock()
		defer c.rehashs.Unlock()
		c.hashtable[0].flush(&c.memory, &c.num)
		c.hashtable[1].flush(&c.memory, &c.num)
	}
	return true
}
// Keys returns the number of live keys. Expiry is lazy, so the count may
// include keys that have expired but have not been touched since.
func (c *cache) Keys() int64 {
	// num is written with sync/atomic everywhere else; load it atomically
	// too so concurrent readers do not race with writers.
	return atomic.LoadInt64(&c.num)
}

60
archive/go/cache/cache_bench_test.go vendored Normal file
View File

@@ -0,0 +1,60 @@
package main
import (
"strconv"
"testing"
"time"
"github.com/allegro/bigcache/v3"
)
// Baseline: plain map inserts.
func BenchmarkMapSet(b *testing.B) {
	m := make(map[string]int, b.N)
	for i := 0; i < b.N; i++ {
		m[strconv.Itoa(i)] = i
	}
}

// Baseline: plain map reads over 1000 pre-inserted keys.
// NOTE(review): the map is pre-sized to b.N but only ever holds 1000
// entries — presumably copy-paste from BenchmarkMapSet; verify.
func BenchmarkMapGetSet(b *testing.B) {
	m := make(map[string]int, b.N)
	for i := 0; i < 1000; i++ {
		m[strconv.Itoa(i)] = i
	}
	for i := 0; i < b.N; i++ {
		_, _ = m[strconv.Itoa(i%1000)]
	}
}

// This package's cache: inserts.
func BenchmarkCacheSet(b *testing.B) {
	cache := NewCache()
	for i := 0; i < b.N; i++ {
		cache.Set(strconv.Itoa(i), i, 0)
	}
}

// This package's cache: reads over 1000 pre-inserted keys.
func BenchmarkCacheSetGet(b *testing.B) {
	cache := NewCache()
	for i := 0; i < 1000; i++ {
		cache.Set(strconv.Itoa(i), i, 0)
	}
	for i := 0; i < b.N; i++ {
		cache.Get(strconv.Itoa(i % 1000))
	}
}

// bigcache for comparison: inserts.
func BenchmarkBigCacheSet(b *testing.B) {
	cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
	for i := 0; i < b.N; i++ {
		cache.Set(strconv.Itoa(i), []byte(strconv.Itoa(i)))
	}
}

// bigcache for comparison: reads over 1000 pre-inserted keys.
func BenchmarkBigCacheSetGet(b *testing.B) {
	cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
	for i := 0; i < 1000; i++ {
		cache.Set(strconv.Itoa(i), []byte(strconv.Itoa(i)))
	}
	for i := 0; i < b.N; i++ {
		cache.Get(strconv.Itoa(i % 1000))
	}
}

217
archive/go/cache/cache_test.go vendored Normal file
View File

@@ -0,0 +1,217 @@
package main
import (
"fmt"
"sync"
"sync/atomic"
"testing"
"time"
)
import "github.com/stretchr/testify/assert"
// Table test for size-string parsing on the zero-value cache.
func Test_cache_SetMaxMemory(t *testing.T) {
	tests := []struct {
		name      string
		args      string
		maxMemory int64
		want      bool
	}{
		{"case1", "1KB", 1024, true},
		{"case2", "1MB", 1024 * 1024, true},
		{"case3", "1GB", 1024 * 1024 * 1024, true},
		{"case4", "10G", 0, false},
		{"case5", "0", 0, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := &cache{}
			if got := c.SetMaxMemory(tt.args); got != tt.want || c.maxMemory != tt.maxMemory {
				t.Errorf("SetMaxMemory() = %v,maxMemory = %v, want %v,%v", got, c.maxMemory, tt.want, tt.maxMemory)
			}
		})
	}
}

// End-to-end Set/Get/Del/expiry/accounting checks.
func TestCache(t *testing.T) {
	cache := newCache(1024)
	cache.SetMaxMemory("100MB")
	assert.Equal(t, int64(1024*1024*100), cache.maxMemory)
	cache.Set("int", 1, 0)
	assert.Equal(t, int64(8), cache.memory)
	v, ok := cache.Get("int")
	assert.True(t, ok)
	assert.Equal(t, 1, v)
	cache.Del("int")
	_, ok = cache.Get("int")
	assert.False(t, ok)
	cache.Set("str", "ok", time.Microsecond)
	v, ok = cache.Get("str")
	assert.True(t, ok)
	assert.Equal(t, "ok", v)
	assert.Equal(t, int64(1), cache.Keys())
	time.Sleep(time.Microsecond)
	// Expiry is lazy: counters do not change until the key is read again.
	assert.Equal(t, int64(1), cache.Keys())
	assert.Equal(t, int64(16), cache.memory)
	v, ok = cache.Get("str")
	assert.False(t, ok)
	assert.Equal(t, int64(0), cache.Keys())
	assert.Equal(t, int64(0), cache.memory)
	cache.Flush()
	cache.Keys()
}

// Eviction: a 1KB ceiling forces older keys out when a big value arrives.
func TestClean(t *testing.T) {
	cache := newCache(1024)
	cache.SetMaxMemory("1KB")
	cache.Set("test1", 1, 0)
	cache.Set("test2", 10, 0)
	cache.Set("test3", 100, 0)
	cache.Set("test4", 1000, 0)
	time.Sleep(time.Microsecond)
	cache.Set("bigkey", [1024]int{0}, 0)
	assert.Equal(t, int64(1), cache.Keys())
	_, ok := cache.Get("test1")
	assert.False(t, ok)
	v, ok := cache.Get("bigkey")
	assert.Equal(t, [1024]int{0}, v)
	assert.True(t, ok)
	time.Sleep(time.Microsecond)
	cache.Set("test1", 1, 0)
	cache.Set("test2", 10, 0)
	cache.Set("test3", 100, 0)
	assert.Equal(t, int64(3), cache.Keys())
	v, ok = cache.Get("test3")
	assert.True(t, ok)
	assert.Equal(t, 100, v)
	v, ok = cache.Get("bigkey")
	assert.False(t, ok)
	assert.Nil(t, v)
}

// Growth: 10000 inserts force incremental rehash past the 0.75 load factor.
func TestExpansion(t *testing.T) {
	cache := newCache(1024)
	cache.SetMaxMemory("1GB")
	for i := 0; i < 10000; i++ {
		cache.Set(fmt.Sprintf("%d", i), i, 0)
	}
	for i := 0; i < 10000; i++ {
		v, ok := cache.Get(fmt.Sprintf("%d", i))
		assert.True(t, ok)
		assert.Equal(t, i, v)
	}
	assert.Equal(t, int64(80000), cache.memory)
	assert.Equal(t, int64(10000), cache.num)
	cache.Flush()
	assert.Equal(t, int64(0), cache.memory)
	assert.Equal(t, int64(0), cache.num)
}

// Concurrent readers/writers: 100 goroutines write, then 100 read, then
// reads and writes run simultaneously via a channel.
func TestSetGet(t *testing.T) {
	var rn int64
	maxNum := int64(100000)
	cache := newCache(1024)
	cache.SetMaxMemory("1GB")
	var n int64 = -1
	wg := sync.WaitGroup{}
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				i := atomic.AddInt64(&n, 1)
				if i > maxNum {
					break
				}
				cache.Set(fmt.Sprintf("%d", i), i, 0)
			}
		}()
	}
	wg.Wait()
	n = -1
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				i := atomic.AddInt64(&n, 1)
				if i > maxNum {
					break
				}
				v, ok := cache.Get(fmt.Sprintf("%d", i))
				assert.True(t, ok)
				assert.Equal(t, i, v)
				atomic.AddInt64(&rn, 1)
			}
		}()
	}
	wg.Wait()
	assert.Equal(t, maxNum+1, rn)
	rn = 0
	cache.Flush()
	assert.Equal(t, int64(0), cache.memory)
	assert.Equal(t, int64(0), cache.num)
	// Flush does not shrink the table, so start a fresh cache.
	cache = newCache(1024)
	cache.SetMaxMemory("1GB")
	// Write and read concurrently.
	c := make(chan int64, 100)
	n = -1
	wwg := sync.WaitGroup{}
	for i := 0; i < 100; i++ {
		wwg.Add(1)
		go func() {
			defer wwg.Done()
			for {
				i := atomic.AddInt64(&n, 1)
				if i > maxNum {
					break
				}
				cache.Set(fmt.Sprintf("%d", i), i, 0)
				c <- i
			}
		}()
	}
	go func() {
		wwg.Wait()
		close(c)
	}()
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				i, ok := <-c
				if !ok {
					break
				}
				v, ok := cache.Get(fmt.Sprintf("%d", i))
				assert.True(t, ok)
				atomic.AddInt64(&rn, 1)
				assert.Equal(t, i, v)
			}
		}()
	}
	wg.Wait()
	assert.Equal(t, maxNum+1, rn)
}

// Eviction keeps memory at or under the ceiling on every insert.
func TestLru(t *testing.T) {
	cache := newCache(2048)
	cache.SetMaxMemory("1KB")
	for i := 0; i < 100; i++ {
		cache.Set(fmt.Sprintf("%d", i), [...]int{1, 2, 3, 4, 5, 6, 7, 8}, 0)
		assert.True(t, cache.memory <= cache.maxMemory)
	}
}

283
archive/go/cache/hashtable.go vendored Normal file
View File

@@ -0,0 +1,283 @@
package main
import (
"errors"
"hash/fnv"
"math/rand"
"sync"
"sync/atomic"
"time"
)
// entry is one key/value node in a bucket's separate-chaining list.
type entry struct {
	key         string
	value       interface{}
	expireTime  time.Time // absolute expiry deadline
	operateTime time.Time // last access, used by approximate LRU
	next        *entry
}
// ExpiredErr is returned by lookups that find a key past its deadline.
// NOTE(review): the idiomatic name would be ErrExpired.
var ExpiredErr = errors.New("key过期")

// expired reports whether the entry's deadline has passed.
func (v *entry) expired() bool {
	return time.Now().After(v.expireTime)
}

// val returns the stored value; for expired entries it still returns the
// value (so callers can account freed memory) along with ExpiredErr.
func (v *entry) val() (interface{}, error) {
	if !v.expired() {
		return v.value, nil
	}
	return v.value, ExpiredErr
}
// hashtable is a fixed-capacity separate-chaining table, sharded into
// 1024-slot buckets each guarded by its own RWMutex (segmented locking
// for concurrency). It never grows by itself — the cache layer rehashes
// into a larger table instead.
type hashtable struct {
	// Size/usage accounting now lives at the cache and bucket levels:
	//maxMemory int64
	//memory int64
	//
	//cap int64
	//num int64
	bucket []*bucket
}
// newHashtable builds a table with one 1024-slot bucket per 1024
// requested slots.
func newHashtable(cap int64) *hashtable {
	// Guarantee at least one bucket: the original allocated cap/1024
	// buckets, so any cap < 1024 produced an empty bucket slice and a
	// division-by-zero panic in hash().
	n := cap / 1024
	if n < 1 {
		n = 1
	}
	ht := &hashtable{
		bucket: make([]*bucket, 0, n),
	}
	for i := int64(0); i < n; i++ {
		ht.bucket = append(ht.bucket, &bucket{})
	}
	return ht
}
// hash maps key to (bucket index, slot index within the bucket's 1024
// slots) using FNV-1 64.
func (h *hashtable) hash(key string) (int, int) {
	hash := fnv.New64()
	hash.Write([]byte(key))
	// Reduce modulo in uint64 space. The original converted to int and
	// negated negative values, but the minimum int negates to itself, so
	// a negative (panicking) index was possible. Both results come from
	// the same sum, keeping the mapping consistent across calls.
	sum := hash.Sum64()
	return int(sum % uint64(len(h.bucket))), int(sum % 1024)
}
// Get looks up key; the bucket reports ExpiredErr for stale entries.
func (h *hashtable) Get(key string) (interface{}, error) {
	bi, slot := h.hash(key)
	return h.bucket[bi].get(key, slot)
}

// Set stores val under key with a relative TTL (0 = effectively forever).
func (h *hashtable) Set(key string, val interface{}, expire time.Duration) {
	bi, slot := h.hash(key)
	h.bucket[bi].set(key, slot, val, expire)
}

// SetExpireTime stores val under key with an absolute deadline; used when
// migrating entries during rehash so TTLs are preserved.
func (h *hashtable) SetExpireTime(key string, val interface{}, expireTime time.Time) {
	bi, slot := h.hash(key)
	h.bucket[bi].setExpireTime(key, slot, val, expireTime)
}

// Del removes key and returns the removed value, if any.
func (h *hashtable) Del(key string) (interface{}, bool) {
	bi, slot := h.hash(key)
	return h.bucket[bi].del(key, slot)
}
// flush clears every bucket in turn; mem and num are the cache-level
// counters each bucket decrements as it empties. No table-level lock is
// taken here — locking happens per bucket.
func (h *hashtable) flush(mem, num *int64) {
	for _, bkt := range h.bucket {
		bkt.flush(mem, num)
	}
}
// index removes and returns the chain at global slot position index.
// The second result is true once index is past the end of the table,
// signalling that a rehash sweep is complete.
func (h *hashtable) index(index int) (*entry, bool) {
	if index >= len(h.bucket)*1024 {
		return nil, true
	}
	return h.bucket[index/1024].getAndDel(index % 1024), false
}
// random samples up to count entries, walking slots from a random
// position at or after start (start == -1 means anywhere in the table).
// The walk covers at most 1024 consecutive slots, so fewer than count
// entries may come back on sparse tables.
func (h *hashtable) random(count, start int) []*entry {
	if start == -1 {
		start = rand.Intn(len(h.bucket) * 1024)
	}
	startBucket := start / 1024
	randomBucket := startBucket + rand.Intn(len(h.bucket)-startBucket)
	startIndex := 0
	if randomBucket == startBucket {
		// Staying in the start bucket: pick a slot at or after start.
		startIndex = start % 1024
		startIndex = startIndex + rand.Intn(1024-startIndex)
	} else {
		startBucket = randomBucket
		startIndex = rand.Intn(1024)
	}
	ret := make([]*entry, 0, count)
	for i := 0; i < 1024; i++ {
		var e *entry
		if startIndex+i >= 1024 {
			// Walked past the end of this bucket; continue in the next
			// one (wrapping around the bucket slice).
			b := h.bucket[(startBucket+1)%len(h.bucket)]
			e = b.index((startIndex + i) % 1024)
		} else {
			e = h.bucket[startBucket].index(startIndex + i)
		}
		if e == nil {
			continue
		}
		// Collect this slot's whole chain until count is reached.
		for {
			ret = append(ret, e)
			if e.next == nil || len(ret) >= count {
				break
			}
			e = e.next
		}
		if len(ret) >= count {
			break
		}
	}
	return ret
}
// bucket is one 1024-slot shard guarded by its own RWMutex. It keeps its
// own mem/num tallies so flush can subtract exact amounts from the
// cache-level counters. (Author's noted alternative: periodically
// reconcile per-bucket stats for release/growth decisions.)
type bucket struct {
	sync.RWMutex
	kv  [1024]*entry
	mem int64
	num int64
}
// flush empties the shard and subtracts its tallies from the cache-level
// counters mem and num.
func (b *bucket) flush(mem, num *int64) {
	b.Lock()
	defer b.Unlock()
	b.kv = [1024]*entry{}
	atomic.AddInt64(mem, -b.mem)
	atomic.AddInt64(num, -b.num)
	b.mem = 0
	b.num = 0
}
// getAndDel detaches and returns the entire chain stored at slot n.
func (b *bucket) getAndDel(n int) *entry {
	b.Lock()
	defer b.Unlock()
	head := b.kv[n]
	b.kv[n] = nil
	return head
}
// getP finds key's node in chain n and returns (node, predecessor). For a
// head match the predecessor equals the node itself — callers use
// prev == v to detect "v is the chain head". Caller must hold the lock.
func (b *bucket) getP(key string, n int) (*entry, *entry) {
	v := b.kv[n]
	prev := v
	for v != nil {
		if v.key == key {
			return v, prev
		}
		prev = v
		v = v.next
	}
	return nil, nil
}
// index returns the chain head at slot n (nil if the slot is empty).
func (b *bucket) index(n int) *entry {
	b.RLock()
	head := b.kv[n]
	b.RUnlock()
	return head
}
// get returns the live value at (key, n). Expired entries are unlinked
// here and (value, ExpiredErr) is returned so the caller can release its
// own accounting.
func (b *bucket) get(key string, n int) (interface{}, error) {
	b.RLock()
	v, prev := b.getP(key, n)
	if v == nil {
		b.RUnlock()
		return nil, errors.New("key不存在")
	}
	val, err := v.val()
	if err != nil {
		// Expired: upgrade to the write lock and unlink the node.
		// NOTE(review): between RUnlock and Lock another goroutine may
		// mutate the chain, leaving v/prev stale — confirm writers are
		// serialized or re-lookup under the write lock.
		b.RUnlock()
		b.Lock()
		defer b.Unlock()
		b.mem -= int64(sizeof(val))
		b.num--
		if prev == v {
			// v is the chain head: promote its successor. The original
			// set b.kv[n] = nil here, which dropped every other entry
			// that collided into this slot.
			b.kv[n] = v.next
		} else {
			prev.next = v.next
		}
		return val, err
	}
	// Refresh recency for the approximate-LRU sampler.
	v.operateTime = time.Now()
	b.RUnlock()
	return val, nil
}
// setExpireTime inserts or overwrites (key, val) in slot n with an
// absolute deadline.
// NOTE(review): mem/num are incremented before the existing-key check, so
// overwriting a key inflates both tallies — the cache layer has the same
// pattern; verify whether this is intended.
func (b *bucket) setExpireTime(key string, n int, val interface{}, expireTime time.Time) {
	b.Lock()
	defer b.Unlock()
	v := b.kv[n]
	b.mem += int64(sizeof(val))
	b.num++
	if v == nil {
		// Empty slot: new chain head.
		b.kv[n] = &entry{
			key:         key,
			value:       val,
			expireTime:  expireTime,
			operateTime: time.Now(),
			next:        nil,
		}
		return
	}
	for v != nil {
		if v.key == key {
			// Overwrite in place.
			v.value = val
			v.expireTime = expireTime
			v.operateTime = time.Now()
			return
		}
		if v.next == nil {
			break
		}
		v = v.next
	}
	// Append at the chain tail.
	v.next = &entry{
		key:         key,
		value:       val,
		expireTime:  expireTime,
		operateTime: time.Now(),
		next:        nil,
	}
}
// set stores val with a relative TTL; expire == 0 is mapped to a deadline
// ten years out, i.e. "effectively never expires".
func (b *bucket) set(key string, n int, val interface{}, expire time.Duration) {
	if expire == 0 {
		expire = time.Hour * 24 * 365 * 10
	}
	b.setExpireTime(key, n, val, time.Now().Add(expire))
}
// del unlinks key from slot n and returns the removed value.
func (b *bucket) del(key string, n int) (interface{}, bool) {
	b.Lock()
	defer b.Unlock()
	v, prev := b.getP(key, n)
	if v == nil {
		return nil, false
	}
	b.mem -= int64(sizeof(v.value))
	b.num--
	if prev == v {
		// v is the chain head: promote its successor. The original wrote
		// b.kv[n] = nil here, which discarded every colliding entry
		// chained behind the head.
		b.kv[n] = v.next
	} else {
		prev.next = v.next
	}
	return v.value, true
}

13
archive/go/cache/main.go vendored Normal file
View File

@@ -0,0 +1,13 @@
package main
// main exercises the public Cache API end to end.
func main() {
	c := NewCache()
	c.SetMaxMemory("100MB")
	c.Set("int", 1, 0)
	c.Set("bool", false, 0)
	c.Set("data", map[string]interface{}{"a": 1}, 0)
	c.Get("int")
	c.Del("int")
	c.Flush()
	c.Keys()
}

310
archive/go/cache/simple.go vendored Normal file
View File

@@ -0,0 +1,310 @@
package main
import (
"hash/fnv"
"strconv"
"strings"
"sync"
"time"
)
// lru is a node of the intrusive doubly-linked recency list; the head
// side is most-recently used.
type lru struct {
	prev *lru
	next *lru
	key  string
}
// simpleCache is a coarse-grained (single-mutex) Cache with exact LRU
// eviction via a doubly-linked recency list.
type simpleCache struct {
	sync.Mutex
	maxMemory        int // byte ceiling
	memory           int // current estimated usage in bytes
	hashtable        []*entry
	lruhead, lrutail *lru            // sentinel head/tail of the recency list
	lruHash          map[string]*lru // key -> recency-list node
	num              int
	cap              int
}
// NewSimpleCache returns the single-mutex Cache implementation.
func NewSimpleCache() Cache {
	c := newSimpleCache()
	return c
}
// newSimpleCache builds the cache and starts a background goroutine that
// sweeps expired entries once a minute under the cache lock.
// NOTE(review): the sweeper has no stop channel, so every cache leaks one
// goroutine for the process lifetime; the sweep also does not remove the
// LRU-list node of deleted entries.
func newSimpleCache() *simpleCache {
	c := &simpleCache{
		hashtable: make([]*entry, 1024),
		cap:       1024,
		lruhead:   &lru{},
		lrutail:   &lru{},
		lruHash:   map[string]*lru{},
	}
	c.lruhead.next = c.lrutail
	c.lrutail.prev = c.lruhead
	go func() {
		// Periodic expiry sweep.
		for {
			time.Sleep(time.Minute)
			c.Lock()
			for n, p := range c.hashtable {
				prev := p
				for p != nil {
					if p.expired() {
						if p == c.hashtable[n] {
							c.hashtable[n] = p.next
						} else {
							prev.next = p.next
						}
						c.num--
						c.memory -= sizeof(p.value)
					}
					// NOTE(review): prev advances onto a just-removed
					// node here, so two adjacent expired entries would
					// relink through the removed one — verify.
					prev = p
					p = p.next
				}
			}
			c.Unlock()
		}
	}()
	return c
}
// SetMaxMemory parses strings such as "1KB", "512MB", "2GB" (unit is
// case-insensitive) and sets the byte ceiling. It reports false for
// malformed input, unknown units, or non-positive sizes.
func (c *simpleCache) SetMaxMemory(size string) bool {
	if len(size) < 3 {
		return false
	}
	num := size[:len(size)-2]
	unit := size[len(size)-2:]
	maxMemory, err := strconv.Atoi(num)
	if err != nil {
		return false
	}
	// Reject zero AND negative sizes; the original only rejected zero, so
	// "-1KB" produced a negative ceiling that made every Set evict.
	if maxMemory <= 0 {
		return false
	}
	switch strings.ToUpper(unit) {
	case "KB":
		c.maxMemory = maxMemory << 10
	case "MB":
		c.maxMemory = maxMemory << 20
	case "GB":
		c.maxMemory = maxMemory << 30
	default:
		return false
	}
	return true
}
// hash maps key to a slot in the current table; rehashkey maps it into a
// table of the given capacity. Both reduce the FNV-1 64 sum in uint64
// space: the original converted to int and negated negative values, but
// the minimum int negates to itself, so a negative slot index (and an
// index panic) was possible. hash delegates to rehashkey so the two
// mappings can never drift apart across a rehash.
func (c *simpleCache) hash(key string) int {
	return c.rehashkey(c.cap, key)
}

func (c *simpleCache) rehashkey(cap int, key string) int {
	hash := fnv.New64()
	hash.Write([]byte(key))
	return int(hash.Sum64() % uint64(cap))
}
// rehash doubles the table and re-buckets every chain into it. Caller
// must hold the cache lock.
func (c *simpleCache) rehash() {
	// Grow by 2x.
	newCap := c.cap * 2
	tmpHashtable := make([]*entry, newCap)
	for _, p := range c.hashtable {
		for p != nil {
			n := c.rehashkey(newCap, p.key)
			nv := tmpHashtable[n]
			// Copy the node instead of relinking it, so the old chain
			// stays intact while the walk continues.
			nk := &entry{
				key:         p.key,
				value:       p.value,
				expireTime:  p.expireTime,
				operateTime: p.operateTime,
				next:        nil,
			}
			if nv == nil {
				tmpHashtable[n] = nk
			} else {
				// Append at the tail of the destination chain.
				for nv.next != nil {
					nv = nv.next
				}
				nv.next = nk
			}
			p = p.next
		}
	}
	c.cap = newCap
	c.hashtable = tmpHashtable
}
// Set inserts or overwrites key under the cache lock; expire == 0 maps to
// a deadline ten years out. The deferred lru() both refreshes key's
// recency and evicts while over the memory ceiling.
func (c *simpleCache) Set(key string, val interface{}, expire time.Duration) {
	c.Lock()
	defer c.Unlock()
	defer c.lru(key)
	// NOTE(review): memory is charged before the existing-key check, so
	// overwrites double-count and the old value's size is never released —
	// verify intended.
	c.memory += sizeof(val)
	if float32(c.num)/float32(c.cap) > 0.75 {
		// Load factor exceeded: double the table.
		c.rehash()
	}
	n := c.hash(key)
	expireTime := time.Now()
	if expire == 0 {
		// "No expiry": push the deadline ten years out.
		expireTime = expireTime.Add(time.Hour * 24 * 365 * 10)
	} else {
		expireTime = expireTime.Add(expire)
	}
	p := c.hashtable[n]
	e := &entry{
		key:        key,
		value:      val,
		expireTime: expireTime,
		next:       nil,
	}
	if p == nil {
		c.hashtable[n] = e
		c.num++
		return
	}
	prev := p
	for p != nil {
		if p.key == key {
			// Overwrite in place.
			p.value = val
			p.expireTime = expireTime
			return
		}
		prev = p
		p = p.next
	}
	// Append at the chain tail.
	prev.next = e
	c.num++
}
// lru moves key to the head of the recency list (inserting it if new),
// then evicts from the tail while the cache is over its memory ceiling.
// Caller must hold the cache lock.
func (c *simpleCache) lru(key string) {
	l := c.lruHash[key]
	if l == nil {
		// New key: insert at the head.
		nl := &lru{prev: c.lruhead, next: c.lruhead.next, key: key}
		c.lruhead.next.prev = nl
		c.lruhead.next = nl
		c.lruHash[key] = nl
	} else {
		// Known key: unlink and move to the head.
		l.prev.next = l.next
		l.next.prev = l.prev
		l.prev = c.lruhead
		l.next = c.lruhead.next
		c.lruhead.next.prev = l
		c.lruhead.next = l
	}
	// Evict least-recently-used keys until back under the ceiling.
	for c.memory > c.maxMemory && c.num > 1 {
		if c.lrutail.prev == c.lruhead {
			return
		}
		rmk := c.lrutail.prev
		c.lrutail.prev.next = c.lrutail
		c.lrutail.prev = c.lrutail.prev.prev
		c.del(rmk.key)
	}
}
// delLru unlinks key's node from the recency list and forgets it.
func (c *simpleCache) delLru(key string) {
	node, ok := c.lruHash[key]
	if !ok {
		return
	}
	node.prev.next = node.next
	node.next.prev = node.prev
	delete(c.lruHash, key)
}
// Get returns the live value for key; expired entries are deleted lazily
// here (hash chain, LRU node, and accounting all released).
func (c *simpleCache) Get(key string) (interface{}, bool) {
	c.Lock()
	defer c.Unlock()
	n := c.hash(key)
	p := c.hashtable[n]
	prev := p
	for p != nil {
		if p.key == key {
			if p.expired() {
				// Lazy expiry: unlink from chain and LRU, fix counters.
				c.delLru(key)
				if c.hashtable[n] == p {
					c.hashtable[n] = p.next
				} else {
					prev.next = p.next
				}
				c.num--
				c.memory -= sizeof(p.value)
				return nil, false
			}
			// Refresh recency on successful read.
			c.lru(key)
			return p.value, true
		}
		prev = p
		p = p.next
	}
	return nil, false
}
// Del removes key under the cache lock, reporting whether it existed.
func (c *simpleCache) Del(key string) bool {
	c.Lock()
	defer c.Unlock()
	removed := c.del(key)
	return removed
}
// del removes key without taking the lock; callers must hold it.
func (c *simpleCache) del(key string) bool {
	n := c.hash(key)
	p := c.hashtable[n]
	prev := p
	for p != nil {
		if p.key == key {
			c.delLru(key)
			if c.hashtable[n] == p {
				// Head of the chain: promote the successor.
				c.hashtable[n] = p.next
			} else {
				prev.next = p.next
			}
			c.num--
			c.memory -= sizeof(p.value)
			return true
		}
		prev = p
		p = p.next
	}
	return false
}
// Exists reports whether key is present (and not expired).
func (c *simpleCache) Exists(key string) bool {
	if _, ok := c.Get(key); ok {
		return true
	}
	return false
}
// Flush drops every entry and resets the recency list; c.cap is retained,
// so the table does not shrink.
func (c *simpleCache) Flush() bool {
	c.Lock()
	defer c.Unlock()
	c.num, c.memory = 0, 0
	c.hashtable = make([]*entry, c.cap)
	head, tail := &lru{}, &lru{}
	head.next, tail.prev = tail, head
	c.lruhead, c.lrutail = head, tail
	c.lruHash = map[string]*lru{}
	return true
}
// Keys returns the current entry count (expiry is lazy, so it may
// overcount).
func (c *simpleCache) Keys() int64 {
	n := c.num
	return int64(n)
}

213
archive/go/cache/simple_test.go vendored Normal file
View File

@@ -0,0 +1,213 @@
package main
import (
"fmt"
"sync"
"sync/atomic"
"testing"
"time"
)
import "github.com/stretchr/testify/assert"
// Table test for size-string parsing (int fields, unlike the cache version).
func TestSimple_cache_SetMaxMemory(t *testing.T) {
	tests := []struct {
		name      string
		args      string
		maxMemory int
		want      bool
	}{
		{"case1", "1KB", 1024, true},
		{"case2", "1MB", 1024 * 1024, true},
		{"case3", "1GB", 1024 * 1024 * 1024, true},
		{"case4", "10G", 0, false},
		{"case5", "0", 0, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := newSimpleCache()
			if got := c.SetMaxMemory(tt.args); got != tt.want || c.maxMemory != tt.maxMemory {
				t.Errorf("SetMaxMemory() = %v,maxMemory = %v, want %v,%v", got, c.maxMemory, tt.want, tt.maxMemory)
			}
		})
	}
}

// End-to-end Set/Get/Del/expiry/accounting checks.
func TestSimpleCache(t *testing.T) {
	cache := newSimpleCache()
	cache.SetMaxMemory("100MB")
	assert.Equal(t, 1024*1024*100, cache.maxMemory)
	cache.Set("int", 1, 0)
	assert.Equal(t, 8, cache.memory)
	v, ok := cache.Get("int")
	assert.True(t, ok)
	assert.Equal(t, 1, v)
	cache.Del("int")
	_, ok = cache.Get("int")
	assert.False(t, ok)
	cache.Set("str", "ok", time.Microsecond)
	v, ok = cache.Get("str")
	assert.True(t, ok)
	assert.Equal(t, "ok", v)
	assert.Equal(t, int64(1), cache.Keys())
	time.Sleep(time.Microsecond)
	// Expiry is lazy: counters change only when the key is next read.
	assert.Equal(t, int64(1), cache.Keys())
	assert.Equal(t, 16, cache.memory)
	v, ok = cache.Get("str")
	assert.False(t, ok)
	assert.Equal(t, int64(0), cache.Keys())
	assert.Equal(t, 0, cache.memory)
	cache.Flush()
	cache.Keys()
}

// Eviction: a 1KB ceiling forces older keys out when a big value arrives.
func TestSimpleClean(t *testing.T) {
	cache := newSimpleCache()
	cache.SetMaxMemory("1KB")
	cache.Set("test1", 1, 0)
	cache.Set("test2", 10, 0)
	cache.Set("test3", 100, 0)
	cache.Set("test4", 1000, 0)
	cache.Set("bigkey", [1024]int{0}, 0)
	assert.Equal(t, int64(1), cache.Keys())
	_, ok := cache.Get("test1")
	assert.False(t, ok)
	v, ok := cache.Get("bigkey")
	assert.Equal(t, [1024]int{0}, v)
	assert.True(t, ok)
	cache.Set("test1", 1, 0)
	cache.Set("test2", 10, 0)
	cache.Set("test3", 100, 0)
	assert.Equal(t, int64(3), cache.Keys())
	v, ok = cache.Get("test3")
	assert.True(t, ok)
	assert.Equal(t, 100, v)
	v, ok = cache.Get("bigkey")
	assert.False(t, ok)
	assert.Nil(t, v)
}

// Growth: 10000 inserts force rehash past the 0.75 load factor.
func TestSimpleExpansion(t *testing.T) {
	cache := newSimpleCache()
	cache.SetMaxMemory("1GB")
	for i := 0; i < 10000; i++ {
		cache.Set(fmt.Sprintf("%d", i), i, 0)
	}
	for i := 0; i < 10000; i++ {
		v, ok := cache.Get(fmt.Sprintf("%d", i))
		assert.True(t, ok)
		assert.Equal(t, i, v)
	}
	assert.Equal(t, 80000, cache.memory)
	assert.Equal(t, 10000, cache.num)
	cache.Flush()
	assert.Equal(t, 0, cache.memory)
	assert.Equal(t, 0, cache.num)
}

// Concurrent readers/writers (the single mutex serializes everything).
func TestSimpleSetGet(t *testing.T) {
	var rn int64
	maxNum := int64(100000)
	cache := newSimpleCache()
	cache.SetMaxMemory("1GB")
	var n int64 = -1
	wg := sync.WaitGroup{}
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				i := atomic.AddInt64(&n, 1)
				if i > maxNum {
					break
				}
				cache.Set(fmt.Sprintf("%d", i), i, 0)
			}
		}()
	}
	wg.Wait()
	n = -1
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				i := atomic.AddInt64(&n, 1)
				if i > maxNum {
					break
				}
				v, ok := cache.Get(fmt.Sprintf("%d", i))
				assert.True(t, ok)
				assert.Equal(t, i, v)
				atomic.AddInt64(&rn, 1)
			}
		}()
	}
	wg.Wait()
	assert.Equal(t, maxNum+1, rn)
	rn = 0
	cache.Flush()
	assert.Equal(t, 0, cache.memory)
	assert.Equal(t, 0, cache.num)
	// Flush does not shrink the table, so start a fresh cache.
	cache = newSimpleCache()
	cache.SetMaxMemory("1GB")
	// Write and read concurrently.
	c := make(chan int64, 100)
	n = -1
	wwg := sync.WaitGroup{}
	for i := 0; i < 100; i++ {
		wwg.Add(1)
		go func() {
			defer wwg.Done()
			for {
				i := atomic.AddInt64(&n, 1)
				if i > maxNum {
					break
				}
				cache.Set(fmt.Sprintf("%d", i), i, 0)
				c <- i
			}
		}()
	}
	go func() {
		wwg.Wait()
		close(c)
	}()
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				i, ok := <-c
				if !ok {
					break
				}
				v, ok := cache.Get(fmt.Sprintf("%d", i))
				assert.True(t, ok)
				atomic.AddInt64(&rn, 1)
				assert.Equal(t, i, v)
			}
		}()
	}
	wg.Wait()
	assert.Equal(t, maxNum+1, rn)
}

// Eviction keeps memory at or under the ceiling on every insert.
func TestSimpleLru(t *testing.T) {
	cache := newSimpleCache()
	cache.SetMaxMemory("1KB")
	for i := 0; i < 100; i++ {
		cache.Set(fmt.Sprintf("%d", i), [...]int{1, 2, 3, 4, 5, 6, 7, 8}, 0)
		assert.True(t, cache.memory <= cache.maxMemory)
	}
}

8
archive/go/cache/utils.go vendored Normal file
View File

@@ -0,0 +1,8 @@
package main
import "reflect"
// sizeof estimates the shallow in-memory size of v in bytes via
// reflection. Only the top-level representation is counted: for pointers,
// slices, maps and strings this is the header size, not the referenced
// data (the original author's comment noted the same limitation).
func sizeof(v interface{}) int {
	// reflect.TypeOf(nil) returns nil, so guard it: sizeof(nil) now
	// reports 0 instead of panicking with a nil-pointer dereference.
	if v == nil {
		return 0
	}
	return int(reflect.TypeOf(v).Size())
}

36
archive/go/class/class.go Normal file
View File

@@ -0,0 +1,36 @@
package main
import "fmt"
// IA is the demo interface with two methods.
type IA interface {
	FuncA()
	FuncB()
}

// A implements FuncA itself and gets FuncB promoted from an embedded IA
// value (which may be nil, in which case calling FuncB fails at runtime).
type A struct {
	IA
}

func (a *A) FuncA() {
	fmt.Println("class a,func a")
}

// B implements both methods; embedding a *B into A supplies A's FuncB.
type B struct {
}

func (b *B) FuncA() {
	fmt.Println("class b,func a")
}

func (b *B) FuncB() {
	fmt.Println("class b,func b")
}
// main demonstrates method promotion through interface embedding: *A only
// defines FuncA itself, so FuncB must come from the embedded IA value.
func main() {
	var first IA = new(A)
	first.FuncA()
	// first.FuncB() // compiles, but fails at runtime: the embedded IA is nil
	var second IA = &A{IA: new(B)}
	second.FuncA()
	second.FuncB()
}

18
archive/go/goroutine.go Normal file
View File

@@ -0,0 +1,18 @@
package main
import (
"fmt"
"runtime"
)
// main starts 10 busy-loop goroutines restricted to a single P via
// GOMAXPROCS(1), then blocks on stdin. Companion demo for goroutine.md:
// the markdown notes that before Go 1.14's asynchronous preemption such
// tight loops could prevent other goroutines from running.
func main() {
	var tmp string
	runtime.GOMAXPROCS(1)
	for i := 0; i < 10; i++ {
		go func() {
			for {
			}
		}()
	}
	// Keep the process alive until the user presses enter.
	fmt.Scanln(&tmp)
}

54
archive/go/goroutine.md Normal file
View File

@@ -0,0 +1,54 @@
## Go Goroutine(协程)
> 原来对协程有一些了解,用C实现过一个辣鸡的协程.通过上下文的切换,将io堵塞的时间切换成到其它的协程继续运行,不让cpu歇息一下
go中运行协程通过`go`关键词就可以运行一个协程,非常简单
来看一个例子:
```go
func main() {
var tmp string
for i := 0; i < 10; i++ {
go func() {
for {
}
}()
}
fmt.Scanln(&tmp)
}
```
可以直接跑满CPU(我的电脑是4核8线程)
然后我改改
```go
func main() {
var tmp string
for i := 0; i < 10; i++ {
go func(i int) {
for {
fmt.Println(i)
}
}(i)
}
fmt.Scanln(&tmp)
}
```
我电脑的CPU降到了40%,这是因为当所有的协程都堵塞住了(打印数据),给了CPU喘息的机会,而且因为达到了输出的最大限制,增加协程也不会再增加CPU了(io瓶颈)
再来一个例子,这里用了`runtime.GOMAXPROCS(1)`这个函数,它可以限制同时执行Go代码的操作系统线程数(即逻辑处理器P的个数),这里只允许一个
```go
func main() {
var tmp string
runtime.GOMAXPROCS(1)
for i := 0; i < 10; i++ {
go func() {
for {
}
}()
}
fmt.Scanln(&tmp)
}
```
然后CPU维持在18%左右,总CPU在20%左右,我想是另外的线程占用了剩余的CPU.(然后还有超频?反正我是猜的)
go1.14实现了基于信号的抢占式调度;在此之前的版本中,上述空循环协程一旦占满处理器,其他协程可能会无法再往下执行

View File

@@ -0,0 +1,113 @@
package main
import (
"context"
"flag"
"net"
"net/http"
"os"
"os/exec"
"os/signal"
"sync"
"syscall"
"time"
)
// Process-wide state for graceful stop / hot restart.
var (
	isStop   bool           // set when a stop signal arrives; new requests are rejected
	lock     sync.WaitGroup // counts in-flight requests so stop can wait for them
	server   *http.Server
	listener net.Listener
	isReload bool // true in the child process spawned during hot restart
)

// init registers the --reload flag that the re-exec'ed child passes itself.
func init() {
	flag.BoolVar(&isReload, "reload", false, "")
	flag.Parse()
}
// GracefulStop wraps a handler so every in-flight request is tracked in
// the global WaitGroup; once isStop is set, new requests receive a 400
// instead of being served, letting shutdown wait for the remainder.
func GracefulStop(handel func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
	return func(writer http.ResponseWriter, request *http.Request) {
		lock.Add(1)
		// defer guarantees the counter is released even if the wrapped
		// handler panics; the original's trailing lock.Done() would be
		// skipped on panic and deadlock lock.Wait() in the stop path.
		defer lock.Done()
		if isStop {
			// Reject new work while draining.
			// NOTE(review): 503 Service Unavailable would be the more
			// conventional status code here.
			writer.WriteHeader(400)
			writer.Write([]byte("server is stop"))
		} else {
			handel(writer, request)
		}
	}
}
// LongTime simulates a slow endpoint (5s) to exercise graceful draining.
func LongTime(writer http.ResponseWriter, request *http.Request) {
	time.Sleep(5 * time.Second)
	writer.Write([]byte("Hello"))
}
// Normal responds immediately; registered without the drain wrapper.
func Normal(writer http.ResponseWriter, request *http.Request) {
	body := []byte("Hello")
	writer.Write(body)
}
// main wires up the demo server with graceful stop (SIGINT/SIGTERM:
// reject new work, wait for in-flight requests, shut down) and hot
// restart (SIGUSR1: re-exec this binary with the listener inherited as
// fd 3, then drain the parent). SIGUSR1 is POSIX-only, so this file does
// not build on Windows.
func main() {
	http.HandleFunc("/normal", Normal)
	http.HandleFunc("/long-time", GracefulStop(LongTime))
	// Signal handling loop.
	go func() {
		ch := make(chan os.Signal, 1)
		signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM, syscall.SIGUSR1)
		for {
			sign := <-ch
			// NOTE(review): the cancel func is discarded (go vet flags
			// this); the 20s timeout bounds Shutdown below.
			ctx, _ := context.WithTimeout(context.Background(), 20*time.Second)
			switch sign {
			case syscall.SIGINT, syscall.SIGTERM:
				{
					println("stop...")
					// Reject new requests, then wait for in-flight ones.
					isStop = true
					lock.Wait()
					signal.Stop(ch)
					server.Shutdown(ctx)
					println("stop")
					return
				}
			case syscall.SIGUSR1:
				{
					println("reload...")
					// Hot restart: hand the listening socket to a child
					// re-exec of this binary, then drain the parent.
					// NOTE(review): errors from File/Start/Shutdown are
					// all ignored here.
					tl, _ := listener.(*net.TCPListener)
					f, _ := tl.File()
					cmd := exec.Command(os.Args[0], "--reload")
					cmd.Stdout = os.Stdout
					cmd.Stdin = os.Stdin
					cmd.Stderr = os.Stderr
					cmd.ExtraFiles = []*os.File{f}
					cmd.Start()
					signal.Stop(ch)
					server.Shutdown(ctx)
					println("father stop")
					return
				}
			}
		}
	}()
	if isReload {
		// Child: adopt the inherited listener (ExtraFiles[0] arrives as fd 3).
		f := os.NewFile(3, "")
		listener, _ = net.FileListener(f)
		println("child start")
	} else {
		listener, _ = net.Listen("tcp", ":8080")
	}
	server = &http.Server{
		Addr:    ":8080",
		Handler: nil,
	}
	server.Serve(listener)
}

View File

@@ -0,0 +1,14 @@
# 热重启
可以通过nginx来实现热重启
这里尝试使用fork的方式实现热重启
某些信号量需要linux才有
使用`kill -USR1 <pid>`触发热重启,使用`kill <pid>`(发送SIGTERM)触发优雅停止
# 优雅结束
将请求处理完之后再结束

50
archive/go/interface.md Normal file
View File

@@ -0,0 +1,50 @@
## GO Interface(接口)
> 记哪记得住啊,不如顺便水一点博客,忘记了还能过来翻翻
go的接口值是由两部分组成,被称为接口的动态类型和动态值
动态类型部分储存着数据的类型,动态值部分储存着值.
来点栗子
```go
type If interface {
Demo() int
}
type S struct {
a int
}
func (s *S) Demo() int {
s.a++
return s.a
}
```
上面是额外的定义
```go
func TestInterface(t *testing.T) {
var i If
s := S{}
i = &s
i.Demo()
assert.Equal(t, s.a, 1)
}
```
值得一提的是第四行`i=&s`,这是由声明S的Demo方法时的接收器类型决定的:接收器类型为`*S`,那么只有`*S`类型才会有Demo方法.如果去掉`&`将会报错.
go中接口是可以判断相等的,相等的条件是,动态类型和动态值全部相等
```go
func TestInterfaceEqual(t *testing.T) {
var i1, i2 If
s := S{a: 2}
assert.Equal(t, i1, i2)
i1 = &s
i2 = &s
assert.Equal(t, i1, i2)
var s2 *S
i1 = s2
i2 = nil
t.Logf("s2:%v,i1 value:%v,i2 value:%v,i1 type:%T,i2 type:%T",
s2, i1, i2, i1, i2)
assert.NotEqual(t, i1, i2)
}
```
例如上面的例子,最后一个,虽然s2,i1,i2的值输出都是nil,但是i1,i2的type不相等,所以i1和i2是不相等的

View File

@@ -0,0 +1,42 @@
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
// If is a one-method interface used to demonstrate Go's
// interface-value semantics (dynamic type + dynamic value).
type If interface {
	Demo() int
}

// S is a small concrete type whose counter lets the tests observe
// whether a method call mutated the original value.
type S struct {
	a int
}

// Demo increments and returns the counter. Because the receiver is
// *S (pointer), only *S — not S — satisfies the If interface.
func (s *S) Demo() int {
	s.a++
	return s.a
}
// TestInterface shows that a *S value satisfies If (Demo is declared
// on the pointer receiver) and that calls through the interface
// mutate the underlying struct.
func TestInterface(t *testing.T) {
	instance := S{}
	var iface If = &instance
	iface.Demo()
	assert.Equal(t, instance.a, 1)
}
// TestInterfaceEqual demonstrates interface equality: two interface
// values are equal only when both dynamic type and dynamic value
// match. The final case is the classic "typed nil" trap: i1 holds a
// nil *S (interface with dynamic type *S, value nil) while i2 is the
// untyped nil interface, so they compare unequal even though both
// print as nil.
func TestInterfaceEqual(t *testing.T) {
	var i1, i2 If
	s := S{a: 2}
	// Both are zero interfaces: equal.
	assert.Equal(t, i1, i2)
	i1 = &s
	i2 = &s
	// Same dynamic type (*S) and same pointer value: equal.
	assert.Equal(t, i1, i2)
	var s2 *S
	i1 = s2
	i2 = nil
	t.Logf("s2:%v,i1 value:%v,i2 value:%v,i1 type:%T,i2 type:%T",
		s2, i1, i2, i1, i2)
	// (*S)(nil) inside an interface != the nil interface.
	assert.NotEqual(t, i1, i2)
}

0
archive/go/readme.md Normal file
View File

167
archive/go/slice.md Normal file
View File

@@ -0,0 +1,167 @@
## GO Slice(切片)
> 感觉切片只要知道底层是引用的一个数组对象,就挺好理解了.这里写下一些笔记,方便记忆和以后再来查找.
### 切片和数组
切片由三个部分组成:指针(指向底层数组),长度(当前切片使用的长度),容量(切片能包含多少个成员)
然后还有一句和数组相关的:当调用一个函数的时候,函数的每个调用参数将会被赋值给函数内部的参数变量,所以函数参数变量接收的是一个复制的副本,并不是原始调用的变量。(所以数组作为参数,是低效的,还需要进行一次数组的拷贝,可以使用数组指针)
然后如果我们想要传递给一个函数一个数组,函数需要对数组进行修改,我们必须使用数组指针(用return当然也不是不行啦🤨)
但是切片就不需要,来个例子:
```go
func Test_Func(t *testing.T) {
var arr = [...]int{1, 2, 3}
var slice = []int{1, 2, 3}
ModifyArray(arr)
ModifySlice(slice)
t.Logf("%v %v\n", arr, slice)
assert.NotEqual(t, arr[2], 1)
assert.Equal(t, slice[2], 1)
}
func ModifyArray(arr [3]int) {
println(arr)
arr[2] = 1
}
func ModifySlice(slice []int) {
println(slice)
slice[2] = 1
}
```
凭啥切片就行,大家明明长得都差不多.
前面说了,切片是由三个部分组成,然后数组传入的是数组的副本.其实我觉得go里所有的类型传入的都是对应的副本,切片也是,指针也是的(值传递).
那都是副本拷贝,那么咋切片可以修改?
`切片是由:数组指针,长度,容量组成的`,来划一下重点.
副本传的也是上面这些东西.然后修改切片的时候呢,实际上是通过切片里面的数组指针去修改了,并没有修改切片的值(数组指针,长度,容量).
等看完下面再写另外一个情况,在函数里面,给切片增加成员,会怎么样?
### 切片
定义一个数组和定义一个切片的区别是[...]和\[\](当然还有其他的定义方式)
```go
func Test_DefineSlice(t *testing.T) {
var arr = [...]int{1, 2, 3}
var slice1 = []int{1, 2, 3}
var slice2 = make([]int, 3)
var slice3 = arr[:]
fmt.Printf("arr type=%v len=%d cap=%d\n", reflect.TypeOf(arr).String(), len(arr), cap(arr))
fmt.Printf("slice1 type=%v len=%d cap=%d\n", reflect.TypeOf(slice1).String(), len(slice1), cap(slice1))
fmt.Printf("slice2 type=%v len=%d cap=%d\n", reflect.TypeOf(slice2).String(), len(slice2), cap(slice2))
fmt.Printf("slice3 type=%v len=%d cap=%d\n", reflect.TypeOf(slice3).String(), len(slice3), cap(slice3))
}
//Result:
//arr type=[3]int len=3 cap=3
//slice1 type=[]int len=3 cap=3
//slice2 type=[]int len=3 cap=3
//slice3 type=[]int len=3 cap=3
```
上面方法中的切片是会自动创建一个底层数组,如果切片直接引用一个创建好了的数组呢?
我的猜想是在切片里面修改值,原数组也会跟着一起变(切片指针指向的就是这一个数组)
然后我想再验证一下,如果我的切片再增加一个成员(超出数组限制),那么还会变化吗?
我的猜想是会重新分配到另外一个数组去,然后导致引用的数组不会发生改变(切片指针指向的已经是另外一个数组了)
```go
func Test_Modify(t *testing.T) {
arr := [...]int{1, 2, 3, 4, 5, 6, 7}
slice := arr[:]
slice[4] = 8
t.Logf("arr[4]=%v,slice[4]=%v\n", arr[4], slice[4])
assert.Equal(t, slice[4], arr[4])
slice = append(slice, 9)
slice[5] = 10
t.Logf("arr[4]=%v,slice[4]=%v\n", arr[4], slice[4])
assert.Equal(t, slice[4], arr[4])
t.Logf("arr[5]=%v,slice[5]=%v\n", arr[5], slice[5])
assert.NotEqual(t, slice[5], arr[5])
}
```
验证通过^_^
再来试试两个切片共享一个数组
```go
func Test_ModifyTwoSlice(t *testing.T) {
arr := [...]int{1, 2, 3, 4, 5, 6, 7, 8, 9}
slice1 := arr[1:5]
slice2 := arr[3:8]
slice1[2] = 8
t.Logf("%v %v %v\n", arr, slice1, slice2)
assert.Equal(t, slice1[2], slice2[0], arr[3])
}
```
一样的全部一起修改成功了
### append
然后我们来看看append
```go
func Test_Append(t *testing.T) {
slice := []int{1, 2, 3}
println(slice)
slice = append(slice, 1)
println(slice)
slice = append(slice, 1)
println(slice)
}
// Result:
// slice type=[]int len=3 cap=3
// [3/3]0xc00005e3e0
// slice type=[]int len=4 cap=6
// [4/6]0xc00008c060
// slice type=[]int len=5 cap=6
// [5/6]0xc00008c060
```
当容量够的时候切片的内存地址没有发生变化,不够的时候进行了扩容,地址改变了.
刚刚写的时候发现了一个问题,就是每次扩容的大小,我写了一个循环来展示
```go
func Test_Cap(t *testing.T) {
slice1 := []int{1, 2, 3}
slice2 := make([]int, 3, 3)
last := [2]int{0, 0}
for i := 0; i < 100; i++ {
slice1 = append(slice1, 1)
slice2 = append(slice2, 1)
if last[0] != cap(slice1) {
println(slice1)
last[0] = cap(slice1)
}
if last[1] != cap(slice2) {
println(slice2)
last[1] = cap(slice2)
}
}
}
```
好吧,我以为扩容的容量,~~如果不是make的话是按照前一次容量的两倍来扩容的,是make就是每次增加的容量是固定的,事实证明我想多了~~
### End
再回到最开始,在函数里面,增加切片的成员.我想应该有了答案.
```go
func ModifySlice(slice []int) {
slice[2] = 1
slice = append(slice, 4)
slice[2] = 3
}
```
我把之前的`ModifySlice`方法修改了一下,然后成员没加,后面再修改回去为3也没有发生变化了.
这是因为append的时候因为容量不够扩容了,导致底层数组指针发生了改变,但是传进来的切片是外面切片的副本,修改这个切片里面的数组指针不会影响到外面的切片
#### 奇淫巧技
例如这个(go圣经里面抄的233),reverse的参数是切片,但是我们需要处理的是一个数组,这里我们就可以直接用`arr[:]`把数组转化为切片进行处理
```go
func Test_Demo(t *testing.T) {
arr := [...]int{1, 2, 3, 4}
reverse(arr[:])
t.Logf("%v\n", arr)
}
func reverse(s []int) {
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
}
```
> 暂时只想到这些,再有的话,以后继续补.如有错误希望指教.

105
archive/go/slice_test.go Normal file
View File

@@ -0,0 +1,105 @@
package main
import (
"fmt"
"reflect"
"testing"
"github.com/stretchr/testify/assert"
)
// Test_Func contrasts argument passing: the array is copied into the
// callee (caller's copy unchanged), while the slice header's pointer
// still refers to the caller's backing array (write is visible).
func Test_Func(t *testing.T) {
	arr := [...]int{1, 2, 3}
	slice := []int{1, 2, 3}
	ModifyArray(arr)
	ModifySlice(slice)
	t.Logf("%v %v\n", arr, slice)
	assert.NotEqual(t, arr[2], 1)
	assert.Equal(t, slice[2], 1)
}
// ModifyArray receives a copy of the array (Go passes arrays by
// value), so the write below is invisible to the caller.
func ModifyArray(arr [3]int) {
	last := len(arr) - 1
	arr[last] = 1
}
// ModifySlice first mutates the caller-shared backing array (write is
// visible outside), then appends — which reallocates when capacity is
// exhausted — so the final write lands on a fresh backing array the
// caller never sees.
func ModifySlice(slice []int) {
	slice[2] = 1
	grown := append(slice, 4)
	grown[2] = 3
}
// Test_DefineSlice builds one array and three differently-constructed
// slices (literal, make, re-slice of the array) and logs type/len/cap
// for each; all four report len 3 and cap 3.
func Test_DefineSlice(t *testing.T) {
	arr := [...]int{1, 2, 3}
	slice1 := []int{1, 2, 3}
	slice2 := make([]int, 3)
	slice3 := arr[:]
	t.Logf("arr type=%v len=%d cap=%d\n", reflect.TypeOf(arr).String(), len(arr), cap(arr))
	t.Logf("slice1 type=%v len=%d cap=%d\n", reflect.TypeOf(slice1).String(), len(slice1), cap(slice1))
	t.Logf("slice2 type=%v len=%d cap=%d\n", reflect.TypeOf(slice2).String(), len(slice2), cap(slice2))
	t.Logf("slice3 type=%v len=%d cap=%d\n", reflect.TypeOf(slice3).String(), len(slice3), cap(slice3))
}
// Test_Modify verifies slice/array aliasing in two steps:
//  1. writing through a slice of an array mutates the array itself;
//  2. appending beyond the array's capacity reallocates the backing
//     store, after which writes through the slice no longer touch the
//     original array.
func Test_Modify(t *testing.T) {
	arr := [...]int{1, 2, 3, 4, 5, 6, 7}
	slice := arr[:]
	slice[4] = 8
	t.Logf("arr[4]=%v,slice[4]=%v\n", arr[4], slice[4])
	// Shared backing array: both observe the write.
	assert.Equal(t, slice[4], arr[4])
	// len == cap here, so this append must allocate a new array.
	slice = append(slice, 9)
	slice[5] = 10
	t.Logf("arr[4]=%v,slice[4]=%v\n", arr[4], slice[4])
	assert.Equal(t, slice[4], arr[4])
	t.Logf("arr[5]=%v,slice[5]=%v\n", arr[5], slice[5])
	// Post-reallocation write is invisible to the original array.
	assert.NotEqual(t, slice[5], arr[5])
}
// Test_ModifyTwoSlice takes two overlapping slices of one array; a
// write through one is observed by the other and by the array itself,
// since all three share the same backing storage.
func Test_ModifyTwoSlice(t *testing.T) {
	backing := [...]int{1, 2, 3, 4, 5, 6, 7, 8, 9}
	slice1 := backing[1:5]
	slice2 := backing[3:8]
	// backing[3] == slice1[2] == slice2[0]: one write, three views.
	slice1[2] = 8
	t.Logf("%v %v %v\n", backing, slice1, slice2)
	assert.Equal(t, slice1[2], slice2[0], backing[3])
}
// Test_Append logs type/len/cap and the slice header (builtin println
// shows [len/cap]address) after each append; the address changes only
// when growth forces a reallocation.
func Test_Append(t *testing.T) {
	slice := []int{1, 2, 3}
	for step := 0; step < 3; step++ {
		fmt.Printf("slice type=%v len=%d cap=%d\n", reflect.TypeOf(slice).String(), len(slice), cap(slice))
		println(slice)
		if step < 2 {
			slice = append(slice, 1)
		}
	}
}
// Test_Cap appends 100 elements each to a literal-initialised slice
// and a make()-initialised slice, printing the header whenever the
// capacity grows, to observe append's growth pattern.
func Test_Cap(t *testing.T) {
	slice1 := []int{1, 2, 3}
	slice2 := make([]int, 3, 3)
	prevCaps := [2]int{}
	for i := 0; i < 100; i++ {
		slice1 = append(slice1, 1)
		slice2 = append(slice2, 1)
		if prevCaps[0] != cap(slice1) {
			println(slice1)
			prevCaps[0] = cap(slice1)
		}
		if prevCaps[1] != cap(slice2) {
			println(slice2)
			prevCaps[1] = cap(slice2)
		}
	}
}
// Test_Demo reverses an array in place via a slice view: arr[:]
// converts the array to a slice without copying, so reverse mutates
// arr itself.
func Test_Demo(t *testing.T) {
	arr := [...]int{1, 2, 3, 4}
	reverse(arr[:])
	t.Logf("%v\n", arr)
}
// reverse flips s in place, swapping elements from both ends toward
// the middle. Safe on empty and single-element slices.
func reverse(s []int) {
	left, right := 0, len(s)-1
	for left < right {
		s[left], s[right] = s[right], s[left]
		left++
		right--
	}
}

20
archive/go/test/http.go Normal file
View File

@@ -0,0 +1,20 @@
package main
import (
"net/http"
"time"
)
// A tiny probe server for one question: does net/http finish in-flight
// requests when the process is terminated?
// (Observed answer in the original notes: no — plain ListenAndServe
// offers no graceful shutdown.)
func main() {
	http.HandleFunc("/test", func(writer http.ResponseWriter, request *http.Request) {
		// Simulate a slow handler so there is time to kill the
		// process mid-request.
		time.Sleep(time.Second * 5)
		writer.Write([]byte("success"))
	})
	// ListenAndServe always returns a non-nil error; surface it
	// (e.g. port already in use) instead of silently discarding it
	// as the original did.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		panic(err)
	}
}