High Concurrency Design
Sustain million-level QPS by mastering performance-optimization strategies such as caching, asynchronous processing, and database/table sharding.
```go
// Multi-level cache: L1 in-process cache -> L2 Redis -> L3 database.
// LocalCache and loadFromDB are assumed to be defined elsewhere.
import (
	"context"
	"database/sql"
	"time"

	"github.com/redis/go-redis/v9" // or github.com/go-redis/redis/v8
	"golang.org/x/sync/singleflight"
)

type MultiLevelCache struct {
	local *LocalCache        // L1: local (in-process) cache
	redis *redis.Client      // L2: Redis
	db    *sql.DB            // L3: database
	group singleflight.Group // collapses concurrent loads of the same key
}

func (c *MultiLevelCache) Get(ctx context.Context, key string) ([]byte, error) {
	// L1: local cache
	if val, ok := c.local.Get(key); ok {
		return val, nil
	}

	// L2: Redis
	val, err := c.redis.Get(ctx, key).Bytes()
	if err == nil {
		c.local.Set(key, val, time.Minute)
		return val, nil
	}

	// L3: database (singleflight prevents cache breakdown: only one
	// goroutine reloads an expired hot key; the rest wait for its result)
	v, err, _ := c.group.Do(key, func() (interface{}, error) {
		data, err := c.loadFromDB(ctx, key)
		if err != nil {
			return nil, err
		}
		// Backfill both cache levels (backfill errors ignored for brevity)
		c.redis.Set(ctx, key, data, time.Hour)
		c.local.Set(key, data, time.Minute)
		return data, nil
	})
	if err != nil {
		return nil, err
	}
	return v.([]byte), nil
}
```

```go
// Worker Pool pattern: a fixed number of goroutines consume tasks from a
// bounded queue, capping concurrency and providing back-pressure.
// Task (with an Execute() Result method and an Event field), Result, and
// Event are assumed to be defined elsewhere.
type WorkerPool struct {
	tasks   chan Task
	results chan Result
	workers int
}

func NewWorkerPool(workers, queueSize int) *WorkerPool {
	pool := &WorkerPool{
		tasks:   make(chan Task, queueSize),
		results: make(chan Result, queueSize),
		workers: workers,
	}
	pool.Start()
	return pool
}

func (p *WorkerPool) Start() {
	for i := 0; i < p.workers; i++ {
		go func(workerID int) {
			for task := range p.tasks {
				result := task.Execute()
				p.results <- result
			}
		}(i)
	}
}

// Web3 scenario: batch-process on-chain events
func (p *WorkerPool) ProcessEvents(events []Event) {
	for _, event := range events {
		p.tasks <- Task{Event: event}
	}
}
```
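Note that ProcessEvents only enqueues work: a caller must drain p.results concurrently, otherwise the bounded results channel fills up and the workers stall. A production version would also expose a Stop method that closes p.tasks so the worker goroutines exit once the queue drains; as sketched, the pool runs for the life of the process.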
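The overview above also lists database/table sharding, which the samples do not cover. The sketch below shows hash-based shard routing; the shard counts, the FNV hash, and the user_db_N.user_M naming scheme are illustrative assumptions, not a prescribed convention.

```go
package main

import (
	"fmt"
	"hash/fnv"
)

const (
	dbShards    = 4  // number of physical databases (assumed)
	tableShards = 16 // number of tables per database (assumed)
)

// shardForUser maps a stable key (here a user ID, e.g. a wallet address)
// to a database and a table, so all rows for one user land on the same shard.
func shardForUser(userID string) (db, table string) {
	h := fnv.New32a()
	h.Write([]byte(userID))
	sum := h.Sum32()

	dbIdx := sum % dbShards
	tableIdx := (sum / dbShards) % tableShards
	return fmt.Sprintf("user_db_%d", dbIdx), fmt.Sprintf("user_%d", tableIdx)
}

func main() {
	db, table := shardForUser("0xabc123")
	fmt.Printf("SELECT * FROM %s.%s WHERE user_id = ?\n", db, table)
}
```

Routing on a stable key keeps a user's rows together and avoids cross-shard queries for the common single-user lookups; consistent hashing or range-based routing are the usual alternatives when shards need to be added or rebalanced later.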