Cache Plugin Architecture
Section titled “Cache Plugin Architecture”

Pie features a flexible plugin-based caching system that supports multiple cache implementations and chaining. The system provides Ristretto (default) and Redis implementations, while allowing users to implement custom cache plugins.
Overview
Section titled “Overview”

The cache plugin architecture allows you to:
- Use multiple cache instances in a chain
- Implement custom cache backends
- Combine different cache types (memory + Redis)
- Enable automatic cache backfilling
- Monitor cache performance across all layers
Basic Usage
Section titled “Basic Usage”

Default Ristretto Cache
Section titled “Default Ristretto Cache”

// Enable default Ristretto memory cache
engine.UseDefaultCache()

// Use cache in session
session := pie.Table[User](engine)
users, err := session.Cache(5 * time.Minute).Find(ctx)

Redis Cache
Section titled “Redis Cache”

// Enable Redis cache
redisConfig := &pie.RedisCacheConfig{
	Addr:     "localhost:6379",
	Password: "",
	DB:       0,
	PoolSize: 10,
}
engine.UseRedis(redisConfig)

// Use cache
session := pie.Table[User](engine)
users, err := session.Cache(10 * time.Minute).Find(ctx)

Multi-Level Cache Chain
Section titled “Multi-Level Cache Chain”

// Create multiple cache instances
ristrettoCache, _ := pie.NewRistrettoCache(nil)
redisCache, _ := pie.NewRedisCache(&pie.RedisCacheConfig{
	Addr: "localhost:6379",
})

// Use chained caching (L1: Ristretto, L2: Redis)
engine.UseCache(ristrettoCache, redisCache)

// Cache operations will automatically:
// 1. Check L1 cache first
// 2. If miss, check L2 cache
// 3. If L2 hit, backfill to L1
// 4. Write to all cache layers on Set

Cache Configuration
Section titled “Cache Configuration”

Ristretto Configuration
Section titled “Ristretto Configuration”

// Custom Ristretto configuration
ristrettoConfig := &pie.RistrettoCacheConfig{
	NumCounters: 100000,            // ~10x max entries
	MaxCost:     100 * 1024 * 1024, // 100MB
	BufferItems: 64,                // Get buffer size
}

ristrettoCache, err := pie.NewRistrettoCache(ristrettoConfig)
if err != nil {
	log.Fatal(err)
}

engine.UseCache(ristrettoCache)

Redis Configuration
Section titled “Redis Configuration”

// Redis configuration
redisConfig := &pie.RedisCacheConfig{
	Addr:     "localhost:6379",
	Password: "your-password",
	DB:       0,
	PoolSize: 20,
}

redisCache, err := pie.NewRedisCache(redisConfig)
if err != nil {
	log.Fatal(err)
}

engine.UseCache(redisCache)

Cache Manager Configuration
Section titled “Cache Manager Configuration”

// Cache manager configuration
config := &pie.CacheConfig{
	Enabled:       true,
	DefaultTTL:    5 * time.Minute,
	KeyPrefix:     "pie:",
	EnableJitter:  true,
	TTLJitter:     2 * time.Minute,
	EmptyCacheTTL: 30 * time.Second,
}

engine.UseCache(ristrettoCache, redisCache)
// Configuration is applied to the cache manager

Custom Cache Implementation
Section titled “Custom Cache Implementation”

Implementing the Cache Interface
Section titled “Implementing the Cache Interface”

// Custom cache implementation
type MyCache struct {
	data  map[string][]byte
	stats *pie.CacheStats
	mu    sync.RWMutex
}

func NewMyCache() *MyCache {
	return &MyCache{
		data:  make(map[string][]byte),
		stats: &pie.CacheStats{},
	}
}

// Implement Cache interface
func (m *MyCache) Get(ctx context.Context, key string) ([]byte, error) {
	// A full write lock is required here (not RLock): Get mutates the
	// statistics counters, which would be a data race under a read lock.
	m.mu.Lock()
	defer m.mu.Unlock()

	m.stats.Total++
	if val, exists := m.data[key]; exists {
		m.stats.Hits++
		m.stats.HitRate = float64(m.stats.Hits) / float64(m.stats.Total) * 100
		return val, nil
	}

	m.stats.Misses++
	return nil, pie.ErrCacheNotFound
}

func (m *MyCache) Set(ctx context.Context, key string, value []byte, ttl time.Duration) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	m.data[key] = value
	m.stats.Keys++
	return nil
}

func (m *MyCache) Delete(ctx context.Context, key string) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	delete(m.data, key)
	m.stats.Keys--
	return nil
}

func (m *MyCache) DeleteByPattern(ctx context.Context, pattern string) error {
	// Implement pattern-based deletion
	return nil
}

func (m *MyCache) DeleteByTags(ctx context.Context, tags ...string) error {
	// Implement tag-based deletion
	return nil
}

func (m *MyCache) Exists(ctx context.Context, key string) (bool, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()

	_, exists := m.data[key]
	return exists, nil
}

func (m *MyCache) Clear(ctx context.Context) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	m.data = make(map[string][]byte)
	m.stats.Keys = 0
	return nil
}

func (m *MyCache) Stats() *pie.CacheStats {
	m.mu.RLock()
	defer m.mu.RUnlock()

	stats := *m.stats
	return &stats
}

Using Custom Cache
Section titled “Using Custom Cache”

// Use custom cache
myCache := NewMyCache()
engine.UseCache(myCache)

// Or combine with other caches
engine.UseCache(myCache, ristrettoCache, redisCache)

Session Cache Usage
Section titled “Session Cache Usage”

Basic Caching
Section titled “Basic Caching”

// Basic caching
session := pie.Table[User](engine)
users, err := session.Cache(5 * time.Minute).Find(ctx)

Cache with Tags
Section titled “Cache with Tags”

// Cache with tags for easy invalidation
users, err := session.CacheWithTags("users", "active").Find(ctx)

// Invalidate by tags
_ = engine.Cache().DeleteByTags(ctx, "users")

Cache with TTL Jitter
Section titled “Cache with TTL Jitter”

// Use TTL jitter to prevent cache stampede
users, err := session.CacheWithJitter(10*time.Minute, 2*time.Minute).Find(ctx)

Cache Empty Results
Section titled “Cache Empty Results”

// Cache empty results to prevent cache penetration
users, err := session.CacheEmpty(30*time.Second).Find(ctx)

Cache Chain Behavior
Section titled “Cache Chain Behavior”

Read Operations
Section titled “Read Operations”

When reading from a cache chain:
- Sequential Lookup: Check caches in order (L1 → L2 → L3…)
- Backfill: If found in L2+, automatically backfill to L1
- Return: Return the first found value

// Example: L1 miss, L2 hit, automatic backfill to L1
ristrettoCache, _ := pie.NewRistrettoCache(nil)
redisCache, _ := pie.NewRedisCache(&pie.RedisCacheConfig{Addr: "localhost:6379"})

engine.UseCache(ristrettoCache, redisCache)

// First call: L1 miss, L2 miss, query database
users, err := session.Cache(5*time.Minute).Find(ctx)

// Second call: L1 hit (backfilled from L2)
users, err := session.Cache(5*time.Minute).Find(ctx)

Write Operations
Section titled “Write Operations”

When you execute a query with caching enabled (for example Find, First, Count), Pie automatically serializes the result and writes it to every cache layer. You typically don’t need to write to the cache manually.

If you have non-query data to store, you can interact with the cache manager directly:

// Manually store a payload in all cache layers
payload, _ := json.Marshal(users)
err := engine.Cache().Set(ctx, "users:active", payload, 5*time.Minute)

Delete Operations
Section titled “Delete Operations”

When deleting from a cache chain:
- Delete All: Delete from all cache layers
- Pattern Matching: Apply pattern deletion to all layers
- Tag Invalidation: Apply tag-based deletion to all layers

// Delete from all layers
_ = engine.Cache().Delete(ctx, "users:active")

// Pattern deletion
_ = engine.Cache().DeleteByPattern(ctx, "users:*")

// Tag-based deletion
_ = engine.Cache().DeleteByTags(ctx, "users", "active")

Performance Monitoring
Section titled “Performance Monitoring”

Cache Statistics
Section titled “Cache Statistics”

// Get aggregated statistics from all cache layers
stats := engine.Cache().Stats()

fmt.Printf("Cache Statistics:\n")
fmt.Printf("  Total Requests: %d\n", stats.Total)
fmt.Printf("  Hits: %d\n", stats.Hits)
fmt.Printf("  Misses: %d\n", stats.Misses)
fmt.Printf("  Hit Rate: %.2f%%\n", stats.HitRate)
fmt.Printf("  Keys: %d\n", stats.Keys)
fmt.Printf("  Size: %d bytes\n", stats.Size)
fmt.Printf("  Evicted Keys: %d\n", stats.EvictedKeys)

Individual Cache Statistics
Section titled “Individual Cache Statistics”

// Get statistics from individual caches
caches := engine.Cache().GetCaches()
for i, cache := range caches {
	stats := cache.Stats()
	fmt.Printf("Cache Layer %d: Hit Rate %.2f%%\n", i+1, stats.HitRate)
}

Real-World Applications
Section titled “Real-World Applications”

User Information Caching
Section titled “User Information Caching”

func getUserWithCache(userID bson.ObjectID) (*User, error) {
	session := pie.Table[User](engine)

	user, err := session.
		Where("_id", userID).
		Cache(10 * time.Minute).
		FindOne(ctx)

	if err != nil {
		return nil, err
	}

	return user, nil
}

func updateUserWithCache(userID bson.ObjectID, updates bson.D) error {
	session := pie.Table[User](engine)

	// Update user
	_, err := session.
		Where("_id", userID).
		Update(ctx, updates)

	if err != nil {
		return err
	}

	// Clear related cache
	_ = engine.Cache().DeleteByPattern(ctx, "user:*")

	return nil
}

Statistics Caching
Section titled “Statistics Caching”

func getCachedUserStats() (*UserStats, error) {
	session := pie.Table[User](engine)

	var stats UserStats

	// Cache total user count
	totalCount, err := session.
		Cache(30 * time.Minute).
		Count(ctx)
	if err != nil {
		return nil, err
	}
	stats.TotalCount = totalCount

	// Cache active user count with tags
	activeCount, err := session.
		Where("status", "active").
		CacheWithTags("users", "stats").
		Count(ctx)
	if err != nil {
		return nil, err
	}
	stats.ActiveCount = activeCount

	return &stats, nil
}

Configuration Caching
Section titled “Configuration Caching”

func getCachedConfig(key string) (string, error) {
	session := pie.Table[Config](engine)

	config, err := session.
		Where("key", key).
		CacheWithTags("config").
		FindOne(ctx)

	if err != nil {
		return "", err
	}

	return config.Value, nil
}

func setConfigWithCache(key, value string) error {
	session := pie.Table[Config](engine)

	// Update or insert configuration
	_, err := session.
		Where("key", key).
		Upsert(ctx, &Config{
			Key:   key,
			Value: value,
		})

	if err != nil {
		return err
	}

	// Clear config cache by tags
	_ = engine.Cache().DeleteByTags(ctx, "config")

	return nil
}

Advanced Usage
Section titled “Advanced Usage”

Cache Warming
Section titled “Cache Warming”

func warmupCache() error {
	session := pie.Table[User](engine)

	// Warm up common queries
	queries := []struct {
		name  string
		query func() *pie.Session[User]
	}{
		{
			name: "active_users",
			query: func() *pie.Session[User] {
				return session.Where("status", "active").Cache(1 * time.Hour)
			},
		},
		{
			name: "admin_users",
			query: func() *pie.Session[User] {
				return session.Where("role", "admin").Cache(1 * time.Hour)
			},
		},
	}

	for _, q := range queries {
		users, err := q.query().Find(ctx)
		if err != nil {
			log.Printf("Failed to warmup cache for %s: %v", q.name, err)
		} else {
			log.Printf("Warmed up cache for %s: %d users", q.name, len(users))
		}
	}

	return nil
}

Conditional Caching
Section titled “Conditional Caching”

func getUsersWithConditionalCache(useCache bool) ([]User, error) {
	session := pie.Table[User](engine)

	query := session.Where("status", "active")

	if useCache {
		query = query.Cache(5 * time.Minute)
	}

	users, err := query.Find(ctx)
	return users, err
}

Cache Invalidation Strategies
Section titled “Cache Invalidation Strategies”

func invalidateUserCache(userID bson.ObjectID) error {
	cache := engine.Cache()

	// Clear specific user cache
	_ = cache.Delete(ctx, fmt.Sprintf("user:%s", userID.Hex()))

	// Clear related list cache
	_ = cache.DeleteByPattern(ctx, "users:*")
	_ = cache.DeleteByPattern(ctx, "active_users:*")

	return nil
}

func invalidateAllUserCache() error {
	cache := engine.Cache()

	// Clear all user-related cache using tags
	_ = cache.DeleteByTags(ctx, "users", "user_stats")

	return nil
}

Best Practices
Section titled “Best Practices”

1. Choose Appropriate Cache Strategy
Section titled “1. Choose Appropriate Cache Strategy”

// Read-heavy, write-light data: long cache time
func getStaticData() ([]StaticData, error) {
	session := pie.Table[StaticData](engine)
	return session.Cache(1 * time.Hour).Find(ctx)
}

// Read-light, write-heavy data: short cache time
func getFrequentlyUpdatedData() ([]DynamicData, error) {
	session := pie.Table[DynamicData](engine)
	return session.Cache(1 * time.Minute).Find(ctx)
}

2. Use Multi-Level Caching
Section titled “2. Use Multi-Level Caching”

// L1: Fast memory cache for hot data
// L2: Persistent Redis cache for warm data
ristrettoCache, _ := pie.NewRistrettoCache(&pie.RistrettoCacheConfig{
	NumCounters: 100000,
	MaxCost:     50 * 1024 * 1024, // 50MB
})

redisCache, _ := pie.NewRedisCache(&pie.RedisCacheConfig{
	Addr: "localhost:6379",
})

engine.UseCache(ristrettoCache, redisCache)

3. Monitor Cache Performance
Section titled “3. Monitor Cache Performance”

func monitorCachePerformance() {
	cache := engine.Cache()

	// Get cache statistics
	stats := cache.Stats()

	log.Printf("Cache Performance:")
	log.Printf("  Hit Rate: %.2f%%", stats.HitRate)
	log.Printf("  Total Requests: %d", stats.Total)
	log.Printf("  Memory Usage: %d bytes", stats.Size)

	// Alert if hit rate is too low
	if stats.HitRate < 50.0 {
		log.Printf("WARNING: Low cache hit rate: %.2f%%", stats.HitRate)
	}
}

4. Use TTL Jitter
Section titled “4. Use TTL Jitter”

// Prevent cache stampede with TTL jitter
session := pie.Table[User](engine)
users, err := session.CacheWithJitter(10*time.Minute, 2*time.Minute).Find(ctx)

Error Handling
Section titled “Error Handling”

Cache Error Handling
Section titled “Cache Error Handling”

func handleCacheError(err error) {
	if err == nil {
		return
	}

	switch err {
	case pie.ErrCacheNotFound:
		log.Println("Cache miss - this is normal")
	case pie.ErrCacheExpired:
		log.Println("Cache expired - refreshing")
	case pie.ErrCacheDisabled:
		log.Println("Cache is disabled")
	default:
		log.Printf("Cache error: %v", err)
	}
}

Fallback Handling
Section titled “Fallback Handling”

func getUsersWithFallback() ([]User, error) {
	session := pie.Table[User](engine)

	// Try to get from cache
	users, err := session.
		Where("status", "active").
		Cache(5 * time.Minute).
		Find(ctx)

	if err != nil {
		// Cache miss, fallback to database
		log.Printf("Cache miss, falling back to database: %v", err)

		users, err = session.
			Where("status", "active").
			Find(ctx)

		if err != nil {
			return nil, err
		}
	}

	return users, nil
}

Next Steps
Section titled “Next Steps”

- Hook System - Learn about lifecycle hooks
- Soft Delete - Master soft delete functionality
- Index Management - Learn index optimization