Skip to content

Cache Plugin Architecture

Pie features a flexible plugin-based caching system that supports multiple cache implementations and chaining. The system provides Ristretto (default) and Redis implementations, while allowing users to implement custom cache plugins.

The cache plugin architecture allows you to:

  • Use multiple cache instances in a chain
  • Implement custom cache backends
  • Combine different cache types (memory + Redis)
  • Enable automatic cache backfilling
  • Monitor cache performance across all layers
// Enable default Ristretto memory cache
engine.UseDefaultCache()
// Use cache in session
session := pie.Table[User](engine)
// Cache(ttl) caches this query's serialized result for 5 minutes.
users, err := session.Cache(5 * time.Minute).Find(ctx)
// Enable Redis cache
redisConfig := &pie.RedisCacheConfig{
Addr: "localhost:6379",
Password: "",
DB: 0,
PoolSize: 10,
}
engine.UseRedis(redisConfig)
// Use cache
session := pie.Table[User](engine)
// Same API as the memory cache — only the backend changes.
users, err := session.Cache(10 * time.Minute).Find(ctx)
// Create multiple cache instances
// Passing nil uses the implementation's built-in defaults.
ristrettoCache, _ := pie.NewRistrettoCache(nil)
redisCache, _ := pie.NewRedisCache(&pie.RedisCacheConfig{
Addr: "localhost:6379",
})
// Use chained caching (L1: Ristretto, L2: Redis)
// Order matters: the first argument is checked first on reads.
engine.UseCache(ristrettoCache, redisCache)
// Cache operations will automatically:
// 1. Check L1 cache first
// 2. If miss, check L2 cache
// 3. If L2 hit, backfill to L1
// 4. Write to all cache layers on Set
// Custom Ristretto configuration
ristrettoConfig := &pie.RistrettoCacheConfig{
NumCounters: 100000, // ~10x max entries
MaxCost: 100 * 1024 * 1024, // 100MB
BufferItems: 64, // Get buffer size
}
ristrettoCache, err := pie.NewRistrettoCache(ristrettoConfig)
if err != nil {
log.Fatal(err)
}
engine.UseCache(ristrettoCache)
// Redis configuration
redisConfig := &pie.RedisCacheConfig{
Addr: "localhost:6379",
Password: "your-password",
DB: 0,
PoolSize: 20,
}
redisCache, err := pie.NewRedisCache(redisConfig)
if err != nil {
log.Fatal(err)
}
engine.UseCache(redisCache)
// Cache manager configuration
config := &pie.CacheConfig{
Enabled: true,
DefaultTTL: 5 * time.Minute,
KeyPrefix: "pie:",
EnableJitter: true,
TTLJitter: 2 * time.Minute,
EmptyCacheTTL: 30 * time.Second,
}
// NOTE(review): `config` is declared but never passed to UseCache in this
// snippet — presumably it is applied through a separate setter on the
// cache manager; confirm against the pie API.
engine.UseCache(ristrettoCache, redisCache)
// Configuration is applied to the cache manager
// Custom cache implementation

// MyCache is a minimal in-memory cache plugin backed by a plain map.
// It illustrates the Cache interface Pie expects from custom backends.
type MyCache struct {
data map[string][]byte // key -> serialized payload
stats *pie.CacheStats // running hit/miss/key counters
mu sync.RWMutex // guards data and stats
}
// NewMyCache builds an empty in-memory cache with zeroed statistics.
func NewMyCache() *MyCache {
	c := &MyCache{}
	c.data = map[string][]byte{}
	c.stats = new(pie.CacheStats)
	return c
}
// Implement Cache interface

// Get returns the cached value for key, updating hit/miss statistics.
// It returns pie.ErrCacheNotFound when the key is absent.
//
// Bug fix: the original took only mu.RLock() while mutating the shared
// stats fields; RLock admits concurrent readers, so every Get raced on
// stats.Total/Hits/Misses. A full write lock is required here.
func (m *MyCache) Get(ctx context.Context, key string) ([]byte, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.stats.Total++
	if val, exists := m.data[key]; exists {
		m.stats.Hits++
		m.stats.HitRate = float64(m.stats.Hits) / float64(m.stats.Total) * 100
		return val, nil
	}
	m.stats.Misses++
	// Refresh the hit rate on misses too, so it never reads stale.
	m.stats.HitRate = float64(m.stats.Hits) / float64(m.stats.Total) * 100
	return nil, pie.ErrCacheNotFound
}
// Set stores value under key.
//
// Bug fix: the original incremented stats.Keys on every call, so
// overwriting an existing key inflated the key count. Keys is now
// incremented only when the key is new.
//
// The ttl parameter is accepted for interface compatibility; this
// simple in-memory example does not enforce expiration.
func (m *MyCache) Set(ctx context.Context, key string, value []byte, ttl time.Duration) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if _, exists := m.data[key]; !exists {
		m.stats.Keys++ // count only newly inserted keys
	}
	m.data[key] = value
	return nil
}
// Delete removes key from the cache.
//
// Bug fix: the original decremented stats.Keys unconditionally, so
// deleting an absent key drove the counter negative. Keys is now
// decremented only when the key was actually present.
func (m *MyCache) Delete(ctx context.Context, key string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if _, exists := m.data[key]; exists {
		delete(m.data, key)
		m.stats.Keys--
	}
	return nil
}
// DeleteByPattern removes all keys matching pattern.
// Left unimplemented in this example; a real plugin would iterate the
// map under the write lock and match keys (e.g. with path.Match).
func (m *MyCache) DeleteByPattern(ctx context.Context, pattern string) error {
// Implement pattern-based deletion
return nil
}
// DeleteByTags removes all entries associated with any of the given tags.
// Left unimplemented in this example; a real plugin would maintain a
// tag -> keys index populated by Set and delete through it here.
func (m *MyCache) DeleteByTags(ctx context.Context, tags ...string) error {
// Implement tag-based deletion
return nil
}
// Exists reports whether key is currently stored in the cache.
func (m *MyCache) Exists(ctx context.Context, key string) (bool, error) {
	m.mu.RLock()
	_, found := m.data[key]
	m.mu.RUnlock()
	return found, nil
}
// Clear drops every entry and resets the key counter.
func (m *MyCache) Clear(ctx context.Context) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.stats.Keys = 0
	m.data = map[string][]byte{}
	return nil
}
// Stats returns a snapshot copy of the current statistics so callers
// cannot mutate the cache's internal counters.
func (m *MyCache) Stats() *pie.CacheStats {
	m.mu.RLock()
	defer m.mu.RUnlock()
	snapshot := *m.stats
	return &snapshot
}
// Use custom cache
myCache := NewMyCache()
engine.UseCache(myCache)
// Or combine with other caches
// Chain order = lookup order: custom first, then memory, then Redis.
engine.UseCache(myCache, ristrettoCache, redisCache)
// Basic caching
session := pie.Table[User](engine)
users, err := session.Cache(5 * time.Minute).Find(ctx)
// Cache with tags for easy invalidation
users, err := session.CacheWithTags("users", "active").Find(ctx)
// Invalidate by tags
_ = engine.Cache().DeleteByTags(ctx, "users")
// Use TTL jitter to prevent cache stampede
// Effective TTL is 10m plus a random offset of up to 2m.
users, err := session.CacheWithJitter(10*time.Minute, 2*time.Minute).Find(ctx)
// Cache empty results to prevent cache penetration
users, err := session.CacheEmpty(30*time.Second).Find(ctx)

When reading from a cache chain:

  1. Sequential Lookup: Check caches in order (L1 → L2 → L3…)
  2. Backfill: If found in L2+, automatically backfill to L1
  3. Return: Return the first found value
// Example: L1 miss, L2 hit, automatic backfill to L1
ristrettoCache, _ := pie.NewRistrettoCache(nil)
redisCache, _ := pie.NewRedisCache(&pie.RedisCacheConfig{Addr: "localhost:6379"})
engine.UseCache(ristrettoCache, redisCache)
// First call: L1 miss, L2 miss, query database
users, err := session.Cache(5*time.Minute).Find(ctx)
// Second call: L1 hit (backfilled from L2)
users, err := session.Cache(5*time.Minute).Find(ctx)

When you execute a query with caching enabled (for example Find, First, Count), Pie automatically serializes the result and writes it to every cache layer. You typically don’t need to write to the cache manually.

If you have non-query data to store, you can interact with the cache manager directly:

// Manually store a payload in all cache layers
// The cache stores raw bytes, so arbitrary data must be serialized first.
payload, _ := json.Marshal(users)
err := engine.Cache().Set(ctx, "users:active", payload, 5*time.Minute)

When deleting from a cache chain:

  1. Delete All: Delete from all cache layers
  2. Pattern Matching: Apply pattern deletion to all layers
  3. Tag Invalidation: Apply tag-based deletion to all layers
// Delete from all layers
_ = engine.Cache().Delete(ctx, "users:active")
// Pattern deletion
_ = engine.Cache().DeleteByPattern(ctx, "users:*")
// Tag-based deletion
_ = engine.Cache().DeleteByTags(ctx, "users", "active")
// Get aggregated statistics from all cache layers
//
// Bug fix: fmt.Printf does not append a newline, so the original
// printed the whole report as one run-on line; each format string
// now ends with \n.
stats := engine.Cache().Stats()
fmt.Printf("Cache Statistics:\n")
fmt.Printf(" Total Requests: %d\n", stats.Total)
fmt.Printf(" Hits: %d\n", stats.Hits)
fmt.Printf(" Misses: %d\n", stats.Misses)
fmt.Printf(" Hit Rate: %.2f%%\n", stats.HitRate)
fmt.Printf(" Keys: %d\n", stats.Keys)
fmt.Printf(" Size: %d bytes\n", stats.Size)
fmt.Printf(" Evicted Keys: %d\n", stats.EvictedKeys)
// Get statistics from individual caches
caches := engine.Cache().GetCaches()
for i, cache := range caches {
	stats := cache.Stats()
	fmt.Printf("Cache Layer %d: Hit Rate %.2f%%\n", i+1, stats.HitRate)
}
// getUserWithCache looks up a single user by ID, caching the result
// for ten minutes.
func getUserWithCache(userID bson.ObjectID) (*User, error) {
	s := pie.Table[User](engine)
	user, err := s.Where("_id", userID).Cache(10 * time.Minute).FindOne(ctx)
	if err != nil {
		return nil, err
	}
	return user, nil
}
// updateUserWithCache applies updates to one user and then invalidates
// all cached user entries so stale data is not served.
func updateUserWithCache(userID bson.ObjectID, updates bson.D) error {
	s := pie.Table[User](engine)
	// Apply the update first; only invalidate when it succeeded.
	if _, err := s.Where("_id", userID).Update(ctx, updates); err != nil {
		return err
	}
	// Best-effort invalidation: a failed cache delete is not fatal.
	_ = engine.Cache().DeleteByPattern(ctx, "user:*")
	return nil
}
// getCachedUserStats aggregates cached user counts: the total count
// (cached 30 minutes) and the active count (cached under the "users"
// and "stats" tags for group invalidation).
//
// NOTE(review): the same session value is reused for both counts; this
// assumes Where/Cache return derived queries rather than mutating the
// session in place — confirm against the pie session semantics.
func getCachedUserStats() (*UserStats, error) {
session := pie.Table[User](engine)
var stats UserStats
// Cache total user count
totalCount, err := session.
Cache(30 * time.Minute).
Count(ctx)
if err != nil {
return nil, err
}
stats.TotalCount = totalCount
// Cache active user count with tags
activeCount, err := session.
Where("status", "active").
CacheWithTags("users", "stats").
Count(ctx)
if err != nil {
return nil, err
}
stats.ActiveCount = activeCount
return &stats, nil
}
// getCachedConfig fetches one configuration value by key, caching the
// lookup under the "config" tag so the whole group can be invalidated.
func getCachedConfig(key string) (string, error) {
	s := pie.Table[Config](engine)
	cfg, err := s.Where("key", key).CacheWithTags("config").FindOne(ctx)
	if err != nil {
		return "", err
	}
	return cfg.Value, nil
}
// setConfigWithCache upserts a configuration entry and then invalidates
// every cached config lookup via the "config" tag.
func setConfigWithCache(key, value string) error {
	s := pie.Table[Config](engine)
	// Update or insert configuration
	_, err := s.
		Where("key", key).
		Upsert(ctx, &Config{
			Key:   key,
			Value: value,
		})
	if err != nil {
		return err
	}
	// Best-effort invalidation of the config cache group.
	_ = engine.Cache().DeleteByTags(ctx, "config")
	return nil
}
// warmupCache pre-populates the cache by executing a fixed set of
// common queries with a one-hour TTL. Failures are logged but do not
// abort the warmup, so the function always returns nil by design.
//
// NOTE(review): both query closures share the same session value; this
// assumes Where/Cache derive a new query rather than mutating the
// session in place — confirm against the pie session semantics.
func warmupCache() error {
session := pie.Table[User](engine)
// Warm up common queries
queries := []struct {
name string
query func() *pie.Session[User]
}{
{
name: "active_users",
query: func() *pie.Session[User] {
return session.Where("status", "active").Cache(1*time.Hour)
},
},
{
name: "admin_users",
query: func() *pie.Session[User] {
return session.Where("role", "admin").Cache(1*time.Hour)
},
},
}
for _, q := range queries {
users, err := q.query().Find(ctx)
if err != nil {
log.Printf("Failed to warmup cache for %s: %v", q.name, err)
} else {
log.Printf("Warmed up cache for %s: %d users", q.name, len(users))
}
}
return nil
}
// getUsersWithConditionalCache loads active users, attaching a
// five-minute cache to the query only when useCache is true.
func getUsersWithConditionalCache(useCache bool) ([]User, error) {
	q := pie.Table[User](engine).Where("status", "active")
	if useCache {
		q = q.Cache(5 * time.Minute)
	}
	return q.Find(ctx)
}
// invalidateUserCache removes the cached entry for one user plus any
// cached user lists that might contain them. Deletions are best-effort.
func invalidateUserCache(userID bson.ObjectID) error {
	c := engine.Cache()
	// Clear specific user cache
	_ = c.Delete(ctx, fmt.Sprintf("user:%s", userID.Hex()))
	// Clear related list cache
	for _, pattern := range []string{"users:*", "active_users:*"} {
		_ = c.DeleteByPattern(ctx, pattern)
	}
	return nil
}
// invalidateAllUserCache clears every user-related cache entry in one
// shot using tag-based invalidation.
func invalidateAllUserCache() error {
	// Tags reach entries in all cache layers at once.
	_ = engine.Cache().DeleteByTags(ctx, "users", "user_stats")
	return nil
}
// Read-heavy, write-light data: long cache time
// getStaticData loads rarely-changing data with a one-hour cache TTL.
func getStaticData() ([]StaticData, error) {
	return pie.Table[StaticData](engine).Cache(1 * time.Hour).Find(ctx)
}
// Read-light, write-heavy data: short cache time
// getFrequentlyUpdatedData loads volatile data with a one-minute TTL so
// stale reads are short-lived.
func getFrequentlyUpdatedData() ([]DynamicData, error) {
	return pie.Table[DynamicData](engine).Cache(1 * time.Minute).Find(ctx)
}
// L1: Fast memory cache for hot data
// L2: Persistent Redis cache for warm data
// Construction errors are ignored here for brevity; real code should
// check them (see the NewRistrettoCache example above).
ristrettoCache, _ := pie.NewRistrettoCache(&pie.RistrettoCacheConfig{
NumCounters: 100000,
MaxCost: 50 * 1024 * 1024, // 50MB
})
redisCache, _ := pie.NewRedisCache(&pie.RedisCacheConfig{
Addr: "localhost:6379",
})
engine.UseCache(ristrettoCache, redisCache)
// monitorCachePerformance logs aggregated cache statistics and warns
// when the hit rate drops below 50%.
func monitorCachePerformance() {
	// Get cache statistics
	stats := engine.Cache().Stats()
	log.Printf("Cache Performance:")
	log.Printf(" Hit Rate: %.2f%%", stats.HitRate)
	log.Printf(" Total Requests: %d", stats.Total)
	log.Printf(" Memory Usage: %d bytes", stats.Size)
	// Alert if hit rate is too low
	const minAcceptableHitRate = 50.0
	if stats.HitRate < minAcceptableHitRate {
		log.Printf("WARNING: Low cache hit rate: %.2f%%", stats.HitRate)
	}
}
// Prevent cache stampede with TTL jitter
// A random offset of up to 2 minutes is added to the 10-minute TTL so
// many entries written together do not all expire at the same instant.
session := pie.Table[User](engine)
users, err := session.CacheWithJitter(10*time.Minute, 2*time.Minute).Find(ctx)
// handleCacheError logs a cache error according to its kind. A nil
// error is ignored; a cache miss is treated as normal operation.
func handleCacheError(err error) {
	switch {
	case err == nil:
		return
	case err == pie.ErrCacheNotFound:
		log.Println("Cache miss - this is normal")
	case err == pie.ErrCacheExpired:
		log.Println("Cache expired - refreshing")
	case err == pie.ErrCacheDisabled:
		log.Println("Cache is disabled")
	default:
		log.Printf("Cache error: %v", err)
	}
}
// getUsersWithFallback reads active users through the cache and, if the
// cached query fails for any reason, retries directly without caching.
//
// NOTE(review): the log labels any error as a "cache miss", but an
// ordinary miss would normally fall through to the database inside
// Find; errors here are more likely cache-layer failures — confirm.
func getUsersWithFallback() ([]User, error) {
	s := pie.Table[User](engine)
	// Try to get from cache
	users, err := s.Where("status", "active").Cache(5 * time.Minute).Find(ctx)
	if err == nil {
		return users, nil
	}
	// Cache miss, fallback to database
	log.Printf("Cache miss, falling back to database: %v", err)
	users, err = s.Where("status", "active").Find(ctx)
	if err != nil {
		return nil, err
	}
	return users, nil
}