Using distributed locks with Redis in Go can help synchronize access to shared resources across multiple instances of your application, ensuring that only one instance can modify or access the resource at any given time.
// Import necessary packages
package main
import (
"fmt"
"time"
"github.com/go-redis/redis/v8"
"golang.org/x/net/context"
)
// Create a Redis client
var ctx = context.Background()
// main demonstrates acquiring and releasing a Redis-backed distributed lock.
func main() {
	rdb := redis.NewClient(&redis.Options{
		Addr: "localhost:6379", // Redis server address
	})

	lockKey := "lock:myresource"
	// The lock value must be unique per client/attempt: releaseLock only
	// deletes the key when the stored value matches, so a shared constant
	// would let any instance release any other instance's lock.
	lockValue := fmt.Sprintf("lock-%d", time.Now().UnixNano())

	// Attempt to acquire the lock; only proceed (and release) on success.
	if acquireLock(ctx, rdb, lockKey, lockValue, 10*time.Second) {
		// ... critical section: do work while holding the lock ...
		releaseLock(ctx, rdb, lockKey, lockValue)
	}
}
// acquireLock attempts to take a distributed lock by atomically setting key
// to value with SET NX and the given expiration. It returns true when the
// lock was acquired; on success the caller owns the lock and must call
// releaseLock with the same key/value when done (the expiration is a safety
// net in case the holder crashes before releasing).
func acquireLock(ctx context.Context, rdb *redis.Client, key string, value string, expiration time.Duration) bool {
	// SETNX is atomic: the key is set only if it does not already exist,
	// so at most one client can hold the lock at any moment.
	acquired, err := rdb.SetNX(ctx, key, value, expiration).Result()
	if err != nil {
		fmt.Println("Error acquiring lock:", err)
		return false
	}
	if !acquired {
		fmt.Println("Lock not acquired")
		return false
	}
	fmt.Println("Lock acquired")
	// BUG FIX: the original had `defer releaseLock(...)` here, which released
	// the lock the moment this function returned — before the caller could do
	// any work under it. Releasing is the caller's responsibility.
	return true
}
// releaseLock releases the distributed lock identified by key, but only if it
// is still held by this client (the stored value matches). The compare-and-
// delete runs atomically in a Lua script so that a lock which expired and was
// re-acquired by another client is never deleted by mistake.
func releaseLock(ctx context.Context, rdb *redis.Client, key string, value string) {
	// GET followed by DEL as two separate commands would race with expiry
	// plus re-acquisition by another client; EVAL makes the pair atomic.
	luaScript := `
if redis.call("get", KEYS[1]) == ARGV[1] then
	return redis.call("del", KEYS[1])
else
	return 0
end`
	result, err := rdb.Eval(ctx, luaScript, []string{key}, value).Result()
	if err != nil {
		fmt.Println("Error releasing lock:", err)
		return
	}
	// EVAL returns int64(1) when the key was deleted. Use the checked
	// two-value assertion so an unexpected reply type cannot panic.
	if n, ok := result.(int64); ok && n == 1 {
		fmt.Println("Lock released")
	} else {
		fmt.Println("Lock not held, cannot release")
	}
}
How do I avoid rehashing overhead with std::set in multithreaded code?
How do I find elements with custom comparators with std::set for embedded targets?
How do I erase elements while iterating with std::set for embedded targets?
How do I provide stable iteration order with std::unordered_map for large datasets?
How do I reserve capacity ahead of time with std::unordered_map for large datasets?
How do I erase elements while iterating with std::unordered_map in multithreaded code?
How do I provide stable iteration order with std::map for embedded targets?
How do I provide stable iteration order with std::map in multithreaded code?
How do I avoid rehashing overhead with std::map in performance-sensitive code?
How do I merge two containers efficiently with std::map for embedded targets?