A generic ring buffer in Go manages a fixed-size collection of items: once the buffer reaches capacity, newly pushed items overwrite the oldest ones. This makes it well suited to bounded FIFO queues and recent-history buffers (for example, keeping the last N log lines). Note that the implementation below is not safe for concurrent use without external synchronization.
package main
import "fmt"
// RingBuffer is a fixed-capacity FIFO buffer over elements of type T.
// When the buffer is full, Push overwrites the oldest element.
// It is not safe for concurrent use without external synchronization.
type RingBuffer[T any] struct {
	buffer []T // backing storage; len(buffer) == size
	head   int // index of the oldest element (next Pop reads here)
	tail   int // index where the next Push writes
	size   int // fixed capacity
	count  int // number of elements currently stored
}

// New creates a RingBuffer that holds up to size elements.
// It panics if size is not positive: a zero size would otherwise cause a
// divide-by-zero in the modular index arithmetic of Push and Pop.
func New[T any](size int) *RingBuffer[T] {
	if size <= 0 {
		panic("ringbuffer: size must be positive")
	}
	return &RingBuffer[T]{
		buffer: make([]T, size),
		size:   size,
	}
}

// Push appends item to the buffer. If the buffer is already full, the
// oldest element is dropped (head advances) so that Pop still returns
// the remaining elements in insertion order.
func (rb *RingBuffer[T]) Push(item T) {
	rb.buffer[rb.tail] = item
	rb.tail = (rb.tail + 1) % rb.size
	if rb.count == rb.size {
		rb.head = (rb.head + 1) % rb.size // full: overwrite the oldest element
	} else {
		rb.count++
	}
}

// Pop removes and returns the oldest element. The second return value
// is false when the buffer is empty, in which case the first value is
// the zero value of T.
func (rb *RingBuffer[T]) Pop() (T, bool) {
	var zero T
	if rb.count == 0 {
		return zero, false // empty buffer
	}
	item := rb.buffer[rb.head]
	rb.buffer[rb.head] = zero // clear the slot so T's references can be GC'd
	rb.head = (rb.head + 1) % rb.size
	rb.count--
	return item, true
}
// main demonstrates the overwrite behavior of a capacity-3 ring buffer:
// pushing a fourth value evicts the first, so draining prints 2, 3, 4.
func main() {
	buf := New[int](3)
	for _, v := range []int{1, 2, 3, 4} {
		buf.Push(v) // the push of 4 overwrites 1 once capacity is reached
	}
	for n := 0; n < 3; n++ {
		item, ok := buf.Pop()
		if !ok {
			fmt.Println("Buffer is empty")
			continue
		}
		fmt.Println(item) // Outputs: 2, 3, 4
	}
}
How do I avoid rehashing overhead with std::set in multithreaded code?
How do I find elements with custom comparators with std::set for embedded targets?
How do I erase elements while iterating with std::set for embedded targets?
How do I provide stable iteration order with std::unordered_map for large datasets?
How do I reserve capacity ahead of time with std::unordered_map for large datasets?
How do I erase elements while iterating with std::unordered_map in multithreaded code?
How do I provide stable iteration order with std::map for embedded targets?
How do I provide stable iteration order with std::map in multithreaded code?
How do I avoid rehashing overhead with std::map in performance-sensitive code?
How do I merge two containers efficiently with std::map for embedded targets?