In Go, a generic heap can be implemented with type parameters (Go 1.18+) together with the standard `container/heap` package. Below is a generic priority queue whose item type is constrained by the built-in `comparable` constraint and which implements `heap.Interface`.
package main
import (
"container/heap"
"fmt"
)
// An Item is something we manage in a priority queue.
type Item[T comparable] struct {
value T // The value of the item; arbitrary.
priority int // The priority of the item in the queue.
}
// A PriorityQueue implements heap.Interface and holds Items.
type PriorityQueue[T comparable] []*Item[T]
func (pq PriorityQueue[T]) Len() int { return len(pq) }
func (pq PriorityQueue[T]) Less(i, j int) bool {
return pq[i].priority > pq[j].priority // Higher priority items first
}
func (pq PriorityQueue[T]) Swap(i, j int) {
pq[i], pq[j] = pq[j], pq[i]
}
func (pq *PriorityQueue[T]) Push(x interface{}) {
item := x.(*Item[T])
*pq = append(*pq, item)
}
func (pq *PriorityQueue[T]) Pop() interface{} {
old := *pq
n := len(old)
item := old[n-1]
*pq = old[0 : n-1]
return item
}
func main() {
	// Build a max-priority queue of ints and seed it with a few entries.
	queue := &PriorityQueue[int]{}
	heap.Init(queue)

	entries := []*Item[int]{
		{value: 1, priority: 3},
		{value: 2, priority: 4},
		{value: 3, priority: 2},
	}
	for _, e := range entries {
		heap.Push(queue, e)
	}

	// Drain the queue; items emerge in descending priority order.
	for queue.Len() > 0 {
		next := heap.Pop(queue).(*Item[int])
		fmt.Printf("Value: %v, Priority: %d\n", next.value, next.priority)
	}
}
How do I avoid rehashing overhead with std::unordered_set in multithreaded code?
How do I find elements with custom comparators with std::set for embedded targets?
How do I erase elements while iterating with std::set for embedded targets?
How do I provide stable iteration order with std::unordered_map for large datasets?
How do I reserve capacity ahead of time with std::unordered_map for large datasets?
How do I erase elements while iterating with std::unordered_map in multithreaded code?
How do I provide stable iteration order with std::map for embedded targets?
How do I provide stable iteration order with std::map in multithreaded code?
How do I avoid rehashing overhead with std::unordered_map in performance-sensitive code?
How do I merge two containers efficiently with std::map for embedded targets?