In Swift, you can implement a generic Least Recently Used (LRU) cache by combining a dictionary for constant-time key lookup with a doubly linked list that maintains usage order. The class is generic over any Hashable key type and any value type. Below is a simple implementation.
        import Foundation

        final class LRUCache<Key: Hashable, Value> {
            private final class Node {
                // key and value are optional so the dummy head/tail
                // sentinels can be created without real data.
                var key: Key?
                var value: Value?
                // prev is weak: with strong references in both directions,
                // neighboring nodes would form retain cycles.
                weak var prev: Node?
                var next: Node?
                init() {} // sentinel node
                init(key: Key, value: Value) {
                    self.key = key
                    self.value = value
                }
            }

            private let capacity: Int
            private var cache: [Key: Node] = [:]
            private let head = Node() // dummy head (most recently used end)
            private let tail = Node() // dummy tail (least recently used end)

            init(capacity: Int) {
                precondition(capacity > 0, "capacity must be at least 1")
                self.capacity = capacity
                head.next = tail
                tail.prev = head
            }

            func get(key: Key) -> Value? {
                guard let node = cache[key] else {
                    return nil
                }
                moveToHead(node)
                return node.value
            }

            func put(key: Key, value: Value) {
                if let node = cache[key] {
                    node.value = value
                    moveToHead(node)
                } else {
                    let newNode = Node(key: key, value: value)
                    cache[key] = newNode
                    addNode(newNode)
                    if cache.count > capacity {
                        let tailNode = removeTail()
                        if let evictedKey = tailNode.key {
                            cache[evictedKey] = nil
                        }
                    }
                }
            }

            // Insert a node right after the dummy head.
            private func addNode(_ node: Node) {
                node.prev = head
                node.next = head.next
                head.next?.prev = node
                head.next = node
            }

            // Unlink a node from the list.
            private func removeNode(_ node: Node) {
                node.prev?.next = node.next
                node.next?.prev = node.prev
                node.prev = nil
                node.next = nil
            }

            private func moveToHead(_ node: Node) {
                removeNode(node)
                addNode(node)
            }

            // Remove and return the least recently used node
            // (the one just before the dummy tail).
            private func removeTail() -> Node {
                let res = tail.prev! // safe: capacity >= 1 guarantees a real node here
                removeNode(res)
                return res
            }
        }
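
Both get and put run in O(1): the dictionary provides constant-time lookup by key, and the dummy head/tail sentinels mean every list splice is a constant-time pointer update with no special cases for an empty list. The sentinels hold no real data, which is why Node's key and value are optional in the generic version.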
        
        // Example usage
        let lruCache = LRUCache<String, Int>(capacity: 2)
        lruCache.put(key: "a", value: 1)
        lruCache.put(key: "b", value: 2)
        print(lruCache.get(key: "a") as Any) // Optional(1); "a" is now most recently used
        lruCache.put(key: "c", value: 3)     // evicts "b", the least recently used key
        print(lruCache.get(key: "b") as Any) // nil (evicted)
        lruCache.put(key: "d", value: 4)     // evicts "a"
        print(lruCache.get(key: "a") as Any) // nil (evicted)
        print(lruCache.get(key: "c") as Any) // Optional(3)
        print(lruCache.get(key: "d") as Any) // Optional(4)
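
Note that this class is not thread-safe; even get mutates the list when it moves a node to the head. If you need to share one cache across threads, a minimal sketch along the lines below serializes every operation behind an NSLock (the ThreadSafeLRUCache wrapper name is an illustrative choice, not a standard type):

        import Foundation

        // A minimal thread-safety sketch, assuming coarse-grained locking
        // is acceptable. NSLock comes from Foundation.
        final class ThreadSafeLRUCache<Key: Hashable, Value> {
            private let lock = NSLock()
            private let cache: LRUCache<Key, Value>

            init(capacity: Int) {
                self.cache = LRUCache(capacity: capacity)
            }

            func get(key: Key) -> Value? {
                lock.lock()
                defer { lock.unlock() }
                return cache.get(key: key)
            }

            func put(key: Key, value: Value) {
                lock.lock()
                defer { lock.unlock() }
                cache.put(key: key, value: value)
            }
        }

A single coarse lock is the simplest correct option here. A reader/writer scheme (for example, a concurrent DispatchQueue with barrier writes) would not buy much, because reads also mutate the recency order.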