// Swift implementation of a Bloom Filter
import Foundation
/// A probabilistic set-membership filter.
///
/// `contains(_:)` never returns a false negative: if it returns `false`, the
/// element was definitely never added. It may return a false positive.
class BloomFilter {
    /// Bit storage; a slot becomes `true` once any added element hashes to it.
    private var bitArray: [Bool]
    /// Number of slots in `bitArray`.
    private let size: Int
    /// Independent hash functions; each may return any `Int`, including
    /// negative values — results are reduced to a valid slot index.
    private let hashFunctions: [(String) -> Int]

    /// Creates a filter with `size` slots.
    /// - Parameters:
    ///   - size: Number of bit slots; must be positive.
    ///   - hashFunctions: Hash functions applied on every add/lookup.
    init(size: Int, hashFunctions: [(String) -> Int]) {
        // Guard early: size == 0 would otherwise trap obscurely on `% 0`.
        precondition(size > 0, "Bloom filter size must be positive")
        self.size = size
        self.bitArray = Array(repeating: false, count: size)
        self.hashFunctions = hashFunctions
    }

    /// Maps an arbitrary hash value into 0..<size.
    ///
    /// Swift's `%` keeps the dividend's sign, so a hash function returning a
    /// negative Int would otherwise yield a negative index and trap on the
    /// array subscript.
    private func slot(for element: String, using hashFunc: (String) -> Int) -> Int {
        let remainder = hashFunc(element) % size
        return remainder >= 0 ? remainder : remainder + size
    }

    /// Records `element` in the filter.
    func add(_ element: String) {
        for hashFunc in hashFunctions {
            bitArray[slot(for: element, using: hashFunc)] = true
        }
    }

    /// Returns `false` if `element` was definitely never added; `true` means
    /// "probably present" (false positives possible, false negatives not).
    func contains(_ element: String) -> Bool {
        for hashFunc in hashFunctions {
            if !bitArray[slot(for: element, using: hashFunc)] {
                return false
            }
        }
        return true
    }
}
// Example hash functions
/// Additive hash: sums the Unicode scalar values of `input`.
///
/// For pure-ASCII strings this matches the previous `asciiValue`-based sum;
/// non-ASCII characters now contribute their scalar value instead of
/// collapsing to 0. Wrapping addition (`&+`) avoids the overflow trap that
/// `+` could hit on extremely long inputs.
/// - Returns: 0 for the empty string.
func simpleHash(_ input: String) -> Int {
    return input.unicodeScalars.reduce(0) { $0 &+ Int($1.value) }
}
/// Multiplicative hash: product of Unicode scalar values, seeded with 1.
///
/// Fixes two defects of the `asciiValue`-based original: a non-ASCII
/// character no longer multiplies the accumulator by 0 (which permanently
/// zeroed the hash), and wrapping multiplication (`&*`) prevents the runtime
/// overflow trap that `*` hits once the product exceeds `Int.max` — which
/// happens after only ~9-10 ASCII characters.
/// - Returns: 1 for the empty string.
func alternativeHash(_ input: String) -> Int {
    return input.unicodeScalars.reduce(1) { $0 &* Int($1.value) }
}
// MARK: - Usage demonstration
let bloomFilter = BloomFilter(size: 1000, hashFunctions: [simpleHash, alternativeHash])
let knownEmail = "example@example.com"
bloomFilter.add(knownEmail)
print(bloomFilter.contains(knownEmail)) // true — this element was added
print(bloomFilter.contains("test@test.com")) // false (probabilistically; false positives are possible)
How do I avoid rehashing overhead with std::set in multithreaded code?
How do I find elements with custom comparators with std::set for embedded targets?
How do I erase elements while iterating with std::set for embedded targets?
How do I provide stable iteration order with std::unordered_map for large datasets?
How do I reserve capacity ahead of time with std::unordered_map for large datasets?
How do I erase elements while iterating with std::unordered_map in multithreaded code?
How do I provide stable iteration order with std::map for embedded targets?
How do I provide stable iteration order with std::map in multithreaded code?
How do I avoid rehashing overhead with std::map in performance-sensitive code?
How do I merge two containers efficiently with std::map for embedded targets?