A binary search tree (BST) is a data structure that maintains sorted order and allows for efficient insertion, deletion, and lookup operations. Below is an implementation of a binary search tree in Swift, along with methods for inserting and searching for elements.
/// A single node of a binary search tree.
///
/// Holds one `Int` payload plus optional references to a left and a
/// right child; children are `nil` until a value is attached by the tree.
class Node {
    /// The value stored at this node.
    var value: Int
    /// Child holding values smaller than `value` (set by the tree).
    var left: Node?
    /// Child holding values greater than or equal to `value` (set by the tree).
    var right: Node?

    /// Creates a leaf node (both children `nil`) carrying `value`.
    init(value: Int) {
        self.value = value
    }
}
/// A binary search tree of `Int` values.
///
/// Ordering invariant: values smaller than a node live in its left
/// subtree; values greater than *or equal to* it live in the right
/// subtree (duplicates are kept, matching the insertion rule of the
/// original implementation).
class BinarySearchTree {
    /// The topmost node, or `nil` while the tree is empty.
    var root: Node?

    /// Inserts `value` into the tree.
    ///
    /// Walks down from the root iteratively, going left for smaller
    /// values and right otherwise, and attaches a new leaf at the
    /// first empty slot. An empty tree makes the new node the root.
    func insert(value: Int) {
        let node = Node(value: value)
        guard let start = root else {
            root = node
            return
        }
        var current = start
        while true {
            if node.value < current.value {
                guard let next = current.left else {
                    current.left = node
                    return
                }
                current = next
            } else {
                // Equal values fall through here, so duplicates go right.
                guard let next = current.right else {
                    current.right = node
                    return
                }
                current = next
            }
        }
    }

    /// Returns `true` if `value` is present anywhere in the tree.
    ///
    /// Iterative descent: at each node, branch left or right according
    /// to the ordering invariant until the value is found or a `nil`
    /// child is reached.
    func search(value: Int) -> Bool {
        var current = root
        while let node = current {
            if value < node.value {
                current = node.left
            } else if value > node.value {
                current = node.right
            } else {
                return true // found
            }
        }
        return false
    }
}
// Usage: build a small tree and probe it for a present and an absent value.
let bst = BinarySearchTree()
for value in [5, 3, 8] {
    bst.insert(value: value)
}
print(bst.search(value: 3)) // true
print(bst.search(value: 6)) // false
How do I avoid rehashing overhead with std::unordered_set in multithreaded code? (Note: std::set is tree-based and never rehashes; rehashing only applies to the unordered, hash-based containers.)
How do I find elements with custom comparators with std::set for embedded targets?
How do I erase elements while iterating with std::set for embedded targets?
How do I provide stable iteration order with std::unordered_map for large datasets?
How do I reserve capacity ahead of time with std::unordered_map for large datasets?
How do I erase elements while iterating with std::unordered_map in multithreaded code?
How do I provide stable iteration order with std::map for embedded targets?
How do I provide stable iteration order with std::map in multithreaded code?
How do I avoid rehashing overhead with std::unordered_map in performance-sensitive code? (Note: std::map is tree-based and never rehashes; rehashing only applies to the unordered, hash-based containers.)
How do I merge two containers efficiently with std::map for embedded targets?