#include <barrier>
#include <chrono>
#include <iostream>
#include <latch>
#include <thread>
void worker(int id, std::latch &l, std::barrier &b) {
std::cout << "Worker " << id << " is doing work...\n";
std::this_thread::sleep_for(std::chrono::seconds(1)); // Simulate work
l.count_down(); // Signal that this worker is done
b.arrive_and_wait(); // Wait for all workers at the barrier
std::cout << "Worker " << id << " has crossed the barrier.\n";
}
int main() {
const int num_workers = 3;
std::latch latch(num_workers); // Initialize latch with 3
std::barrier b(num_workers); // Initialize barrier with 3
std::thread workers[num_workers];
for (int i = 0; i < num_workers; ++i) {
workers[i] = std::thread(worker, i, std::ref(latch), std::ref(b));
}
latch.wait(); // Wait for all workers to finish their work
std::cout << "All workers are done with their work.\n";
for (auto &w : workers) {
w.join(); // Join all worker threads
}
return 0;
}
How do I avoid rehashing overhead with std::unordered_set in multithreaded code? (std::set is tree-based and never rehashes.)
How do I find elements with custom comparators with std::set for embedded targets?
How do I erase elements while iterating with std::set for embedded targets?
How do I provide stable iteration order with std::unordered_map for large datasets?
How do I reserve capacity ahead of time with std::unordered_map for large datasets?
How do I erase elements while iterating with std::unordered_map in multithreaded code?
How do I provide stable iteration order with std::map for embedded targets?
How do I provide stable iteration order with std::map in multithreaded code?
How do I avoid rehashing overhead with std::unordered_map in performance-sensitive code? (std::map is tree-based and never rehashes.)
How do I merge two containers efficiently with std::map for embedded targets?