#pragma once

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

// Fixed-size thread pool for parallel operations.
// Minimizes thread creation overhead for batch operations.
class ThreadPool {
    std::vector<std::thread> workers_;
    std::queue<std::function<void()>> tasks_;
    std::mutex queue_mutex_;              // guards tasks_ and stop_
    std::condition_variable condition_;   // signals workers when work arrives
    bool stop_ = false;                   // set by destructor to drain workers

public:
    explicit ThreadPool(size_t num_threads);
    ~ThreadPool();

    // Add task to queue. Thread-safe.
    void enqueue(std::function<void()> task);

    // Wait for all queued tasks to complete
    void wait_all();

    // Get optimal thread count (capped at 8 for I/O bound work)
    static uint32_t default_thread_count();
};

// Synchronization primitive: wait for N completions.
// Construct with the number of expected arrive() calls; wait() blocks
// until that many have happened. Single-use (not resettable).
class CompletionLatch {
    std::atomic<size_t> count_{0};  // remaining arrivals
    std::mutex mutex_;
    std::condition_variable cv_;

public:
    explicit CompletionLatch(size_t total) : count_(total) {}

    // Record one completion. Thread-safe. The final arriver acquires the
    // mutex before notifying so the notification cannot be lost between a
    // waiter's predicate check and its block on the condition variable.
    // NOTE(review): as with most hand-rolled latches, destroying the latch
    // immediately after wait() returns can race with the tail of the final
    // arrive(); callers should keep the latch alive past all arrive() calls.
    void arrive() {
        if (--count_ == 0) {
            std::lock_guard<std::mutex> lock(mutex_);
            cv_.notify_all();
        }
    }

    // Block until all `total` completions have arrived. Returns immediately
    // if the latch was constructed with total == 0.
    void wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [&] { return count_.load() == 0; });
    }
};