Security Fixes: - CVE-2024-45339: Add O_EXCL flag to temp file creation in storage_write_entries() Prevents symlink attacks on predictable .tmp file paths - CVE-2025-47290: Use openat_nofollow() in storage_open() Closes TOCTOU race condition via path_sanitizer infrastructure - CVE-2025-0838: Add MAX_BATCH_SIZE=10000 to add_tasks() Prevents integer overflow in batch operations Research Trustworthiness (dataset_hash): - Deterministic file ordering: std::sort after collect_files() - Recursive directory traversal: depth-limited with cycle detection - Documented exclusions: hidden files and special files noted in API Bug Fixes: - R1: storage_init path validation for non-existent directories - R2: safe_strncpy return value check before strcat - R3: parallel_hash 256-file cap replaced with std::vector - R4: wire qi_compact_index/qi_rebuild_index stubs - R5: CompletionLatch race condition fix (hold mutex during decrement) - R6: ARMv8 SHA256 transform fix (save abcd_pre before vsha256hq_u32) - R7: fuzz_index_storage header format fix - R8: enforce null termination in add_tasks/update_tasks - R9: use 64 bytes (not 65) in combined hash to exclude null terminator - R10: status field persistence in save() New Tests: - test_recursive_dataset.cpp: Verify deterministic recursive hashing - test_storage_symlink_resistance.cpp: Verify CVE-2024-45339 fix - test_queue_index_batch_limit.cpp: Verify CVE-2025-0838 fix - test_sha256_arm_kat.cpp: ARMv8 known-answer tests - test_storage_init_new_dir.cpp: R1 verification - test_parallel_hash_large_dir.cpp: R3 verification - test_queue_index_compact.cpp: R4 verification All 8 native tests passing. Library ready for research lab deployment.
56 lines
1.6 KiB
C++
56 lines
1.6 KiB
C++
#include "thread_pool.h"
|
|
|
|
ThreadPool::ThreadPool(size_t num_threads) {
    // Launch the worker threads. Each worker repeatedly takes one task off
    // the shared queue and runs it, until shutdown is requested AND the
    // queue has been fully drained.
    for (size_t worker_idx = 0; worker_idx < num_threads; ++worker_idx) {
        workers_.emplace_back([this] {
            while (true) {
                std::function<void()> job;
                {
                    std::unique_lock<std::mutex> lock(queue_mutex_);
                    // Sleep until there is work to do or stop was requested.
                    condition_.wait(lock, [this] { return stop_ || !tasks_.empty(); });
                    // Exit only once the pool is stopping and no work remains.
                    if (stop_ && tasks_.empty()) {
                        return;
                    }
                    job = std::move(tasks_.front());
                    tasks_.pop();
                    // Count the task as in-flight while still holding the
                    // mutex, so wait_all() can never observe a moment where
                    // the queue is empty but the popped task is uncounted.
                    ++active_tasks_;
                }
                // Run the task outside the lock so other workers proceed.
                job();
                {
                    std::lock_guard<std::mutex> lock(queue_mutex_);
                    --active_tasks_;
                }
                // Wake any wait_all() callers; notify after unlocking so the
                // woken thread does not immediately block on the mutex.
                done_condition_.notify_all();
            }
        });
    }
}
|
|
|
|
ThreadPool::~ThreadPool() {
    // Request shutdown under the lock so no sleeping worker can miss the
    // flag, then wake every worker and wait for each thread to finish.
    {
        std::lock_guard<std::mutex> lock(queue_mutex_);
        stop_ = true;
    }
    condition_.notify_all();
    // Workers return only after the queue is drained, so join() also waits
    // for any tasks that were still pending at destruction time.
    for (auto& thread : workers_) {
        thread.join();
    }
}
|
|
|
|
void ThreadPool::enqueue(std::function<void()> task) {
|
|
{
|
|
std::unique_lock<std::mutex> lock(queue_mutex_);
|
|
tasks_.emplace(std::move(task));
|
|
}
|
|
condition_.notify_one();
|
|
}
|
|
|
|
void ThreadPool::wait_all() {
|
|
std::unique_lock<std::mutex> lock(queue_mutex_);
|
|
// Wait for both queue empty AND all active tasks completed
|
|
done_condition_.wait(lock, [this] { return tasks_.empty() && active_tasks_.load() == 0; });
|
|
}
|
|
|
|
uint32_t ThreadPool::default_thread_count() {
    // Probe the hardware; hardware_concurrency() is allowed to report 0
    // when the count is not computable, so fall back to a sane default.
    const uint32_t detected = std::thread::hardware_concurrency();
    uint32_t count = (detected == 0) ? 4u : detected;
    // Cap the pool size at 8 threads regardless of core count.
    if (count > 8u) {
        count = 8u;
    }
    return count;
}
|