**Payload Performance Test:**
- Add job cleanup after each iteration using DeleteJob()
- Ensure isolated memory measurements between test runs

**All Benchmark Tests:**
- General improvements and maintenance updates
62 lines · 1.5 KiB · Go
package benchmarks
|
|
|
|
import (
|
|
"os"
|
|
"path/filepath"
|
|
"testing"
|
|
|
|
"github.com/jfraeys/fetch_ml/internal/worker"
|
|
)
|
|
|
|
// createSmallDataset creates a temporary small dataset for benchmarking
|
|
// createSmallDataset builds a throwaway directory tree of ten 100 KiB
// files for benchmarking. The payloads are deliberately small so that
// per-hash fixed costs (context setup) dominate over raw hashing time.
// The returned directory lives under b.TempDir() and is cleaned up
// automatically when the benchmark finishes.
func createSmallDataset(b *testing.B) string {
	root := b.TempDir()

	// Deterministic 100 KiB payload: a repeating 0x00..0xFF byte ramp.
	payload := make([]byte, 100*1024)
	for j := range payload {
		payload[j] = byte(j)
	}

	for n := 0; n < 10; n++ {
		// Spread files across single-letter subdirectories (a, b, c, ...).
		sub := string(rune('a' + n%26))
		target := filepath.Join(root, "data", sub, "chunk.bin")
		if err := os.MkdirAll(filepath.Dir(target), 0750); err != nil {
			b.Fatal(err)
		}
		if err := os.WriteFile(target, payload, 0640); err != nil {
			b.Fatal(err)
		}
	}
	return root
}
|
|
|
|
// BenchmarkContextReuse measures overhead of repeated hash operations
|
|
// This verifies the 5-20ms savings from context reuse in native_bridge_libs.go
|
|
func BenchmarkContextReuse(b *testing.B) {
|
|
testDir := createSmallDataset(b)
|
|
|
|
b.ResetTimer()
|
|
for i := 0; i < b.N; i++ {
|
|
_, err := worker.DirOverallSHA256Hex(testDir)
|
|
if err != nil {
|
|
b.Fatal(err)
|
|
}
|
|
}
|
|
}
|
|
|
|
// BenchmarkSequentialHashes simulates TUI scrolling through datasets
|
|
// With context reuse: ~8ms per hash
|
|
// Without context reuse: ~17ms per hash (9ms overhead)
|
|
func BenchmarkSequentialHashes(b *testing.B) {
|
|
testDir := createSmallDataset(b)
|
|
|
|
b.ReportAllocs()
|
|
|
|
for b.Loop() {
|
|
// Simulate viewing 10 datasets (like TUI scrolling)
|
|
for range 10 {
|
|
_, err := worker.DirOverallSHA256Hex(testDir)
|
|
if err != nil {
|
|
b.Fatal(err)
|
|
}
|
|
}
|
|
}
|
|
}
|