fetch_ml/tests/testreport/reporter.go
Jeremie Fraeys 6af85ddaf6
feat(tests): enable stress and long-running test suites
Stress Tests:
- TestStress_WorkerConnectBurst: 30 workers, p99 latency validation
- TestStress_JobSubmissionBurst: 1K job submissions
- TestStress_WorkerChurn: 50 connect/disconnect cycles, memory leak detection
- TestStress_ConcurrentScheduling: 10 workers x 20 jobs contention

Long-Running Tests:
- TestLongRunning_MemoryLeak: heap growth monitoring
- TestLongRunning_OrphanRecovery: worker death/requeue stability
- TestLongRunning_WebSocketStability: 20 worker connection stability

Infrastructure:
- Add testreport package with JSON output, flaky test tracking
- Add TestTimer for timing/budget enforcement
- Add WaitForEvent, WaitForTaskStatus helpers
- Fix worker IDs to use valid bench-worker token patterns
2026-03-12 14:05:45 -04:00

278 lines
6.4 KiB
Go

// Package testreport provides structured test reporting and output
package testreport
import (
	"encoding/json"
	"fmt"
	"os"
	"sort"
	"strings"
	"testing"
	"time"
)
// TestResult represents the outcome of a single test, including timing,
// captured output, and any error text recorded at completion.
type TestResult struct {
Name string `json:"name"`
Package string `json:"package"`
Status string `json:"status"` // pass, fail, skip ("running" while in progress)
Duration time.Duration `json:"duration"` // EndTime - StartTime; encodes as integer nanoseconds in JSON
Output string `json:"output,omitempty"` // output accumulated via RecordOutput, newline-terminated per call
Error string `json:"error,omitempty"` // set when EndTest is given a non-nil error
StartTime time.Time `json:"start_time"`
EndTime time.Time `json:"end_time"`
}
// TestSuite represents a collection of test results from one suite run.
type TestSuite struct {
Name string `json:"name"`
Package string `json:"package"`
StartTime time.Time `json:"start_time"` // set when the Reporter is created
EndTime time.Time `json:"end_time"` // stamped by ToJSON at export time
Tests []TestResult `json:"tests"` // completed tests, in EndTest order
}
// Summary provides aggregate statistics over a suite's recorded tests.
type Summary struct {
Total int `json:"total"` // number of recorded test results
Passed int `json:"passed"`
Failed int `json:"failed"`
Skipped int `json:"skipped"`
Duration time.Duration `json:"duration"` // aggregate duration (see Reporter.Summary)
}
// Reporter handles test reporting for a single suite.
// It guards no state with locks, so it is not safe for concurrent use
// by multiple goroutines.
type Reporter struct {
suite TestSuite // accumulated suite metadata and completed results
current *TestResult // the test in progress; nil between tests
testMap map[string]*TestResult // every started test, keyed by name
}
// NewReporter creates a test reporter for the named suite and package,
// stamping the suite start time at the moment of creation.
func NewReporter(name, pkg string) *Reporter {
	suite := TestSuite{
		Name:      name,
		Package:   pkg,
		StartTime: time.Now(),
		Tests:     []TestResult{},
	}
	return &Reporter{
		suite:   suite,
		testMap: make(map[string]*TestResult),
	}
}
// StartTest records the start of a test, marking it as the current test
// and tracking it by name for later lookup in EndTest.
func (r *Reporter) StartTest(name string) {
	entry := &TestResult{
		Name:      name,
		Package:   r.suite.Package,
		StartTime: time.Now(),
		Status:    "running",
	}
	r.testMap[name] = entry
	r.current = entry
}
// EndTest records the end of a test: it stamps the end time and duration,
// sets the final status, captures any error text, and appends a copy of
// the result to the suite. Unknown test names are silently ignored.
func (r *Reporter) EndTest(name string, status string, err error) {
	// If the current test is not the one being ended, fall back to the
	// tracked entry for this name (which may be nil for unknown names).
	if r.current == nil || r.current.Name != name {
		r.current = r.testMap[name]
	}
	entry := r.current
	if entry == nil {
		return
	}
	now := time.Now()
	entry.EndTime = now
	entry.Duration = now.Sub(entry.StartTime)
	entry.Status = status
	if err != nil {
		entry.Error = err.Error()
	}
	// Append a value copy; later mutations of the entry do not affect it.
	r.suite.Tests = append(r.suite.Tests, *entry)
	r.current = nil
}
// RecordOutput appends a newline-terminated chunk of output to the
// currently running test. It is a no-op when no test is in progress.
func (r *Reporter) RecordOutput(output string) {
	if r.current == nil {
		return
	}
	r.current.Output += output + "\n"
}
// Summary generates aggregate statistics over the completed tests.
// The returned Duration is the sum of individual test durations.
func (r *Reporter) Summary() Summary {
	s := Summary{
		Total: len(r.suite.Tests),
	}
	for _, t := range r.suite.Tests {
		switch t.Status {
		case "pass":
			s.Passed++
		case "fail":
			s.Failed++
		case "skip":
			s.Skipped++
		}
		// Fix: Summary.Duration was declared (and JSON-tagged) but never
		// populated, so reports always showed a zero duration.
		s.Duration += t.Duration
	}
	return s
}
// ToJSON stamps the suite end time and exports the suite as indented JSON.
func (r *Reporter) ToJSON() ([]byte, error) {
	r.suite.EndTime = time.Now()
	data, err := json.MarshalIndent(r.suite, "", " ")
	return data, err
}
// SaveToFile writes the JSON test report to path (mode 0644).
func (r *Reporter) SaveToFile(path string) error {
	report, err := r.ToJSON()
	if err != nil {
		return err
	}
	return os.WriteFile(path, report, 0644)
}
// ReportToEnv writes the report to the path named by the TEST_REPORT_PATH
// environment variable, if set, and logs the outcome to stderr for CI.
func (r *Reporter) ReportToEnv() {
	path := os.Getenv("TEST_REPORT_PATH")
	if path == "" {
		return
	}
	// Fix: the SaveToFile error was previously discarded, so CI could end
	// up with no report file and no diagnostic explaining why.
	if err := r.SaveToFile(path); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to save test report to %s: %v\n", path, err)
		return
	}
	fmt.Fprintf(os.Stderr, "Test report saved to: %s\n", path)
}
// FlakyTestTracker tracks potentially flaky tests across repeated runs.
type FlakyTestTracker struct {
	runs map[string][]bool // test name -> pass/fail history, in recording order
}

// NewFlakyTestTracker creates a new flaky test tracker with no history.
func NewFlakyTestTracker() *FlakyTestTracker {
	return &FlakyTestTracker{
		runs: make(map[string][]bool),
	}
}

// RecordResult records the outcome of one run of the named test.
func (ft *FlakyTestTracker) RecordResult(name string, passed bool) {
	ft.runs[name] = append(ft.runs[name], passed)
}

// IsFlaky returns true if a test has inconsistent results. At least
// three recorded runs are required before a verdict is made.
func (ft *FlakyTestTracker) IsFlaky(name string) bool {
	runs := ft.runs[name]
	if len(runs) < 3 {
		return false // not enough data to judge
	}
	passed, failed := 0, 0
	for _, ok := range runs {
		if ok {
			passed++
		} else {
			failed++
		}
	}
	// Flaky means both outcomes were observed.
	return passed > 0 && failed > 0
}

// GetFlakyTests returns all tests that appear flaky, sorted by name.
// Fix: results were previously returned in map-iteration order, which is
// randomized in Go, making Report output nondeterministic across runs.
func (ft *FlakyTestTracker) GetFlakyTests() []string {
	var flaky []string
	for name := range ft.runs {
		if ft.IsFlaky(name) {
			flaky = append(flaky, name)
		}
	}
	sort.Strings(flaky)
	return flaky
}

// Report generates a human-readable flaky test report listing each flaky
// test with its pass count and pass rate.
func (ft *FlakyTestTracker) Report() string {
	flaky := ft.GetFlakyTests()
	if len(flaky) == 0 {
		return "No flaky tests detected"
	}
	var report strings.Builder
	report.WriteString("Potentially Flaky Tests:\n")
	for _, name := range flaky {
		runs := ft.runs[name]
		passed := 0
		for _, ok := range runs {
			if ok {
				passed++
			}
		}
		fmt.Fprintf(&report, " - %s: %d/%d passed (%.1f%%)\n",
			name, passed, len(runs), float64(passed)*100/float64(len(runs)))
	}
	return report.String()
}
// TestTimer provides timing utilities for tests.
type TestTimer struct {
	start    time.Time
	duration time.Duration // NOTE(review): not written by any method in view — confirm external use
}

// NewTestTimer creates a timer anchored at the current time.
func NewTestTimer() *TestTimer {
	return &TestTimer{start: time.Now()}
}

// Elapsed reports how much time has passed since the timer was created.
func (tt *TestTimer) Elapsed() time.Duration {
	return time.Since(tt.start)
}

// CheckBudget reports whether the test is still within its time budget,
// logging a warning via t when the budget has been exceeded.
func (tt *TestTimer) CheckBudget(budget time.Duration, t *testing.T) bool {
	if elapsed := tt.Elapsed(); elapsed > budget {
		t.Logf("WARNING: Test exceeded time budget: %v > %v", elapsed, budget)
		return false
	}
	return true
}
// PerformanceRegression tracks performance metrics across runs so that a
// new measurement can be compared against the historical average.
type PerformanceRegression struct {
	metrics map[string][]float64 // metric name -> recorded values
}

// NewPerformanceRegression creates a tracker with no recorded metrics.
func NewPerformanceRegression() *PerformanceRegression {
	return &PerformanceRegression{metrics: make(map[string][]float64)}
}

// Record appends a new observation for the named metric.
func (pr *PerformanceRegression) Record(name string, value float64) {
	pr.metrics[name] = append(pr.metrics[name], value)
}

// CheckRegression reports whether current is worse than threshold times
// the average of previously recorded values. It returns false when fewer
// than three samples exist, since the baseline would be unreliable.
func (pr *PerformanceRegression) CheckRegression(name string, current float64, threshold float64) bool {
	history := pr.metrics[name]
	if len(history) < 3 {
		return false // not enough data for a baseline
	}
	var total float64
	for _, v := range history {
		total += v
	}
	baseline := total / float64(len(history))
	return current > baseline*threshold
}