Stress Tests:
- TestStress_WorkerConnectBurst: 30 workers, p99 latency validation
- TestStress_JobSubmissionBurst: 1K job submissions
- TestStress_WorkerChurn: 50 connect/disconnect cycles, memory leak detection
- TestStress_ConcurrentScheduling: 10 workers x 20 jobs contention

Long-Running Tests:
- TestLongRunning_MemoryLeak: heap growth monitoring
- TestLongRunning_OrphanRecovery: worker death/requeue stability
- TestLongRunning_WebSocketStability: 20 worker connection stability

Infrastructure:
- Add testreport package with JSON output, flaky test tracking
- Add TestTimer for timing/budget enforcement
- Add WaitForEvent, WaitForTaskStatus helpers
- Fix worker IDs to use valid bench-worker token patterns
134 lines
3.3 KiB
Go
134 lines
3.3 KiB
Go
package testreport_test
|
|
|
|
import (
|
|
"errors"
|
|
"os"
|
|
"path/filepath"
|
|
"testing"
|
|
"time"
|
|
|
|
"github.com/jfraeys/fetch_ml/tests/testreport"
|
|
"github.com/stretchr/testify/assert"
|
|
"github.com/stretchr/testify/require"
|
|
)
|
|
|
|
func TestReporter_BasicFlow(t *testing.T) {
|
|
r := testreport.NewReporter("unit-test", "test/package")
|
|
|
|
// Record a passing test
|
|
r.StartTest("TestPass")
|
|
time.Sleep(10 * time.Millisecond)
|
|
r.EndTest("TestPass", "pass", nil)
|
|
|
|
// Record a failing test
|
|
r.StartTest("TestFail")
|
|
time.Sleep(5 * time.Millisecond)
|
|
r.EndTest("TestFail", "fail", errors.New("test error"))
|
|
|
|
// Record a skipped test
|
|
r.StartTest("TestSkip")
|
|
r.EndTest("TestSkip", "skip", nil)
|
|
|
|
// Check summary
|
|
summary := r.Summary()
|
|
assert.Equal(t, 3, summary.Total)
|
|
assert.Equal(t, 1, summary.Passed)
|
|
assert.Equal(t, 1, summary.Failed)
|
|
assert.Equal(t, 1, summary.Skipped)
|
|
}
|
|
|
|
func TestReporter_JSONOutput(t *testing.T) {
|
|
r := testreport.NewReporter("json-test", "test/package")
|
|
|
|
r.StartTest("TestOne")
|
|
r.RecordOutput("some output")
|
|
r.EndTest("TestOne", "pass", nil)
|
|
|
|
jsonData, err := r.ToJSON()
|
|
require.NoError(t, err)
|
|
assert.Contains(t, string(jsonData), "json-test")
|
|
assert.Contains(t, string(jsonData), "TestOne")
|
|
assert.Contains(t, string(jsonData), "some output")
|
|
}
|
|
|
|
func TestReporter_SaveToFile(t *testing.T) {
|
|
r := testreport.NewReporter("file-test", "test/package")
|
|
|
|
r.StartTest("TestOne")
|
|
r.EndTest("TestOne", "pass", nil)
|
|
|
|
tmpDir := t.TempDir()
|
|
path := filepath.Join(tmpDir, "report.json")
|
|
|
|
err := r.SaveToFile(path)
|
|
require.NoError(t, err)
|
|
|
|
data, err := os.ReadFile(path)
|
|
require.NoError(t, err)
|
|
assert.Contains(t, string(data), "file-test")
|
|
}
|
|
|
|
func TestFlakyTestTracker(t *testing.T) {
|
|
ft := testreport.NewFlakyTestTracker()
|
|
|
|
// Record consistent passes
|
|
ft.RecordResult("stable-pass", true)
|
|
ft.RecordResult("stable-pass", true)
|
|
ft.RecordResult("stable-pass", true)
|
|
|
|
// Record consistent failures
|
|
ft.RecordResult("stable-fail", false)
|
|
ft.RecordResult("stable-fail", false)
|
|
|
|
// Record mixed results (flaky)
|
|
ft.RecordResult("flaky-test", true)
|
|
ft.RecordResult("flaky-test", false)
|
|
ft.RecordResult("flaky-test", true)
|
|
|
|
// Not flaky with < 3 runs
|
|
assert.False(t, ft.IsFlaky("stable-pass"))
|
|
assert.False(t, ft.IsFlaky("stable-fail"))
|
|
|
|
// Flaky with mixed results
|
|
assert.True(t, ft.IsFlaky("flaky-test"))
|
|
|
|
// Get all flaky tests
|
|
flaky := ft.GetFlakyTests()
|
|
require.Len(t, flaky, 1)
|
|
assert.Equal(t, "flaky-test", flaky[0])
|
|
|
|
// Check report
|
|
report := ft.Report()
|
|
assert.Contains(t, report, "flaky-test")
|
|
}
|
|
|
|
func TestTestTimer(t *testing.T) {
|
|
timer := testreport.NewTestTimer()
|
|
|
|
// Should be very small initially
|
|
elapsed := timer.Elapsed()
|
|
assert.True(t, elapsed < 100*time.Millisecond)
|
|
|
|
// Budget check should pass
|
|
passed := timer.CheckBudget(1*time.Second, t)
|
|
assert.True(t, passed)
|
|
}
|
|
|
|
func TestPerformanceRegression(t *testing.T) {
|
|
pr := testreport.NewPerformanceRegression()
|
|
|
|
// Record baseline values
|
|
pr.Record("latency", 100.0)
|
|
pr.Record("latency", 110.0)
|
|
pr.Record("latency", 105.0)
|
|
|
|
// Current value within threshold
|
|
assert.False(t, pr.CheckRegression("latency", 120.0, 1.5))
|
|
|
|
// Current value regresses
|
|
assert.True(t, pr.CheckRegression("latency", 200.0, 1.5))
|
|
|
|
// Not enough data for new metric
|
|
pr.Record("new-metric", 50.0)
|
|
assert.False(t, pr.CheckRegression("new-metric", 1000.0, 2.0))
|
|
}
|