**scripts/benchmarks/run-benchmarks-local.sh:** - Add support for native library benchmarks **scripts/ci/test.sh:** - Update CI test commands for new test structure **scripts/dev/smoke-test.sh:** - Improve smoke test reliability and output
242 lines · 8.8 KiB · Bash · Executable file
#!/usr/bin/env bash
#
# Local Benchmark Runner
# Mimics the GitHub Actions workflow for local execution.
#
# Artifacts for each run are written to .local-artifacts/run_<timestamp>
# under the project root.

set -euo pipefail

# Resolve paths relative to this script so it works from any cwd.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$(dirname "$SCRIPT_DIR")")"
LOCAL_ARTIFACTS_DIR="$PROJECT_ROOT/.local-artifacts"
ARCHIVE_DIR="$LOCAL_ARTIFACTS_DIR/archive"
# UTC timestamp doubles as the unique run identifier.
TIMESTAMP=$(date -u +"%Y%m%d_%H%M%S")
RUN_DIR="$LOCAL_ARTIFACTS_DIR/run_$TIMESTAMP"
# Create the per-run artifacts directory.
mkdir -p "$RUN_DIR"

echo "=== Local Benchmark Runner ==="
echo "Run ID: $TIMESTAMP"
echo "Artifacts: $RUN_DIR"
echo ""

# Step 1: Run Go benchmarks.
# Capture the exit code instead of aborting (set -e) so that partial output
# can still be converted to metrics below.
echo "Step 1: Running benchmarks..."
cd "$PROJECT_ROOT"
BENCHMARK_RESULTS_FILE="$RUN_DIR/benchmark_results.txt"
GO_TEST_EXIT_CODE=0
go test -bench=. -benchmem ./tests/benchmarks/... > "$BENCHMARK_RESULTS_FILE" 2>&1 || GO_TEST_EXIT_CODE=$?
if [ "$GO_TEST_EXIT_CODE" -ne 0 ]; then
  echo "Benchmark run exited non-zero (exit code: $GO_TEST_EXIT_CODE)." >&2
  echo "Continuing to generate metrics from available output: $BENCHMARK_RESULTS_FILE" >&2
  echo "--- tail (last 50 lines) ---" >&2
  tail -n 50 "$BENCHMARK_RESULTS_FILE" >&2 || true
fi
# Step 1b: Run native (cgo) library benchmarks when a built library exists.
# Failures are reported but do not abort the run (same policy as Step 1).
NATIVE_RESULTS_FILE="$RUN_DIR/native_benchmark_results.txt"
NATIVE_EXIT_CODE=0
if [[ -f "native/build/libqueue_index.dylib" || -f "native/build/libqueue_index.so" ]]; then
  echo ""
  echo "Step 1b: Running native library benchmarks..."
  CGO_ENABLED=1 go test -tags native_libs -bench=. -benchmem ./tests/benchmarks/... > "$NATIVE_RESULTS_FILE" 2>&1 || NATIVE_EXIT_CODE=$?
  if [ "$NATIVE_EXIT_CODE" -ne 0 ]; then
    echo "Native benchmark run exited non-zero (exit code: $NATIVE_EXIT_CODE)." >&2
    echo "--- tail (last 50 lines) ---" >&2
    tail -n 50 "$NATIVE_RESULTS_FILE" >&2 || true
  fi
else
  echo ""
  echo "Step 1b: Native libraries not found, skipping native benchmarks"
  echo " (Build with: make native-build)"
fi
# Extract benchmark result lines (e.g. "BenchmarkFoo-8  1000  123 ns/op ...").
# "|| true" keeps set -e from aborting when no benchmarks matched.
grep "Benchmark.*-[0-9].*" "$BENCHMARK_RESULTS_FILE" > "$RUN_DIR/clean_benchmarks.txt" || true

# Step 2: Convert to Prometheus metrics
echo "Step 2: Converting to Prometheus metrics..."
cat > "$RUN_DIR/prometheus_metrics.txt" << EOF
# HELP benchmark_time_per_op Time per operation in nanoseconds
# TYPE benchmark_time_per_op gauge
# HELP benchmark_memory_per_op Memory per operation in bytes
# TYPE benchmark_memory_per_op gauge
# HELP benchmark_allocs_per_op Allocations per operation
# TYPE benchmark_allocs_per_op gauge
EOF

# Parse benchmark results and convert to Prometheus exposition format.
# The whole loop writes through one grouped redirection so the metrics file
# is opened once instead of once per metric line.
{
  while IFS= read -r line; do
    [[ -n "$line" ]] || continue

    # Strip the trailing -GOMAXPROCS suffix (e.g. "BenchmarkFoo-8" -> "BenchmarkFoo").
    BENCHMARK_NAME=$(echo "$line" | awk '{print $1}' | sed 's/-[0-9]*$//')

    # Go benchmark output can include optional columns (e.g. MB/s) and units are
    # usually separate tokens: "123 ns/op 456 B/op 7 allocs/op", so locate each
    # unit token and take the value immediately before it.
    TIME_VALUE=$(echo "$line" | awk '{for (i=1;i<=NF;i++) if ($i=="ns/op") {print $(i-1); exit}}')
    MEMORY_VALUE=$(echo "$line" | awk '{for (i=1;i<=NF;i++) if ($i=="B/op") {print $(i-1); exit}}')
    ALLOCS_VALUE=$(echo "$line" | awk '{for (i=1;i<=NF;i++) if ($i=="allocs/op") {print $(i-1); exit}}')

    # Clean benchmark name so it is a valid Prometheus label value.
    CLEAN_NAME=$(echo "$BENCHMARK_NAME" | sed 's/[^a-zA-Z0-9_]/_/g')

    # Only emit a metric when the parsed value is numeric.
    if [[ "$TIME_VALUE" =~ ^[0-9.]+$ ]]; then
      echo "benchmark_time_per_op{benchmark=\"$CLEAN_NAME\"} ${TIME_VALUE}"
    fi
    if [[ "$MEMORY_VALUE" =~ ^[0-9.]+$ ]]; then
      echo "benchmark_memory_per_op{benchmark=\"$CLEAN_NAME\"} ${MEMORY_VALUE}"
    fi
    if [[ "$ALLOCS_VALUE" =~ ^[0-9.]+$ ]]; then
      echo "benchmark_allocs_per_op{benchmark=\"$CLEAN_NAME\"} ${ALLOCS_VALUE}"
    fi
  done < "$RUN_DIR/clean_benchmarks.txt"
} >> "$RUN_DIR/prometheus_metrics.txt"
# Step 3: Push metrics to a local Prometheus Pushgateway, if one is running.
echo "Step 3: Pushing to Prometheus..."
if command -v curl >/dev/null 2>&1; then
  # A plain GET against the root URL is used as a liveness probe.
  if curl -s http://localhost:9091 >/dev/null 2>&1; then
    echo "Pushgateway detected, pushing metrics..."
    curl --data-binary @"$RUN_DIR/prometheus_metrics.txt" \
      "http://localhost:9091/metrics/job/benchmark/instance/local_$TIMESTAMP"
  else
    echo "Pushgateway not running at http://localhost:9091"
    echo "Start it with: make monitoring-performance"
  fi
else
  echo "curl not available, skipping push to Pushgateway"
fi
# Step 4: Display results
echo ""
echo "=== Results Summary ==="
echo "Benchmark results saved to: $RUN_DIR/benchmark_results.txt"
echo "Prometheus metrics saved to: $RUN_DIR/prometheus_metrics.txt"
if [ "${GO_TEST_EXIT_CODE:-0}" -ne 0 ]; then
  echo "WARNING: go test exited with code: $GO_TEST_EXIT_CODE"
fi
echo ""

# Show top 10 results. The "|| true" is required: under "set -o pipefail"
# a no-match grep (exit 1) would otherwise abort the whole script.
echo "Top 10 Go benchmark times:"
grep "benchmark_time_per_op" "$RUN_DIR/prometheus_metrics.txt" | head -10 || true

# Show native comparison if available.
if [[ -f "$NATIVE_RESULTS_FILE" && "$NATIVE_EXIT_CODE" -eq 0 ]]; then
  echo ""
  echo "Native library benchmarks available at: $NATIVE_RESULTS_FILE"
  echo "To compare Go vs Native:"
  echo " make benchmark-compare"
fi
# Step 5: Generate HTML report
echo "Step 5: Generating HTML report..."

# Escape &, < and > (stdin -> stdout) so benchmark output cannot break —
# or inject markup into — the generated HTML. '&' must be escaped first.
html_escape() {
  sed -e 's/&/\&amp;/g' -e 's/</\&lt;/g' -e 's/>/\&gt;/g'
}

cat > "$RUN_DIR/report.html" << EOF
<!DOCTYPE html>
<html>
<head>
<title>Benchmark Report - $TIMESTAMP</title>
<style>
body { font-family: Arial, sans-serif; margin: 20px; }
table { border-collapse: collapse; width: 100%; }
th, td { border: 1px solid #ddd; padding: 8px; text-align: left; }
th { background-color: #f2f2f2; }
.metric { font-family: monospace; }
</style>
</head>
<body>
<h1>Benchmark Report</h1>
<p><strong>Run ID:</strong> $TIMESTAMP</p>
<p><strong>Date:</strong> $(date)</p>

<h2>Results</h2>
<table>
<tr>
<th>Benchmark</th>
<th>Time (ns/op)</th>
<th>Memory (B/op)</th>
<th>Allocs (allocs/op)</th>
</tr>
$(while IFS= read -r line; do
  if [[ -n "$line" ]]; then
    BENCHMARK_NAME=$(echo "$line" | awk '{print $1}' | html_escape)
    TIME_PER_OP=$(echo "$line" | awk '{for (i=1;i<=NF;i++) if ($i=="ns/op") {print $(i-1)" " $i; exit}}')
    MEMORY_PER_OP=$(echo "$line" | awk '{for (i=1;i<=NF;i++) if ($i=="B/op") {print $(i-1)" " $i; exit}}')
    ALLOCS_PER_OP=$(echo "$line" | awk '{for (i=1;i<=NF;i++) if ($i=="allocs/op") {print $(i-1)" " $i; exit}}')
    echo " <tr>"
    echo " <td class=\"metric\">$BENCHMARK_NAME</td>"
    echo " <td>$TIME_PER_OP</td>"
    echo " <td>$MEMORY_PER_OP</td>"
    echo " <td>$ALLOCS_PER_OP</td>"
    echo " </tr>"
  fi
done < "$RUN_DIR/clean_benchmarks.txt")
</table>

<h2>Raw Output</h2>
<pre>$(html_escape < "$RUN_DIR/benchmark_results.txt")</pre>
</body>
</html>
EOF

echo "HTML report saved to: $RUN_DIR/report.html"
echo "Open with: open $RUN_DIR/report.html"
# Step 6: Artifact management
echo ""
echo "=== Artifact Management ==="
echo "All artifacts saved in: $RUN_DIR"
echo "Total runs: $(ls -1d "$LOCAL_ARTIFACTS_DIR"/run_* 2>/dev/null | wc -l)"
echo ""

# Show recent runs. "-d" is required so ls lists the run directories
# themselves rather than their contents.
echo "Recent runs:"
ls -ltd "$LOCAL_ARTIFACTS_DIR"/run_* 2>/dev/null | head -5 || echo "No previous runs found"
# Step 7: Comprehensive cleanup
echo ""
echo "=== Cleanup Procedures ==="

# Prefer the dedicated cleanup script when present.
if [ -f "$SCRIPT_DIR/cleanup-benchmarks.sh" ]; then
  echo "Running standard benchmark cleanup..."
  "$SCRIPT_DIR/cleanup-benchmarks.sh" benchmarks
else
  # Fallback cleanup if the script is not available.
  echo "Archiving old benchmark runs (keeping last 10)..."
  stamp=$(date -u +%Y%m%d-%H%M%S)
  mkdir -p "$ARCHIVE_DIR/$stamp"
  cd "$LOCAL_ARTIFACTS_DIR"
  # "-d" is required: without it ls lists the *contents* of each run_*
  # directory (with headers), so the loop would never move the run dirs.
  # Run names have no whitespace, so line-wise parsing of ls is safe here.
  ls -1td run_* 2>/dev/null | tail -n +11 | while read -r run; do
    [ -n "$run" ] || continue
    mv "$run" "$ARCHIVE_DIR/$stamp/" 2>/dev/null || true
  done

  # Archive stale benchmark temp files (older than 60 minutes) instead of
  # deleting them outright. NUL-delimited find output handles odd names.
  echo "Archiving temporary files..."
  tmp_archive_dir="$LOCAL_ARTIFACTS_DIR/tmp-archive/$stamp"
  mkdir -p "$tmp_archive_dir"
  find /tmp -name "benchmark_*" -type f -mmin +60 -print0 2>/dev/null | while IFS= read -r -d '' f; do
    mv "$f" "$tmp_archive_dir/" 2>/dev/null || true
  done
  find /var/tmp -name "benchmark_*" -type f -mmin +60 -print0 2>/dev/null | while IFS= read -r -d '' f; do
    mv "$f" "$tmp_archive_dir/" 2>/dev/null || true
  done

  # Clean the Go test cache; best-effort if go is not installed.
  echo "Cleaning Go build cache..."
  go clean -testcache 2>/dev/null || true
fi
# Show final status
echo ""
echo "=== Final Status ==="
echo "Active runs remaining: $(ls -1d "$LOCAL_ARTIFACTS_DIR"/run_* 2>/dev/null | wc -l)"
echo "Disk usage: $(du -sh "$LOCAL_ARTIFACTS_DIR" 2>/dev/null | cut -f1 || echo "N/A")"

echo ""
echo "=== Complete! ==="
echo "View results: open $RUN_DIR/report.html"
echo "Push metrics: Available at http://localhost:9091 (if running)"