- Add safety checks to Zig build - Add TUI with job management and narrative views - Add WebSocket support and export services - Add smart configuration defaults - Update API routes with security headers - Update SECURITY.md with comprehensive policy - Add Makefile security scanning targets
403 lines
13 KiB
Go
403 lines
13 KiB
Go
// Package controller provides TUI command handlers
|
|
package controller
|
|
|
|
import (
|
|
"fmt"
|
|
"path/filepath"
|
|
"strings"
|
|
"time"
|
|
|
|
tea "github.com/charmbracelet/bubbletea"
|
|
"github.com/jfraeys/fetch_ml/cmd/tui/internal/model"
|
|
"github.com/jfraeys/fetch_ml/internal/container"
|
|
)
|
|
|
|
// shellQuote wraps s in single quotes for safe interpolation into a
// POSIX shell command line. Embedded single quotes are escaped with
// the standard close-quote/backslash/reopen idiom ('\'').
func shellQuote(s string) string {
	escaped := strings.ReplaceAll(s, "'", `'\''`)
	return "'" + escaped + "'"
}
|
|
|
|
// Command factories for loading data
|
|
|
|
func (c *Controller) loadAllData() tea.Cmd {
|
|
return tea.Batch(
|
|
c.loadJobs(),
|
|
c.loadQueue(),
|
|
c.loadGPU(),
|
|
c.loadContainer(),
|
|
c.loadDatasets(),
|
|
)
|
|
}
|
|
|
|
func (c *Controller) loadJobs() tea.Cmd {
|
|
return func() tea.Msg {
|
|
type jobResult struct {
|
|
jobs []model.Job
|
|
err error
|
|
}
|
|
|
|
resultChan := make(chan jobResult, 1)
|
|
go func() {
|
|
var jobs []model.Job
|
|
statusChan := make(chan []model.Job, 4)
|
|
|
|
// Debug: Print paths being used
|
|
c.logger.Info("Loading jobs from paths",
|
|
"pending", c.getPathForStatus(model.StatusPending),
|
|
"running", c.getPathForStatus(model.StatusRunning),
|
|
"finished", c.getPathForStatus(model.StatusFinished),
|
|
"failed", c.getPathForStatus(model.StatusFailed))
|
|
|
|
for _, status := range []model.JobStatus{
|
|
model.StatusPending,
|
|
model.StatusRunning,
|
|
model.StatusFinished,
|
|
model.StatusFailed,
|
|
} {
|
|
go func(s model.JobStatus) {
|
|
path := c.getPathForStatus(s)
|
|
names := c.server.ListDir(path)
|
|
|
|
// Debug: Log what we found
|
|
c.logger.Info("Listed directory", "status", s, "path", path, "count", len(names))
|
|
|
|
var statusJobs []model.Job
|
|
for _, name := range names {
|
|
// Lazy loading: only fetch basic info for list view
|
|
// Full details (GPU, narrative) loaded on selection
|
|
statusJobs = append(statusJobs, model.Job{
|
|
Name: name,
|
|
Status: s,
|
|
// TaskID, Priority, GPU info loaded lazily
|
|
})
|
|
}
|
|
statusChan <- statusJobs
|
|
}(status)
|
|
}
|
|
|
|
for range 4 {
|
|
jobs = append(jobs, <-statusChan...)
|
|
}
|
|
|
|
resultChan <- jobResult{jobs: jobs, err: nil}
|
|
}()
|
|
|
|
result := <-resultChan
|
|
if result.err != nil {
|
|
return model.StatusMsg{Text: "Failed to load jobs: " + result.err.Error(), Level: "error"}
|
|
}
|
|
return model.JobsLoadedMsg(result.jobs)
|
|
}
|
|
}
|
|
|
|
// loadJobDetails loads full details for a specific job (lazy loading).
//
// NOTE(review): this looks like a partial implementation — the parsed
// priority is never used, no detailed Job value is built or returned,
// and the caller only receives an informational StatusMsg. Confirm the
// intended scope before relying on it.
func (c *Controller) loadJobDetails(jobName string) tea.Cmd {
	return func() tea.Msg {
		// Error intentionally(?) discarded; TODO confirm what
		// GetJobStatus returns for an unknown job name.
		jobStatus, _ := c.taskQueue.GetJobStatus(jobName)

		// Parse priority; stays 0 when the key is absent or the value
		// does not parse (the Sscanf error is ignored).
		priority := int64(0)
		if p, ok := jobStatus["priority"]; ok {
			fmt.Sscanf(p, "%d", &priority)
		}

		// Build full job with details
		// This is called when job is selected for detailed view

		return model.StatusMsg{Text: "Loaded details for " + jobName, Level: "info"}
	}
}
|
|
|
|
func (c *Controller) loadQueue() tea.Cmd {
|
|
return func() tea.Msg {
|
|
tasks, err := c.taskQueue.GetQueuedTasks()
|
|
if err != nil {
|
|
c.logger.Error("failed to load queue", "error", err)
|
|
return model.StatusMsg{Text: "Failed to load queue: " + err.Error(), Level: "error"}
|
|
}
|
|
c.logger.Info("loaded queue", "task_count", len(tasks))
|
|
return model.TasksLoadedMsg(tasks)
|
|
}
|
|
}
|
|
|
|
func (c *Controller) loadGPU() tea.Cmd {
|
|
return func() tea.Msg {
|
|
type gpuResult struct {
|
|
content string
|
|
err error
|
|
}
|
|
|
|
resultChan := make(chan gpuResult, 1)
|
|
go func() {
|
|
cmd := "nvidia-smi --query-gpu=index,name,utilization.gpu," +
|
|
"memory.used,memory.total,temperature.gpu --format=csv,noheader,nounits"
|
|
out, err := c.server.Exec(cmd)
|
|
if err == nil && strings.TrimSpace(out) != "" {
|
|
var formatted strings.Builder
|
|
formatted.WriteString("GPU Status\n")
|
|
formatted.WriteString(strings.Repeat("═", 50) + "\n\n")
|
|
lines := strings.Split(strings.TrimSpace(out), "\n")
|
|
for _, line := range lines {
|
|
parts := strings.Split(line, ", ")
|
|
if len(parts) >= 6 {
|
|
formatted.WriteString(fmt.Sprintf("🎮 GPU %s: %s\n", parts[0], parts[1]))
|
|
formatted.WriteString(fmt.Sprintf(" Utilization: %s%%\n", parts[2]))
|
|
formatted.WriteString(fmt.Sprintf(" Memory: %s/%s MB\n", parts[3], parts[4]))
|
|
formatted.WriteString(fmt.Sprintf(" Temperature: %s°C\n\n", parts[5]))
|
|
}
|
|
}
|
|
c.logger.Info("loaded GPU status", "type", "nvidia")
|
|
resultChan <- gpuResult{content: formatted.String(), err: nil}
|
|
return
|
|
}
|
|
|
|
cmd = "system_profiler SPDisplaysDataType | grep 'Chipset Model\\|VRAM' | head -2"
|
|
out, err = c.server.Exec(cmd)
|
|
if err != nil {
|
|
c.logger.Warn("GPU info unavailable", "error", err)
|
|
resultChan <- gpuResult{
|
|
content: "GPU info unavailable\n\nRun on a system with nvidia-smi or macOS GPU",
|
|
err: err,
|
|
}
|
|
return
|
|
}
|
|
|
|
var formatted strings.Builder
|
|
formatted.WriteString("GPU Status (macOS)\n")
|
|
formatted.WriteString(strings.Repeat("═", 50) + "\n\n")
|
|
lines := strings.Split(strings.TrimSpace(out), "\n")
|
|
for _, line := range lines {
|
|
if strings.Contains(line, "Chipset Model") || strings.Contains(line, "VRAM") {
|
|
formatted.WriteString("🎮 " + strings.TrimSpace(line) + "\n")
|
|
}
|
|
}
|
|
formatted.WriteString("\n💡 Note: nvidia-smi not available on macOS\n")
|
|
|
|
c.logger.Info("loaded GPU status", "type", "macos")
|
|
resultChan <- gpuResult{content: formatted.String(), err: nil}
|
|
}()
|
|
|
|
result := <-resultChan
|
|
return model.GpuLoadedMsg(result.content)
|
|
}
|
|
}
|
|
|
|
func (c *Controller) loadContainer() tea.Cmd {
|
|
return func() tea.Msg {
|
|
resultChan := make(chan string, 1)
|
|
go func() {
|
|
var formatted strings.Builder
|
|
formatted.WriteString("Container Status\n")
|
|
formatted.WriteString(strings.Repeat("═", 50) + "\n\n")
|
|
|
|
formatted.WriteString("📋 Configuration:\n")
|
|
formatted.WriteString(fmt.Sprintf(" Image: %s\n", c.config.PodmanImage))
|
|
formatted.WriteString(fmt.Sprintf(" GPU Devices: %v\n", c.config.GPUDevices))
|
|
formatted.WriteString(fmt.Sprintf(" Workspace: %s\n", c.config.ContainerWorkspace))
|
|
formatted.WriteString(fmt.Sprintf(" Results: %s\n\n", c.config.ContainerResults))
|
|
|
|
cmd := "podman ps -a --format '{{.Names}}|{{.Status}}|{{.Image}}'"
|
|
out, err := c.server.Exec(cmd)
|
|
if err == nil && strings.TrimSpace(out) != "" {
|
|
formatted.WriteString("🐳 Running Containers (Podman):\n")
|
|
lines := strings.Split(strings.TrimSpace(out), "\n")
|
|
for _, line := range lines {
|
|
parts := strings.Split(line, "|")
|
|
if len(parts) >= 3 {
|
|
status := "🟢"
|
|
if strings.Contains(parts[1], "Exited") {
|
|
status = "🔴"
|
|
}
|
|
formatted.WriteString(fmt.Sprintf(" %s %s\n", status, parts[0]))
|
|
formatted.WriteString(fmt.Sprintf(" Status: %s\n", parts[1]))
|
|
formatted.WriteString(fmt.Sprintf(" Image: %s\n\n", parts[2]))
|
|
}
|
|
}
|
|
} else {
|
|
cmd = "docker ps -a --format '{{.Names}}|{{.Status}}|{{.Image}}'"
|
|
out, err = c.server.Exec(cmd)
|
|
if err == nil && strings.TrimSpace(out) != "" {
|
|
formatted.WriteString("🐳 Running Containers (Docker):\n")
|
|
lines := strings.Split(strings.TrimSpace(out), "\n")
|
|
for _, line := range lines {
|
|
parts := strings.Split(line, "|")
|
|
if len(parts) >= 3 {
|
|
status := "🟢"
|
|
if strings.Contains(parts[1], "Exited") {
|
|
status = "🔴"
|
|
}
|
|
formatted.WriteString(fmt.Sprintf(" %s %s\n", status, parts[0]))
|
|
formatted.WriteString(fmt.Sprintf(" Status: %s\n", parts[1]))
|
|
formatted.WriteString(fmt.Sprintf(" Image: %s\n\n", parts[2]))
|
|
}
|
|
}
|
|
} else {
|
|
formatted.WriteString("⚠️ No containers found\n")
|
|
}
|
|
}
|
|
|
|
formatted.WriteString("💻 System Info:\n")
|
|
if podmanVersion, err := c.server.Exec("podman --version"); err == nil {
|
|
formatted.WriteString(fmt.Sprintf(" Podman: %s\n", strings.TrimSpace(podmanVersion)))
|
|
} else if dockerVersion, err := c.server.Exec("docker --version"); err == nil {
|
|
formatted.WriteString(fmt.Sprintf(" Docker: %s\n", strings.TrimSpace(dockerVersion)))
|
|
} else {
|
|
formatted.WriteString(" ⚠️ Container engine not available\n")
|
|
}
|
|
|
|
c.logger.Info("loaded container status")
|
|
resultChan <- formatted.String()
|
|
}()
|
|
|
|
return model.ContainerLoadedMsg(<-resultChan)
|
|
}
|
|
}
|
|
|
|
func (c *Controller) queueJob(jobName string, args string) tea.Cmd {
|
|
return func() tea.Msg {
|
|
resultChan := make(chan model.StatusMsg, 1)
|
|
go func() {
|
|
priority := int64(5)
|
|
if strings.Contains(args, "--priority") {
|
|
_, err := fmt.Sscanf(args, "--priority %d", &priority)
|
|
if err != nil {
|
|
c.logger.Error("invalid priority argument", "args", args, "error", err)
|
|
resultChan <- model.StatusMsg{
|
|
Text: fmt.Sprintf("Invalid priority: %v", err),
|
|
Level: "error",
|
|
}
|
|
return
|
|
}
|
|
}
|
|
|
|
task, err := c.taskQueue.EnqueueTask(jobName, args, priority)
|
|
if err != nil {
|
|
c.logger.Error("failed to queue job", "job_name", jobName, "error", err)
|
|
resultChan <- model.StatusMsg{
|
|
Text: fmt.Sprintf("Failed to queue %s: %v", jobName, err),
|
|
Level: "error",
|
|
}
|
|
return
|
|
}
|
|
|
|
c.logger.Info("job queued", "job_name", jobName, "task_id", task.ID[:8], "priority", priority)
|
|
resultChan <- model.StatusMsg{
|
|
Text: fmt.Sprintf("✓ Queued: %s (ID: %s, P:%d)", jobName, task.ID[:8], priority),
|
|
Level: "success",
|
|
}
|
|
}()
|
|
|
|
return <-resultChan
|
|
}
|
|
}
|
|
|
|
func (c *Controller) deleteJob(jobName string) tea.Cmd {
|
|
return func() tea.Msg {
|
|
if err := container.ValidateJobName(jobName); err != nil {
|
|
return model.StatusMsg{Text: fmt.Sprintf("Invalid job name %s: %v", jobName, err), Level: "error"}
|
|
}
|
|
|
|
jobPath := filepath.Join(c.config.PendingPath(), jobName)
|
|
stamp := time.Now().UTC().Format("20060102-150405")
|
|
archiveRoot := filepath.Join(c.config.BasePath, "archive", "pending", stamp)
|
|
dst := filepath.Join(archiveRoot, jobName)
|
|
cmd := fmt.Sprintf("mkdir -p %s && mv %s %s", shellQuote(archiveRoot), shellQuote(jobPath), shellQuote(dst))
|
|
if _, err := c.server.Exec(cmd); err != nil {
|
|
return model.StatusMsg{Text: fmt.Sprintf("Failed to archive %s: %v", jobName, err), Level: "error"}
|
|
}
|
|
return model.StatusMsg{Text: fmt.Sprintf("✓ Archived: %s", jobName), Level: "success"}
|
|
}
|
|
}
|
|
|
|
func (c *Controller) markFailed(jobName string) tea.Cmd {
|
|
return func() tea.Msg {
|
|
src := filepath.Join(c.config.RunningPath(), jobName)
|
|
dst := filepath.Join(c.config.FailedPath(), jobName)
|
|
if _, err := c.server.Exec(fmt.Sprintf("mv %s %s", src, dst)); err != nil {
|
|
return model.StatusMsg{Text: fmt.Sprintf("Failed to mark failed: %v", err), Level: "error"}
|
|
}
|
|
return model.StatusMsg{Text: fmt.Sprintf("⚠ Marked failed: %s", jobName), Level: "warning"}
|
|
}
|
|
}
|
|
|
|
func (c *Controller) cancelTask(taskID string) tea.Cmd {
|
|
return func() tea.Msg {
|
|
if err := c.taskQueue.CancelTask(taskID); err != nil {
|
|
c.logger.Error("failed to cancel task", "task_id", taskID[:8], "error", err)
|
|
return model.StatusMsg{Text: fmt.Sprintf("Cancel failed: %v", err), Level: "error"}
|
|
}
|
|
c.logger.Info("task cancelled", "task_id", taskID[:8])
|
|
return model.StatusMsg{Text: fmt.Sprintf("✓ Cancelled: %s", taskID[:8]), Level: "success"}
|
|
}
|
|
}
|
|
|
|
func (c *Controller) showQueue(m model.State) tea.Cmd {
|
|
return func() tea.Msg {
|
|
var content strings.Builder
|
|
content.WriteString("Task Queue\n")
|
|
content.WriteString(strings.Repeat("═", 60) + "\n\n")
|
|
|
|
if len(m.QueuedTasks) == 0 {
|
|
content.WriteString("📭 No tasks in queue\n")
|
|
} else {
|
|
for i, task := range m.QueuedTasks {
|
|
statusIcon := "⏳"
|
|
if task.Status == "running" {
|
|
statusIcon = "▶"
|
|
}
|
|
|
|
content.WriteString(fmt.Sprintf("%d. %s %s [ID: %s]\n",
|
|
i+1, statusIcon, task.JobName, task.ID[:8]))
|
|
content.WriteString(fmt.Sprintf(" Priority: %d | Status: %s\n",
|
|
task.Priority, task.Status))
|
|
if task.Args != "" {
|
|
content.WriteString(fmt.Sprintf(" Args: %s\n", task.Args))
|
|
}
|
|
content.WriteString(fmt.Sprintf(" Created: %s\n",
|
|
task.CreatedAt.Format("2006-01-02 15:04:05")))
|
|
|
|
if task.StartedAt != nil {
|
|
duration := time.Since(*task.StartedAt)
|
|
content.WriteString(fmt.Sprintf(" Running for: %s\n",
|
|
duration.Round(time.Second)))
|
|
}
|
|
|
|
if task.Tracking != nil {
|
|
var tools []string
|
|
if task.Tracking.MLflow != nil && task.Tracking.MLflow.Enabled {
|
|
tools = append(tools, "MLflow")
|
|
}
|
|
if task.Tracking.TensorBoard != nil && task.Tracking.TensorBoard.Enabled {
|
|
tools = append(tools, "TensorBoard")
|
|
}
|
|
if task.Tracking.Wandb != nil && task.Tracking.Wandb.Enabled {
|
|
tools = append(tools, "Wandb")
|
|
}
|
|
if len(tools) > 0 {
|
|
content.WriteString(fmt.Sprintf(" Tracking: %s\n", strings.Join(tools, ", ")))
|
|
}
|
|
}
|
|
content.WriteString("\n")
|
|
}
|
|
}
|
|
|
|
return model.QueueLoadedMsg(content.String())
|
|
}
|
|
}
|
|
|
|
func (c *Controller) loadDatasets() tea.Cmd {
|
|
return func() tea.Msg {
|
|
datasets, err := c.taskQueue.ListDatasets()
|
|
if err != nil {
|
|
c.logger.Error("failed to load datasets", "error", err)
|
|
return model.StatusMsg{Text: "Failed to load datasets: " + err.Error(), Level: "error"}
|
|
}
|
|
c.logger.Info("loaded datasets", "count", len(datasets))
|
|
return model.DatasetsLoadedMsg(datasets)
|
|
}
|
|
}
|
|
|
|
func tickCmd() tea.Cmd {
|
|
return tea.Tick(time.Second, func(t time.Time) tea.Msg {
|
|
return model.TickMsg(t)
|
|
})
|
|
}
|