fetch_ml/cmd/tui/internal/controller/commands.go
Jeremie Fraeys ea15af1833 Fix multi-user authentication and clean up debug code
- Fix YAML tags in auth config struct (json -> yaml)
- Update CLI configs to use pre-hashed API keys
- Remove double hashing in WebSocket client
- Fix port mapping (9102 -> 9103) in CLI commands
- Update permission keys to use jobs:read, jobs:create, etc.
- Clean up all debug logging from CLI and server
- All user roles now authenticate correctly:
  * Admin: Can queue jobs and see all jobs
  * Researcher: Can queue jobs and see own jobs
  * Analyst: Can see status (read-only access)

Multi-user authentication is now fully functional.
2025-12-06 12:35:32 -05:00

373 lines
11 KiB
Go

// Package controller provides TUI command handlers
package controller
import (
	"fmt"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	tea "github.com/charmbracelet/bubbletea"
	"github.com/jfraeys/fetch_ml/cmd/tui/internal/model"
)
// JobsLoadedMsg contains loaded jobs from the queue, one per job directory
// across the pending/running/finished/failed states.
type JobsLoadedMsg []model.Job

// TasksLoadedMsg contains loaded tasks from the queue.
type TasksLoadedMsg []*model.Task

// GpuLoadedMsg contains pre-formatted GPU status information for display.
type GpuLoadedMsg string

// ContainerLoadedMsg contains pre-formatted container status information.
type ContainerLoadedMsg string

// LogLoadedMsg contains log content.
type LogLoadedMsg string

// QueueLoadedMsg contains pre-formatted queue status information.
type QueueLoadedMsg string

// SettingsContentMsg contains settings content.
type SettingsContentMsg string

// SettingsUpdateMsg indicates settings should be updated.
type SettingsUpdateMsg struct{}

// StatusMsg contains status text and level for the status bar.
type StatusMsg struct {
	Text  string // human-readable message shown to the user
	Level string // severity: "error", "warning", or "success" in this file
}

// TickMsg represents a timer tick used to drive periodic refreshes.
type TickMsg time.Time
// Command factories for loading data
// loadAllData builds one batched command that refreshes every data panel
// (jobs, queue, GPU, containers) in a single pass.
func (c *Controller) loadAllData() tea.Cmd {
	cmds := []tea.Cmd{
		c.loadJobs(),
		c.loadQueue(),
		c.loadGPU(),
		c.loadContainer(),
	}
	return tea.Batch(cmds...)
}
// loadJobs returns a command that lists jobs in all four status directories.
// One goroutine per status runs the directory listings concurrently; results
// are merged (arrival order is not significant) before being delivered as a
// JobsLoadedMsg.
//
// Note: the previous version wrapped this whole body in an extra goroutine
// plus a result channel that it immediately blocked on, and carried an error
// field that was always nil — both removed as dead indirection. The command
// function itself already runs off the UI thread under Bubble Tea.
func (c *Controller) loadJobs() tea.Cmd {
	return func() tea.Msg {
		statuses := []model.JobStatus{
			model.StatusPending,
			model.StatusRunning,
			model.StatusFinished,
			model.StatusFailed,
		}
		statusChan := make(chan []model.Job, len(statuses))
		for _, status := range statuses {
			go func(s model.JobStatus) {
				path := c.getPathForStatus(s)
				names := c.server.ListDir(path)
				var statusJobs []model.Job
				for _, name := range names {
					// Best-effort lookup: on error the task ID stays empty
					// and priority falls back to zero, matching the old
					// behavior of ignoring the GetJobStatus error.
					jobStatus, _ := c.taskQueue.GetJobStatus(name)
					taskID := jobStatus["task_id"]
					priority := int64(0)
					if p, ok := jobStatus["priority"]; ok {
						// strconv.ParseInt replaces fmt.Sscanf: cheaper and
						// rejects trailing garbage instead of silently
						// truncating it.
						if parsed, err := strconv.ParseInt(p, 10, 64); err == nil {
							priority = parsed
						}
					}
					statusJobs = append(statusJobs, model.Job{
						Name:     name,
						Status:   s,
						TaskID:   taskID,
						Priority: priority,
					})
				}
				statusChan <- statusJobs
			}(status)
		}
		// Collect exactly one slice per spawned goroutine.
		var jobs []model.Job
		for range statuses {
			jobs = append(jobs, <-statusChan...)
		}
		return JobsLoadedMsg(jobs)
	}
}
// loadQueue returns a command that fetches the queued tasks from the task
// queue. On success it emits a TasksLoadedMsg; on failure it logs the error
// and surfaces it as an error-level StatusMsg instead.
func (c *Controller) loadQueue() tea.Cmd {
	return func() tea.Msg {
		queued, loadErr := c.taskQueue.GetQueuedTasks()
		if loadErr != nil {
			c.logger.Error("failed to load queue", "error", loadErr)
			return StatusMsg{
				Text:  "Failed to load queue: " + loadErr.Error(),
				Level: "error",
			}
		}
		c.logger.Info("loaded queue", "task_count", len(queued))
		return TasksLoadedMsg(queued)
	}
}
// loadGPU returns a command that probes GPU status on the server: nvidia-smi
// first, then a macOS system_profiler fallback. The result is always a
// GpuLoadedMsg; failures are reported inside the message content rather than
// as a separate error (the old gpuResult.err field was never inspected).
//
// The previous goroutine + result-channel wrapper was removed: the function
// blocked on the channel immediately, so it bought no concurrency, and the
// command already runs off the UI thread.
func (c *Controller) loadGPU() tea.Cmd {
	return func() tea.Msg {
		cmd := "nvidia-smi --query-gpu=index,name,utilization.gpu," +
			"memory.used,memory.total,temperature.gpu --format=csv,noheader,nounits"
		out, err := c.server.Exec(cmd)
		if err == nil && strings.TrimSpace(out) != "" {
			var formatted strings.Builder
			formatted.WriteString("GPU Status\n")
			formatted.WriteString(strings.Repeat("═", 50) + "\n\n")
			for _, line := range strings.Split(strings.TrimSpace(out), "\n") {
				// CSV row: index, name, util%, mem used, mem total, temp.
				parts := strings.Split(line, ", ")
				if len(parts) >= 6 {
					formatted.WriteString(fmt.Sprintf("🎮 GPU %s: %s\n", parts[0], parts[1]))
					formatted.WriteString(fmt.Sprintf(" Utilization: %s%%\n", parts[2]))
					formatted.WriteString(fmt.Sprintf(" Memory: %s/%s MB\n", parts[3], parts[4]))
					formatted.WriteString(fmt.Sprintf(" Temperature: %s°C\n\n", parts[5]))
				}
			}
			c.logger.Info("loaded GPU status", "type", "nvidia")
			return GpuLoadedMsg(formatted.String())
		}
		// Fallback: macOS GPU info via system_profiler.
		cmd = "system_profiler SPDisplaysDataType | grep 'Chipset Model\\|VRAM' | head -2"
		out, err = c.server.Exec(cmd)
		if err != nil {
			c.logger.Warn("GPU info unavailable", "error", err)
			return GpuLoadedMsg("GPU info unavailable\n\nRun on a system with nvidia-smi or macOS GPU")
		}
		var formatted strings.Builder
		formatted.WriteString("GPU Status (macOS)\n")
		formatted.WriteString(strings.Repeat("═", 50) + "\n\n")
		for _, line := range strings.Split(strings.TrimSpace(out), "\n") {
			if strings.Contains(line, "Chipset Model") || strings.Contains(line, "VRAM") {
				formatted.WriteString("🎮 " + strings.TrimSpace(line) + "\n")
			}
		}
		formatted.WriteString("\n💡 Note: nvidia-smi not available on macOS\n")
		c.logger.Info("loaded GPU status", "type", "macos")
		return GpuLoadedMsg(formatted.String())
	}
}
// loadContainer returns a command that summarizes container configuration
// and the containers visible to podman (preferred) or docker, plus the
// installed engine version. The result is always a ContainerLoadedMsg.
//
// Two changes from the previous version: the goroutine + channel wrapper
// (blocked on immediately, hence useless) is gone, and the podman/docker
// container-listing loops — previously duplicated verbatim — share one
// closure.
func (c *Controller) loadContainer() tea.Cmd {
	return func() tea.Msg {
		var formatted strings.Builder
		formatted.WriteString("Container Status\n")
		formatted.WriteString(strings.Repeat("═", 50) + "\n\n")
		formatted.WriteString("📋 Configuration:\n")
		formatted.WriteString(fmt.Sprintf(" Image: %s\n", c.config.PodmanImage))
		formatted.WriteString(fmt.Sprintf(" GPU: %v\n", c.config.GPUAccess))
		formatted.WriteString(fmt.Sprintf(" Workspace: %s\n", c.config.ContainerWorkspace))
		formatted.WriteString(fmt.Sprintf(" Results: %s\n\n", c.config.ContainerResults))

		// writeContainers renders "NAME|STATUS|IMAGE" lines from `ps -a`
		// output; a red dot marks exited containers.
		writeContainers := func(engine, out string) {
			formatted.WriteString(fmt.Sprintf("🐳 Running Containers (%s):\n", engine))
			for _, line := range strings.Split(strings.TrimSpace(out), "\n") {
				parts := strings.Split(line, "|")
				if len(parts) < 3 {
					continue
				}
				status := "🟢"
				if strings.Contains(parts[1], "Exited") {
					status = "🔴"
				}
				formatted.WriteString(fmt.Sprintf(" %s %s\n", status, parts[0]))
				formatted.WriteString(fmt.Sprintf(" Status: %s\n", parts[1]))
				formatted.WriteString(fmt.Sprintf(" Image: %s\n\n", parts[2]))
			}
		}

		const psFormat = " ps -a --format '{{.Names}}|{{.Status}}|{{.Image}}'"
		if out, err := c.server.Exec("podman" + psFormat); err == nil && strings.TrimSpace(out) != "" {
			writeContainers("Podman", out)
		} else if out, err := c.server.Exec("docker" + psFormat); err == nil && strings.TrimSpace(out) != "" {
			writeContainers("Docker", out)
		} else {
			formatted.WriteString("⚠️ No containers found\n")
		}

		formatted.WriteString("💻 System Info:\n")
		if podmanVersion, err := c.server.Exec("podman --version"); err == nil {
			formatted.WriteString(fmt.Sprintf(" Podman: %s\n", strings.TrimSpace(podmanVersion)))
		} else if dockerVersion, err := c.server.Exec("docker --version"); err == nil {
			formatted.WriteString(fmt.Sprintf(" Docker: %s\n", strings.TrimSpace(dockerVersion)))
		} else {
			formatted.WriteString(" ⚠️ Container engine not available\n")
		}
		c.logger.Info("loaded container status")
		return ContainerLoadedMsg(formatted.String())
	}
}
// queueJob returns a command that enqueues jobName with the given argument
// string. An optional "--priority N" anywhere in args overrides the default
// priority of 5; an unparsable priority aborts the enqueue with an error
// StatusMsg.
//
// Fixes: the old code checked strings.Contains(args, "--priority") but then
// ran fmt.Sscanf on the whole string, which only parses when "--priority"
// is the very first token — so args like "train.py --priority 3" were
// rejected. It also sliced task.ID[:8] unconditionally (panic on short IDs)
// and wrapped everything in a goroutine/channel pair it immediately blocked
// on; both removed.
func (c *Controller) queueJob(jobName string, args string) tea.Cmd {
	return func() tea.Msg {
		priority := int64(5) // default priority
		if idx := strings.Index(args, "--priority"); idx >= 0 {
			// Parse starting at the flag itself so leading args don't
			// break the scan.
			if _, err := fmt.Sscanf(args[idx:], "--priority %d", &priority); err != nil {
				c.logger.Error("invalid priority argument", "args", args, "error", err)
				return StatusMsg{
					Text:  fmt.Sprintf("Invalid priority: %v", err),
					Level: "error",
				}
			}
		}
		task, err := c.taskQueue.EnqueueTask(jobName, args, priority)
		if err != nil {
			c.logger.Error("failed to queue job", "job_name", jobName, "error", err)
			return StatusMsg{
				Text:  fmt.Sprintf("Failed to queue %s: %v", jobName, err),
				Level: "error",
			}
		}
		// Abbreviate the task ID defensively; IDs shorter than eight
		// characters are shown whole instead of panicking.
		shortID := task.ID
		if len(shortID) > 8 {
			shortID = shortID[:8]
		}
		c.logger.Info("job queued", "job_name", jobName, "task_id", shortID, "priority", priority)
		return StatusMsg{
			Text:  fmt.Sprintf("✓ Queued: %s (ID: %s, P:%d)", jobName, shortID, priority),
			Level: "success",
		}
	}
}
// deleteJob returns a command that removes the named pending job's directory
// on the server and reports the outcome as a StatusMsg.
//
// Fix: the path is now single-quoted for the remote shell — the previous
// unquoted interpolation broke on spaces and allowed shell injection through
// the job name — and "--" keeps rm from treating the name as a flag.
func (c *Controller) deleteJob(jobName string) tea.Cmd {
	return func() tea.Msg {
		jobPath := filepath.Join(c.config.PendingPath(), jobName)
		// POSIX single-quote escaping: close quote, emit \', reopen quote.
		quoted := "'" + strings.ReplaceAll(jobPath, "'", `'\''`) + "'"
		if _, err := c.server.Exec(fmt.Sprintf("rm -rf -- %s", quoted)); err != nil {
			return StatusMsg{Text: fmt.Sprintf("Failed to delete %s: %v", jobName, err), Level: "error"}
		}
		return StatusMsg{Text: fmt.Sprintf("✓ Deleted: %s", jobName), Level: "success"}
	}
}
// markFailed returns a command that moves a running job's directory into the
// failed directory on the server and reports the outcome as a StatusMsg.
//
// Fix: both paths are now single-quoted for the remote shell — the previous
// unquoted interpolation broke on spaces and allowed shell injection through
// the job name — and "--" keeps mv from treating a name as a flag.
func (c *Controller) markFailed(jobName string) tea.Cmd {
	return func() tea.Msg {
		// POSIX single-quote escaping: close quote, emit \', reopen quote.
		quote := func(p string) string {
			return "'" + strings.ReplaceAll(p, "'", `'\''`) + "'"
		}
		src := filepath.Join(c.config.RunningPath(), jobName)
		dst := filepath.Join(c.config.FailedPath(), jobName)
		if _, err := c.server.Exec(fmt.Sprintf("mv -- %s %s", quote(src), quote(dst))); err != nil {
			return StatusMsg{Text: fmt.Sprintf("Failed to mark failed: %v", err), Level: "error"}
		}
		return StatusMsg{Text: fmt.Sprintf("⚠ Marked failed: %s", jobName), Level: "warning"}
	}
}
// cancelTask returns a command that cancels the task with the given ID and
// reports the outcome as a StatusMsg.
//
// Fix: taskID[:8] previously panicked for IDs shorter than eight characters
// — including on the error-logging path; the abbreviation is now guarded.
func (c *Controller) cancelTask(taskID string) tea.Cmd {
	return func() tea.Msg {
		shortID := taskID
		if len(shortID) > 8 {
			shortID = shortID[:8]
		}
		if err := c.taskQueue.CancelTask(taskID); err != nil {
			c.logger.Error("failed to cancel task", "task_id", shortID, "error", err)
			return StatusMsg{Text: fmt.Sprintf("Cancel failed: %v", err), Level: "error"}
		}
		c.logger.Info("task cancelled", "task_id", shortID)
		return StatusMsg{Text: fmt.Sprintf("✓ Cancelled: %s", shortID), Level: "success"}
	}
}
// showQueue returns a command that renders the queued tasks held in the UI
// state into a human-readable panel, delivered as a QueueLoadedMsg.
//
// Fix: task.ID[:8] previously panicked for IDs shorter than eight
// characters; the abbreviation is now guarded.
func (c *Controller) showQueue(m model.State) tea.Cmd {
	return func() tea.Msg {
		var content strings.Builder
		content.WriteString("Task Queue\n")
		content.WriteString(strings.Repeat("═", 60) + "\n\n")
		if len(m.QueuedTasks) == 0 {
			content.WriteString("📭 No tasks in queue\n")
		} else {
			for i, task := range m.QueuedTasks {
				statusIcon := "⏳"
				if task.Status == "running" {
					statusIcon = "▶"
				}
				shortID := task.ID
				if len(shortID) > 8 {
					shortID = shortID[:8]
				}
				content.WriteString(fmt.Sprintf("%d. %s %s [ID: %s]\n",
					i+1, statusIcon, task.JobName, shortID))
				content.WriteString(fmt.Sprintf(" Priority: %d | Status: %s\n",
					task.Priority, task.Status))
				if task.Args != "" {
					content.WriteString(fmt.Sprintf(" Args: %s\n", task.Args))
				}
				content.WriteString(fmt.Sprintf(" Created: %s\n",
					task.CreatedAt.Format("2006-01-02 15:04:05")))
				if task.StartedAt != nil {
					// Elapsed wall-clock time, rounded to whole seconds.
					content.WriteString(fmt.Sprintf(" Running for: %s\n",
						time.Since(*task.StartedAt).Round(time.Second)))
				}
				content.WriteString("\n")
			}
		}
		return QueueLoadedMsg(content.String())
	}
}
// tickCmd schedules a TickMsg once per second to drive periodic refreshes.
func tickCmd() tea.Cmd {
	emitTick := func(now time.Time) tea.Msg {
		return TickMsg(now)
	}
	return tea.Tick(time.Second, emitTick)
}