fetch_ml/cli/src/commands/status.zig
Jeremie Fraeys cf8115c670
feat(cli): standardize connection handling across commands
Add isConnected() method to common.ConnectionContext to check WebSocket
client connection state. Migrate all server-connected commands to use
the standardized ConnectionContext pattern:

- jupyter/lifecycle.zig: Replace local ConnectionCtx with common.ConnectionContext
- status.zig: Use ConnectionContext, remove manual connection boilerplate,
  add connection status indicators (connecting/connected)
- cancel.zig: Use ConnectionContext for server cancel operations
- dataset.zig: Use ConnectionContext for list/register/info/search operations
- exec/remote.zig: Use ConnectionContext for remote job execution

Benefits:
- Eliminates ~160 lines of duplicated connection boilerplate
- Consistent error handling and cleanup across commands
- Single point of change for connection logic
- Adds runtime connection state visibility to status command
2026-03-05 12:07:41 -05:00

157 lines
5.3 KiB
Zig

const std = @import("std");
const Config = @import("../config.zig").Config;
const ws = @import("../net/ws/client.zig");
const crypto = @import("../utils/crypto.zig");
const io = @import("../utils/io.zig");
const auth = @import("../utils/auth.zig");
const core = @import("../core.zig");
const common = @import("common.zig");
/// Parsed command-line options for the `ml status` command.
/// Populated by `run` from the raw argument slice.
pub const StatusOptions = struct {
    /// Emit structured JSON instead of human-readable text (`--json`).
    json: bool = false,
    /// Continuously refresh the status display until interrupted (`--watch`).
    watch: bool = false,
    /// Launch the TUI monitor over SSH instead of printing locally (`--tui`).
    tui: bool = false,
    /// Maximum number of results to show; null means unlimited (`--limit <n>`).
    limit: ?usize = null,
    /// Seconds between refreshes in watch mode (`--watch-interval=<s>`).
    watch_interval: u32 = 5,
};
/// Entry point for `ml status`. Parses flags into a `StatusOptions`,
/// loads the configuration, builds a default user context, and dispatches
/// to single-shot, watch, or TUI mode.
///
/// Errors: `error.APIKeyMissing` when the loaded config has no API key;
/// propagates `std.fmt.parseInt` errors for malformed numeric flag values
/// and any error from the selected mode.
pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
    // Named prefix so the slice offset below can never drift out of sync
    // with the flag spelling (the old code hard-coded `arg[17..]`).
    const interval_prefix = "--watch-interval=";

    var options = StatusOptions{};
    var i: usize = 0;
    while (i < args.len) : (i += 1) {
        const arg = args[i];
        if (std.mem.eql(u8, arg, "--json")) {
            options.json = true;
        } else if (std.mem.eql(u8, arg, "--watch")) {
            options.watch = true;
        } else if (std.mem.eql(u8, arg, "--tui")) {
            options.tui = true;
        } else if (std.mem.eql(u8, arg, "--limit") and i + 1 < args.len) {
            options.limit = try std.fmt.parseInt(usize, args[i + 1], 10);
            i += 1; // consume the value argument
        } else if (std.mem.startsWith(u8, arg, interval_prefix)) {
            options.watch_interval = try std.fmt.parseInt(u32, arg[interval_prefix.len..], 10);
        } else if (std.mem.eql(u8, arg, "--help")) {
            return printUsage();
        }
        // Unknown arguments are deliberately ignored (existing behavior).
    }

    core.output.setMode(if (options.json) .json else .text);

    const config = try Config.load(allocator);
    defer {
        // Config.load returns a const value; copy so deinit can take it
        // mutably at scope exit.
        var mut_config = config;
        mut_config.deinit(allocator);
    }

    if (config.api_key.len == 0) {
        return error.APIKeyMissing;
    }

    // NOTE(review): a hard-coded admin "default" user context — presumably a
    // placeholder until real identity plumbing lands; confirm with auth module.
    var user_context = auth.UserContext{
        .name = try allocator.dupe(u8, "default"),
        .admin = true,
        .allocator = allocator,
    };
    defer user_context.deinit();

    // `--watch` takes precedence over `--tui` when both are given.
    if (options.watch) {
        try runWatchMode(allocator, user_context, options);
    } else if (options.tui) {
        try runTuiMode(allocator, config, args);
    } else {
        try runSingleStatus(allocator, user_context, options);
    }
}
/// Performs one status round-trip against the server: open a connection,
/// send the status request, and render the response. Progress lines go to
/// stderr unless JSON output was requested.
fn runSingleStatus(allocator: std.mem.Allocator, user_context: auth.UserContext, options: StatusOptions) !void {
    var ctx = try common.ConnectionContext.init(allocator);
    defer ctx.deinit();

    // Suppress human-oriented progress chatter when emitting JSON.
    const show_progress = !options.json;

    if (show_progress) std.debug.print("Remote: connecting...\n", .{});
    try ctx.connect();
    if (show_progress) std.debug.print("Remote: connected\n", .{});

    try ctx.client.sendStatusRequest(ctx.api_key_hash);
    try ctx.client.receiveAndHandleStatusResponse(allocator, user_context, options);
}
/// Continuously re-runs the status query, sleeping `watch_interval` seconds
/// between iterations. Loops forever; the user stops it with Ctrl+C.
fn runWatchMode(allocator: std.mem.Allocator, user_context: auth.UserContext, options: StatusOptions) !void {
    std.debug.print("Starting watch mode (interval: {d}s). Press Ctrl+C to stop.\n", .{options.watch_interval});
    while (true) {
        if (!options.json) {
            std.debug.print("\n=== FetchML Status - {s} ===", .{user_context.name});
        }
        try runSingleStatus(allocator, user_context, options);
        if (!options.json) {
            std.debug.print("Next update in {d} seconds...\n", .{options.watch_interval});
        }
        // BUGFIX: widen to u64 BEFORE multiplying. `std.time.ns_per_s`
        // (1e9) fits in u32, so `u32 * ns_per_s` was computed in u32 and
        // overflowed for any interval >= 5 seconds (5e9 > u32 max ~4.29e9),
        // panicking in safe builds — including the default interval of 5.
        // Thread.sleep takes nanoseconds as u64.
        std.Thread.sleep(@as(u64, options.watch_interval) * std.time.ns_per_s);
    }
}
/// Launches the remote TUI monitor over SSH. Builds a remote shell command
/// that exports the CLI configuration via environment variables and runs
/// `./bin/tui` in the worker base directory, then execs ssh and waits.
///
/// Errors: `error.ServerOnlyFeature` in local mode, allocation/spawn
/// failures, and `error.TuiExitFailure` when the remote TUI exits non-zero.
fn runTuiMode(allocator: std.mem.Allocator, config: Config, args: []const []const u8) !void {
    if (config.isLocalMode()) {
        core.output.err("TUI mode requires server mode. Use 'ml status' without --tui for local mode.");
        return error.ServerOnlyFeature;
    }
    std.debug.print("Launching TUI via SSH...\n", .{});

    // Build remote command that exports config via env vars and runs the TUI.
    // NOTE(review): config values are interpolated inside double quotes of a
    // remote shell command; a value containing `"` or `$` would break quoting
    // or be expanded remotely. Config values are assumed operator-trusted.
    var remote_cmd_buffer = std.ArrayList(u8){};
    defer remote_cmd_buffer.deinit(allocator);
    {
        const writer = remote_cmd_buffer.writer(allocator);
        try writer.print("cd {s} && ", .{config.worker_base});
        try writer.print(
            "FETCH_ML_CLI_HOST=\"{s}\" FETCH_ML_CLI_USER=\"{s}\" FETCH_ML_CLI_BASE=\"{s}\" ",
            .{ config.worker_host, config.worker_user, config.worker_base },
        );
        try writer.print(
            "FETCH_ML_CLI_PORT=\"{d}\" FETCH_ML_CLI_API_KEY=\"{s}\" ",
            .{ config.worker_port, config.api_key },
        );
        try writer.writeAll("./bin/tui");
        for (args) |arg| {
            try writer.print(" {s}", .{arg});
        }
    }
    const remote_cmd = try remote_cmd_buffer.toOwnedSlice(allocator);
    defer allocator.free(remote_cmd);

    // BUGFIX: ssh expects a single `user@host` destination argument. The
    // previous argv passed the user and host as two separate arguments, so
    // ssh treated the user name as the destination and the real host became
    // part of the remote command.
    const destination = try std.fmt.allocPrint(
        allocator,
        "{s}@{s}",
        .{ config.worker_user, config.worker_host },
    );
    defer allocator.free(destination);

    // `-t` forces pseudo-terminal allocation, which an interactive TUI
    // needs to draw and receive key input on the remote side.
    const ssh_args = &[_][]const u8{
        "ssh",
        "-t",
        destination,
        remote_cmd,
    };

    var child = std.process.Child.init(ssh_args, allocator);
    try child.spawn();

    // Surface a failed remote session instead of silently discarding the
    // exit status as the previous code did.
    const term = try child.wait();
    switch (term) {
        .Exited => |code| if (code != 0) return error.TuiExitFailure,
        else => return error.TuiExitFailure,
    }
}
/// Prints the `ml status` usage/help text to stderr.
fn printUsage() !void {
    // Single comptime-concatenated string; one write instead of one print
    // per line. Output bytes are identical to the previous implementation.
    const help_text =
        "Usage: ml status [options]\n" ++
        "\nOptions:\n" ++
        "\t--json\t\t\tOutput structured JSON\n" ++
        "\t--watch\t\t\tWatch mode - continuously update status\n" ++
        "\t--tui\t\t\tLaunch TUI monitor via SSH\n" ++
        "\t--limit <count>\tLimit number of results shown\n" ++
        "\t--watch-interval=<s>\tSet watch interval in seconds (default: 5)\n" ++
        "\t--help\t\t\tShow this help message\n";
    std.debug.print("{s}", .{help_text});
}