refactor(cli): remove obsolete printUsage from exec/mod.zig

Exec is now an internal module used by 'ml run', not a standalone
command. Remove the misleading 'ml exec' usage documentation and
replace with simple internal module message.
This commit is contained in:
Jeremie Fraeys 2026-03-05 12:23:42 -05:00
parent a36a5e4522
commit c4b6ae5d0c
No known key found for this signature in database

View file

@@ -50,7 +50,7 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
const arg = pre[i];
if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
try printUsage();
std.debug.print("Exec module - used internally by 'ml run'\n", .{});
return;
} else if (std.mem.eql(u8, arg, "--json")) {
flags.json = true;
@@ -95,7 +95,7 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
}
if (job_name == null) {
try printUsage();
std.log.err("No job name specified", .{});
return error.InvalidArgs;
}
@@ -159,36 +159,3 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
},
}
}
/// Prints the `ml exec` CLI help text to stderr via std.debug.print.
/// (Removed by this commit: exec became an internal module of 'ml run',
/// so this standalone usage text was obsolete and misleading.)
/// Signature is `!void` for call-site uniformity with `try printUsage()`,
/// though std.debug.print itself cannot fail.
fn printUsage() !void {
    std.debug.print(
        // NOTE(review): in a Zig multiline string, `\\n` is a line whose
        // content is the literal character 'n' — NOT a blank line or a
        // newline escape. A leading blank line was probably intended
        // (`\\` alone); confirm before reusing this text elsewhere.
        \\n
        \\ml exec <job_name> [options] [-- <args>]
        \\
        \\Unified execution - works locally or remotely with transparent fallback.
        \\
        \\Options:
        \\  --priority <1-10>      Job priority (default: 5)
        \\  --cpu <n>              CPU cores requested (default: 1)
        \\  --memory <n>           Memory GB requested (default: 4)
        \\  --gpu <n>              GPU devices requested (default: 0)
        \\  --gpu-memory <spec>    GPU memory spec
        \\
        \\Execution mode is controlled by execution_mode setting in config.
        \\Use 'ml init --mode=local|remote|auto' to change.
        \\
        \\Research context:
        \\  --hypothesis <text>    What you're testing
        \\  --context <text>       Background information
        \\  --intent <text>        What you're trying to do
        \\  --expected-outcome <t> What you expect to happen
        \\  --tags <csv>           Comma-separated tags
        \\
        \\Examples:
        \\  ml exec train.py
        \\  ml exec train.py -- --lr 0.001 --epochs 10
        \\  ml exec train.py --priority 8 --gpu 1
        \\  ml exec train.py --hypothesis "LR scaling helps"
        \\
    , .{});
}