diff --git a/cli/src/commands/exec/mod.zig b/cli/src/commands/exec/mod.zig
index efa873a..5602df9 100644
--- a/cli/src/commands/exec/mod.zig
+++ b/cli/src/commands/exec/mod.zig
@@ -50,7 +50,7 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
 
         const arg = pre[i];
         if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
-            try printUsage();
+            std.debug.print("Exec module - used internally by 'ml run'\n", .{});
             return;
         } else if (std.mem.eql(u8, arg, "--json")) {
             flags.json = true;
@@ -95,7 +95,7 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
     }
 
     if (job_name == null) {
-        try printUsage();
+        std.log.err("No job name specified", .{});
         return error.InvalidArgs;
     }
 
@@ -159,36 +159,3 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
         },
     }
 }
-
-fn printUsage() !void {
-    std.debug.print(
-        \\n
-        \\ml exec [options] [-- ]
-        \\
-        \\Unified execution - works locally or remotely with transparent fallback.
-        \\
-        \\Options:
-        \\  --priority <1-10> Job priority (default: 5)
-        \\  --cpu CPU cores requested (default: 1)
-        \\  --memory Memory GB requested (default: 4)
-        \\  --gpu GPU devices requested (default: 0)
-        \\  --gpu-memory GPU memory spec
-        \\
-        \\Execution mode is controlled by execution_mode setting in config.
-        \\Use 'ml init --mode=local|remote|auto' to change.
-        \\
-        \\Research context:
-        \\  --hypothesis What you're testing
-        \\  --context Background information
-        \\  --intent What you're trying to do
-        \\  --expected-outcome What you expect to happen
-        \\  --tags Comma-separated tags
-        \\
-        \\Examples:
-        \\  ml exec train.py
-        \\  ml exec train.py -- --lr 0.001 --epochs 10
-        \\  ml exec train.py --priority 8 --gpu 1
-        \\  ml exec train.py --hypothesis "LR scaling helps"
-        \\
-    , .{});
-}