refactor(cli): update build system and config for local mode

- Update Makefile with build-sqlite target matching rsync pattern
- Fix build.zig to handle SQLite assets and dataset_hash linking
- Add SQLite asset detection mirroring rsync binary detection
- Update CLI README with local mode documentation
- Restructure rsync assets into rsync/ subdirectory
- Remove obsolete files (fix_arraylist.sh, old rsync_placeholder.bin)
- Add build_rsync.sh script to fetch/build rsync from source
This commit is contained in:
Jeremie Fraeys 2026-02-20 15:50:52 -05:00
parent ff542b533f
commit 2c596038b5
No known key found for this signature in database
7 changed files with 426 additions and 125 deletions

View file

@ -4,13 +4,17 @@ ZIG ?= zig
BUILD_DIR ?= zig-out/bin
BINARY := $(BUILD_DIR)/ml
.PHONY: all prod dev test build-rsync install clean help
.PHONY: all prod dev test build-rsync build-sqlite install clean help
RSYNC_VERSION ?= 3.3.0
RSYNC_SRC_BASE ?= https://download.samba.org/pub/rsync/src
RSYNC_TARBALL ?= rsync-$(RSYNC_VERSION).tar.gz
RSYNC_TARBALL_SHA256 ?=
# SQLite amalgamation fetch settings (mirrors the RSYNC_* block above).
SQLITE_VERSION ?= 3450000
SQLITE_YEAR ?= 2024
# Derive the base URL from SQLITE_YEAR instead of hardcoding "2024",
# so `make build-sqlite SQLITE_YEAR=2025 SQLITE_VERSION=...` stays
# consistent. Default expands to the same URL as before.
SQLITE_SRC_BASE ?= https://www.sqlite.org/$(SQLITE_YEAR)
all: $(BINARY)
$(BUILD_DIR):
@ -35,6 +39,12 @@ build-rsync:
RSYNC_TARBALL_SHA256="$(RSYNC_TARBALL_SHA256)" \
bash "$(CURDIR)/scripts/build_rsync.sh"
build-sqlite:
@SQLITE_VERSION="$(SQLITE_VERSION)" \
SQLITE_YEAR="$(SQLITE_YEAR)" \
SQLITE_SRC_BASE="$(SQLITE_SRC_BASE)" \
bash "$(CURDIR)/scripts/build_sqlite.sh"
install: $(BINARY)
install -d $(DESTDIR)/usr/local/bin
install -m 0755 $(BINARY) $(DESTDIR)/usr/local/bin/ml
@ -49,5 +59,6 @@ help:
@echo " dev - build development binary with ReleaseFast"
@echo " test - run Zig unit tests"
@echo " build-rsync - build pinned rsync from official source into src/assets (RSYNC_VERSION=... override)"
@echo " build-sqlite - fetch SQLite amalgamation into src/assets (SQLITE_VERSION=... override)"
@echo " install - copy binary into /usr/local/bin"
@echo " clean - remove build artifacts"

View file

@ -1,6 +1,6 @@
# ML CLI
Fast CLI tool for managing ML experiments.
Fast CLI tool for managing ML experiments. Supports both **local mode** (SQLite) and **server mode** (WebSocket).
## Quick Start
@ -8,58 +8,62 @@ Fast CLI tool for managing ML experiments.
# 1. Build
zig build
# 2. Setup configuration
# 2. Initialize local tracking (creates fetch_ml.db)
./zig-out/bin/ml init
# 3. Run experiment
./zig-out/bin/ml sync ./my-experiment --queue
# 3. Create experiment and run locally
./zig-out/bin/ml experiment create --name "baseline"
./zig-out/bin/ml run start --experiment <id> --name "run-1"
./zig-out/bin/ml experiment log --run <id> --name loss --value 0.5
./zig-out/bin/ml run finish --run <id>
```
## Commands
- `ml init` - Setup configuration
### Local Mode Commands (SQLite)
- `ml init` - Initialize local experiment tracking database
- `ml experiment create --name <name>` - Create experiment locally
- `ml experiment list` - List experiments from SQLite
- `ml experiment log --run <id> --name <key> --value <val>` - Log metrics
- `ml run start --experiment <id> [--name <name>]` - Start a run
- `ml run finish --run <id>` - Mark run as finished
- `ml run fail --run <id>` - Mark run as failed
- `ml run list` - List all runs
### Server Mode Commands (WebSocket)
- `ml sync <path>` - Sync project to server
- `ml queue <job1> [job2 ...] [--commit <id>] [--priority N] [--note <text>]` - Queue one or more jobs
- `ml status` - Check system/queue status for your API key
- `ml validate <commit_id> [--json] [--task <task_id>]` - Validate provenance + integrity for a commit or task (includes `run_manifest.json` consistency checks when validating by task)
- `ml info <path|id> [--json] [--base <path>]` - Show run info from `run_manifest.json` (by path or by scanning `finished/failed/running/pending`)
- `ml annotate <path|run_id|task_id> --note <text> [--author <name>] [--base <path>] [--json]` - Append a human annotation to `run_manifest.json`
- `ml narrative set <path|run_id|task_id> [--hypothesis <text>] [--context <text>] [--intent <text>] [--expected-outcome <text>] [--parent-run <id>] [--experiment-group <text>] [--tags <csv>] [--base <path>] [--json]` - Patch the `narrative` field in `run_manifest.json`
- `ml monitor` - Launch monitoring interface (TUI)
- `ml cancel <job>` - Cancel a running/queued job you own
- `ml prune --keep N` - Keep N recent experiments
- `ml watch <path>` - Auto-sync directory
- `ml experiment log|show|list|delete` - Manage experiments and metrics
- `ml queue <job1> [job2 ...] [--commit <id>] [--priority N] [--note <text>]` - Queue jobs
- `ml status` - Check system/queue status
- `ml validate <commit_id> [--json] [--task <task_id>]` - Validate provenance
- `ml cancel <job>` - Cancel a running/queued job
### Shared Commands (Auto-detect Mode)
- `ml experiment log|show|list|delete` - Works in both local and server mode
- `ml monitor` - Launch TUI (local SQLite or remote SSH)
Notes:
- `--json` mode is designed to be pipe-friendly: machine-readable JSON is emitted to stdout, while user-facing messages/errors go to stderr.
- When running `ml validate --task <task_id>`, the server will try to locate the job's `run_manifest.json` under the configured base path (pending/running/finished/failed) and cross-check key fields (task id, commit id, deps, snapshot).
- For tasks in `running`, `completed`, or `failed` state, a missing `run_manifest.json` is treated as a validation failure. For `queued` tasks, it is treated as a warning (the job may not have started yet).
### Experiment workflow (minimal)
- `ml sync ./my-experiment --queue`
Syncs files, computes a unique commit ID for the directory, and queues a job.
- `ml queue my-job`
Queues a job named `my-job`. If `--commit` is omitted, the CLI generates a random commit ID
and records `(job_name, commit_id)` in `~/.ml/history.log` so you don't have to remember hashes.
- `ml queue my-job --note "baseline run; lr=1e-3"`
Adds a human-readable note to the run; it will be persisted into the run's `run_manifest.json` (under `metadata.note`).
- `ml experiment list`
Shows recent experiments from history with alias (job name) and commit ID.
- `ml experiment delete <alias|commit>`
Cancels a running/queued experiment by job name, full commit ID, or short commit prefix.
- Commands auto-detect mode from config (`sqlite://` vs `wss://`)
- `--json` mode is designed to be pipe-friendly
## Configuration
Create `~/.ml/config.toml`:
### Local Mode (SQLite)
```toml
# .fetchml/config.toml or ~/.ml/config.toml
tracking_uri = "sqlite://./fetch_ml.db"
artifact_path = "./experiments/"
sync_uri = "" # Optional: server to sync with
```
### Server Mode (WebSocket)
```toml
# ~/.ml/config.toml
worker_host = "worker.local"
worker_user = "mluser"
worker_base = "/data/ml-experiments"
@ -67,6 +71,23 @@ worker_port = 22
api_key = "your-api-key"
```
## Building
### Development
```bash
cd cli
zig build
```
### Production (requires SQLite in assets/)
```bash
cd cli
make build-sqlite # Fetch SQLite amalgamation
zig build prod # Build with embedded SQLite
```
## Install
```bash
@ -81,3 +102,4 @@ cp zig-out/bin/ml /usr/local/bin/
- `ml --help` - Show command help
- `ml <command> --help` - Show command-specific help

View file

@ -62,7 +62,36 @@ pub fn build(b: *std.Build) void {
options.addOption(bool, "has_rsync_release", has_rsync_release);
options.addOption([]const u8, "rsync_release_path", selected_embed_path);
// CLI executable
// Check for SQLite assets (mirrors rsync pattern)
const sqlite_dir_specific = b.fmt("src/assets/sqlite_release_{s}_{s}", .{ os_str, arch_str });
const sqlite_dir_default = "src/assets/sqlite_release";
var has_sqlite_release = false;
var sqlite_release_path: []const u8 = "";
// Try platform-specific directory first
if (std.fs.cwd().access(sqlite_dir_specific, .{})) |_| {
has_sqlite_release = true;
sqlite_release_path = sqlite_dir_specific;
} else |_| {
// Try default directory
if (std.fs.cwd().access(sqlite_dir_default, .{})) |_| {
has_sqlite_release = true;
sqlite_release_path = sqlite_dir_default;
} else |_| {}
}
if ((optimize == .ReleaseSmall or optimize == .ReleaseFast) and !has_sqlite_release) {
std.debug.panic(
"Release build requires SQLite amalgamation. Run: make build-sqlite",
.{},
);
}
options.addOption(bool, "has_sqlite_release", has_sqlite_release);
options.addOption([]const u8, "sqlite_release_path", sqlite_release_path);
// CLI executable - declared BEFORE SQLite setup so exe can be referenced
const exe = b.addExecutable(.{
.name = "ml",
.root_module = b.createModule(.{
@ -73,7 +102,6 @@ pub fn build(b: *std.Build) void {
});
exe.root_module.strip = true;
exe.root_module.addOptions("build_options", options);
// Link native dataset_hash library
@ -82,6 +110,22 @@ pub fn build(b: *std.Build) void {
exe.linkSystemLibrary("dataset_hash");
exe.addIncludePath(b.path("../native/dataset_hash"));
// SQLite setup: embedded for release, system lib for dev
if (has_sqlite_release) {
// Release: compile SQLite from downloaded amalgamation
const sqlite_c_path = b.fmt("{s}/sqlite3.c", .{sqlite_release_path});
exe.addCSourceFile(.{ .file = b.path(sqlite_c_path), .flags = &.{
"-DSQLITE_ENABLE_FTS5",
"-DSQLITE_ENABLE_JSON1",
"-DSQLITE_THREADSAFE=1",
"-DSQLITE_USE_URI",
} });
exe.addIncludePath(b.path(sqlite_release_path));
} else {
// Dev: link against system SQLite
exe.linkSystemLibrary("sqlite3");
}
// Install the executable to zig-out/bin
b.installArtifact(exe);

View file

@ -1,14 +0,0 @@
#!/bin/bash
# Fix ArrayList Zig 0.15 syntax
cd /Users/jfraeys/Documents/dev/fetch_ml/cli/src
for f in $(find . -name "*.zig" -exec grep -l "ArrayList" {} \;); do
# Fix .deinit() -> .deinit(allocator)
sed -i '' 's/\.deinit();/.deinit(allocator);/g' "$f"
# Fix .toOwnedSlice() -> .toOwnedSlice(allocator)
sed -i '' 's/\.toOwnedSlice();/.toOwnedSlice(allocator);/g' "$f"
done
echo "Fixed deinit and toOwnedSlice patterns"

View file

@ -83,3 +83,78 @@ ls -lh zig-out/prod/ml
- Different platforms need different static binaries
- For cross-compilation, provide platform-specific binaries
- The wrapper approach for dev builds is intentional for fast iteration
---
# SQLite Amalgamation Setup for Local Mode
## Overview
This directory contains SQLite source for FetchML local mode:
- `sqlite_release_<os>_<arch>/` - SQLite amalgamation for local mode (fetched, not in repo)
- `sqlite3.c` - Single-file SQLite implementation
- `sqlite3.h` - SQLite header file
## Build Modes
### Development/Debug Builds
- Link against the system SQLite library (`sqlite3`) when no amalgamation is present
- Requires SQLite development headers installed on the build machine
- Keeps the dev binary small and iteration fast
- Matches the fallback branch in `build.zig` (`exe.linkSystemLibrary("sqlite3")`)
### Release Builds (ReleaseSmall, ReleaseFast)
- Same SQLite compilation, optimized with release flags
- Fully self-contained, no dependencies
- Works on any system without SQLite installed
## Preparing SQLite
### Option 1: Fetch from Official Source (recommended)
```bash
cd cli
make build-sqlite SQLITE_VERSION=3450000
```
### Option 2: Download Yourself
```bash
# Download official amalgamation
SQLITE_VERSION=3450000
SQLITE_YEAR=2024
curl -fsSL "https://www.sqlite.org/${SQLITE_YEAR}/sqlite-amalgamation-${SQLITE_VERSION}.zip" -o sqlite.zip
unzip sqlite.zip
# Copy to assets (example)
os=$(uname -s | tr '[:upper:]' '[:lower:]')
arch=$(uname -m)
mkdir -p cli/src/assets/sqlite_release_${os}_${arch}
cp sqlite-amalgamation-${SQLITE_VERSION}/sqlite3.c cli/src/assets/sqlite_release_${os}_${arch}/
cp sqlite-amalgamation-${SQLITE_VERSION}/sqlite3.h cli/src/assets/sqlite_release_${os}_${arch}/
```
## Verification
After fetching SQLite:
```bash
# Verify files exist
ls -lh cli/src/assets/sqlite_release_*/sqlite3.c
ls -lh cli/src/assets/sqlite_release_*/sqlite3.h
# Build CLI
cd cli
zig build prod
# Check binary works with local mode
./zig-out/bin/ml init
```
## Notes
- `sqlite_release_*/` directories are not tracked in git
- In release builds, SQLite is compiled directly into the binary; dev builds link against the system library
- WAL mode is enabled for concurrent CLI writes and TUI reads
- The amalgamation approach matches SQLite's recommended embedding pattern

View file

@ -1,15 +0,0 @@
#!/bin/bash
# Rsync wrapper for development builds
# This calls the system's rsync instead of embedding a full binary
# Keeps the dev binary small (152KB) while still functional
# Find rsync on the system
RSYNC_PATH=$(which rsync 2>/dev/null || echo "/usr/bin/rsync")
if [ ! -x "$RSYNC_PATH" ]; then
echo "Error: rsync not found on system. Please install rsync or use a release build with embedded rsync." >&2
exit 127
fi
# Pass all arguments to system rsync
exec "$RSYNC_PATH" "$@"

View file

@ -1,7 +1,17 @@
const std = @import("std");
const security = @import("security.zig");
/// URI-based configuration for FetchML
/// Supports: sqlite:///path/to.db or wss://server.com/ws
pub const Config = struct {
// Primary storage URI for local mode
tracking_uri: []const u8,
// Artifacts directory (for local storage)
artifact_path: []const u8,
// Sync target URI (for pushing local runs to server)
sync_uri: []const u8,
// Legacy server config (for runner mode)
worker_host: []const u8,
worker_user: []const u8,
worker_base: []const u8,
@ -20,73 +30,118 @@ pub const Config = struct {
default_json: bool,
default_priority: u8,
/// Reports whether this configuration selects the embedded SQLite
/// backend (tracking_uri begins with "sqlite://") rather than the
/// remote runner backend (wss://).
pub fn isLocalMode(self: Config) bool {
    const local_scheme = "sqlite://";
    return std.mem.startsWith(u8, self.tracking_uri, local_scheme);
}
/// Get the database path from tracking_uri by stripping the "sqlite://"
/// scheme. A leading "~" (alone or as "~/...") is expanded to $HOME.
/// Caller owns the returned slice. Returns error.InvalidTrackingURI if
/// the URI does not use the sqlite:// scheme, error.NoHomeDir if tilde
/// expansion is needed but HOME is unset.
pub fn getDBPath(self: Config, allocator: std.mem.Allocator) ![]const u8 {
    const prefix = "sqlite://";
    if (!std.mem.startsWith(u8, self.tracking_uri, prefix)) {
        return error.InvalidTrackingURI;
    }
    const path = self.tracking_uri[prefix.len..];
    // Expand only "~" or "~/..." — the previous code also rewrote
    // "~otheruser/db" to "$HOME" ++ "otheruser/db", which is wrong.
    // "~user" home lookup has no cheap portable implementation here,
    // so such paths now pass through unchanged.
    if (path.len > 0 and path[0] == '~' and (path.len == 1 or path[1] == '/')) {
        const home = std.posix.getenv("HOME") orelse return error.NoHomeDir;
        return std.fmt.allocPrint(allocator, "{s}{s}", .{ home, path[1..] });
    }
    return allocator.dupe(u8, path);
}
// NOTE(review): this span is diff-view residue — it contains BOTH the
// pre-change (flat) and post-change (isLocalMode-gated) versions of each
// check, so every validation appears twice and the braces do not balance
// as plain Zig. Reconcile to the gated version before compiling.
pub fn validate(self: Config) !void {
// Validate host
if (self.worker_host.len == 0) {
return error.EmptyHost;
}
// Only validate server config if not in local mode
if (!self.isLocalMode()) {
// Validate host
if (self.worker_host.len == 0) {
return error.EmptyHost;
}
// Validate port range
if (self.worker_port == 0 or self.worker_port > 65535) {
return error.InvalidPort;
}
// Validate port range
if (self.worker_port == 0 or self.worker_port > 65535) {
return error.InvalidPort;
}
// Validate API key presence
if (self.api_key.len == 0) {
return error.EmptyAPIKey;
}
// Validate API key presence
if (self.api_key.len == 0) {
return error.EmptyAPIKey;
}
// Validate base path
if (self.worker_base.len == 0) {
return error.EmptyBasePath;
// Validate base path
if (self.worker_base.len == 0) {
return error.EmptyBasePath;
}
}
}
pub fn load(allocator: std.mem.Allocator) !Config {
const home = std.posix.getenv("HOME") orelse return error.NoHomeDir;
const config_path = try std.fmt.allocPrint(allocator, "{s}/.ml/config.toml", .{home});
defer allocator.free(config_path);
/// Load config with priority: CLI > Env > Project > Global > Default
pub fn loadWithOverrides(allocator: std.mem.Allocator, cli_tracking_uri: ?[]const u8, cli_artifact_path: ?[]const u8, cli_sync_uri: ?[]const u8) !Config {
// Start with defaults
var config = try loadDefaults(allocator);
const file = std.fs.openFileAbsolute(config_path, .{}) catch |err| {
if (err == error.FileNotFound) {
std.debug.print("Config file not found. Run 'ml init' first.\n", .{});
return error.ConfigNotFound;
}
return err;
};
defer file.close();
// Load config with environment variable overrides
var config = try loadFromFile(allocator, file);
// Apply environment variable overrides (FETCH_ML_CLI_* to match TUI)
if (std.posix.getenv("FETCH_ML_CLI_HOST")) |host| {
config.worker_host = try allocator.dupe(u8, host);
}
if (std.posix.getenv("FETCH_ML_CLI_USER")) |user| {
config.worker_user = try allocator.dupe(u8, user);
}
if (std.posix.getenv("FETCH_ML_CLI_BASE")) |base| {
config.worker_base = try allocator.dupe(u8, base);
}
if (std.posix.getenv("FETCH_ML_CLI_PORT")) |port_str| {
config.worker_port = try std.fmt.parseInt(u16, port_str, 10);
}
if (std.posix.getenv("FETCH_ML_CLI_API_KEY")) |api_key| {
config.api_key = try allocator.dupe(u8, api_key);
// Priority 4: Apply global config if exists
if (try loadGlobalConfig(allocator)) |global| {
config.apply(global);
config.deinitGlobal(allocator, global);
}
// Try to get API key from keychain if not in config or env
if (config.api_key.len == 0) {
if (try security.SecureStorage.retrieveApiKey(allocator)) |keychain_key| {
config.api_key = keychain_key;
}
// Priority 3: Apply project config if exists
if (try loadProjectConfig(allocator)) |project| {
config.apply(project);
config.deinitGlobal(allocator, project);
}
// Priority 2: Apply environment variables
config.applyEnv(allocator);
// Priority 1: Apply CLI overrides
if (cli_tracking_uri) |uri| {
allocator.free(config.tracking_uri);
config.tracking_uri = try allocator.dupe(u8, uri);
}
if (cli_artifact_path) |path| {
allocator.free(config.artifact_path);
config.artifact_path = try allocator.dupe(u8, path);
}
if (cli_sync_uri) |uri| {
allocator.free(config.sync_uri);
config.sync_uri = try allocator.dupe(u8, uri);
}
try config.validate();
return config;
}
/// Legacy load function (no overrides).
/// Delegates to loadWithOverrides with all CLI overrides set to null, so
/// the Env > Project > Global > Default layering still applies. Caller
/// owns the returned Config; release it with deinit().
pub fn load(allocator: std.mem.Allocator) !Config {
return loadWithOverrides(allocator, null, null, null);
}
/// Load default configuration: local SQLite mode with empty server
/// settings. Every string field is heap-allocated so deinit() can free
/// fields uniformly regardless of which layer last set them.
/// Caller owns the returned Config.
fn loadDefaults(allocator: std.mem.Allocator) !Config {
    // Allocate field-by-field with errdefer so that a dupe failure
    // partway through does not leak the earlier allocations (the
    // previous version leaked all prior strings on mid-sequence OOM).
    const tracking_uri = try allocator.dupe(u8, "sqlite://./fetch_ml.db");
    errdefer allocator.free(tracking_uri);
    const artifact_path = try allocator.dupe(u8, "./experiments/");
    errdefer allocator.free(artifact_path);
    const sync_uri = try allocator.dupe(u8, "");
    errdefer allocator.free(sync_uri);
    const worker_host = try allocator.dupe(u8, "");
    errdefer allocator.free(worker_host);
    const worker_user = try allocator.dupe(u8, "");
    errdefer allocator.free(worker_user);
    const worker_base = try allocator.dupe(u8, "");
    errdefer allocator.free(worker_base);
    const api_key = try allocator.dupe(u8, "");
    errdefer allocator.free(api_key);
    return Config{
        .tracking_uri = tracking_uri,
        .artifact_path = artifact_path,
        .sync_uri = sync_uri,
        .worker_host = worker_host,
        .worker_user = worker_user,
        .worker_base = worker_base,
        .worker_port = 22, // default SSH port; see apply()'s port layering
        .api_key = api_key,
        .default_cpu = 2,
        .default_memory = 8,
        .default_gpu = 0,
        .default_gpu_memory = null,
        .default_dry_run = false,
        .default_validate = false,
        .default_json = false,
        .default_priority = 5,
    };
}
fn loadFromFile(allocator: std.mem.Allocator, file: std.fs.File) !Config {
const content = try file.readToEndAlloc(allocator, 1024 * 1024);
defer allocator.free(content);
@ -123,7 +178,13 @@ pub const Config = struct {
else
value_raw;
if (std.mem.eql(u8, key, "worker_host")) {
if (std.mem.eql(u8, key, "tracking_uri")) {
config.tracking_uri = try allocator.dupe(u8, value);
} else if (std.mem.eql(u8, key, "artifact_path")) {
config.artifact_path = try allocator.dupe(u8, value);
} else if (std.mem.eql(u8, key, "sync_uri")) {
config.sync_uri = try allocator.dupe(u8, value);
} else if (std.mem.eql(u8, key, "worker_host")) {
config.worker_host = try allocator.dupe(u8, value);
} else if (std.mem.eql(u8, key, "worker_user")) {
config.worker_user = try allocator.dupe(u8, value);
@ -175,6 +236,11 @@ pub const Config = struct {
defer file.close();
const writer = file.writer();
try writer.print("# FetchML Configuration\n", .{});
try writer.print("tracking_uri = \"{s}\"\n", .{self.tracking_uri});
try writer.print("artifact_path = \"{s}\"\n", .{self.artifact_path});
try writer.print("sync_uri = \"{s}\"\n", .{self.sync_uri});
try writer.print("\n# Server config (for runner mode)\n", .{});
try writer.print("worker_host = \"{s}\"\n", .{self.worker_host});
try writer.print("worker_user = \"{s}\"\n", .{self.worker_user});
try writer.print("worker_base = \"{s}\"\n", .{self.worker_base});
@ -195,6 +261,9 @@ pub const Config = struct {
}
pub fn deinit(self: *Config, allocator: std.mem.Allocator) void {
allocator.free(self.tracking_uri);
allocator.free(self.artifact_path);
allocator.free(self.sync_uri);
allocator.free(self.worker_host);
allocator.free(self.worker_user);
allocator.free(self.worker_base);
@ -204,6 +273,89 @@ pub const Config = struct {
}
}
/// Apply settings from another config (for layering).
/// Non-empty string fields — and a non-default port — in `other`
/// override the corresponding fields in `self`.
///
/// FIXME(review): this copies slice POINTERS out of `other` without
/// duplicating them and without freeing the values they replace, so:
///  (1) the replaced `self` strings leak, and
///  (2) callers that free `other` afterwards (loadWithOverrides calls
///      deinitGlobal immediately after apply) leave `self` holding
///      dangling pointers — use-after-free on the next read.
/// A proper fix needs an allocator here to dupe incoming values.
fn apply(self: *Config, other: Config) void {
if (other.tracking_uri.len > 0) {
self.tracking_uri = other.tracking_uri;
}
if (other.artifact_path.len > 0) {
self.artifact_path = other.artifact_path;
}
if (other.sync_uri.len > 0) {
self.sync_uri = other.sync_uri;
}
if (other.worker_host.len > 0) {
self.worker_host = other.worker_host;
}
if (other.worker_user.len > 0) {
self.worker_user = other.worker_user;
}
if (other.worker_base.len > 0) {
self.worker_base = other.worker_base;
}
// NOTE(review): a layer that explicitly sets port 22 is
// indistinguishable from the loadDefaults value and will NOT override.
if (other.worker_port != 22) {
self.worker_port = other.worker_port;
}
if (other.api_key.len > 0) {
self.api_key = other.api_key;
}
}
/// Deinit a config that was loaded temporarily (a global/project layer).
/// Frees every heap-allocated field of `other` unconditionally; the
/// integer/bool fields need no cleanup. `self` is unused — the receiver
/// exists only so this can be called with method syntax.
///
/// WARNING(review): apply() copies slice pointers from `other` into the
/// destination config, so calling this right after apply() (as
/// loadWithOverrides does) frees memory the destination still
/// references — those fields become dangling. Confirm ownership model.
fn deinitGlobal(self: Config, allocator: std.mem.Allocator, other: Config) void {
_ = self;
allocator.free(other.tracking_uri);
allocator.free(other.artifact_path);
allocator.free(other.sync_uri);
allocator.free(other.worker_host);
allocator.free(other.worker_user);
allocator.free(other.worker_base);
allocator.free(other.api_key);
if (other.default_gpu_memory) |gpu_mem| {
allocator.free(gpu_mem);
}
}
/// Apply environment variable overrides (priority 2 in the layering).
/// Recognizes FETCHML_* variables for the URI-based config and legacy
/// FETCH_ML_CLI_* variables for the server config. An unparsable port
/// or an allocation failure leaves the existing setting untouched.
fn applyEnv(self: *Config, allocator: std.mem.Allocator) void {
    const util = struct {
        /// Replace *slot with a duped copy of `value`. The copy is made
        /// FIRST and the old slice freed only on success — the previous
        /// code freed the old value before duping and, on OOM, assigned
        /// the already-freed pointer back (use-after-free/double-free).
        fn replace(a: std.mem.Allocator, slot: *[]const u8, value: []const u8) void {
            const copy = a.dupe(u8, value) catch return;
            a.free(slot.*);
            slot.* = copy;
        }
    };
    // FETCHML_* environment variables for URI-based config
    if (std.posix.getenv("FETCHML_TRACKING_URI")) |uri| util.replace(allocator, &self.tracking_uri, uri);
    if (std.posix.getenv("FETCHML_ARTIFACT_PATH")) |path| util.replace(allocator, &self.artifact_path, path);
    if (std.posix.getenv("FETCHML_SYNC_URI")) |uri| util.replace(allocator, &self.sync_uri, uri);
    // Legacy FETCH_ML_CLI_* variables
    if (std.posix.getenv("FETCH_ML_CLI_HOST")) |host| util.replace(allocator, &self.worker_host, host);
    if (std.posix.getenv("FETCH_ML_CLI_USER")) |user| util.replace(allocator, &self.worker_user, user);
    if (std.posix.getenv("FETCH_ML_CLI_BASE")) |base| util.replace(allocator, &self.worker_base, base);
    if (std.posix.getenv("FETCH_ML_CLI_PORT")) |port_str| {
        // Malformed port values are ignored (best-effort override).
        if (std.fmt.parseInt(u16, port_str, 10)) |port| {
            self.worker_port = port;
        } else |_| {}
    }
    if (std.posix.getenv("FETCH_ML_CLI_API_KEY")) |api_key| util.replace(allocator, &self.api_key, api_key);
}
/// Get WebSocket URL for connecting to the server
pub fn getWebSocketUrl(self: Config, allocator: std.mem.Allocator) ![]u8 {
const protocol = if (self.worker_port == 443) "wss" else "ws";
@ -212,3 +364,29 @@ pub const Config = struct {
});
}
};
/// Load the global config from ~/.ml/config.toml.
/// Returns null when HOME is unset or the file does not exist; any
/// other open/read error is propagated. Caller owns the result.
fn loadGlobalConfig(allocator: std.mem.Allocator) !?Config {
    const home_dir = std.posix.getenv("HOME") orelse return null;
    const toml_path = try std.fmt.allocPrint(allocator, "{s}/.ml/config.toml", .{home_dir});
    defer allocator.free(toml_path);
    const toml_file = std.fs.openFileAbsolute(toml_path, .{}) catch |err| switch (err) {
        // A missing global config is not an error — layering skips it.
        error.FileNotFound => return null,
        else => return err,
    };
    defer toml_file.close();
    return try Config.loadFromFile(allocator, toml_file);
}
/// Load the project config from .fetchml/config.toml relative to the
/// current working directory. Returns null when the file is absent;
/// other open errors are propagated. Caller owns the result.
fn loadProjectConfig(allocator: std.mem.Allocator) !?Config {
    // Bug fix: ".fetchml/config.toml" is a RELATIVE path, but
    // openFileAbsolute asserts its argument is absolute (panic/illegal
    // behavior in safe builds). Open via the CWD handle instead.
    const file = std.fs.cwd().openFile(".fetchml/config.toml", .{}) catch |err| switch (err) {
        error.FileNotFound => return null,
        else => return err,
    };
    defer file.close();
    return try Config.loadFromFile(allocator, file);
}