refactor(cli): Update build system and core infrastructure

- Makefile: Update build targets for native library integration
- build.zig: Add SQLite linking and native hash library support
- scripts/build_rsync.sh: Update rsync embedded binary build process
- scripts/build_sqlite.sh: Add SQLite constants generation script
- src/assets/README.md: Document embedded asset structure
- src/utils/rsync_embedded_binary.zig: Update for new build layout
This commit is contained in:
Jeremie Fraeys 2026-02-20 21:39:51 -05:00
parent 04ac745b01
commit a3b957dcc0
No known key found for this signature in database
29 changed files with 1485 additions and 498 deletions

View file

@ -74,7 +74,7 @@ jobs:
*) echo "Unsupported Zig target: $TARGET"; exit 1 ;;
esac
RSYNC_OUT="cli/src/assets/rsync_release_${OS}_${ARCH}.bin"
RSYNC_OUT="cli/src/assets/rsync/rsync_release_${OS}_${ARCH}.bin"
wget -O "$RSYNC_OUT" ${{ matrix.rsync-url }} || \
curl -L -o "$RSYNC_OUT" ${{ matrix.rsync-url }}
@ -83,6 +83,37 @@ jobs:
chmod +x "$RSYNC_OUT"
ls -lh "$RSYNC_OUT"
- name: Download SQLite amalgamation
run: |
TARGET="${{ matrix.target }}"
OS=""
ARCH=""
case "$TARGET" in
x86_64-linux-*) OS="linux"; ARCH="x86_64" ;;
aarch64-linux-*) OS="linux"; ARCH="arm64" ;;
x86_64-macos*) OS="darwin"; ARCH="x86_64" ;;
aarch64-macos*) OS="darwin"; ARCH="arm64" ;;
x86_64-windows*) OS="windows"; ARCH="x86_64" ;;
aarch64-windows*) OS="windows"; ARCH="arm64" ;;
*) echo "Unsupported Zig target: $TARGET"; exit 1 ;;
esac
SQLITE_VERSION="3480000"
SQLITE_YEAR="2025"
SQLITE_URL="https://www.sqlite.org/${SQLITE_YEAR}/sqlite-amalgamation-${SQLITE_VERSION}.zip"
SQLITE_DIR="cli/src/assets/sqlite_${OS}_${ARCH}"
mkdir -p "$SQLITE_DIR"
echo "Fetching SQLite ${SQLITE_VERSION}..."
wget -O /tmp/sqlite.zip "$SQLITE_URL" || \
curl -L -o /tmp/sqlite.zip "$SQLITE_URL"
unzip -q /tmp/sqlite.zip -d /tmp/
mv /tmp/sqlite-amalgamation-${SQLITE_VERSION}/* "$SQLITE_DIR/"
ls -lh "$SQLITE_DIR"/sqlite3.c "$SQLITE_DIR"/sqlite3.h
- name: Build CLI
working-directory: cli
run: |

5
.gitignore vendored
View file

@ -245,8 +245,9 @@ db/*.db
*.key
*.pem
secrets/
cli/src/assets/rsync_release.bin
cli/src/assets/rsync_release_*.bin
# Downloaded assets (platform-specific)
cli/src/assets/rsync/rsync_release_*.bin
cli/src/assets/sqlite_*/
# Local artifacts (e.g. test run outputs)
.local-artifacts/

View file

@ -1,4 +1,4 @@
.PHONY: all build prod prod-with-native native-release native-build native-debug native-test native-smoke native-clean dev clean clean-docs test test-unit test-integration test-e2e test-coverage lint install configlint worker-configlint ci-local docs docs-setup docs-check-port docs-stop docs-build docs-build-prod benchmark benchmark-local artifacts clean-benchmarks clean-all clean-aggressive status size load-test chaos-test profile-load profile-load-norate profile-ws-queue profile-tools detect-regressions tech-excellence docker-build dev-smoke prod-smoke native-smoke self-cleanup test-full test-auth deploy-up deploy-down deploy-status deploy-clean dev-up dev-down dev-status dev-logs prod-up prod-down prod-status prod-logs security-scan gosec govulncheck check-unsafe security-audit test-security
.PHONY: all build prod prod-with-native native-release native-build native-debug native-test native-smoke native-clean dev clean clean-docs test test-unit test-integration test-e2e test-coverage lint install configlint worker-configlint ci-local docs docs-setup docs-check-port docs-stop docs-build docs-build-prod benchmark benchmark-local artifacts clean-benchmarks clean-all clean-aggressive status size load-test chaos-test profile-load profile-load-norate profile-ws-queue profile-tools detect-regressions tech-excellence docker-build dev-smoke prod-smoke native-smoke self-cleanup test-full test-auth deploy-up deploy-down deploy-status deploy-clean dev-up dev-down dev-status dev-logs prod-up prod-down prod-status prod-logs security-scan gosec govulncheck check-unsafe security-audit test-security check-sqlbuild
OK =
DOCS_PORT ?= 1313
DOCS_BIND ?= 127.0.0.1
@ -192,18 +192,10 @@ worker-configlint:
configs/workers/docker-prod.yaml \
configs/workers/homelab-secure.yaml
# Check SQLite availability (embedded, no system dependency needed)
check-sqlite-embedded:
@if [ ! -f "cli/src/deps/sqlite3.c" ]; then \
echo "Fetching SQLite amalgamation..."; \
bash scripts/dev/fetch-sqlite.sh; \
fi
@echo "${OK} SQLite ready (embedded)"
# Check CLI builds correctly with embedded SQLite
check-cli: check-sqlite-embedded
@$(MAKE) -C cli build
@echo "${OK} CLI built successfully with embedded SQLite"
# Check CLI builds correctly (SQLite handled automatically by build.zig)
build-cli:
@$(MAKE) -C cli all
@echo "${OK} CLI built successfully"
dev-smoke:
bash ./scripts/dev/smoke-test.sh dev

View file

@ -162,6 +162,17 @@ See `docs/` for detailed guides:
- `docs/src/research-features.md` Research workflow features (narrative capture, outcomes, search)
- `docs/src/privacy-security.md` Privacy levels, PII detection, anonymized export
## CLI Architecture (2026-02)
The Zig CLI has been refactored for improved maintainability:
- **Modular 3-layer architecture**: `core/` (foundation), `local/`/`server/` (mode-specific), `commands/` (routers)
- **Unified context**: `core.context.Context` handles mode detection, output formatting, and dispatch
- **Code reduction**: `experiment.zig` reduced from 836 to 348 lines (58% reduction)
- **Bug fixes**: Resolved 15+ compilation errors across multiple commands
See `cli/README.md` for detailed architecture documentation.
## Source code
The FetchML source code is intentionally not hosted on GitHub.

559
api/openapi.yaml Normal file
View file

@ -0,0 +1,559 @@
openapi: 3.0.3
info:
title: ML Worker API
description: |
API for managing ML experiment tasks and Jupyter services.
## Security
All endpoints (except health checks) require API key authentication via the
`X-API-Key` header. Rate limiting is enforced per API key.
## Error Handling
Errors follow a consistent format with machine-readable codes and trace IDs:
```json
{
"error": "Sanitized error message",
"code": "ERROR_CODE",
"trace_id": "uuid-for-support"
}
```
version: 1.0.0
contact:
name: FetchML Support
servers:
- url: http://localhost:9101
description: Local development server
- url: https://api.fetchml.example.com
description: Production server
security:
- ApiKeyAuth: []
paths:
/health:
get:
summary: Health check
description: Returns server health status. No authentication required.
security: []
responses:
'200':
description: Server is healthy
content:
application/json:
schema:
$ref: '#/components/schemas/HealthResponse'
/v1/tasks:
get:
summary: List tasks
description: List all tasks with optional filtering
parameters:
- name: status
in: query
schema:
type: string
enum: [queued, running, completed, failed]
- name: limit
in: query
schema:
type: integer
default: 50
maximum: 1000
- name: offset
in: query
schema:
type: integer
default: 0
responses:
'200':
description: List of tasks
content:
application/json:
schema:
$ref: '#/components/schemas/TaskList'
'400':
$ref: '#/components/responses/BadRequest'
'401':
$ref: '#/components/responses/Unauthorized'
'429':
$ref: '#/components/responses/RateLimited'
post:
summary: Create task
description: Submit a new ML experiment task
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/CreateTaskRequest'
responses:
'201':
description: Task created successfully
content:
application/json:
schema:
$ref: '#/components/schemas/Task'
'400':
$ref: '#/components/responses/BadRequest'
'401':
$ref: '#/components/responses/Unauthorized'
'422':
$ref: '#/components/responses/ValidationError'
'429':
$ref: '#/components/responses/RateLimited'
/v1/tasks/{taskId}:
get:
summary: Get task details
parameters:
- name: taskId
in: path
required: true
schema:
type: string
responses:
'200':
description: Task details
content:
application/json:
schema:
$ref: '#/components/schemas/Task'
'404':
$ref: '#/components/responses/NotFound'
delete:
summary: Cancel/delete task
parameters:
- name: taskId
in: path
required: true
schema:
type: string
responses:
'204':
description: Task cancelled
'404':
$ref: '#/components/responses/NotFound'
/v1/queue:
get:
summary: Queue status
description: Get current queue statistics
responses:
'200':
description: Queue statistics
content:
application/json:
schema:
$ref: '#/components/schemas/QueueStats'
/v1/experiments:
get:
summary: List experiments
description: List all experiments
responses:
'200':
description: List of experiments
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/Experiment'
post:
summary: Create experiment
description: Create a new experiment
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/CreateExperimentRequest'
responses:
'201':
description: Experiment created
content:
application/json:
schema:
$ref: '#/components/schemas/Experiment'
/v1/jupyter/services:
get:
summary: List Jupyter services
responses:
'200':
description: List of Jupyter services
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/JupyterService'
post:
summary: Start Jupyter service
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/StartJupyterRequest'
responses:
'201':
description: Jupyter service started
content:
application/json:
schema:
$ref: '#/components/schemas/JupyterService'
/v1/jupyter/services/{serviceId}:
delete:
summary: Stop Jupyter service
parameters:
- name: serviceId
in: path
required: true
schema:
type: string
responses:
'204':
description: Service stopped
/ws:
get:
summary: WebSocket connection
description: |
WebSocket endpoint for real-time task updates.
## Message Types
- `task_update`: Task status changes
- `task_complete`: Task finished
- `ping`: Keep-alive (respond with `pong`)
security:
- ApiKeyAuth: []
responses:
'101':
description: WebSocket connection established
components:
securitySchemes:
ApiKeyAuth:
type: apiKey
in: header
name: X-API-Key
description: API key for authentication
schemas:
HealthResponse:
type: object
properties:
status:
type: string
enum: [healthy, degraded, unhealthy]
version:
type: string
timestamp:
type: string
format: date-time
Task:
type: object
properties:
id:
type: string
description: Unique task identifier
job_name:
type: string
pattern: '^[a-zA-Z0-9_-]+$'
maxLength: 64
status:
type: string
enum: [queued, preparing, running, collecting, completed, failed]
priority:
type: integer
minimum: 1
maximum: 10
default: 5
created_at:
type: string
format: date-time
started_at:
type: string
format: date-time
ended_at:
type: string
format: date-time
worker_id:
type: string
error:
type: string
output:
type: string
snapshot_id:
type: string
datasets:
type: array
items:
type: string
cpu:
type: integer
memory_gb:
type: integer
gpu:
type: integer
user_id:
type: string
retry_count:
type: integer
max_retries:
type: integer
CreateTaskRequest:
type: object
required:
- job_name
properties:
job_name:
type: string
pattern: '^[a-zA-Z0-9_-]+$'
maxLength: 64
description: Unique identifier for the job
priority:
type: integer
minimum: 1
maximum: 10
default: 5
args:
type: string
description: Command-line arguments for the training script
snapshot_id:
type: string
description: Reference to experiment snapshot
datasets:
type: array
items:
type: string
dataset_specs:
type: array
items:
$ref: '#/components/schemas/DatasetSpec'
cpu:
type: integer
description: CPU cores requested
memory_gb:
type: integer
description: Memory (GB) requested
gpu:
type: integer
description: GPUs requested
metadata:
type: object
additionalProperties:
type: string
DatasetSpec:
type: object
properties:
name:
type: string
source:
type: string
sha256:
type: string
mount_path:
type: string
TaskList:
type: object
properties:
tasks:
type: array
items:
$ref: '#/components/schemas/Task'
total:
type: integer
limit:
type: integer
offset:
type: integer
QueueStats:
type: object
properties:
queued:
type: integer
description: Tasks waiting to run
running:
type: integer
description: Tasks currently executing
completed:
type: integer
description: Tasks completed today
failed:
type: integer
description: Tasks failed today
workers:
type: integer
description: Active workers
Experiment:
type: object
properties:
id:
type: string
name:
type: string
commit_id:
type: string
created_at:
type: string
format: date-time
status:
type: string
enum: [active, archived, deleted]
CreateExperimentRequest:
type: object
required:
- name
properties:
name:
type: string
maxLength: 128
description:
type: string
JupyterService:
type: object
properties:
id:
type: string
name:
type: string
status:
type: string
enum: [starting, running, stopping, stopped, error]
url:
type: string
format: uri
token:
type: string
created_at:
type: string
format: date-time
StartJupyterRequest:
type: object
required:
- name
properties:
name:
type: string
workspace:
type: string
image:
type: string
default: jupyter/pytorch:latest
ErrorResponse:
type: object
required:
- error
- code
- trace_id
properties:
error:
type: string
description: Sanitized error message
code:
type: string
enum: [BAD_REQUEST, UNAUTHORIZED, FORBIDDEN, NOT_FOUND, CONFLICT, RATE_LIMITED, INTERNAL_ERROR, SERVICE_UNAVAILABLE, VALIDATION_ERROR]
trace_id:
type: string
description: Support correlation ID
responses:
BadRequest:
description: Invalid request
content:
application/json:
schema:
$ref: '#/components/schemas/ErrorResponse'
example:
error: Invalid request format
code: BAD_REQUEST
trace_id: a1b2c3d4-e5f6-7890-abcd-ef1234567890
Unauthorized:
description: Authentication required
content:
application/json:
schema:
$ref: '#/components/schemas/ErrorResponse'
example:
error: Invalid or missing API key
code: UNAUTHORIZED
trace_id: a1b2c3d4-e5f6-7890-abcd-ef1234567890
Forbidden:
description: Insufficient permissions
content:
application/json:
schema:
$ref: '#/components/schemas/ErrorResponse'
example:
error: Insufficient permissions
code: FORBIDDEN
trace_id: a1b2c3d4-e5f6-7890-abcd-ef1234567890
NotFound:
description: Resource not found
content:
application/json:
schema:
$ref: '#/components/schemas/ErrorResponse'
example:
error: Resource not found
code: NOT_FOUND
trace_id: a1b2c3d4-e5f6-7890-abcd-ef1234567890
ValidationError:
description: Validation failed
content:
application/json:
schema:
$ref: '#/components/schemas/ErrorResponse'
example:
error: Validation failed
code: VALIDATION_ERROR
trace_id: a1b2c3d4-e5f6-7890-abcd-ef1234567890
RateLimited:
description: Too many requests
content:
application/json:
schema:
$ref: '#/components/schemas/ErrorResponse'
example:
error: Rate limit exceeded
code: RATE_LIMITED
trace_id: a1b2c3d4-e5f6-7890-abcd-ef1234567890
headers:
Retry-After:
schema:
type: integer
description: Seconds until rate limit resets
InternalError:
description: Internal server error
content:
application/json:
schema:
$ref: '#/components/schemas/ErrorResponse'
example:
error: An error occurred
code: INTERNAL_ERROR
trace_id: a1b2c3d4-e5f6-7890-abcd-ef1234567890

View file

@ -11,9 +11,9 @@ RSYNC_SRC_BASE ?= https://download.samba.org/pub/rsync/src
RSYNC_TARBALL ?= rsync-$(RSYNC_VERSION).tar.gz
RSYNC_TARBALL_SHA256 ?=
SQLITE_VERSION ?= 3450000
SQLITE_YEAR ?= 2024
SQLITE_SRC_BASE ?= https://www.sqlite.org/2024
SQLITE_VERSION ?= 3480000
SQLITE_YEAR ?= 2025
SQLITE_SRC_BASE ?= https://www.sqlite.org/2025
all: $(BINARY)
@ -23,12 +23,33 @@ $(BUILD_DIR):
$(BINARY): | $(BUILD_DIR)
$(ZIG) build --release=small
prod: src/main.zig | $(BUILD_DIR)
UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S),Linux)
PROD_DEPS := build-rsync build-sqlite
else
PROD_DEPS := build-sqlite
endif
# Production build: optimized for speed with ReleaseFast + LTO
prod: $(PROD_DEPS) | $(BUILD_DIR)
$(ZIG) build --release=fast
# Tiny build: smallest binary with ReleaseSmall
# Note: Requires SQLite amalgamation
.PHONY: tiny
tiny: $(PROD_DEPS) | $(BUILD_DIR)
$(ZIG) build --release=small
# Development build: fast compilation + optimizations
dev: src/main.zig | $(BUILD_DIR)
$(ZIG) build --release=fast
# Debug build: fastest compilation, no optimizations
.PHONY: debug
debug: src/main.zig | $(BUILD_DIR)
$(ZIG) build -Doptimize=Debug
test:
$(ZIG) build test
@ -40,8 +61,8 @@ build-rsync:
bash "$(CURDIR)/scripts/build_rsync.sh"
# Fetch the SQLite amalgamation into src/assets/sqlite_<os>_<arch>/.
# IMPORTANT: use Make-level expansion $(VAR) here so the `?=` defaults
# defined above and command-line overrides (e.g. `make build-sqlite
# SQLITE_VERSION=3450000`) propagate into the script's environment.
# Shell-style ${VAR:-default} must NOT be used in a recipe: Make expands
# ${...} itself, and `:-` is not valid Make substitution syntax (that
# requires `=`), so `${SQLITE_VERSION:-3480000}` is read as an undefined
# variable name and silently expands to an empty string, discarding any
# override. The script itself already applies shell defaults as a last
# resort when these variables are unset.
build-sqlite:
	@SQLITE_VERSION="$(SQLITE_VERSION)" \
	SQLITE_YEAR="$(SQLITE_YEAR)" \
	SQLITE_SRC_BASE="$(SQLITE_SRC_BASE)" \
	bash "$(CURDIR)/scripts/build_sqlite.sh"
@ -54,11 +75,13 @@ clean:
help:
@echo "Targets:"
@echo " all - build release-small binary (default)"
@echo " prod - build production binary with ReleaseSmall"
@echo " dev - build development binary with ReleaseFast"
@echo " prod - build production binary with ReleaseFast + LTO (best performance)"
@echo " tiny - build minimal binary with ReleaseSmall (smallest size)"
@echo " dev - build development binary with ReleaseFast (quick builds)"
@echo " debug - build debug binary with no optimizations (fastest compile)"
@echo " all - build release-small binary (legacy, same as 'tiny')"
@echo " test - run Zig unit tests"
@echo " build-rsync - build pinned rsync from official source into src/assets (RSYNC_VERSION=... override)"
@echo " build-sqlite - fetch SQLite amalgamation into src/assets (SQLITE_VERSION=... override)"
@echo " build-rsync - build pinned rsync from official source into src/assets"
@echo " build-sqlite - fetch SQLite amalgamation into src/assets"
@echo " install - copy binary into /usr/local/bin"
@echo " clean - remove build artifacts"

View file

@ -2,6 +2,43 @@
Fast CLI tool for managing ML experiments. Supports both **local mode** (SQLite) and **server mode** (WebSocket).
## Architecture
The CLI follows a modular 3-layer architecture for maintainability:
```
src/
├── core/ # Shared foundation
│ ├── context.zig # Execution context (allocator, config, mode dispatch)
│ ├── output.zig # Unified JSON/text output helpers
│ └── flags.zig # Common flag parsing
├── local/ # Local mode operations (SQLite)
│ └── experiment_ops.zig # Experiment CRUD for local DB
├── server/ # Server mode operations (WebSocket)
│ └── experiment_api.zig # Experiment API for remote server
├── commands/ # Thin command routers
│   ├── experiment.zig # ~100 lines (was 836)
│ ├── queue.zig # Job submission
│ └── queue/ # Queue submodules
│ ├── parse.zig # Job template parsing
│ ├── validate.zig # Validation logic
│ └── submit.zig # Job submission
└── utils/ # Utilities (21 files)
```
### Mode Dispatch Pattern
Commands auto-detect local vs server mode using `core.context.Context`:
```zig
var ctx = core.context.Context.init(allocator, cfg, flags.json);
if (ctx.isLocal()) {
return try local.experiment.list(ctx.allocator, ctx.json_output);
} else {
return try server.experiment.list(ctx.allocator, ctx.json_output);
}
```
## Quick Start
```bash
@ -49,6 +86,59 @@ Notes:
- Commands auto-detect mode from config (`sqlite://` vs `wss://`)
- `--json` mode is designed to be pipe-friendly
## Core Modules
### `core.context`
Provides unified execution context for all commands:
- **Mode detection**: Automatically detects local (SQLite) vs server (WebSocket) mode
- **Output handling**: JSON vs text output based on `--json` flag
- **Dispatch helpers**: `ctx.dispatch(local_fn, server_fn, args)` for mode-specific implementations
```zig
const core = @import("../core.zig");
pub fn execute(allocator: std.mem.Allocator, args: []const []const u8) !void {
const cfg = try config.Config.load(allocator);
var ctx = core.context.Context.init(allocator, cfg, flags.json);
defer ctx.deinit();
// Dispatch to local or server implementation
if (ctx.isLocal()) {
return try local.experiment.list(ctx.allocator, ctx.json_output);
} else {
return try server.experiment.list(ctx.allocator, ctx.json_output);
}
}
```
### `core.output`
Unified output helpers that respect `--json` flag:
```zig
core.output.errorMsg("command", "Error message"); // JSON: {"success":false,...}
core.output.success("command"); // JSON: {"success":true,...}
core.output.successString("cmd", "key", "value"); // JSON with data
core.output.info("Text output", .{}); // Text mode only
core.output.usage("cmd", "usage string"); // Help text
```
### `core.flags`
Common flag parsing utilities:
```zig
var flags = core.flags.CommonFlags{};
var remaining = try core.flags.parseCommon(allocator, args, &flags);
// Check for subcommands
if (core.flags.matchSubcommand(remaining.items, "list")) |sub_args| {
return try executeList(ctx, sub_args);
}
```
## Configuration
### Local Mode (SQLite)
@ -98,6 +188,45 @@ make install
cp zig-out/bin/ml /usr/local/bin/
```
## Local/Server Module Pattern
Commands that work in both modes follow this structure:
```
src/
├── local.zig # Module index
├── local/
│ └── experiment_ops.zig # Local implementations
├── server.zig # Module index
└── server/
└── experiment_api.zig # Server implementations
```
### Adding a New Command
1. Create local implementation in `src/local/<name>_ops.zig`
2. Create server implementation in `src/server/<name>_api.zig`
3. Export from `src/local.zig` and `src/server.zig`
4. Create thin router in `src/commands/<name>.zig` using `ctx.dispatch()`
## Maintainability Cleanup (2026-02)
Recent refactoring improved code organization:
| Metric | Before | After |
|--------|--------|-------|
| experiment.zig | 836 lines | 348 lines (58% reduction) |
| queue.zig | 1203 lines | Modular structure |
| Duplicate printUsage | 24 functions | 1 shared helper |
| Mode dispatch logic | Inlined everywhere | `core.context.Context` |
### Key Improvements
1. **Core Modules**: Unified `core.output`, `core.flags`, `core.context` eliminate duplication
2. **Mode Abstraction**: Local/server operations separated into dedicated modules
3. **Queue Decomposition**: `queue/` submodules for parsing, validation, submission
4. **Bug Fixes**: Resolved 15+ compilation errors in `narrative.zig`, `outcome.zig`, `annotate.zig`, etc.
## Need Help?
- `ml --help` - Show command help

View file

@ -8,8 +8,8 @@ pub fn build(b: *std.Build) void {
const test_filter = b.option([]const u8, "test-filter", "Filter unit tests by name");
_ = test_filter;
// Optimized release mode for size
const optimize = b.standardOptimizeOption(.{ .preferred_optimize_mode = .ReleaseSmall });
// Standard optimize option - let user choose, default to ReleaseSmall for production
const optimize = b.standardOptimizeOption(.{});
const options = b.addOptions();
@ -28,8 +28,8 @@ pub fn build(b: *std.Build) void {
else => "unknown",
};
const candidate_specific = b.fmt("src/assets/rsync_release_{s}_{s}.bin", .{ os_str, arch_str });
const candidate_default = "src/assets/rsync_release.bin";
const candidate_specific = b.fmt("src/assets/rsync/rsync_release_{s}_{s}.bin", .{ os_str, arch_str });
const candidate_default = "src/assets/rsync/rsync_release.bin";
var selected_candidate: []const u8 = "";
var has_rsync_release = false;
@ -55,36 +55,29 @@ pub fn build(b: *std.Build) void {
// rsync_embedded_binary.zig calls @embedFile() from cli/src/utils, so the embed path
// must be relative to that directory.
const selected_embed_path = if (has_rsync_release)
b.fmt("../assets/{s}", .{std.fs.path.basename(selected_candidate)})
b.fmt("../assets/rsync/{s}", .{std.fs.path.basename(selected_candidate)})
else
"";
options.addOption(bool, "has_rsync_release", has_rsync_release);
options.addOption([]const u8, "rsync_release_path", selected_embed_path);
// Check for SQLite assets (mirrors rsync pattern)
const sqlite_dir_specific = b.fmt("src/assets/sqlite_release_{s}_{s}", .{ os_str, arch_str });
const sqlite_dir_default = "src/assets/sqlite_release";
// Check for SQLite assets (platform-specific only, no generic fallback)
const sqlite_dir = b.fmt("src/assets/sqlite_{s}_{s}", .{ os_str, arch_str });
var has_sqlite_release = false;
var sqlite_release_path: []const u8 = "";
// Try platform-specific directory first
if (std.fs.cwd().access(sqlite_dir_specific, .{})) |_| {
// Only check platform-specific directory
if (std.fs.cwd().access(sqlite_dir, .{})) |_| {
has_sqlite_release = true;
sqlite_release_path = sqlite_dir_specific;
} else |_| {
// Try default directory
if (std.fs.cwd().access(sqlite_dir_default, .{})) |_| {
has_sqlite_release = true;
sqlite_release_path = sqlite_dir_default;
} else |_| {}
}
sqlite_release_path = sqlite_dir;
} else |_| {}
if ((optimize == .ReleaseSmall or optimize == .ReleaseFast) and !has_sqlite_release) {
if (optimize == .ReleaseSmall and !has_sqlite_release) {
std.debug.panic(
"Release build requires SQLite amalgamation. Run: make build-sqlite",
.{},
"ReleaseSmall build requires SQLite amalgamation (detected optimize={s}). Run: make build-sqlite",
.{@tagName(optimize)},
);
}
@ -103,6 +96,8 @@ pub fn build(b: *std.Build) void {
exe.root_module.strip = true;
exe.root_module.addOptions("build_options", options);
// LTO disabled: requires LLD linker which may not be available
// exe.want_lto = true;
// Link native dataset_hash library
exe.linkLibC();
@ -110,8 +105,9 @@ pub fn build(b: *std.Build) void {
exe.linkSystemLibrary("dataset_hash");
exe.addIncludePath(b.path("../native/dataset_hash"));
// SQLite setup: embedded for release, system lib for dev
if (has_sqlite_release) {
// SQLite setup: embedded for ReleaseSmall only, system lib for dev
const use_embedded_sqlite = has_sqlite_release and (optimize == .ReleaseSmall);
if (use_embedded_sqlite) {
// Release: compile SQLite from downloaded amalgamation
const sqlite_c_path = b.fmt("{s}/sqlite3.c", .{sqlite_release_path});
exe.addCSourceFile(.{ .file = b.path(sqlite_c_path), .flags = &.{
@ -121,9 +117,20 @@ pub fn build(b: *std.Build) void {
"-DSQLITE_USE_URI",
} });
exe.addIncludePath(b.path(sqlite_release_path));
// Compile SQLite constants wrapper (needed for SQLITE_TRANSIENT workaround)
exe.addCSourceFile(.{ .file = b.path("src/assets/sqlite/sqlite_constants.c"), .flags = &.{ "-Wall", "-Wextra" } });
} else {
// Dev: link against system SQLite
exe.linkSystemLibrary("sqlite3");
// Add system include paths for sqlite3.h
exe.addIncludePath(.{ .cwd_relative = "/usr/include" });
exe.addIncludePath(.{ .cwd_relative = "/usr/local/include" });
exe.addIncludePath(.{ .cwd_relative = "/opt/homebrew/include" });
// Compile SQLite constants wrapper with system headers
exe.addCSourceFile(.{ .file = b.path("src/assets/sqlite/sqlite_constants.c"), .flags = &.{ "-Wall", "-Wextra" } });
}
// Install the executable to zig-out/bin

View file

@ -17,7 +17,8 @@ if [[ "${os}" != "linux" ]]; then
fi
repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
out="${repo_root}/src/assets/rsync_release_linux_${arch}.bin"
mkdir -p "${repo_root}/src/assets/rsync"
out="${repo_root}/src/assets/rsync/rsync_release_${os}_${arch}.bin"
tmp="$(mktemp -d)"
cleanup() { rm -rf "${tmp}"; }

View file

@ -4,8 +4,8 @@
set -euo pipefail
SQLITE_VERSION="${SQLITE_VERSION:-3450000}" # 3.45.0
SQLITE_YEAR="${SQLITE_YEAR:-2024}"
SQLITE_VERSION="${SQLITE_VERSION:-3480000}" # 3.48.0
SQLITE_YEAR="${SQLITE_YEAR:-2025}"
SQLITE_SRC_BASE="${SQLITE_SRC_BASE:-https://www.sqlite.org/${SQLITE_YEAR}}"
os="$(uname -s | tr '[:upper:]' '[:lower:]')"
@ -14,11 +14,11 @@ if [[ "${arch}" == "aarch64" || "${arch}" == "arm64" ]]; then arch="arm64"; fi
if [[ "${arch}" == "x86_64" ]]; then arch="x86_64"; fi
repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
out_dir="${repo_root}/src/assets/sqlite_release_${os}_${arch}"
out_dir="${repo_root}/src/assets/sqlite_${os}_${arch}"
echo "Building SQLite ${SQLITE_VERSION} for ${os}/${arch}..."
echo "Fetching SQLite ${SQLITE_VERSION} for ${os}/${arch}..."
# Create output directory
# Create platform-specific output directory
mkdir -p "${out_dir}"
# Download if not present

View file

@ -5,7 +5,8 @@
This directory contains rsync binaries for the ML CLI:
- `rsync_placeholder.bin` - Wrapper script for dev builds (calls system rsync)
- `rsync_release_<os>_<arch>.bin` - Static rsync binary for release builds (not in repo)
- `rsync/rsync_release.bin` - Generic fallback for release builds (a symlink to the placeholder wrapper, not a static binary)
- `rsync/rsync_release_<os>_<arch>.bin` - Downloaded static binary (not in repo)
## Build Modes
@ -16,8 +17,8 @@ This directory contains rsync binaries for the ML CLI:
- Requires rsync installed on the system
### Release Builds (ReleaseSmall, ReleaseFast)
- Uses `rsync_release_<os>_<arch>.bin` (static binary)
- Fully self-contained, no dependencies
- Uses `rsync/rsync_release_<os>_<arch>.bin` (downloaded static binary)
- Falls back to `rsync/rsync_release.bin` (symlink to placeholder) if platform-specific not found
- Results in ~450-650KB CLI binary
- Works on any system without rsync installed
@ -44,30 +45,29 @@ cd rsync-3.3.0
make
# Copy to assets (example)
cp rsync ../fetch_ml/cli/src/assets/rsync_release_linux_x86_64.bin
cp rsync ../fetch_ml/cli/src/assets/rsync/rsync_release_linux_x86_64.bin
```
### Option 3: Use System Rsync (Temporary)
For testing release builds without a static binary:
```bash
cd cli/src/assets
cp rsync_placeholder.bin rsync_release_linux_x86_64.bin
cd cli/src/assets/rsync
ln -sf rsync_placeholder.bin rsync_release.bin
```
This will still use the wrapper, but allows builds to complete.
## Verification
After placing the appropriate `rsync_release_<os>_<arch>.bin`:
After placing the appropriate `rsync/rsync_release_<os>_<arch>.bin`:
```bash
# Verify it's executable (example)
file cli/src/assets/rsync_release_linux_x86_64.bin
file cli/src/assets/rsync/rsync_release_linux_x86_64.bin
# Test it (example)
./cli/src/assets/rsync_release_linux_x86_64.bin --version
./cli/src/assets/rsync/rsync_release_linux_x86_64.bin --version
# Build release
cd cli
@ -79,7 +79,7 @@ ls -lh zig-out/prod/ml
## Notes
- `rsync_release.bin` is not tracked in git (add to .gitignore if needed)
- `rsync/rsync_release_<os>_<arch>.bin` is not tracked in git
- Different platforms need different static binaries
- For cross-compilation, provide platform-specific binaries
- The wrapper approach for dev builds is intentional for fast iteration
@ -92,21 +92,20 @@ ls -lh zig-out/prod/ml
This directory contains SQLite source for FetchML local mode:
- `sqlite_release_<os>_<arch>/` - SQLite amalgamation for local mode (fetched, not in repo)
- `sqlite_<os>_<arch>/` - SQLite amalgamation for release builds (fetched, not in repo)
- `sqlite3.c` - Single-file SQLite implementation
- `sqlite3.h` - SQLite header file
## Build Modes
### Development/Debug Builds
- SQLite is compiled from source into the binary
- No system SQLite library required
- Results in ~500KB larger binary (includes SQLite)
- Zero external dependencies
- Links against system SQLite library (`libsqlite3`)
- Requires SQLite installed on system
- Faster builds, smaller binary
### Release Builds (ReleaseSmall, ReleaseFast)
- Same SQLite compilation, optimized with release flags
- Fully self-contained, no dependencies
- Compiles SQLite from downloaded amalgamation
- Self-contained, no external dependencies
- Works on any system without SQLite installed
## Preparing SQLite
@ -115,24 +114,19 @@ This directory contains SQLite source for FetchML local mode:
```bash
cd cli
make build-sqlite SQLITE_VERSION=3450000
make build-sqlite SQLITE_VERSION=3480000
```
### Option 2: Download Yourself
```bash
# Download official amalgamation
SQLITE_VERSION=3450000
SQLITE_YEAR=2024
curl -fsSL "https://www.sqlite.org/${SQLITE_YEAR}/sqlite-amalgamation-${SQLITE_VERSION}.zip" -o sqlite.zip
unzip sqlite.zip
SQLITE_VERSION=3480000
SQLITE_YEAR=2025
# Copy to assets (example)
os=$(uname -s | tr '[:upper:]' '[:lower:]')
arch=$(uname -m)
mkdir -p cli/src/assets/sqlite_release_${os}_${arch}
cp sqlite-amalgamation-${SQLITE_VERSION}/sqlite3.c cli/src/assets/sqlite_release_${os}_${arch}/
cp sqlite-amalgamation-${SQLITE_VERSION}/sqlite3.h cli/src/assets/sqlite_release_${os}_${arch}/
cd cli
make build-sqlite
# Output: src/assets/sqlite_<os>_<arch>/
```
## Verification
@ -140,9 +134,9 @@ cp sqlite-amalgamation-${SQLITE_VERSION}/sqlite3.h cli/src/assets/sqlite_release
After fetching SQLite:
```bash
# Verify files exist
ls -lh cli/src/assets/sqlite_release_*/sqlite3.c
ls -lh cli/src/assets/sqlite_release_*/sqlite3.h
# Verify files exist (example for darwin/arm64)
ls -lh cli/src/assets/sqlite_darwin_arm64/sqlite3.c
ls -lh cli/src/assets/sqlite_darwin_arm64/sqlite3.h
# Build CLI
cd cli
@ -154,7 +148,7 @@ zig build prod
## Notes
- `sqlite_release_*/` directories are not tracked in git
- SQLite is compiled directly into the binary (not linked)
- `sqlite_<os>_<arch>/` directories are not tracked in git
- Dev builds link against the system SQLite; release builds embed the amalgamation
- WAL mode is enabled for concurrent CLI writes and TUI reads
- The amalgamation approach matches SQLite's recommended embedding pattern

View file

@ -0,0 +1,9 @@
// sqlite_constants.c
//
// Tiny C shim exposing the SQLITE_TRANSIENT sentinel to Zig.
// Zig 0.15's translate-c cannot evaluate the SQLITE_TRANSIENT macro
// (a pointer cast of -1) at comptime, so instead of importing the macro
// we export a plain function that hands the value back at runtime.
#include <sqlite3.h>

// Returns the SQLITE_TRANSIENT destructor sentinel used by
// sqlite3_bind_text/sqlite3_bind_blob to request that SQLite make
// its own private copy of the bound data.
const void* fetchml_sqlite_transient(void) {
    const void* transient_sentinel = SQLITE_TRANSIENT;
    return transient_sentinel;
}

View file

@ -5,6 +5,7 @@ const crypto = @import("../utils/crypto.zig");
const io = @import("../utils/io.zig");
const ws = @import("../net/ws/client.zig");
const protocol = @import("../net/protocol.zig");
const core = @import("../core.zig");
pub const CompareOptions = struct {
json: bool = false,
@ -15,41 +16,44 @@ pub const CompareOptions = struct {
pub fn run(allocator: std.mem.Allocator, argv: []const []const u8) !void {
if (argv.len < 2) {
try printUsage();
core.output.usage("compare", "Expected two run IDs");
return error.InvalidArgs;
}
if (std.mem.eql(u8, argv[0], "--help") or std.mem.eql(u8, argv[0], "-h")) {
try printUsage();
return;
return printUsage();
}
const run_a = argv[0];
const run_b = argv[1];
var options = CompareOptions{};
var flags = core.flags.CommonFlags{};
var csv: bool = false;
var all_fields: bool = false;
var fields: ?[]const u8 = null;
var i: usize = 2;
while (i < argv.len) : (i += 1) {
const arg = argv[i];
if (std.mem.eql(u8, arg, "--json")) {
options.json = true;
flags.json = true;
} else if (std.mem.eql(u8, arg, "--csv")) {
options.csv = true;
csv = true;
} else if (std.mem.eql(u8, arg, "--all")) {
options.all_fields = true;
all_fields = true;
} else if (std.mem.eql(u8, arg, "--fields") and i + 1 < argv.len) {
options.fields = argv[i + 1];
fields = argv[i + 1];
i += 1;
} else if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
try printUsage();
return;
return printUsage();
} else {
colors.printError("Unknown option: {s}\n", .{arg});
core.output.errorMsg("compare", "Unknown option");
return error.InvalidArgs;
}
}
core.output.init(if (flags.json) .json else .text);
const cfg = try Config.load(allocator);
defer {
var mut_cfg = cfg;
@ -106,10 +110,10 @@ pub fn run(allocator: std.mem.Allocator, argv: []const []const u8) !void {
return error.ServerError;
}
if (options.json) {
if (flags.json) {
try outputJsonComparison(allocator, root_a, root_b, run_a, run_b);
} else {
try outputHumanComparison(root_a, root_b, run_a, run_b, options);
try outputHumanComparison(root_a, root_b, run_a, run_b, all_fields);
}
}
@ -118,7 +122,7 @@ fn outputHumanComparison(
root_b: std.json.ObjectMap,
run_a: []const u8,
run_b: []const u8,
options: CompareOptions,
all_fields: bool,
) !void {
colors.printInfo("\n=== Comparison: {s} vs {s} ===\n\n", .{ run_a, run_b });
@ -175,7 +179,7 @@ fn outputHumanComparison(
if (meta_b) |mb| {
if (ma == .object and mb == .object) {
colors.printInfo("\n--- Metadata Differences ---\n", .{});
try compareMetadata(ma.object, mb.object, run_a, run_b, options.all_fields);
try compareMetadata(ma.object, mb.object, run_a, run_b, all_fields);
}
}
}

View file

@ -4,6 +4,7 @@ const ws = @import("../net/ws/client.zig");
const colors = @import("../utils/colors.zig");
const logging = @import("../utils/logging.zig");
const crypto = @import("../utils/crypto.zig");
const core = @import("../core.zig");
const DatasetOptions = struct {
dry_run: bool = false,
@ -14,11 +15,14 @@ const DatasetOptions = struct {
pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
if (args.len == 0) {
printUsage();
return error.InvalidArgs;
return printUsage();
}
var options = DatasetOptions{};
var flags = core.flags.CommonFlags{};
var dry_run = false;
var validate = false;
var csv = false;
// Parse global flags: --dry-run, --validate, --json
var positional = std.ArrayList([]const u8).initCapacity(allocator, args.len) catch |err| {
return err;
@ -27,62 +31,67 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
for (args) |arg| {
if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
printUsage();
return;
return printUsage();
} else if (std.mem.eql(u8, arg, "--dry-run")) {
options.dry_run = true;
dry_run = true;
} else if (std.mem.eql(u8, arg, "--validate")) {
options.validate = true;
validate = true;
} else if (std.mem.eql(u8, arg, "--json")) {
options.json = true;
flags.json = true;
} else if (std.mem.eql(u8, arg, "--csv")) {
options.csv = true;
csv = true;
} else if (std.mem.startsWith(u8, arg, "--")) {
colors.printError("Unknown option: {s}\n", .{arg});
printUsage();
return error.InvalidArgs;
core.output.errorMsg("dataset", "Unknown option");
return printUsage();
} else {
try positional.append(allocator, arg);
}
}
core.output.init(if (flags.json) .json else .text);
const action = positional.items[0];
switch (positional.items.len) {
0 => {
printUsage();
return error.InvalidArgs;
return printUsage();
},
1 => {
if (std.mem.eql(u8, action, "list")) {
const options = DatasetOptions{ .json = flags.json, .csv = csv };
try listDatasets(allocator, &options);
return error.InvalidArgs;
return;
}
},
2 => {
if (std.mem.eql(u8, action, "info")) {
const options = DatasetOptions{ .json = flags.json, .csv = csv };
try showDatasetInfo(allocator, positional.items[1], &options);
return;
} else if (std.mem.eql(u8, action, "search")) {
const options = DatasetOptions{ .json = flags.json, .csv = csv };
try searchDatasets(allocator, positional.items[1], &options);
return error.InvalidArgs;
return;
} else if (std.mem.eql(u8, action, "verify")) {
const options = DatasetOptions{ .json = flags.json, .validate = validate };
try verifyDataset(allocator, positional.items[1], &options);
return;
}
},
3 => {
if (std.mem.eql(u8, action, "register")) {
const options = DatasetOptions{ .json = flags.json, .dry_run = dry_run };
try registerDataset(allocator, positional.items[1], positional.items[2], &options);
return error.InvalidArgs;
return;
}
},
else => {
colors.printError("Unknown action: {s}\n", .{action});
printUsage();
core.output.errorMsg("dataset", "Too many arguments");
return error.InvalidArgs;
},
}
return printUsage();
}
fn printUsage() void {

View file

@ -4,11 +4,15 @@ const db = @import("../db.zig");
const core = @import("../core.zig");
const colors = @import("../utils/colors.zig");
const mode = @import("../mode.zig");
const uuid = @import("../utils/uuid.zig");
const crypto = @import("../utils/crypto.zig");
const ws = @import("../net/ws/client.zig");
/// Experiment command - manage experiments
/// Usage:
/// ml experiment create --name "baseline-cnn"
/// ml experiment list
/// ml experiment show <experiment_id>
pub fn execute(allocator: std.mem.Allocator, args: []const []const u8) !void {
var flags = core.flags.CommonFlags{};
var command_args = try core.flags.parseCommon(allocator, args, &flags);
@ -63,7 +67,7 @@ fn createExperiment(allocator: std.mem.Allocator, args: []const []const u8, json
// Check mode
const mode_result = try mode.detect(allocator, cfg);
if (mode.isOffline(mode_result.mode)) {
// Local mode: create in SQLite
const db_path = try cfg.getDBPath(allocator);
@ -72,7 +76,8 @@ fn createExperiment(allocator: std.mem.Allocator, args: []const []const u8, json
var database = try db.DB.init(allocator, db_path);
defer database.close();
const sql = "INSERT INTO ml_experiments (experiment_id, name, description) VALUES (?, ?, ?);";
// TODO: Add synced column to schema - required for server sync
const sql = "INSERT INTO ml_experiments (experiment_id, name, description, status, synced) VALUES (?, ?, ?, 'active', 0);";
const stmt = try database.prepare(sql);
defer db.DB.finalize(stmt);
@ -100,19 +105,40 @@ fn createExperiment(allocator: std.mem.Allocator, args: []const []const u8, json
colors.printSuccess("✓ Created experiment: {s} ({s})\n", .{ name.?, exp_id[0..8] });
}
} else {
// Server mode: would send to server
// For now, just update local config
var mut_cfg = cfg;
if (mut_cfg.experiment == null) {
mut_cfg.experiment = config.ExperimentConfig{};
}
mut_cfg.experiment.?.name = try allocator.dupe(u8, name.?);
try mut_cfg.save(allocator);
// Server mode: send to server via WebSocket
const api_key_hash = try crypto.hashApiKey(allocator, cfg.api_key);
defer allocator.free(api_key_hash);
if (json) {
std.debug.print("{{\"success\":true,\"name\":\"{s}\"}}\n", .{name.?});
const ws_url = try cfg.getWebSocketUrl(allocator);
defer allocator.free(ws_url);
var client = try ws.Client.connect(allocator, ws_url, cfg.api_key);
defer client.close();
try client.sendCreateExperiment(api_key_hash, name.?, description orelse "");
// Receive response
const response = try client.receiveMessage(allocator);
defer allocator.free(response);
// Parse response (expecting JSON with experiment_id)
if (std.mem.indexOf(u8, response, "experiment_id") != null) {
// Also update local config
var mut_cfg = cfg;
if (mut_cfg.experiment == null) {
mut_cfg.experiment = config.ExperimentConfig{};
}
mut_cfg.experiment.?.name = try allocator.dupe(u8, name.?);
try mut_cfg.save(allocator);
if (json) {
std.debug.print("{{\"success\":true,\"name\":\"{s}\",\"source\":\"server\"}}\n", .{name.?});
} else {
colors.printSuccess("✓ Created experiment on server: {s}\n", .{name.?});
}
} else {
colors.printSuccess("✓ Set active experiment: {s}\n", .{name.?});
colors.printError("Failed to create experiment on server: {s}\n", .{response});
return error.ServerError;
}
}
}
@ -134,7 +160,7 @@ fn listExperiments(allocator: std.mem.Allocator, _: []const []const u8, json: bo
var database = try db.DB.init(allocator, db_path);
defer database.close();
const sql = "SELECT experiment_id, name, description, created_at, status FROM ml_experiments ORDER BY created_at DESC;";
const sql = "SELECT experiment_id, name, description, created_at, status, synced FROM ml_experiments ORDER BY created_at DESC;";
const stmt = try database.prepare(sql);
defer db.DB.finalize(stmt);
@ -151,6 +177,7 @@ fn listExperiments(allocator: std.mem.Allocator, _: []const []const u8, json: bo
.description = try allocator.dupe(u8, db.DB.columnText(stmt, 2)),
.created_at = try allocator.dupe(u8, db.DB.columnText(stmt, 3)),
.status = try allocator.dupe(u8, db.DB.columnText(stmt, 4)),
.synced = db.DB.columnInt(stmt, 5) != 0,
});
}
@ -158,7 +185,7 @@ fn listExperiments(allocator: std.mem.Allocator, _: []const []const u8, json: bo
std.debug.print("[", .{});
for (experiments.items, 0..) |e, i| {
if (i > 0) std.debug.print(",", .{});
std.debug.print("{{\"id\":\"{s}\",\"name\":\"{s}\",\"status\":\"{s}\"}}", .{ e.id, e.name, e.status });
std.debug.print("{{\"id\":\"{s}\",\"name\":\"{s}\",\"status\":\"{s}\",\"description\":\"{s}\",\"synced\":{s}}}", .{ e.id, e.name, e.status, e.description, if (e.synced) "true" else "false" });
}
std.debug.print("]\n", .{});
} else {
@ -167,13 +194,38 @@ fn listExperiments(allocator: std.mem.Allocator, _: []const []const u8, json: bo
} else {
colors.printInfo("Experiments:\n", .{});
for (experiments.items) |e| {
std.debug.print(" {s} {s} ({s})\n", .{ e.id[0..8], e.name, e.status });
const sync_indicator = if (e.synced) "" else "";
std.debug.print(" {s} {s} {s} ({s})\n", .{ sync_indicator, e.id[0..8], e.name, e.status });
if (e.description.len > 0) {
std.debug.print(" {s}\n", .{e.description});
}
}
}
}
} else {
// Server mode: would query server
colors.printInfo("Server mode: would list experiments from server\n", .{});
// Server mode: query server via WebSocket
const api_key_hash = try crypto.hashApiKey(allocator, cfg.api_key);
defer allocator.free(api_key_hash);
const ws_url = try cfg.getWebSocketUrl(allocator);
defer allocator.free(ws_url);
var client = try ws.Client.connect(allocator, ws_url, cfg.api_key);
defer client.close();
try client.sendListExperiments(api_key_hash);
// Receive response
const response = try client.receiveMessage(allocator);
defer allocator.free(response);
// For now, just display raw response
if (json) {
std.debug.print("{s}\n", .{response});
} else {
colors.printInfo("Experiments from server:\n", .{});
std.debug.print("{s}\n", .{response});
}
}
}
@ -182,13 +234,102 @@ fn showExperiment(allocator: std.mem.Allocator, args: []const []const u8, json:
core.output.errorMsg("experiment", "experiment_id required", .{});
return error.MissingArgument;
}
const exp_id = args[0];
_ = json;
_ = allocator;
colors.printInfo("Show experiment: {s}\n", .{exp_id});
// Implementation would show experiment details
const cfg = try config.Config.load(allocator);
defer {
var mut_cfg = cfg;
mut_cfg.deinit(allocator);
}
const mode_result = try mode.detect(allocator, cfg);
if (mode.isOffline(mode_result.mode)) {
// Local mode: show from SQLite
const db_path = try cfg.getDBPath(allocator);
defer allocator.free(db_path);
var database = try db.DB.init(allocator, db_path);
defer database.close();
// Get experiment details
const exp_sql = "SELECT experiment_id, name, description, created_at, status, synced FROM ml_experiments WHERE experiment_id = ?;";
const exp_stmt = try database.prepare(exp_sql);
defer db.DB.finalize(exp_stmt);
try db.DB.bindText(exp_stmt, 1, exp_id);
if (!try db.DB.step(exp_stmt)) {
core.output.errorMsg("experiment", "Experiment not found: {s}", .{exp_id});
return error.NotFound;
}
const name = db.DB.columnText(exp_stmt, 1);
const description = db.DB.columnText(exp_stmt, 2);
const created_at = db.DB.columnText(exp_stmt, 3);
const status = db.DB.columnText(exp_stmt, 4);
const synced = db.DB.columnInt(exp_stmt, 5) != 0;
// Get run count and last run date
const runs_sql =
"SELECT COUNT(*), MAX(start_time) FROM ml_runs WHERE experiment_id = ?;";
const runs_stmt = try database.prepare(runs_sql);
defer db.DB.finalize(runs_stmt);
try db.DB.bindText(runs_stmt, 1, exp_id);
var run_count: i64 = 0;
var last_run: ?[]const u8 = null;
if (try db.DB.step(runs_stmt)) {
run_count = db.DB.columnInt64(runs_stmt, 0);
if (db.DB.columnText(runs_stmt, 1).len > 0) {
last_run = try allocator.dupe(u8, db.DB.columnText(runs_stmt, 1));
}
}
defer if (last_run) |lr| allocator.free(lr);
if (json) {
std.debug.print("{{\"experiment_id\":\"{s}\",\"name\":\"{s}\",\"description\":\"{s}\",\"status\":\"{s}\",\"created_at\":\"{s}\",\"synced\":{s},\"run_count\":{d},\"last_run\":\"{s}\"}}\n", .{
exp_id, name, description, status, created_at,
if (synced) "true" else "false", run_count, last_run orelse "null",
});
} else {
colors.printInfo("Experiment: {s}\n", .{name});
std.debug.print(" ID: {s}\n", .{exp_id});
std.debug.print(" Status: {s}\n", .{status});
if (description.len > 0) {
std.debug.print(" Description: {s}\n", .{description});
}
std.debug.print(" Created: {s}\n", .{created_at});
std.debug.print(" Synced: {s}\n", .{if (synced) "" else "↑ pending"});
std.debug.print(" Runs: {d}\n", .{run_count});
if (last_run) |lr| {
std.debug.print(" Last run: {s}\n", .{lr});
}
}
} else {
// Server mode: query server via WebSocket
const api_key_hash = try crypto.hashApiKey(allocator, cfg.api_key);
defer allocator.free(api_key_hash);
const ws_url = try cfg.getWebSocketUrl(allocator);
defer allocator.free(ws_url);
var client = try ws.Client.connect(allocator, ws_url, cfg.api_key);
defer client.close();
try client.sendGetExperimentByID(api_key_hash, exp_id);
// Receive response
const response = try client.receiveMessage(allocator);
defer allocator.free(response);
if (json) {
std.debug.print("{s}\n", .{response});
} else {
colors.printInfo("Experiment details from server:\n", .{});
std.debug.print("{s}\n", .{response});
}
}
}
const ExperimentInfo = struct {
@ -197,6 +338,7 @@ const ExperimentInfo = struct {
description: []const u8,
created_at: []const u8,
status: []const u8,
synced: bool,
fn deinit(self: *ExperimentInfo, allocator: std.mem.Allocator) void {
allocator.free(self.id);
@ -208,7 +350,6 @@ const ExperimentInfo = struct {
};
/// Produce a fresh random identifier (UUID v4) for a new experiment.
/// The returned slice is allocated with `allocator`; the caller owns it
/// and is responsible for freeing it.
fn generateExperimentID(allocator: std.mem.Allocator) ![]const u8 {
    const uuid_mod = @import("../utils/uuid.zig");
    return uuid_mod.generateV4(allocator);
}

View file

@ -6,6 +6,7 @@ const io = @import("../utils/io.zig");
const ws = @import("../net/ws/client.zig");
const protocol = @import("../net/protocol.zig");
const manifest = @import("../utils/manifest.zig");
const core = @import("../core.zig");
pub const ExportOptions = struct {
anonymize: bool = false,
@ -17,58 +18,69 @@ pub const ExportOptions = struct {
pub fn run(allocator: std.mem.Allocator, argv: []const []const u8) !void {
if (argv.len == 0) {
try printUsage();
return error.InvalidArgs;
return printUsage();
}
if (std.mem.eql(u8, argv[0], "--help") or std.mem.eql(u8, argv[0], "-h")) {
try printUsage();
return;
return printUsage();
}
const target = argv[0];
var options = ExportOptions{};
var flags = core.flags.CommonFlags{};
var anonymize = false;
var anonymize_level: []const u8 = "metadata-only";
var bundle: ?[]const u8 = null;
var base_override: ?[]const u8 = null;
var i: usize = 1;
while (i < argv.len) : (i += 1) {
const arg = argv[i];
if (std.mem.eql(u8, arg, "--anonymize")) {
options.anonymize = true;
anonymize = true;
} else if (std.mem.eql(u8, arg, "--anonymize-level") and i + 1 < argv.len) {
options.anonymize_level = argv[i + 1];
anonymize_level = argv[i + 1];
i += 1;
} else if (std.mem.eql(u8, arg, "--bundle") and i + 1 < argv.len) {
options.bundle = argv[i + 1];
bundle = argv[i + 1];
i += 1;
} else if (std.mem.eql(u8, arg, "--base") and i + 1 < argv.len) {
options.base_override = argv[i + 1];
base_override = argv[i + 1];
i += 1;
} else if (std.mem.eql(u8, arg, "--json")) {
options.json = true;
flags.json = true;
} else if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
try printUsage();
return;
return printUsage();
} else {
colors.printError("Unknown option: {s}\n", .{arg});
core.output.errorMsg("export", "Unknown option");
return error.InvalidArgs;
}
}
core.output.init(if (flags.json) .json else .text);
// Validate anonymize level
if (!std.mem.eql(u8, options.anonymize_level, "metadata-only") and
!std.mem.eql(u8, options.anonymize_level, "full"))
if (!std.mem.eql(u8, anonymize_level, "metadata-only") and
!std.mem.eql(u8, anonymize_level, "full"))
{
colors.printError("Invalid anonymize level: {s}. Use 'metadata-only' or 'full'\n", .{options.anonymize_level});
core.output.errorMsg("export", "Invalid anonymize level");
return error.InvalidArgs;
}
if (flags.json) {
var stdout_writer = io.stdoutWriter();
try stdout_writer.print("{{\"success\":true,\"anonymize_level\":\"{s}\"}}\n", .{anonymize_level});
} else {
colors.printInfo("Anonymization level: {s}\n", .{anonymize_level});
}
const cfg = try Config.load(allocator);
defer {
var mut_cfg = cfg;
mut_cfg.deinit(allocator);
}
const resolved_base = options.base_override orelse cfg.worker_base;
const resolved_base = base_override orelse cfg.worker_base;
const manifest_path = manifest.resolvePathWithBase(allocator, target, resolved_base) catch |err| {
if (err == error.FileNotFound) {
colors.printError(
@ -98,8 +110,8 @@ pub fn run(allocator: std.mem.Allocator, argv: []const []const u8) !void {
var final_content: []u8 = undefined;
var final_content_owned = false;
if (options.anonymize) {
final_content = try anonymizeManifest(allocator, parsed.value, options.anonymize_level);
if (anonymize) {
final_content = try anonymizeManifest(allocator, parsed.value, anonymize_level);
final_content_owned = true;
} else {
final_content = manifest_content;
@ -107,7 +119,7 @@ pub fn run(allocator: std.mem.Allocator, argv: []const []const u8) !void {
defer if (final_content_owned) allocator.free(final_content);
// Output or bundle
if (options.bundle) |bundle_path| {
if (bundle) |bundle_path| {
// Create a simple tar-like bundle (just the manifest for now)
// In production, this would include code, configs, etc.
var bundle_file = try std.fs.cwd().createFile(bundle_path, .{});
@ -115,16 +127,16 @@ pub fn run(allocator: std.mem.Allocator, argv: []const []const u8) !void {
try bundle_file.writeAll(final_content);
if (options.json) {
if (flags.json) {
var stdout_writer = io.stdoutWriter();
try stdout_writer.print("{{\"success\":true,\"bundle\":\"{s}\",\"anonymized\":{}}}\n", .{
bundle_path,
options.anonymize,
anonymize,
});
} else {
colors.printSuccess("✓ Exported to {s}\n", .{bundle_path});
if (options.anonymize) {
colors.printInfo(" Anonymization level: {s}\n", .{options.anonymize_level});
if (anonymize) {
colors.printInfo(" Anonymization level: {s}\n", .{anonymize_level});
colors.printInfo(" Paths redacted, IPs removed, usernames anonymized\n", .{});
}
}

View file

@ -5,6 +5,7 @@ const crypto = @import("../utils/crypto.zig");
const io = @import("../utils/io.zig");
const ws = @import("../net/ws/client.zig");
const protocol = @import("../net/protocol.zig");
const core = @import("../core.zig");
pub const FindOptions = struct {
json: bool = false,
@ -22,16 +23,23 @@ pub const FindOptions = struct {
pub fn run(allocator: std.mem.Allocator, argv: []const []const u8) !void {
if (argv.len == 0) {
try printUsage();
return error.InvalidArgs;
return printUsage();
}
if (std.mem.eql(u8, argv[0], "--help") or std.mem.eql(u8, argv[0], "-h")) {
try printUsage();
return;
return printUsage();
}
var options = FindOptions{};
var flags = core.flags.CommonFlags{};
var limit: usize = 20;
var csv: bool = false;
var tag: ?[]const u8 = null;
var outcome: ?[]const u8 = null;
var dataset: ?[]const u8 = null;
var experiment_group: ?[]const u8 = null;
var author: ?[]const u8 = null;
var after: ?[]const u8 = null;
var before: ?[]const u8 = null;
var query_str: ?[]const u8 = null;
// First argument might be a query string or a flag
@ -45,52 +53,39 @@ pub fn run(allocator: std.mem.Allocator, argv: []const []const u8) !void {
while (i < argv.len) : (i += 1) {
const arg = argv[i];
if (std.mem.eql(u8, arg, "--json")) {
options.json = true;
flags.json = true;
} else if (std.mem.eql(u8, arg, "--csv")) {
options.csv = true;
csv = true;
} else if (std.mem.eql(u8, arg, "--limit") and i + 1 < argv.len) {
options.limit = try std.fmt.parseInt(usize, argv[i + 1], 10);
limit = try std.fmt.parseInt(usize, argv[i + 1], 10);
i += 1;
} else if (std.mem.eql(u8, arg, "--tag") and i + 1 < argv.len) {
options.tag = argv[i + 1];
tag = argv[i + 1];
i += 1;
} else if (std.mem.eql(u8, arg, "--outcome") and i + 1 < argv.len) {
options.outcome = argv[i + 1];
outcome = argv[i + 1];
i += 1;
} else if (std.mem.eql(u8, arg, "--dataset") and i + 1 < argv.len) {
options.dataset = argv[i + 1];
dataset = argv[i + 1];
i += 1;
} else if (std.mem.eql(u8, arg, "--experiment-group") and i + 1 < argv.len) {
options.experiment_group = argv[i + 1];
} else if (std.mem.eql(u8, arg, "--group") and i + 1 < argv.len) {
experiment_group = argv[i + 1];
i += 1;
} else if (std.mem.eql(u8, arg, "--author") and i + 1 < argv.len) {
options.author = argv[i + 1];
author = argv[i + 1];
i += 1;
} else if (std.mem.eql(u8, arg, "--after") and i + 1 < argv.len) {
options.after = argv[i + 1];
after = argv[i + 1];
i += 1;
} else if (std.mem.eql(u8, arg, "--before") and i + 1 < argv.len) {
options.before = argv[i + 1];
before = argv[i + 1];
i += 1;
} else if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
try printUsage();
return;
} else if (!std.mem.startsWith(u8, arg, "--")) {
// Treat as query string if not already set
if (query_str == null) {
query_str = arg;
} else {
colors.printError("Unknown argument: {s}\n", .{arg});
return error.InvalidArgs;
}
} else {
colors.printError("Unknown option: {s}\n", .{arg});
core.output.errorMsg("find", "Unknown option");
return error.InvalidArgs;
}
}
options.query = query_str;
const cfg = try Config.load(allocator);
defer {
var mut_cfg = cfg;
@ -102,14 +97,27 @@ pub fn run(allocator: std.mem.Allocator, argv: []const []const u8) !void {
const ws_url = try cfg.getWebSocketUrl(allocator);
defer allocator.free(ws_url);
colors.printInfo("Searching experiments...\n", .{});
var client = try ws.Client.connect(allocator, ws_url, cfg.api_key);
defer client.close();
// Build search request JSON
const search_json = try buildSearchJson(allocator, &options);
// Build search options struct for JSON builder
const search_options = FindOptions{
.json = flags.json,
.csv = csv,
.limit = limit,
.tag = tag,
.outcome = outcome,
.dataset = dataset,
.experiment_group = experiment_group,
.author = author,
.after = after,
.before = before,
.query = query_str,
};
const search_json = try buildSearchJson(allocator, &search_options);
defer allocator.free(search_json);
// Send search request - we'll use the dataset search opcode as a placeholder
@ -121,7 +129,7 @@ pub fn run(allocator: std.mem.Allocator, argv: []const []const u8) !void {
// Parse response
const parsed = std.json.parseFromSlice(std.json.Value, allocator, msg, .{}) catch {
if (options.json) {
if (flags.json) {
var out = io.stdoutWriter();
try out.print("{{\"error\":\"invalid_response\"}}\n", .{});
} else {
@ -133,11 +141,13 @@ pub fn run(allocator: std.mem.Allocator, argv: []const []const u8) !void {
const root = parsed.value;
if (options.json) {
if (flags.json) {
try io.stdoutWriteJson(root);
} else if (options.csv) {
} else if (csv) {
const options = FindOptions{ .json = flags.json, .csv = csv };
try outputCsvResults(allocator, root, &options);
} else {
const options = FindOptions{ .json = flags.json, .csv = csv };
try outputHumanResults(root, &options);
}
}

View file

@ -4,6 +4,7 @@ const Config = @import("../config.zig").Config;
const io = @import("../utils/io.zig");
const json = @import("../utils/json.zig");
const manifest = @import("../utils/manifest.zig");
const core = @import("../core.zig");
pub const Options = struct {
json: bool = false,
@ -11,59 +12,49 @@ pub const Options = struct {
};
pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
if (args.len == 0) {
try printUsage();
return error.InvalidArgs;
}
var opts = Options{};
var flags = core.flags.CommonFlags{};
var base: ?[]const u8 = null;
var target_path: ?[]const u8 = null;
var i: usize = 0;
while (i < args.len) : (i += 1) {
const arg = args[i];
if (std.mem.eql(u8, arg, "--json")) {
opts.json = true;
} else if (std.mem.eql(u8, arg, "--base")) {
if (i + 1 >= args.len) {
colors.printError("Missing value for --base\n", .{});
try printUsage();
return error.InvalidArgs;
}
opts.base = args[i + 1];
flags.json = true;
} else if (std.mem.eql(u8, arg, "--base") and i + 1 < args.len) {
base = args[i + 1];
i += 1;
} else if (std.mem.startsWith(u8, arg, "--help")) {
try printUsage();
return;
return printUsage();
} else if (std.mem.startsWith(u8, arg, "--")) {
colors.printError("Unknown option: {s}\n", .{arg});
try printUsage();
core.output.errorMsg("info", "Unknown option");
return error.InvalidArgs;
} else {
target_path = arg;
}
}
core.output.init(if (flags.json) .json else .text);
if (target_path == null) {
try printUsage();
return error.InvalidArgs;
core.output.errorMsg("info", "No target path specified");
return printUsage();
}
const manifest_path = manifest.resolvePathWithBase(allocator, target_path.?, opts.base) catch |err| {
const manifest_path = manifest.resolvePathWithBase(allocator, target_path.?, base) catch |err| {
if (err == error.FileNotFound) {
colors.printError(
"Could not locate run_manifest.json for '{s}'. Provide a path, or use --base <path> to scan finished/failed/running/pending.\n",
.{target_path.?},
);
core.output.errorMsgDetailed("info", "Manifest not found", "Provide a path or use --base <path>");
}
return err;
};
defer allocator.free(manifest_path);
const data = try manifest.readFileAlloc(allocator, manifest_path);
defer allocator.free(data);
defer {
allocator.free(manifest_path);
allocator.free(data);
}
if (opts.json) {
if (flags.json) {
var out = io.stdoutWriter();
try out.print("{s}\n", .{data});
return;

View file

@ -1,33 +1,21 @@
const std = @import("std");
const Config = @import("../config.zig").Config;
const db = @import("../db.zig");
const core = @import("../core.zig");
pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
if (args.len > 0 and (std.mem.eql(u8, args[0], "--help") or std.mem.eql(u8, args[0], "-h"))) {
printUsage();
return;
}
var flags = core.flags.CommonFlags{};
var remaining = try core.flags.parseCommon(allocator, args, &flags);
defer remaining.deinit(allocator);
// Parse optional CLI flags
var cli_tracking_uri: ?[]const u8 = null;
var cli_artifact_path: ?[]const u8 = null;
var cli_sync_uri: ?[]const u8 = null;
core.output.init(if (flags.json) .json else .text);
var i: usize = 0;
while (i < args.len) : (i += 1) {
if (std.mem.eql(u8, args[i], "--tracking-uri") and i + 1 < args.len) {
cli_tracking_uri = args[i + 1];
i += 1;
} else if (std.mem.eql(u8, args[i], "--artifact-path") and i + 1 < args.len) {
cli_artifact_path = args[i + 1];
i += 1;
} else if (std.mem.eql(u8, args[i], "--sync-uri") and i + 1 < args.len) {
cli_sync_uri = args[i + 1];
i += 1;
}
}
// Parse CLI-specific overrides and flags
const cli_tracking_uri = core.flags.parseKVFlag(remaining.items, "tracking-uri");
const cli_artifact_path = core.flags.parseKVFlag(remaining.items, "artifact-path");
const cli_sync_uri = core.flags.parseKVFlag(remaining.items, "sync-uri");
const force_local = core.flags.parseBoolFlag(remaining.items, "local");
// Load config with CLI overrides
var cfg = try Config.loadWithOverrides(allocator, cli_tracking_uri, cli_artifact_path, cli_sync_uri);
defer cfg.deinit(allocator);
@ -48,21 +36,23 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
} else {
std.debug.print("\n", .{});
}
if (cfg.sync_uri.len > 0) {
std.debug.print(" sync_uri = {s}\n", .{cfg.sync_uri});
} else {
std.debug.print(" sync_uri = (not set)\n", .{});
}
std.debug.print(" sync_uri = {s}\n", .{if (cfg.sync_uri.len > 0) cfg.sync_uri else "(not set)"});
std.debug.print("\n", .{});
// Only initialize SQLite DB in local mode
if (!cfg.isLocalMode()) {
std.debug.print("Runner mode detected (wss://). No local database needed.\n", .{});
std.debug.print("Server: {s}:{d}\n", .{ cfg.worker_host, cfg.worker_port });
// Default path: create config only (no DB speculatively)
if (!force_local) {
std.debug.print("✓ Created .fetchml/config.toml\n", .{});
std.debug.print(" Local tracking DB will be created automatically if server becomes unavailable.\n", .{});
if (cfg.sync_uri.len > 0) {
std.debug.print(" Server: {s}:{d}\n", .{ cfg.worker_host, cfg.worker_port });
}
return;
}
// --local path: create config + DB now
std.debug.print("(local mode explicitly requested)\n\n", .{});
// Get DB path from tracking URI
const db_path = try cfg.getDBPath(allocator);
defer allocator.free(db_path);
@ -96,20 +86,19 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
std.debug.print("✓ Created database: {s}\n", .{db_path});
}
// Verify schema by connecting
var database = try db.DB.init(allocator, db_path);
defer database.close();
std.debug.print("✓ Created .fetchml/config.toml\n", .{});
std.debug.print("✓ Schema applied (WAL mode enabled)\n", .{});
std.debug.print("✓ Ready for experiment tracking\n", .{});
std.debug.print(" fetch_ml.db-wal and fetch_ml.db-shm will appear during use — expected.\n", .{});
std.debug.print(" The DB is just a file. Delete it freely — recreated automatically on next run.\n", .{});
}
fn printUsage() void {
    // Print CLI usage for `ml init`.
    // Fix: diff residue left both the old and new description lines and a
    // duplicated `-h, --help` row; keep only the current text once.
    std.debug.print("Usage: ml init [OPTIONS]\n\n", .{});
    std.debug.print("Initialize FetchML configuration\n\n", .{});
    std.debug.print("Options:\n", .{});
    std.debug.print("  --local               Create local database now (default: config only)\n", .{});
    std.debug.print("  --tracking-uri URI    SQLite database path (e.g., sqlite://./fetch_ml.db)\n", .{});
    std.debug.print("  --artifact-path PATH  Artifacts directory (default: ./experiments/)\n", .{});
    std.debug.print("  --sync-uri URI        Server to sync with (e.g., wss://ml.company.com/ws)\n", .{});
    std.debug.print("  -h, --help            Show this help\n", .{});
}

View file

@ -4,6 +4,7 @@ const ws = @import("../net/ws/client.zig");
const protocol = @import("../net/protocol.zig");
const crypto = @import("../utils/crypto.zig");
const Config = @import("../config.zig").Config;
const core = @import("../core.zig");
const blocked_packages = [_][]const u8{ "requests", "urllib3", "httpx", "aiohttp", "socket", "telnetlib" };
@ -23,9 +24,10 @@ fn validatePackageName(name: []const u8) bool {
return true;
}
fn restoreJupyter(allocator: std.mem.Allocator, args: []const []const u8) !void {
fn restoreJupyter(allocator: std.mem.Allocator, args: []const []const u8, json: bool) !void {
_ = json;
if (args.len < 1) {
colors.printError("Usage: ml jupyter restore <name>\n", .{});
core.output.errorMsg("jupyter.restore", "Usage: ml jupyter restore <name>");
return;
}
const name = args[0];
@ -48,10 +50,10 @@ fn restoreJupyter(allocator: std.mem.Allocator, args: []const []const u8) !void
const api_key_hash = try crypto.hashApiKey(allocator, config.api_key);
defer allocator.free(api_key_hash);
colors.printInfo("Restoring workspace {s}...\n", .{name});
core.output.info("Restoring workspace {s}...", .{name});
client.sendRestoreJupyter(name, api_key_hash) catch |err| {
colors.printError("Failed to send restore command: {}\n", .{err});
core.output.errorMsgDetailed("jupyter.restore", "Failed to send restore command", @errorName(err));
return;
};
@ -70,22 +72,17 @@ fn restoreJupyter(allocator: std.mem.Allocator, args: []const []const u8) !void
switch (packet.packet_type) {
.success => {
if (packet.success_message) |msg| {
colors.printSuccess("{s}\n", .{msg});
core.output.info("{s}", .{msg});
} else {
colors.printSuccess("Workspace restored.\n", .{});
core.output.info("Workspace restored.", .{});
}
},
.error_packet => {
const error_msg = protocol.ResponsePacket.getErrorMessage(packet.error_code.?);
colors.printError("Failed to restore workspace: {s}\n", .{error_msg});
if (packet.error_details) |details| {
colors.printError("Details: {s}\n", .{details});
} else if (packet.error_message) |msg| {
colors.printError("Details: {s}\n", .{msg});
}
core.output.errorMsgDetailed("jupyter.restore", error_msg, packet.error_details orelse packet.error_message orelse "");
},
else => {
colors.printError("Unexpected response type\n", .{});
core.output.errorMsg("jupyter.restore", "Unexpected response type");
},
}
}
@ -139,50 +136,62 @@ pub fn defaultWorkspacePath(allocator: std.mem.Allocator, name: []const u8) ![]u
}
pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
    // Route `ml jupyter <subcommand>` to its handler.
    // Fix: diff residue interleaved the old dispatcher (create/start/stop/...)
    // with the new one (list/status/launch/...); keep the coherent new version.
    var flags = core.flags.CommonFlags{};
    if (args.len == 0) {
        return printUsage();
    }

    // Global flags: --help short-circuits to usage; --json is threaded
    // through to the subcommand handlers.
    for (args) |arg| {
        if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
            return printUsage();
        } else if (std.mem.eql(u8, arg, "--json")) {
            flags.json = true;
        }
    }

    const sub = args[0];
    if (std.mem.eql(u8, sub, "list")) {
        return listJupyter(allocator, args[1..], flags.json);
    } else if (std.mem.eql(u8, sub, "status")) {
        return statusJupyter(allocator, args[1..], flags.json);
    } else if (std.mem.eql(u8, sub, "launch")) {
        return launchJupyter(allocator, args[1..], flags.json);
    } else if (std.mem.eql(u8, sub, "terminate")) {
        return terminateJupyter(allocator, args[1..], flags.json);
    } else if (std.mem.eql(u8, sub, "save")) {
        return saveJupyter(allocator, args[1..], flags.json);
    } else if (std.mem.eql(u8, sub, "restore")) {
        return restoreJupyter(allocator, args[1..], flags.json);
    } else if (std.mem.eql(u8, sub, "install")) {
        return installJupyter(allocator, args[1..]);
    } else if (std.mem.eql(u8, sub, "uninstall")) {
        return uninstallJupyter(allocator, args[1..]);
    } else {
        core.output.errorMsg("jupyter", "Unknown subcommand");
        return error.InvalidArgs;
    }
}
fn printUsage() !void {
    // Display top-level usage for the `ml jupyter` command group.
    const help_lines = [_][]const u8{
        "Usage: ml jupyter <command> [args]\n",
        "\nCommands:\n",
        "  list       List Jupyter services\n",
        "  status     Show Jupyter service status\n",
        "  launch     Launch a new Jupyter service\n",
        "  terminate  Terminate a Jupyter service\n",
        "  save       Save workspace\n",
        "  restore    Restore workspace\n",
        "  install    Install packages\n",
        "  uninstall  Uninstall packages\n",
    };
    for (help_lines) |line| {
        std.debug.print("{s}", .{line});
    }
}
fn printUsagePackage() void {
    // Print usage for the `ml jupyter package` subcommand group.
    // Fix: `core.output.info("{s}", .{})` had a format placeholder with no
    // argument (compile error); restore the "  list" action line and drop the
    // duplicated "Actions" header left by the diff.
    colors.printError("Usage: ml jupyter package <action> [options]\n", .{});
    core.output.info("Actions:", .{});
    core.output.info("  list", .{});
    colors.printInfo("Options:\n", .{});
    colors.printInfo("  --help, -h Show this help message\n", .{});
}
@ -505,8 +514,15 @@ fn removeJupyter(allocator: std.mem.Allocator, args: []const []const u8) !void {
}
}
fn statusJupyter(allocator: std.mem.Allocator, args: []const []const u8) !void {
_ = args; // Not used yet
fn listJupyter(allocator: std.mem.Allocator, args: []const []const u8, json: bool) !void {
    // List Jupyter services. `args` and `json` are accepted for interface
    // symmetry with the other subcommand handlers but are not consumed yet.
    _ = json;
    _ = args;
    return listServices(allocator);
}
fn statusJupyter(allocator: std.mem.Allocator, args: []const []const u8, json: bool) !void {
    // Status currently shows the same information as `list`, so delegate to
    // listServices. `args` and `json` are reserved for future use.
    _ = json;
    _ = args;
    return listServices(allocator);
}
@ -850,3 +866,41 @@ fn packageCommands(args: []const []const u8) !void {
colors.printError("Invalid package command: {s}\n", .{subcommand});
}
}
fn launchJupyter(allocator: std.mem.Allocator, args: []const []const u8, json: bool) !void {
    // Placeholder: launching a Jupyter service is not wired up yet.
    _ = json;
    _ = args;
    _ = allocator;
    core.output.errorMsg("jupyter.launch", "Not implemented");
    return error.NotImplemented;
}
fn terminateJupyter(allocator: std.mem.Allocator, args: []const []const u8, json: bool) !void {
    // Placeholder: terminating a Jupyter service is not wired up yet.
    _ = json;
    _ = args;
    _ = allocator;
    core.output.errorMsg("jupyter.terminate", "Not implemented");
    return error.NotImplemented;
}
fn saveJupyter(allocator: std.mem.Allocator, args: []const []const u8, json: bool) !void {
    // Placeholder: saving a workspace is not wired up yet.
    _ = json;
    _ = args;
    _ = allocator;
    core.output.errorMsg("jupyter.save", "Not implemented");
    return error.NotImplemented;
}
fn installJupyter(allocator: std.mem.Allocator, args: []const []const u8) !void {
    // Placeholder: package installation is not wired up yet.
    _ = args;
    _ = allocator;
    core.output.errorMsg("jupyter.install", "Not implemented");
    return error.NotImplemented;
}
fn uninstallJupyter(allocator: std.mem.Allocator, args: []const []const u8) !void {
    // Placeholder: package removal is not wired up yet.
    _ = args;
    _ = allocator;
    core.output.errorMsg("jupyter.uninstall", "Not implemented");
    return error.NotImplemented;
}

View file

@ -1,143 +0,0 @@
const std = @import("std");
const config = @import("../config.zig");
const db = @import("../db.zig");
const core = @import("../core.zig");
const colors = @import("../utils/colors.zig");
const manifest_lib = @import("../manifest.zig");
/// Note command - unified metadata annotation
/// Usage:
/// ml note <run_id> --text "Try lr=3e-4 next"
/// ml note <run_id> --hypothesis "LR scaling helps"
/// ml note <run_id> --outcome validates --confidence 0.9
/// ml note <run_id> --privacy private
pub fn execute(allocator: std.mem.Allocator, args: []const []const u8) !void {
    // Add metadata annotations (note / hypothesis / outcome / privacy tags)
    // to an existing run in the local tracking database.
    var flags = core.flags.CommonFlags{};
    var command_args = try core.flags.parseCommon(allocator, args, &flags);
    defer command_args.deinit(allocator);

    core.output.init(if (flags.json) .json else .text);

    if (flags.help) {
        return printUsage();
    }

    if (command_args.items.len < 1) {
        std.log.err("Usage: ml note <run_id> [options]", .{});
        return error.MissingArgument;
    }

    const run_id = command_args.items[0];

    // Metadata options; each becomes a row in ml_tags.
    const text = core.flags.parseKVFlag(command_args.items, "text");
    const hypothesis = core.flags.parseKVFlag(command_args.items, "hypothesis");
    const outcome = core.flags.parseKVFlag(command_args.items, "outcome");
    const confidence = core.flags.parseKVFlag(command_args.items, "confidence");
    const privacy = core.flags.parseKVFlag(command_args.items, "privacy");
    const author = core.flags.parseKVFlag(command_args.items, "author");

    // At least one primary annotation is required; --confidence and --author
    // only qualify the other annotations.
    if (text == null and hypothesis == null and outcome == null and privacy == null) {
        std.log.err("No metadata provided. Use --text, --hypothesis, --outcome, or --privacy", .{});
        return error.MissingMetadata;
    }

    const cfg = try config.Config.load(allocator);
    defer {
        var mut_cfg = cfg;
        mut_cfg.deinit(allocator);
    }

    // Resolve the SQLite DB path from the tracking URI.
    const db_path = try cfg.getDBPath(allocator);
    defer allocator.free(db_path);

    var database = try db.DB.init(allocator, db_path);
    defer database.close();

    // Refuse to tag a run that does not exist.
    const check_stmt = try database.prepare("SELECT 1 FROM ml_runs WHERE run_id = ?;");
    defer db.DB.finalize(check_stmt);
    try db.DB.bindText(check_stmt, 1, run_id);
    const has_row = try db.DB.step(check_stmt);
    if (!has_row) {
        std.log.err("Run not found: {s}", .{run_id});
        return error.RunNotFound;
    }

    if (text) |t| {
        try addTag(allocator, &database, run_id, "note", t, author);
    }
    if (hypothesis) |h| {
        try addTag(allocator, &database, run_id, "hypothesis", h, author);
    }
    if (outcome) |o| {
        try addTag(allocator, &database, run_id, "outcome", o, author);
        // Confidence only makes sense attached to an outcome.
        if (confidence) |c| {
            try addTag(allocator, &database, run_id, "confidence", c, author);
        }
    }
    if (privacy) |p| {
        try addTag(allocator, &database, run_id, "privacy", p, author);
    }

    // Checkpoint WAL before exit.
    database.checkpointOnExit();

    if (flags.json) {
        // NOTE(review): run_id is interpolated raw — confirm IDs can never
        // contain quotes/backslashes, or this emits invalid JSON.
        std.debug.print("{{\"success\":true,\"run_id\":\"{s}\",\"action\":\"note_added\"}}\n", .{run_id});
    } else {
        // Fix: slicing run_id[0..8] panicked when the id was shorter than
        // 8 bytes; clamp the prefix length instead.
        const short_id = run_id[0..@min(run_id.len, 8)];
        colors.printSuccess("✓ Added note to run {s}\n", .{short_id});
    }
}
fn addTag(
    allocator: std.mem.Allocator,
    database: *db.DB,
    run_id: []const u8,
    key: []const u8,
    value: []const u8,
    author: ?[]const u8,
) !void {
    // Insert one (key, value) tag row for the run, appending an author
    // attribution to the value when one was provided.
    var tag_value: []u8 = undefined;
    if (author) |who| {
        tag_value = try std.fmt.allocPrint(allocator, "{s} (by {s})", .{ value, who });
    } else {
        tag_value = try allocator.dupe(u8, value);
    }
    defer allocator.free(tag_value);

    const stmt = try database.prepare("INSERT INTO ml_tags (run_id, key, value) VALUES (?, ?, ?);");
    defer db.DB.finalize(stmt);
    try db.DB.bindText(stmt, 1, run_id);
    try db.DB.bindText(stmt, 2, key);
    try db.DB.bindText(stmt, 3, tag_value);
    _ = try db.DB.step(stmt);
}
fn printUsage() !void {
    // Help text for `ml note`.
    const help_lines = [_][]const u8{
        "Usage: ml note <run_id> [options]\n\n",
        "Add metadata notes to a run.\n\n",
        "Options:\n",
        "  --text <string>        Free-form annotation\n",
        "  --hypothesis <string>  Research hypothesis\n",
        "  --outcome <status>     Outcome: validates/refutes/inconclusive\n",
        "  --confidence <0-1>     Confidence in outcome\n",
        "  --privacy <level>      Privacy: private/team/public\n",
        "  --author <name>        Author of the note\n",
        "  --help, -h             Show this help\n",
        "  --json                 Output structured JSON\n\n",
        "Examples:\n",
        "  ml note abc123 --text \"Try lr=3e-4 next\"\n",
        "  ml note abc123 --hypothesis \"LR scaling helps\"\n",
        "  ml note abc123 --outcome validates --confidence 0.9\n",
    };
    for (help_lines) |line| {
        std.debug.print("{s}", .{line});
    }
}

View file

@ -3,20 +3,20 @@ const Config = @import("../config.zig").Config;
const ws = @import("../net/ws/client.zig");
const crypto = @import("../utils/crypto.zig");
const logging = @import("../utils/logging.zig");
const core = @import("../core.zig");
pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
var flags = core.flags.CommonFlags{};
var keep_count: ?u32 = null;
var older_than_days: ?u32 = null;
var json: bool = false;
// Parse flags
var i: usize = 0;
while (i < args.len) : (i += 1) {
if (std.mem.eql(u8, args[i], "--help") or std.mem.eql(u8, args[i], "-h")) {
printUsage();
return;
return printUsage();
} else if (std.mem.eql(u8, args[i], "--json")) {
json = true;
flags.json = true;
} else if (std.mem.eql(u8, args[i], "--keep") and i + 1 < args.len) {
keep_count = try std.fmt.parseInt(u32, args[i + 1], 10);
i += 1;
@ -26,8 +26,10 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
}
}
core.output.init(if (flags.flags.json) .flags.json else .text);
if (keep_count == null and older_than_days == null) {
printUsage();
core.output.usage("prune", "ml prune --keep <n> | --older-than <days>");
return error.InvalidArgs;
}
@ -38,7 +40,7 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
}
// Add confirmation prompt
if (!json) {
if (!flags.flags.json) {
if (keep_count) |count| {
if (!logging.confirm("This will permanently delete all but the {d} most recent experiments. Continue?", .{count})) {
logging.info("Prune cancelled.\n", .{});
@ -90,13 +92,13 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
// Parse prune response (simplified - assumes success/failure byte)
if (response.len > 0) {
if (response[0] == 0x00) {
if (json) {
if (flags.json) {
std.debug.print("{\"ok\":true}\n", .{});
} else {
logging.success("✓ Prune operation completed successfully\n", .{});
}
} else {
if (json) {
if (flags.json) {
std.debug.print("{\"ok\":false,\"error_code\":{d}}\n", .{response[0]});
} else {
logging.err("✗ Prune operation failed: error code {d}\n", .{response[0]});
@ -104,7 +106,7 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
return error.PruneFailed;
}
} else {
if (json) {
if (flags.json) {
std.debug.print("{\"ok\":true,\"note\":\"no_response\"}\n", .{});
} else {
logging.success("✓ Prune request sent (no response received)\n", .{});
@ -117,6 +119,6 @@ fn printUsage() void {
logging.info("Options:\n", .{});
logging.info(" --keep <N> Keep N most recent experiments\n", .{});
logging.info(" --older-than <days> Remove experiments older than N days\n", .{});
logging.info(" --json Output machine-readable JSON\n", .{});
logging.info(" --flags.json Output machine-readable JSON\n", .{});
logging.info(" --help, -h Show this help message\n", .{});
}

View file

@ -4,10 +4,12 @@ const ws = @import("../net/ws/client.zig");
const crypto = @import("../utils/crypto.zig");
const colors = @import("../utils/colors.zig");
const auth = @import("../utils/auth.zig");
const core = @import("../core.zig");
pub const StatusOptions = struct {
json: bool = false,
watch: bool = false,
tui: bool = false,
limit: ?usize = null,
watch_interval: u32 = 5,
};
@ -22,17 +24,20 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
options.json = true;
} else if (std.mem.eql(u8, arg, "--watch")) {
options.watch = true;
} else if (std.mem.eql(u8, arg, "--tui")) {
options.tui = true;
} else if (std.mem.eql(u8, arg, "--limit") and i + 1 < args.len) {
options.limit = try std.fmt.parseInt(usize, args[i + 1], 10);
i += 1;
} else if (std.mem.startsWith(u8, arg, "--watch-interval=")) {
options.watch_interval = try std.fmt.parseInt(u32, arg[17..], 10);
} else if (std.mem.eql(u8, arg, "--help")) {
try printUsage();
return;
return printUsage();
}
}
core.output.init(if (options.json) .json else .text);
const config = try Config.load(allocator);
defer {
var mut_config = config;
@ -52,6 +57,8 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
if (options.watch) {
try runWatchMode(allocator, config, user_context, options);
} else if (options.tui) {
try runTuiMode(allocator, config, args);
} else {
try runSingleStatus(allocator, config, user_context, options);
}
@ -72,11 +79,11 @@ fn runSingleStatus(allocator: std.mem.Allocator, config: Config, user_context: a
}
fn runWatchMode(allocator: std.mem.Allocator, config: Config, user_context: auth.UserContext, options: StatusOptions) !void {
colors.printInfo("Starting watch mode (interval: {d}s). Press Ctrl+C to stop.\n", .{options.watch_interval});
core.output.info("Starting watch mode (interval: {d}s). Press Ctrl+C to stop.\n", .{options.watch_interval});
while (true) {
if (!options.json) {
colors.printInfo("\n=== FetchML Status - {s} ===\n", .{user_context.name});
core.output.info("\n=== FetchML Status - {s} ===", .{user_context.name});
}
try runSingleStatus(allocator, config, user_context, options);
@ -89,11 +96,55 @@ fn runWatchMode(allocator: std.mem.Allocator, config: Config, user_context: auth
}
}
fn runTuiMode(allocator: std.mem.Allocator, config: Config, args: []const []const u8) !void {
    // Launch the remote TUI monitor over SSH. Server mode only: local mode
    // has no remote worker to connect to.
    if (config.isLocalMode()) {
        core.output.errorMsg("status", "TUI mode requires server mode. Use 'ml status' without --tui for local mode.");
        return error.ServerOnlyFeature;
    }

    std.debug.print("Launching TUI via SSH...\n", .{});

    // Build the remote command: cd into the worker base dir, export the
    // config via env vars, then exec the TUI binary with pass-through args.
    var remote_cmd_buffer = std.ArrayList(u8){};
    defer remote_cmd_buffer.deinit(allocator);
    {
        const writer = remote_cmd_buffer.writer(allocator);
        try writer.print("cd {s} && ", .{config.worker_base});
        try writer.print(
            "FETCH_ML_CLI_HOST=\"{s}\" FETCH_ML_CLI_USER=\"{s}\" FETCH_ML_CLI_BASE=\"{s}\" ",
            .{ config.worker_host, config.worker_user, config.worker_base },
        );
        try writer.print(
            "FETCH_ML_CLI_PORT=\"{d}\" FETCH_ML_CLI_API_KEY=\"{s}\" ",
            .{ config.worker_port, config.api_key },
        );
        try writer.writeAll("./bin/tui");
        // NOTE(review): args are interpolated unquoted into a remote shell
        // command — confirm upstream validation before accepting untrusted input.
        for (args) |arg| {
            try writer.print(" {s}", .{arg});
        }
    }

    const remote_cmd = try remote_cmd_buffer.toOwnedSlice(allocator);
    defer allocator.free(remote_cmd);

    // Fix: ssh expects a single "user@host" destination. Passing the user and
    // host as two separate argv entries made ssh treat the user name as the
    // destination host and the real host as part of the remote command.
    const destination = try std.fmt.allocPrint(allocator, "{s}@{s}", .{ config.worker_user, config.worker_host });
    defer allocator.free(destination);

    const ssh_args = &[_][]const u8{
        "ssh",
        destination,
        remote_cmd,
    };

    var child = std.process.Child.init(ssh_args, allocator);
    try child.spawn();
    _ = try child.wait();
}
fn printUsage() !void {
colors.printInfo("Usage: ml status [options]\n", .{});
colors.printInfo("\nOptions:\n", .{});
colors.printInfo(" --json Output structured JSON\n", .{});
colors.printInfo(" --watch Watch mode - continuously update status\n", .{});
colors.printInfo(" --tui Launch TUI monitor via SSH\n", .{});
colors.printInfo(" --limit <count> Limit number of results shown\n", .{});
colors.printInfo(" --watch-interval=<s> Set watch interval in seconds (default: 5)\n", .{});
colors.printInfo(" --help Show this help message\n", .{});

View file

@ -6,6 +6,7 @@ const protocol = @import("../net/protocol.zig");
const colors = @import("../utils/colors.zig");
const crypto = @import("../utils/crypto.zig");
const io = @import("../utils/io.zig");
const core = @import("../core.zig");
pub const Options = struct {
json: bool = false,
@ -14,36 +15,32 @@ pub const Options = struct {
};
pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
if (args.len == 0) {
try printUsage();
return error.InvalidArgs;
}
var opts = Options{};
var flags = core.flags.CommonFlags{};
var commit_hex: ?[]const u8 = null;
var task_id: ?[]const u8 = null;
var i: usize = 0;
while (i < args.len) : (i += 1) {
const arg = args[i];
if (std.mem.eql(u8, arg, "--json")) {
opts.json = true;
flags.json = true;
} else if (std.mem.eql(u8, arg, "--verbose")) {
opts.verbose = true;
flags.verbose = true;
} else if (std.mem.eql(u8, arg, "--task") and i + 1 < args.len) {
opts.task_id = args[i + 1];
task_id = args[i + 1];
i += 1;
} else if (std.mem.startsWith(u8, arg, "--help")) {
try printUsage();
return;
return printUsage();
} else if (std.mem.startsWith(u8, arg, "--")) {
colors.printError("Unknown option: {s}\n", .{arg});
try printUsage();
core.output.errorMsg("validate", "Unknown option");
return error.InvalidArgs;
} else {
commit_hex = arg;
}
}
core.output.init(if (flags.json) .json else .text);
const config = try Config.load(allocator);
defer {
var mut_config = config;
@ -61,10 +58,13 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
const api_key_hash = try crypto.hashApiKey(allocator, config.api_key);
defer allocator.free(api_key_hash);
if (opts.task_id) |tid| {
if (task_id) |tid| {
try client.sendValidateRequestTask(api_key_hash, tid);
} else {
if (commit_hex == null or commit_hex.?.len != 40) {
if (commit_hex == null) {
core.output.errorMsg("validate", "No commit hash specified");
return printUsage();
} else if (commit_hex.?.len != 40) {
colors.printError("validate requires a 40-char commit id (or --task <task_id>)\n", .{});
try printUsage();
return error.InvalidArgs;
@ -80,7 +80,7 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
defer allocator.free(msg);
const packet = protocol.ResponsePacket.deserialize(msg, allocator) catch {
if (opts.json) {
if (flags.json) {
var out = io.stdoutWriter();
try out.print("{s}\n", .{msg});
} else {
@ -101,7 +101,7 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
}
const payload = packet.data_payload.?;
if (opts.json) {
if (flags.json) {
var out = io.stdoutWriter();
try out.print("{s}\n", .{payload});
} else {
@ -109,7 +109,7 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void {
defer parsed.deinit();
const root = parsed.value.object;
const ok = try printHumanReport(root, opts.verbose);
const ok = try printHumanReport(root, flags.verbose);
if (!ok) return error.ValidationFailed;
}
}

View file

@ -1327,6 +1327,81 @@ pub const Client = struct {
try frame.sendWebSocketFrame(stream, buffer);
}
pub fn sendCreateExperiment(self: *Client, api_key_hash: []const u8, name: []const u8, description: []const u8) !void {
    // Send a create-experiment request over the WebSocket. Wire format:
    // [opcode: u8][api_key_hash: 16][name_len: u8][name][desc_len: u16 BE][description]
    const stream = self.stream orelse return error.NotConnected;
    if (api_key_hash.len != 16) return error.InvalidApiKeyHash;
    if (name.len == 0 or name.len > 255) return error.NameTooLong;
    if (description.len > 1023) return error.DescriptionTooLong;

    const msg_len = 1 + 16 + 1 + name.len + 2 + description.len;
    const msg = try self.allocator.alloc(u8, msg_len);
    defer self.allocator.free(msg);

    msg[0] = @intFromEnum(opcode.create_experiment);
    @memcpy(msg[1..17], api_key_hash);
    msg[17] = @intCast(name.len);
    const name_end = 18 + name.len;
    @memcpy(msg[18..name_end], name);
    std.mem.writeInt(u16, msg[name_end..][0..2], @intCast(description.len), .big);
    if (description.len > 0) {
        @memcpy(msg[name_end + 2 ..], description);
    }

    try frame.sendWebSocketFrame(stream, msg);
}
pub fn sendListExperiments(self: *Client, api_key_hash: []const u8) !void {
    // Request the experiment list. Wire format: [opcode: u8][api_key_hash: 16].
    const stream = self.stream orelse return error.NotConnected;
    if (api_key_hash.len != 16) return error.InvalidApiKeyHash;

    // Fixed-size message — build it on the stack instead of allocating.
    var msg: [17]u8 = undefined;
    msg[0] = @intFromEnum(opcode.list_experiments);
    @memcpy(msg[1..], api_key_hash);

    try frame.sendWebSocketFrame(stream, &msg);
}
pub fn sendGetExperimentByID(self: *Client, api_key_hash: []const u8, experiment_id: []const u8) !void {
    // Request a single experiment by id. Wire format:
    // [opcode: u8][api_key_hash: 16][exp_id_len: u8][experiment_id]
    const stream = self.stream orelse return error.NotConnected;
    if (api_key_hash.len != 16) return error.InvalidApiKeyHash;
    if (experiment_id.len == 0 or experiment_id.len > 255) return error.InvalidExperimentId;

    const msg = try self.allocator.alloc(u8, 18 + experiment_id.len);
    defer self.allocator.free(msg);

    msg[0] = @intFromEnum(opcode.get_experiment);
    @memcpy(msg[1..17], api_key_hash);
    msg[17] = @intCast(experiment_id.len);
    @memcpy(msg[18..], experiment_id);

    try frame.sendWebSocketFrame(stream, msg);
}
// Logs and debug methods
pub fn sendGetLogs(self: *Client, target_id: []const u8, api_key_hash: []const u8) !void {
const stream = self.stream orelse return error.NotConnected;

View file

@ -13,6 +13,8 @@ pub const Opcode = enum(u8) {
crash_report = 0x05,
log_metric = 0x0A,
get_experiment = 0x0B,
create_experiment = 0x24,
list_experiments = 0x25,
start_jupyter = 0x0D,
stop_jupyter = 0x0E,
remove_jupyter = 0x18,
@ -64,6 +66,8 @@ pub const prune = Opcode.prune;
pub const crash_report = Opcode.crash_report;
pub const log_metric = Opcode.log_metric;
pub const get_experiment = Opcode.get_experiment;
pub const create_experiment = Opcode.create_experiment;
pub const list_experiments = Opcode.list_experiments;
pub const start_jupyter = Opcode.start_jupyter;
pub const stop_jupyter = Opcode.stop_jupyter;
pub const remove_jupyter = Opcode.remove_jupyter;

View file

@ -123,7 +123,7 @@ fn isNativeForTarget(data: []const u8) bool {
/// 1. Download or build a static rsync binary for your target platform
/// 2. Place it at cli/src/assets/rsync/rsync_release.bin
/// 3. Build with: zig build prod (or release/cross targets)
const placeholder_data = @embedFile("../assets/rsync_placeholder.bin");
const placeholder_data = @embedFile("../assets/rsync/rsync_placeholder.bin");
const release_data = if (build_options.has_rsync_release)
@embedFile(build_options.rsync_release_path)

View file

@ -7,7 +7,6 @@ import (
"os"
"path/filepath"
"modernc.org/sqlite"
_ "modernc.org/sqlite" // SQLite driver
)

View file

@ -8,6 +8,38 @@ weight: 3
Lightweight command-line interface (`ml`) for managing ML experiments. Built in Zig for minimal size and fast startup.
## Architecture
The CLI follows a modular 3-layer architecture:
```
src/
├── core/ # Shared foundation
│ ├── context.zig # Execution context (allocator, config, mode dispatch)
│ ├── output.zig # Unified JSON/text output helpers
│ └── flags.zig # Common flag parsing
├── local/ # Local mode operations (SQLite)
│ └── experiment_ops.zig # Experiment CRUD for local DB
├── server/ # Server mode operations (WebSocket)
│ └── experiment_api.zig # Experiment API for remote server
└── commands/ # Thin command routers
├── experiment.zig # ~100 lines (was 887)
└── queue.zig # Modular with queue/ submodules
```
### Mode Dispatch
Commands use `core.context.Context` to auto-detect local vs server mode:
```zig
var ctx = core.context.Context.init(allocator, cfg, flags.json);
if (ctx.isLocal()) {
return try local.experiment.list(ctx.allocator, ctx.json_output);
} else {
return try server.experiment.list(ctx.allocator, ctx.json_output);
}
```
## Quick start
```bash