fetch_ml/configs/worker/docker-dev.yaml
Jeremie Fraeys · 8a7e7695f4 · 2026-03-04 13:22:52 -05:00

config: consolidate and clean up configuration files

- Remove redundant config examples (distributed/, standalone/, examples/)
- Delete dev-local.yaml variants (use dev.yaml with env vars)
- Delete prod.yaml (use multi-user.yaml or homelab-secure.yaml)
- Clean up worker configs: remove docker.yaml, homelab-sandbox.yaml
- Update remaining configs with current best practices
- Simplify config schema and documentation

# Development mode worker configuration
# Relaxed validation for fast iteration
host: localhost
port: 22
user: dev-user
base_path: /tmp/fetchml_dev
entrypoint: train.py
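
# NOTE: port 22 and a user name suggest the worker host is reached over SSH,
# with base_path as the job working directory (an interpretation; the schema
# is not documented in this file).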
# Redis configuration
redis_url: redis://redis:6379
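# NOTE: the "redis" hostname assumes a Docker network alias (e.g. a Compose
# service named redis); outside Docker, point this at localhost:6379 or
# wherever Redis is reachable.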
# Development mode - relaxed security
compliance_mode: dev
max_workers: 4
# Worker mode - must be "distributed" to use scheduler
mode: distributed
# Sandbox settings (relaxed for development)
sandbox:
  network_mode: bridge
  seccomp_profile: ""
  no_new_privileges: false
  allowed_secrets: []  # All secrets allowed in dev
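  # For contrast, a locked-down sandbox might look like the sketch below
  # (the seccomp profile path and secret name are illustrative assumptions):
  #   network_mode: none
  #   seccomp_profile: "/etc/fetchml/seccomp.json"
  #   no_new_privileges: true
  #   allowed_secrets: ["WANDB_API_KEY"]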
# GPU configuration
gpu_vendor: none
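# To expose GPUs to jobs, set this to the vendor (presumably "nvidia" or
# "amd"; the accepted values are an assumption based on the key name).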
# Artifact handling (relaxed limits)
max_artifact_files: 10000
max_artifact_total_bytes: 1073741824  # 1 GiB (1024^3 bytes)
# Provenance (best-effort provenance capture is off in dev for speed)
provenance_best_effort: false
# Plugin Configuration (development mode)
plugins:
  # Jupyter Notebook/Lab Service
  jupyter:
    enabled: true
    image: "quay.io/jupyter/base-notebook:latest"
    default_port: 8888
    mode: "lab"
    security:
      trusted_channels:
        - "conda-forge"
        - "defaults"
      blocked_packages: []     # No restrictions in dev
      require_password: false  # No password for dev
    max_gpu_per_instance: 1
    max_memory_per_instance: "4Gi"
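  # A hardened variant of this block would typically pin the image to a
  # digest, set require_password: true, and point trusted_channels at an
  # internal mirror (sketch; the mirror URL is hypothetical):
  #   trusted_channels: ["https://conda.internal.example/channel"]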

  # vLLM Inference Service
  vllm:
    enabled: true
    image: "vllm/vllm-openai:latest"
    default_port: 8000
    model_cache: "/tmp/models"  # Temp location for dev
    default_quantization: ""    # No quantization for dev
    max_gpu_per_instance: 1
    max_model_len: 2048
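  # default_quantization is presumably forwarded to vLLM's --quantization
  # option (vLLM supports schemes such as "awq" and "gptq"); max_model_len
  # caps the context window, and 2048 keeps dev-mode GPU memory use small.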