---
# Development mode worker configuration
# Relaxed validation for fast iteration
host: localhost
port: 22
user: dev-user
base_path: /tmp/fetchml_dev
entrypoint: train.py

# Redis configuration
redis_url: redis://redis:6379

# Development mode - relaxed security
compliance_mode: dev
max_workers: 4

# Worker mode - must be "distributed" to use scheduler
mode: distributed

# Sandbox settings (relaxed for development)
sandbox:
  network_mode: bridge
  seccomp_profile: ""
  no_new_privileges: false
  allowed_secrets: []  # All secrets allowed in dev

# GPU configuration
gpu_vendor: none

# Artifact handling (relaxed limits)
max_artifact_files: 10000
max_artifact_total_bytes: 1073741824  # 1GB

# Provenance (disabled in dev for speed)
provenance_best_effort: false

# Plugin Configuration (development mode)
plugins:
  # Jupyter Notebook/Lab Service
  jupyter:
    enabled: true
    image: "quay.io/jupyter/base-notebook:latest"
    default_port: 8888
    mode: "lab"
    security:
      trusted_channels:
        - "conda-forge"
        - "defaults"
      blocked_packages: []  # No restrictions in dev
      require_password: false  # No password for dev
    # NOTE(review): structure reconstructed from a flattened source; the two
    # resource limits below are placed at the jupyter level (not under
    # security) - confirm against the consumer's config schema.
    max_gpu_per_instance: 1
    max_memory_per_instance: "4Gi"

  # vLLM Inference Service
  vllm:
    enabled: true
    image: "vllm/vllm-openai:latest"
    default_port: 8000
    model_cache: "/tmp/models"  # Temp location for dev
    default_quantization: ""  # No quantization for dev
    max_gpu_per_instance: 1
    max_model_len: 2048