---
# Standard security mode worker configuration
# Normal sandbox, network isolation

# SSH target for the worker host
host: localhost
port: 22
user: worker-user
base_path: /var/lib/fetchml
entrypoint: train.py

# Redis configuration (quoted: value contains ':' characters)
redis_url: "redis://redis:6379"

# Standard mode - normal security
compliance_mode: standard
max_workers: 2

# Sandbox settings (standard isolation)
sandbox:
  network_mode: none
  seccomp_profile: default
  no_new_privileges: true
  # Secrets the sandboxed job is allowed to read
  allowed_secrets:
    - HF_TOKEN
    - WANDB_API_KEY
    - AWS_ACCESS_KEY_ID
    - AWS_SECRET_ACCESS_KEY

# GPU configuration
gpu_vendor: none

# Artifact handling (reasonable limits)
max_artifact_files: 1000
max_artifact_total_bytes: 536870912  # 512MB

# Provenance (enabled)
provenance_best_effort: true

# Plugin Configuration
plugins:
  # Jupyter Notebook/Lab Service
  jupyter:
    enabled: true
    image: "quay.io/jupyter/base-notebook:latest"
    default_port: 8888
    mode: "lab"
    security:
      trusted_channels:
        - "conda-forge"
        - "defaults"
      blocked_packages:
        - "requests"
        - "urllib3"
      # NOTE(review): placed under `security` based on adjacency in the
      # mangled source — confirm against the consumer's schema
      require_password: true
    # NOTE(review): per-instance limits placed at the plugin level, not
    # under `security` — confirm against the consumer's schema
    max_gpu_per_instance: 1
    max_memory_per_instance: "8Gi"

  # vLLM Inference Service
  vllm:
    enabled: true
    image: "vllm/vllm-openai:latest"
    default_port: 8000
    model_cache: "/models"
    default_quantization: ""
    max_gpu_per_instance: 1
    max_model_len: 4096