feat: add colima gpu model runner scripts
This commit is contained in:
parent
911ec3a676
commit
4bdd64898c
2 changed files with 656 additions and 0 deletions
509
colima/.config/colima/setup-colima-gpu-model-runner.sh
Executable file
509
colima/.config/colima/setup-colima-gpu-model-runner.sh
Executable file
|
|
@ -0,0 +1,509 @@
|
|||
#!/bin/bash
|
||||
# setup-official-model-runner.sh
|
||||
#
|
||||
# This script builds and installs Docker Model Runner from the OFFICIAL Docker source
|
||||
# with GPU support on macOS Apple Silicon using Colima.
|
||||
#
|
||||
# Prerequisites:
|
||||
# - macOS with Apple Silicon (M1/M2/M3/M4)
|
||||
# - Homebrew installed
|
||||
# - Colima installed (brew install colima)
|
||||
#
|
||||
# What this script does:
|
||||
# 1. Installs build dependencies (Go, llama.cpp, Docker CLI)
|
||||
# 2. Clones and builds model-runner from official Docker repo
|
||||
# 3. Builds the CLI plugin from official Docker repo
|
||||
# 4. Sets up model-runner as a macOS service (launchd)
|
||||
# 5. Configures macOS Docker CLI to use the host service
|
||||
# 6. Tests the setup
|
||||
|
||||
# Abort on the first unhandled error.
# NOTE: `pipefail` is intentionally NOT enabled — several conditions below
# use `cmd | grep -q …`, where grep's early exit can SIGPIPE the producer
# and flip the pipeline status even though a match was found.
set -e

# ANSI colors for log output (NC = reset / no color)
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color

# Configuration (constants — never reassigned, hence readonly)
readonly OFFICIAL_REPO="https://github.com/docker/model-runner.git"
readonly BUILD_DIR="$HOME/.local/src/docker-model-runner"
readonly BIN_DIR="$HOME/.local/bin"
readonly CLI_PLUGINS_DIR="$HOME/.docker/cli-plugins"
readonly LAUNCH_AGENT_LABEL="com.docker.model-runner"
readonly LAUNCH_AGENT_PLIST="$HOME/Library/LaunchAgents/${LAUNCH_AGENT_LABEL}.plist"
readonly MODEL_RUNNER_PORT=12434
# Logging helpers.
# printf is used instead of `echo -e` so the message text is printed
# literally (echo -e would also expand backslash escapes inside the
# message itself); %b expands the escapes in the color codes only.
# Warnings and errors go to stderr, as diagnostics should.
# Accepts any number of args ("$*"), backward compatible with "$1".
log_info() {
  printf '%b[INFO]%b %s\n' "${GREEN}" "${NC}" "$*"
}

log_warn() {
  printf '%b[WARN]%b %s\n' "${YELLOW}" "${NC}" "$*" >&2
}

log_error() {
  printf '%b[ERROR]%b %s\n' "${RED}" "${NC}" "$*" >&2
}

log_step() {
  printf '%b[STEP]%b %s\n' "${BLUE}" "${NC}" "$*"
}
# Verify the host can run this installer: macOS on Apple Silicon with
# Homebrew present. Colima is installed on demand if missing.
# Exits non-zero on any hard failure.
check_prerequisites() {
  log_info "Checking prerequisites..."

  # Must be macOS
  case "$OSTYPE" in
    darwin*) ;;
    *)
      log_error "This script only works on macOS"
      exit 1
      ;;
  esac

  # Must be Apple Silicon
  if [[ "$(uname -m)" != "arm64" ]]; then
    log_error "This script requires Apple Silicon (M1/M2/M3/M4)"
    exit 1
  fi

  # Homebrew is a hard requirement
  command -v brew &>/dev/null || {
    log_error "Homebrew is not installed. Install from https://brew.sh"
    exit 1
  }

  # Colima can be installed automatically
  command -v colima &>/dev/null || {
    log_warn "Colima not found. Installing..."
    brew install colima
  }

  log_info "✓ Prerequisites check passed!"
}
# Install everything needed to build and run model-runner:
# Go (compiler), llama.cpp (inference backend), Docker CLI, git, make.
# Each tool is installed only if not already on PATH.
install_build_dependencies() {
  log_step "Installing build dependencies..."

  # Install Go (required for building)
  if ! command -v go &>/dev/null; then
    log_info "Installing Go..."
    brew install go
  else
    log_info "✓ Go already installed ($(go version))"
  fi

  # Install llama.cpp with Metal support
  if ! command -v llama-server &>/dev/null; then
    log_info "Installing llama.cpp..."
    brew install llama.cpp
  else
    log_info "✓ llama.cpp already installed"
  fi

  # Verify llama-server path.
  # FIX: use `command -v` (not `which`) and guard with `|| true` — under
  # `set -e` a failing command substitution in the assignment would kill
  # the script before our helpful error message could print.
  LLAMA_SERVER_PATH=$(command -v llama-server || true)
  if [ -z "$LLAMA_SERVER_PATH" ]; then
    log_error "llama-server not found in PATH"
    exit 1
  fi
  log_info "✓ llama-server found at: $LLAMA_SERVER_PATH"

  # Best-effort check that the brew build was compiled with Metal support
  if llama-server --version 2>&1 | grep -q "Metal"; then
    log_info "✓ llama-server has Metal GPU support"
  else
    log_warn "llama-server may not have Metal GPU support"
  fi

  # Install Docker CLI (client only; the daemon is provided by Colima)
  if ! command -v docker &>/dev/null; then
    log_info "Installing Docker CLI..."
    brew install docker
  else
    log_info "✓ Docker CLI already installed ($(docker --version))"
  fi

  # Install git (needed to clone the official repo)
  if ! command -v git &>/dev/null; then
    log_info "Installing git..."
    brew install git
  else
    log_info "✓ git already installed"
  fi

  # Install make (ships with the Xcode Command Line Tools)
  if ! command -v make &>/dev/null; then
    log_info "Installing make..."
    xcode-select --install 2>/dev/null || log_info "Xcode Command Line Tools already installed"
  else
    log_info "✓ make already installed"
  fi

  log_info "✓ All build dependencies installed"
}
# Fetch the official model-runner sources into $BUILD_DIR: a fresh clone
# on first run, otherwise a hard sync to origin/main. Leaves the current
# directory inside the checkout.
clone_official_repo() {
  log_step "Cloning official Docker model-runner repository..."

  # Ensure the parent of the checkout exists
  mkdir -p "$(dirname "$BUILD_DIR")"

  if [ ! -d "$BUILD_DIR" ]; then
    log_info "Cloning from $OFFICIAL_REPO"
    git clone "$OFFICIAL_REPO" "$BUILD_DIR"
    cd "$BUILD_DIR"
  else
    # Existing checkout: discard local state and track upstream main
    log_info "Repository already exists, updating..."
    cd "$BUILD_DIR"
    git fetch origin
    git reset --hard origin/main
  fi

  log_info "✓ Official repository ready at $BUILD_DIR"
  log_info " Latest commit: $(git log -1 --oneline)"
}
# Compile the model-runner daemon via the repo Makefile and install the
# resulting binary into $BIN_DIR. Exits non-zero if make produced nothing.
build_model_runner() {
  log_step "Building model-runner binary from source..."

  cd "$BUILD_DIR"

  log_info "Running 'make build'..."
  make build

  mkdir -p "$BIN_DIR"

  # Guard clause: a missing artifact means the build failed
  if [ ! -f "./model-runner" ]; then
    log_error "Build failed - model-runner binary not found"
    exit 1
  fi

  cp "./model-runner" "$BIN_DIR/model-runner"
  chmod +x "$BIN_DIR/model-runner"
  log_info "✓ model-runner built and installed to $BIN_DIR/model-runner"
}
# Build the `docker model` CLI plugin from the repo's cmd/cli directory
# and install it where Docker discovers plugins (~/.docker/cli-plugins,
# by the docker-<name> naming convention). Exits non-zero if the build
# produced no binary; a failed smoke test only warns.
build_cli_plugin() {
  log_step "Building docker model CLI plugin from source..."

  cd "$BUILD_DIR/cmd/cli"

  # Build the CLI plugin
  log_info "Running 'make build'..."
  make build

  # Create CLI plugins directory
  mkdir -p "$CLI_PLUGINS_DIR"

  # Copy plugin to CLI plugins directory
  if [ -f "./model-cli" ]; then
    cp "./model-cli" "$CLI_PLUGINS_DIR/docker-model"
    chmod +x "$CLI_PLUGINS_DIR/docker-model"
    log_info "✓ docker model plugin built and installed to $CLI_PLUGINS_DIR/docker-model"
  else
    log_error "Build failed - model-cli binary not found"
    exit 1
  fi

  # Return to the repo root explicitly.
  # FIX: the previous `cd -` echoed the directory to stdout and relied on
  # OLDPWD happening to be $BUILD_DIR; an explicit cd is silent and exact.
  cd "$BUILD_DIR"

  # Smoke-test the plugin through the docker CLI
  if docker model --help &>/dev/null; then
    log_info "✓ docker model plugin is working!"
  else
    log_warn "docker model plugin installed but verification failed"
  fi
}
# Install model-runner as a per-user launchd service (LaunchAgent):
# generates the plist, loads it, and verifies the agent is listed.
# Exits non-zero if the service fails to appear after loading.
setup_launch_daemon() {
  log_step "Setting up model-runner as a macOS service..."

  # Stop existing service if running; unload may fail if the agent is
  # listed but already gone, so errors are deliberately ignored
  if launchctl list | grep -q "$LAUNCH_AGENT_LABEL"; then
    log_info "Stopping existing service..."
    launchctl unload "$LAUNCH_AGENT_PLIST" 2>/dev/null || true
  fi

  # Create LaunchAgent plist. The heredoc delimiter is deliberately
  # UNQUOTED so $LAUNCH_AGENT_LABEL, $BIN_DIR, $MODEL_RUNNER_PORT and
  # $HOME are expanded into the generated file. LLAMA_SERVER_BIN is
  # hard-coded to the Apple Silicon Homebrew prefix (/opt/homebrew).
  cat >"$LAUNCH_AGENT_PLIST" <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>$LAUNCH_AGENT_LABEL</string>
<key>ProgramArguments</key>
<array>
<string>$BIN_DIR/model-runner</string>
</array>
<key>EnvironmentVariables</key>
<dict>
<key>MODEL_RUNNER_PORT</key>
<string>$MODEL_RUNNER_PORT</string>
<key>LLAMA_SERVER_BIN</key>
<string>/opt/homebrew/bin/llama-server</string>
<key>PATH</key>
<string>/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin</string>
<key>HOME</key>
<string>$HOME</string>
</dict>
<key>WorkingDirectory</key>
<string>$HOME</string>
<key>RunAtLoad</key>
<true/>
<key>KeepAlive</key>
<true/>
<key>StandardOutPath</key>
<string>$HOME/Library/Logs/model-runner.log</string>
<key>StandardErrorPath</key>
<string>$HOME/Library/Logs/model-runner.err</string>
</dict>
</plist>
EOF

  # Load the service
  log_info "Starting model-runner service..."
  launchctl load "$LAUNCH_AGENT_PLIST"

  # Wait for it to start (fixed delay; launchd gives no ready signal here)
  sleep 3

  # Check if it's running
  if launchctl list | grep -q "$LAUNCH_AGENT_LABEL"; then
    log_info "✓ model-runner service is running!"
  else
    log_error "Failed to start model-runner service"
    log_info "Check logs at: $HOME/Library/Logs/model-runner.log"
    exit 1
  fi
}
# Inspect the service logs for evidence of GPU (Metal) support.
# Best-effort and deliberately non-fatal: both branches return 0 — a
# missing GPU marker only produces warnings, never aborts the install.
verify_gpu_support() {
  log_step "Verifying GPU support and configuration..."

  # Wait for service to be ready (fixed delay after launchd load)
  sleep 3

  # Check both log files
  log_info "Checking model-runner logs..."

  if [ -f "$HOME/Library/Logs/model-runner.err" ]; then
    # Show last few lines of error log
    log_info "Recent error log:"
    tail -n 10 "$HOME/Library/Logs/model-runner.err"
  fi

  if [ -f "$HOME/Library/Logs/model-runner.log" ]; then
    log_info "Recent output log:"
    tail -n 10 "$HOME/Library/Logs/model-runner.log"
  fi

  # Check for the GPU support message in either log file.
  # `\|` is BRE alternation: matches "gpuSupport=true" OR "Metal".
  # 2>/dev/null swallows "file not found" when a log doesn't exist yet.
  if grep -q "gpuSupport=true\|Metal" "$HOME/Library/Logs/model-runner.err" 2>/dev/null ||
    grep -q "gpuSupport=true\|Metal" "$HOME/Library/Logs/model-runner.log" 2>/dev/null; then
    log_info "✓ GPU support detected!"
    return 0
  else
    log_warn "Checking for configuration issues..."

    # Check if llama-server path issue (model-runner couldn't exec it)
    if grep -q "No such file or directory" "$HOME/Library/Logs/model-runner.err" 2>/dev/null; then
      log_error "llama-server path issue detected!"
      log_info "The model-runner cannot find llama-server"
      log_info "Try setting LLAMA_SERVER_BIN explicitly"
    fi

    log_warn "Check full logs:"
    log_warn " tail -f $HOME/Library/Logs/model-runner.err"
    log_warn " tail -f $HOME/Library/Logs/model-runner.log"
    # Intentionally return success: GPU detection is informational only
    return 0
  fi
}
# Smoke-test the running service: a GET against /models proves the
# daemon is up and listening. Exits non-zero if unreachable.
test_service() {
  log_step "Testing model-runner service..."

  # Guard clause: bail out with pointers to the logs on failure
  if ! curl -sf http://localhost:$MODEL_RUNNER_PORT/models >/dev/null; then
    log_error "Service is not responding"
    log_info "Check logs at: $HOME/Library/Logs/model-runner.log"
    exit 1
  fi

  log_info "✓ Service is responding on port $MODEL_RUNNER_PORT"
}
# Ensure the Colima VM (Docker daemon) is running; start it with a
# default 4-CPU / 8 GiB / 60 GiB vz configuration if not.
setup_colima() {
  log_step "Setting up Colima..."

  # `colima status` exits non-zero when the VM is not running
  if colima status &>/dev/null; then
    log_info "✓ Colima is already running"
  else
    log_info "Starting Colima..."
    colima start --cpu 4 --memory 8 --disk 60 --vm-type=vz
  fi
}
# Point the Docker CLI at the host model-runner service: persist
# MODEL_RUNNER_HOST (and a PATH addition) in the user's shell rc file,
# and export both for the current session.
configure_docker_cli() {
  log_step "Configuring Docker CLI to use host model-runner..."

  # Pick the user's login-shell rc file.
  # FIX: the previous check of $ZSH_VERSION / $BASH_VERSION described the
  # interpreter running THIS script — which is always bash (see shebang) —
  # so zsh users (the macOS default) always got ~/.bashrc. $SHELL reflects
  # the actual login shell.
  local shell_rc=""
  case "${SHELL:-}" in
    */zsh) shell_rc="$HOME/.zshrc" ;;
    */bash) shell_rc="$HOME/.bashrc" ;;
  esac

  if [ -n "$shell_rc" ]; then
    # Only append once; the marker string is MODEL_RUNNER_HOST itself
    if ! grep -q "MODEL_RUNNER_HOST" "$shell_rc" 2>/dev/null; then
      log_info "Adding MODEL_RUNNER_HOST to $shell_rc"
      # Quoted 'EOF' keeps $HOME/$PATH literal in the rc file
      cat >>"$shell_rc" <<'EOF'

# Docker Model Runner (GPU-accelerated, built from official source)
export MODEL_RUNNER_HOST="http://localhost:12434"
export PATH="$HOME/.local/bin:$PATH"
EOF
      log_info "✓ Added MODEL_RUNNER_HOST to shell profile"
      log_warn "Run 'source $shell_rc' or restart your terminal to apply"
    else
      log_info "✓ MODEL_RUNNER_HOST already configured in shell profile"
    fi
  fi

  # Set for current session as well (rc file only affects new shells)
  export MODEL_RUNNER_HOST="http://localhost:12434"
  export PATH="$HOME/.local/bin:$PATH"
}
# Smoke-test the `docker model` plugin against the host service.
# Never fails the install: an error may simply mean no models yet.
test_docker_model_cli() {
  log_step "Testing docker model CLI..."

  export MODEL_RUNNER_HOST="http://localhost:12434"

  if ! docker model ls &>/dev/null; then
    log_warn "docker model CLI test returned an error, but this might be normal if no models are installed yet"
  else
    log_info "✓ docker model CLI is working!"
  fi
}
# Print the post-install usage and troubleshooting guide.
# The heredoc delimiter is QUOTED ('EOF'), so nothing inside is expanded —
# every $, backslash and quote below is printed literally.
print_usage_info() {
  cat <<'EOF'

╔════════════════════════════════════════════════════════════════════════╗
║ 🎉 Installation Complete (Official Docker Source)! 🎉 ║
╚════════════════════════════════════════════════════════════════════════╝

Docker Model Runner with GPU support is now running on your system!
Built from official Docker repository: github.com/docker/model-runner

⚠️ IMPORTANT: Run this command now or restart your terminal:
source ~/.zshrc # or ~/.bashrc if you use bash

📋 Quick Start Guide:

1. Pull a model:
docker model pull ai/smollm2

2. List models:
docker model ls

3. Run inference (single prompt):
docker model run ai/smollm2 "Hello, how are you?"

4. Run inference (interactive):
docker model run ai/smollm2

5. Use in containers:
docker run -e OPENAI_API_BASE=http://host.lima.internal:12434/v1 your-app

6. Use OpenAI-compatible API:
curl http://localhost:12434/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"model": "ai/smollm2",
"messages": [{"role": "user", "content": "Hello!"}]
}'

🔍 Monitoring & Troubleshooting:

View logs:
tail -f ~/Library/Logs/model-runner.log
tail -f ~/Library/Logs/model-runner.err

Check service status:
launchctl list | grep model-runner

Monitor GPU usage:
sudo powermetrics --samplers gpu_power -i 1000

Restart service:
launchctl unload ~/Library/LaunchAgents/com.docker.model-runner.plist
launchctl load ~/Library/LaunchAgents/com.docker.model-runner.plist

Stop service:
launchctl unload ~/Library/LaunchAgents/com.docker.model-runner.plist

Rebuild from source (to get updates):
cd ~/.local/src/docker-model-runner
git pull origin main
make build
cp ./model-runner ~/.local/bin/
launchctl unload ~/Library/LaunchAgents/com.docker.model-runner.plist
launchctl load ~/Library/LaunchAgents/com.docker.model-runner.plist

API endpoint:
http://localhost:12434

🎯 What's Running:

✓ model-runner service (built from official Docker source)
✓ llama.cpp with Metal acceleration
✓ Colima (Docker daemon)
✓ docker model CLI (built from official Docker source)

📚 Documentation:
- Official repo: https://github.com/docker/model-runner
- Docker docs: https://docs.docker.com/ai/model-runner/

✅ Source Verification:
- Built from: github.com/docker/model-runner (official Docker repository)
- Build location: ~/.local/src/docker-model-runner
- Binary location: ~/.local/bin/model-runner
- Verify source: cd ~/.local/src/docker-model-runner && git remote -v

Happy inferencing! 🚀
EOF
}
# Main installation flow. Steps run in dependency order; the script runs
# under `set -e`, so any failing step aborts the whole installation here.
main() {
  echo ""
  log_info "Starting Docker Model Runner Setup (Official Source Build)"
  log_info "Building from: $OFFICIAL_REPO"
  echo ""

  check_prerequisites        # OS / arch / brew checks
  install_build_dependencies # go, llama.cpp, docker CLI, git, make
  clone_official_repo        # sources into $BUILD_DIR
  build_model_runner         # daemon binary -> $BIN_DIR
  build_cli_plugin           # docker-model plugin -> $CLI_PLUGINS_DIR
  setup_launch_daemon        # launchd service, starts it
  verify_gpu_support         # log inspection, non-fatal
  test_service               # HTTP smoke test on $MODEL_RUNNER_PORT
  setup_colima               # Docker daemon VM
  configure_docker_cli       # MODEL_RUNNER_HOST in shell profile
  test_docker_model_cli      # plugin smoke test, non-fatal

  echo ""
  print_usage_info
}

# Run main function
main "$@"
147
colima/.config/colima/uninstall-model-runner.sh
Executable file
147
colima/.config/colima/uninstall-model-runner.sh
Executable file
|
|
@ -0,0 +1,147 @@
|
|||
#!/bin/bash
# uninstall-model-runner.sh
#
# Removes the model-runner service, binary, and related files.
# Leaves llama.cpp, Docker CLI, and Colima installed.

# Abort on the first unhandled error
set -e

# ANSI colors for log output (NC = reset / no color)
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m' # No Color

# Locations created by the install script (constants, hence readonly;
# must stay in sync with setup-colima-gpu-model-runner.sh)
readonly BIN_DIR="$HOME/.local/bin"
readonly BUILD_DIR="$HOME/.local/src/docker-model-runner"
readonly CLI_PLUGINS_DIR="$HOME/.docker/cli-plugins"
readonly LAUNCH_AGENT_LABEL="com.docker.model-runner"
readonly LAUNCH_AGENT_PLIST="$HOME/Library/LaunchAgents/${LAUNCH_AGENT_LABEL}.plist"
readonly SYMLINK_DIR="$HOME/updated-inference/bin"
# Logging helpers.
# printf is used instead of `echo -e` so the message text is printed
# literally (echo -e would also expand backslash escapes inside the
# message itself); %b expands the escapes in the color codes only.
# Warnings and errors go to stderr; accepts any number of args ("$*").
log_info() {
  printf '%b[INFO]%b %s\n' "${GREEN}" "${NC}" "$*"
}

log_warn() {
  printf '%b[WARN]%b %s\n' "${YELLOW}" "${NC}" "$*" >&2
}

log_error() {
  printf '%b[ERROR]%b %s\n' "${RED}" "${NC}" "$*" >&2
}
# --- Uninstall sequence (top-level; runs under `set -e`) ---
# Each artifact is removed only if present; absence is reported, not fatal.

log_info "Uninstalling Docker Model Runner..."
echo ""

# Stop and unload LaunchAgent if running
if launchctl list | grep -q "$LAUNCH_AGENT_LABEL"; then
  log_info "Stopping model-runner service..."
  # `|| true`: don't abort the uninstall if the agent is already unloaded
  launchctl unload "$LAUNCH_AGENT_PLIST" 2>/dev/null || true
  log_info "✓ Service stopped"
else
  log_info "Service not running"
fi

# Remove LaunchAgent plist
if [ -f "$LAUNCH_AGENT_PLIST" ]; then
  log_info "Removing LaunchAgent configuration..."
  rm "$LAUNCH_AGENT_PLIST"
  log_info "✓ LaunchAgent removed"
else
  log_info "LaunchAgent plist not found"
fi

# Remove binary
if [ -f "$BIN_DIR/model-runner" ]; then
  log_info "Removing model-runner binary..."
  rm "$BIN_DIR/model-runner"
  log_info "✓ Binary removed"
else
  log_info "Binary not found at $BIN_DIR/model-runner"
fi

# Remove docker model CLI plugin
if [ -f "$CLI_PLUGINS_DIR/docker-model" ]; then
  log_info "Removing docker model CLI plugin..."
  rm "$CLI_PLUGINS_DIR/docker-model"
  log_info "✓ CLI plugin removed"
else
  log_info "CLI plugin not found"
fi

# Remove symlinks (entire directory created by the installer)
if [ -d "$SYMLINK_DIR" ]; then
  log_info "Removing llama-server symlinks..."
  rm -rf "$SYMLINK_DIR"
  log_info "✓ Symlinks removed"
else
  log_info "Symlink directory not found"
fi
# Remove source directory (optional — prompts the user first)
if [ -d "$BUILD_DIR" ]; then
  log_warn "Source directory found at: $BUILD_DIR"
  # NOTE(review): under `set -e` this `read` aborts the script when stdin
  # is closed (non-interactive run) — presumably intentional; confirm.
  read -p "Remove source directory? (y/n) " -n 1 -r
  echo
  if [[ $REPLY =~ ^[Yy]$ ]]; then
    rm -rf "$BUILD_DIR"
    log_info "✓ Source directory removed"
  else
    log_info "Keeping source directory"
  fi
fi

# Remove logs (rm -f already ignores missing files; the || true is a
# belt-and-braces guard for set -e)
if [ -f "$HOME/Library/Logs/model-runner.log" ] || [ -f "$HOME/Library/Logs/model-runner.err" ]; then
  log_info "Removing logs..."
  rm -f "$HOME/Library/Logs/model-runner.log" 2>/dev/null || true
  rm -f "$HOME/Library/Logs/model-runner.err" 2>/dev/null || true
  log_info "✓ Logs removed"
fi

# Remove models directory (optional — may hold very large model downloads,
# so the user is asked before deleting)
MODELS_DIR="$HOME/.cache/docker-model-runner/models"
if [ -d "$MODELS_DIR" ]; then
  log_warn "Models directory found at: $MODELS_DIR"
  log_info "This may contain large downloaded model files"
  read -p "Remove models directory? (y/n) " -n 1 -r
  echo
  if [[ $REPLY =~ ^[Yy]$ ]]; then
    rm -rf "$MODELS_DIR"
    log_info "✓ Models directory removed"
  else
    log_info "Keeping models directory"
  fi
fi
# --- Final summary: report success and list what was deliberately kept ---
echo ""
log_info "✓ Docker Model Runner uninstalled successfully"
echo ""
log_info "═══════════════════════════════════════════════════════════"
log_info "The following were NOT removed (still installed):"
echo ""
log_info " ✓ llama.cpp (brew: llama-server)"
log_info " ✓ Docker CLI"
log_info " ✓ Colima"
log_info " ✓ Go compiler"
echo ""
log_info "Environment variables in shell profile:"
log_info " • MODEL_RUNNER_HOST (in ~/.zshrc or ~/.bashrc)"
echo ""
log_info "To remove the environment variable, edit your shell profile:"
log_info " nano ~/.zshrc # or ~/.bashrc"
log_info ""
log_info "And remove these lines:"
log_info " export MODEL_RUNNER_HOST=\"http://localhost:12434\""
log_info " export PATH=\"\$HOME/.local/bin:\$PATH\""
echo ""
log_info "To completely remove all dependencies:"
log_info " brew uninstall llama.cpp"
log_info " brew uninstall docker"
log_info " brew uninstall colima"
log_info " brew uninstall go"
log_info "═══════════════════════════════════════════════════════════"
echo ""
Loading…
Reference in a new issue