Ollama + Open WebUI on a Raspberry Pi
Install Ollama with the official script:
curl -fsSL https://ollama.com/install.sh | sh
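A quick sanity check after the install (on Linux the script registers Ollama as a systemd service):
ollama --version
systemctl status ollama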
Pull and test small models that fit in the Pi's RAM; run starts an interactive chat, stop unloads the model from memory:
ollama run deepseek-r1:1.5b
ollama stop deepseek-r1:1.5b
ollama run llama3.2:1b
ollama stop llama3.2:1b
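ollama run also accepts a prompt as an argument for a one-shot reply, for example:
ollama run llama3.2:1b "Explain in one sentence what a systemd service is."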
Edit the Ollama service so it reads variables from an env file:
sudo systemctl edit ollama.service
[Service]
EnvironmentFile=/path/to/envs/file
Create /path/to/envs/file with:
OLLAMA_HOST=0.0.0.0:11434
OLLAMA_MODELS=/path/to/models
If you set these variables in a shell instead of the env file, remember to export
them before calling ollama run.
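For example, to point the ollama client at the Pi from another machine (192.168.1.50 is a placeholder for the Pi's address):
export OLLAMA_HOST=192.168.1.50:11434
ollama run llama3.2:1b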
Reload systemd and restart Ollama:
sudo systemctl daemon-reload
sudo systemctl restart ollama
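Quick check that the server is reachable; the root endpoint should answer with a short "Ollama is running" message:
curl http://localhost:11434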
Set up Open WebUI as the web interface for chatting with the models. It is distributed as a container image and runs under Docker or Podman.
mkdir ~/OpenWebUI
cd ~/OpenWebUI
nano compose.yml
services:
  open-webui:
    image: ghcr.io/open-webui/open-webui:main
    container_name: open-webui
    volumes:
      - ./data:/app/backend/data
    ports:
      - 3030:8080
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
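Bring it up from the same directory (shown with docker compose; podman-compose should work the same way, assuming it is installed):
docker compose up -d
docker compose logs -f open-webui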
Alternatively, run it directly with Podman. Note that the :cuda tag and --gpus all only make sense on a machine with an NVIDIA GPU; on the Pi, drop --gpus and use the :main image.
podman network create open-webui
podman run -d \
-p 3030:8080 \
--gpus all \
-v open-webui:/app/backend/data \
--name open-webui \
--restart always \
--network open-webui \
ghcr.io/open-webui/open-webui:cuda
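Check the container and then open http://<pi-ip>:3030 in a browser; the first start can take a while:
podman ps
podman logs -f open-webui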
Open the firewall ports if ufw is active:
sudo ufw allow 11434 # Ollama API
sudo ufw allow 3030 # Open WebUI
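Confirm the rules took effect:
sudo ufw status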
Handy commands: list installed models, show currently loaded models, and inspect a model's details:
ollama list
ollama ps
ollama show llama3.2:1b
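The same server also answers REST calls (this is what Open WebUI talks to); a minimal example against the generate endpoint:
curl http://localhost:11434/api/generate -d '{
  "model": "llama3.2:1b",
  "prompt": "Why is the sky blue?",
  "stream": false
}'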