Files
gooseek/backend/deploy/k8s/ollama.yaml
home 7a40ff629e
Some checks failed
Build and Deploy GooSeek / build-and-deploy (push) Failing after 8m25s
feat: LLM routing by tier (free→Ollama, pro→Timeweb)
- Add tier-based provider routing in llm-svc
  - free tier → Ollama (local qwen3.5:9b)
  - pro/business → Timeweb Cloud AI
- Add /api/v1/embed endpoint for embeddings via Ollama
- Update Ollama client: qwen3.5:9b default, remove auth
- Add GenerateEmbedding() function for qwen3-embedding:0.6b
- Add Ollama K8s deployment with GPU support (RTX 4060 Ti)
- Add monitoring stack (Prometheus, Grafana, Alertmanager)
- Add Grafana dashboards for LLM and security metrics
- Update deploy.sh with monitoring and Ollama deployment

Made-with: Cursor
2026-03-03 02:25:22 +03:00

131 lines
2.8 KiB
YAML

# Ollama Deployment with GPU
# Requirements: the NVIDIA GPU Operator must be installed in the cluster
# (provides the `nvidia` RuntimeClass and the nvidia.com/gpu extended resource).
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ollama
  namespace: gooseek
  labels:
    app: ollama
    app.kubernetes.io/name: ollama
    app.kubernetes.io/part-of: gooseek
spec:
  # Single replica: the model cache PVC is ReadWriteOnce and the node has one GPU.
  replicas: 1
  selector:
    matchLabels:
      app: ollama
  template:
    metadata:
      labels:
        app: ollama
    spec:
      runtimeClassName: nvidia
      containers:
        # Ollama server (GPU only)
        - name: ollama
          # NOTE(review): `latest` is not reproducible — consider pinning a
          # specific release tag (e.g. ollama/ollama:0.x.y).
          image: ollama/ollama:latest
          ports:
            - containerPort: 11434
              name: http
          env:
            - name: OLLAMA_HOST
              value: "0.0.0.0:11434"
            # Keep loaded models resident for 24h to avoid reload latency.
            - name: OLLAMA_KEEP_ALIVE
              value: "24h"
            - name: OLLAMA_MODELS
              value: "/root/.ollama/models"
            # Concurrent request handling for SaaS workloads.
            - name: OLLAMA_NUM_PARALLEL
              value: "4"
            - name: OLLAMA_MAX_LOADED_MODELS
              value: "2"
            - name: OLLAMA_FLASH_ATTENTION
              value: "true"
            # GPU visibility for the NVIDIA container runtime.
            - name: NVIDIA_VISIBLE_DEVICES
              value: "all"
            - name: NVIDIA_DRIVER_CAPABILITIES
              value: "compute,utility"
          volumeMounts:
            - name: ollama-data
              mountPath: /root/.ollama
          resources:
            requests:
              cpu: 1000m
              memory: 8Gi
              # Extended resources must have requests == limits.
              nvidia.com/gpu: 1
            limits:
              cpu: 4000m
              memory: 16Gi
              nvidia.com/gpu: 1
          # Ollama answers GET / with a plain "Ollama is running" body,
          # so probing the root path is sufficient.
          livenessProbe:
            httpGet:
              path: /
              port: 11434
            initialDelaySeconds: 30
            periodSeconds: 30
            timeoutSeconds: 5
          readinessProbe:
            httpGet:
              path: /
              port: 11434
            initialDelaySeconds: 10
            periodSeconds: 10
            timeoutSeconds: 5
      volumes:
        - name: ollama-data
          persistentVolumeClaim:
            claimName: ollama-pvc
---
# Persistent storage for downloaded models (mounted at /root/.ollama).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ollama-pvc
  namespace: gooseek
spec:
  # RWO: only one node may mount this volume — matches replicas: 1 above.
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      # NOTE(review): 20Gi is tight for multiple multi-GB models — verify
      # against the model set actually pulled (qwen3.5:9b + embedding model).
      storage: 20Gi
---
# Cluster-internal Service exposing the Ollama HTTP API.
apiVersion: v1
kind: Service
metadata:
  name: ollama
  namespace: gooseek
spec:
  type: ClusterIP
  selector:
    app: ollama
  ports:
    - name: http
      port: 11434
      targetPort: 11434
---
# NetworkPolicy: only llm-svc and the model-loader job may reach ollama.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: ollama-access
  namespace: gooseek
spec:
  podSelector:
    matchLabels:
      app: ollama
  policyTypes:
    - Ingress
  ingress:
    # A single rule: traffic from either allowed pod selector (OR semantics)
    # is admitted only on the Ollama API port.
    - from:
        - podSelector:
            matchLabels:
              app: llm-svc
        - podSelector:
            matchLabels:
              app: ollama-model-loader
      ports:
        - protocol: TCP
          port: 11434