# chat-svc: LLM, Mastra, Writer, Classifier, Researcher
# docs/architecture: 02-k3s-microservices-spec.md
# The frontend proxies /api/chat here when CHAT_SVC_URL is set
apiVersion: apps/v1
kind: Deployment
metadata:
  name: chat-svc
  namespace: gooseek
spec:
  replicas: 2
  selector:
    matchLabels:
      app: chat-svc
  template:
    metadata:
      labels:
        app: chat-svc
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "3005"
        prometheus.io/path: "/metrics"
    spec:
      containers:
        - name: chat-svc
          image: gooseek/chat-svc:latest
          ports:
            - containerPort: 3005
          env:
            - name: PORT
              value: "3005"
            - name: DATA_DIR
              value: "/app/data"
            - name: SEARCH_SVC_URL
              value: "http://search-svc.gooseek:3001"
            - name: MEMORY_SVC_URL
              value: "http://memory-svc.gooseek:3010"
            - name: LLM_SVC_URL
              value: "http://llm-svc.gooseek:3020"
            - name: LLM_PROVIDER
              valueFrom:
                configMapKeyRef:
                  name: chat-svc-config
                  key: llm-provider
            - name: OLLAMA_BASE_URL
              valueFrom:
                configMapKeyRef:
                  name: chat-svc-config
                  key: ollama-base-url
                  optional: true
            - name: OPENAI_API_KEY
              valueFrom:
                secretKeyRef:
                  name: llm-credentials
                  key: openai-api-key
                  optional: true
          volumeMounts:
            - name: chat-data
              mountPath: /app/data
          livenessProbe:
            httpGet:
              path: /health
              port: 3005
            initialDelaySeconds: 10
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /ready
              port: 3005
            initialDelaySeconds: 5
            periodSeconds: 5
          resources:
            requests:
              cpu: 200m
              memory: 512Mi
            limits:
              cpu: 2000m
              memory: 2Gi
      volumes:
        - name: chat-data
          emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  name: chat-svc
  namespace: gooseek
spec:
  selector:
    app: chat-svc
  ports:
    - port: 3005
      targetPort: 3005
---
# ConfigMap for the LLM settings; secrets live in llm-credentials (optional)
# If Ollama is not available in the cluster, set an external URL here or use LLM_PROVIDER=openai
apiVersion: v1
kind: ConfigMap
metadata:
  name: chat-svc-config
  namespace: gooseek
data:
  llm-provider: "ollama"
  ollama-base-url: "http://host.docker.internal:11434"
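
---
# Illustrative variant (an assumption, not part of the spec above): to run
# against OpenAI instead of a local Ollama, the same ConfigMap can set
# llm-provider to "openai". Since OLLAMA_BASE_URL is declared optional in the
# Deployment, the ollama-base-url key can then be dropped entirely. Kept
# commented out so this file does not define chat-svc-config twice:
#
# apiVersion: v1
# kind: ConfigMap
# metadata:
#   name: chat-svc-config
#   namespace: gooseek
# data:
#   llm-provider: "openai"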
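
---
# Minimal sketch of the optional llm-credentials Secret referenced by the
# Deployment's secretKeyRef. The name (llm-credentials) and key
# (openai-api-key) come from the manifest above; the value below is a
# placeholder. In practice the Secret would be created out of band, e.g.:
#   kubectl -n gooseek create secret generic llm-credentials \
#     --from-literal=openai-api-key=<your-key>
apiVersion: v1
kind: Secret
metadata:
  name: llm-credentials
  namespace: gooseek
type: Opaque
stringData:
  openai-api-key: "<replace-with-a-real-key>"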