"""
|
|
app/services/embeddings_client.py
|
|
Client für die Vektorisierung von Texten via Ollama API.
|
|
|
|
Version: 2.4.0 (Async + Dedicated Embedding Model Support)
|
|
"""

from __future__ import annotations

import logging
import os
from functools import lru_cache
from typing import List

import httpx

from app.config import get_settings

logger = logging.getLogger(__name__)


class EmbeddingsClient:
    """
    Async client for embeddings via Ollama.

    Separates the chat model (generation) from the embedding model (semantics).
    """

    def __init__(self):
        self.settings = get_settings()
        self.base_url = os.getenv("MINDNET_OLLAMA_URL", "http://127.0.0.1:11434")

        # Read the configuration for the dedicated embedding model.
        self.model = os.getenv("MINDNET_EMBEDDING_MODEL")

        # Fall back to the LLM if no embedding model is set (not recommended for prod).
        if not self.model:
            self.model = os.getenv("MINDNET_LLM_MODEL", "phi3:mini")
            logger.warning(f"No MINDNET_EMBEDDING_MODEL set. Falling back to LLM '{self.model}'. Quality might suffer.")
        else:
            logger.info(f"EmbeddingsClient initialized with model: {self.model}")

    async def embed_query(self, text: str) -> List[float]:
        """Create an embedding for a single text (e.g. a search query)."""
        return await self._request_embedding(text)

    async def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """
        Create embeddings for a list of texts (e.g. chunks during import).

        Reuses one HTTP session across the whole batch for performance.
        """
        vectors = []
        # Timeout raised for batch processing.
        async with httpx.AsyncClient(timeout=120.0) as client:
            for text in texts:
                vec = await self._request_embedding_with_client(client, text)
                vectors.append(vec)
        return vectors
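
    # Note: embed_documents sends its requests sequentially. A concurrent
    # variant (a sketch, not part of this module; it assumes the Ollama
    # server tolerates parallel embedding requests) could replace the loop:
    #
    #     import asyncio
    #     vectors = await asyncio.gather(
    #         *(self._request_embedding_with_client(client, t) for t in texts)
    #     )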

    async def _request_embedding(self, text: str) -> List[float]:
        """Internal helper method for a single request."""
        async with httpx.AsyncClient(timeout=30.0) as client:
            return await self._request_embedding_with_client(client, text)

    async def _request_embedding_with_client(self, client: httpx.AsyncClient, text: str) -> List[float]:
        """Perform the actual HTTP request against Ollama."""
        if not text or not text.strip():
            return []

        url = f"{self.base_url}/api/embeddings"
        try:
            response = await client.post(
                url,
                json={
                    "model": self.model,
                    "prompt": text,
                },
            )

            if response.status_code == 404:
                logger.error(f"Model '{self.model}' not found in Ollama. Run: ollama pull {self.model}")
                return []

            response.raise_for_status()
            data = response.json()
            return data.get("embedding", [])

        except Exception as e:
            logger.error(f"Embedding error (Model: {self.model}): {e}")
            # Return an empty list so a batch run does not crash outright.
            # The caller (IngestionService) must check whether the vector is empty.
            return []
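
    # Caller-side handling of the empty-vector contract (a sketch; the exact
    # IngestionService API is an assumption, only the contract is from here):
    #
    #     vectors = await embeddings_client.embed_documents(chunks)
    #     usable = [(chunk, vec) for chunk, vec in zip(chunks, vectors) if vec]
    #     # Empty vectors mark failed requests; skip or retry those chunks.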


# --- LEGACY SUPPORT (synchronous) ---
# Only used by old scripts or tests without async support.


@lru_cache(maxsize=1)
def _cached_legacy_model():
    from sentence_transformers import SentenceTransformer

    s = get_settings()
    # Uses the model from the settings here, usually CPU-based.
    return SentenceTransformer(s.MODEL_NAME, device="cpu")


def embed_text(text: str) -> List[float]:
    """LEGACY: Synchronous embedding via SentenceTransformers (CPU)."""
    if not text or not text.strip():
        return []
    try:
        return _cached_legacy_model().encode([text], normalize_embeddings=True)[0].tolist()
    except Exception as e:
        logger.error(f"Legacy embed_text failed: {e}")
        return []
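

# Minimal manual smoke test (a sketch; it assumes a local Ollama server is
# reachable under MINDNET_OLLAMA_URL and the configured model has been pulled).
# Not exercised by any production code path.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        client = EmbeddingsClient()
        vector = await client.embed_query("hello world")
        print(f"model={client.model} dimensions={len(vector)}")

    asyncio.run(_demo())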