async
This commit is contained in:
parent
e7cec6acc6
commit
4ed04039e5
@@ -1,32 +1,97 @@
 """
-app/services/embeddings_client.py — Text→Embedding (WP-04)
+app/services/embeddings_client.py — Text→Embedding Service
 
 Purpose:
-    Returns 384-d embeddings for text queries (lazy load, once per process).
-    Default: Sentence-Transformers (MODEL_NAME from app.config.Settings).
-    Note: no network access; uses locally installed models.
+    Returns embeddings for text queries.
+    - Legacy mode (sync): uses local Sentence-Transformers (CPU).
+    - Modern mode (async/class): uses the Ollama API (HTTP) for non-blocking operations (WP-11).
 
 Compatibility:
-    Python 3.12+, sentence-transformers 5.x
+    Python 3.12+, sentence-transformers 5.x, httpx
 Version:
-    0.1.0 (initial version)
+    0.2.0 (extended with the async EmbeddingsClient)
 As of:
-    2025-10-07
+    2025-12-11
 References:
     - app/core/retriever.py (uses embed_text_if_needed)
     - app/config.py (MODEL_NAME, VECTOR_SIZE)
 Usage:
     from app.services.embeddings_client import embed_text
 Changelog:
     0.1.0 (2025-10-07) – initial version.
 """
 
 from __future__ import annotations
-from typing import List
+import os
+import logging
+import httpx
+from typing import List, Optional
 from functools import lru_cache
 from app.config import get_settings
 
+logger = logging.getLogger(__name__)
+
+
+# ==============================================================================
+# PART 1: NEW ASYNC CLASS (for the ingestion API / WP-11)
+# ==============================================================================
+
+class EmbeddingsClient:
+    """
+    Async client for embeddings via Ollama (or compatible APIs).
+    Avoids blocking the event loop during heavy computations.
+    """
+
+    def __init__(self):
+        self.settings = get_settings()
+        # Fall back to environment variables if settings are not loaded
+        self.base_url = os.getenv("MINDNET_OLLAMA_URL", "http://127.0.0.1:11434")
+        # Use the explicit embedding model, or fall back to the LLM model
+        self.model = os.getenv("MINDNET_EMBEDDING_MODEL", os.getenv("MINDNET_LLM_MODEL", "phi3:mini"))
+
+    async def embed_query(self, text: str) -> List[float]:
+        """Creates an embedding for a single text."""
+        return await self._request_embedding(text)
+
+    async def embed_documents(self, texts: List[str]) -> List[List[float]]:
+        """
+        Creates embeddings for a list of texts.
+        Reuses one session for more efficient requests.
+        """
+        vectors = []
+        async with httpx.AsyncClient(timeout=60.0) as client:
+            for text in texts:
+                vec = await self._request_embedding_with_client(client, text)
+                vectors.append(vec)
+        return vectors
+
+    async def _request_embedding(self, text: str) -> List[float]:
+        """Internal helper for a single request (one-off client)."""
+        async with httpx.AsyncClient(timeout=30.0) as client:
+            return await self._request_embedding_with_client(client, text)
+
+    async def _request_embedding_with_client(self, client: httpx.AsyncClient, text: str) -> List[float]:
+        """Performs the actual request against Ollama."""
+        if not text or not text.strip():
+            return []
+
+        url = f"{self.base_url}/api/embeddings"
+        try:
+            response = await client.post(
+                url,
+                json={
+                    "model": self.model,
+                    "prompt": text
+                }
+            )
+            response.raise_for_status()
+            data = response.json()
+            return data.get("embedding", [])
+        except Exception as e:
+            logger.error(f"Embedding error (Ollama) for model {self.model}: {e}")
+            # Fallback: empty list so the process does not crash (the caller filters it out)
+            return []
+
+
+# ==============================================================================
+# PART 2: LEGACY FUNCTIONS (for existing sync modules / CLI)
+# ==============================================================================
+
 # Lazy import so test runs without model loading stay fast
 def _load_model():
+    # Log before loading, since this needs a lot of RAM
+    logger.info("Loading local SentenceTransformer model (Legacy Mode)...")
     from sentence_transformers import SentenceTransformer  # import here, not at top level
     s = get_settings()
     return SentenceTransformer(s.MODEL_NAME, device="cpu")
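For orientation, here is a minimal usage sketch for the new async class (not part of the commit). It assumes the module path above, a reachable Ollama instance at the default MINDNET_OLLAMA_URL, and a model that supports /api/embeddings; everything except EmbeddingsClient and its methods is illustrative.

    # Hypothetical driver script. Failed requests come back as [] and should
    # be filtered by the caller, as the class contract above notes.
    import asyncio

    from app.services.embeddings_client import EmbeddingsClient


    async def main() -> None:
        client = EmbeddingsClient()
        query_vec = await client.embed_query("what is vector search?")
        print(f"query dims: {len(query_vec)}")

        doc_vecs = await client.embed_documents(["first chunk", "second chunk"])
        usable = [v for v in doc_vecs if v]  # drop empty vectors from failed requests
        print(f"usable documents: {len(usable)}")


    if __name__ == "__main__":
        asyncio.run(main())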
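Note that embed_documents awaits each request sequentially, so the shared session saves connection setup but not latency. If the Ollama backend tolerates parallel requests (an assumption, not something this commit establishes), a gather-based variant could look like the following sketch; order is preserved and failed requests still yield [].

    import asyncio
    from typing import List

    import httpx

    from app.services.embeddings_client import EmbeddingsClient


    async def embed_documents_concurrent(ec: EmbeddingsClient, texts: List[str]) -> List[List[float]]:
        # asyncio.gather returns results in input order; each failed request
        # yields [] because _request_embedding_with_client swallows errors
        async with httpx.AsyncClient(timeout=60.0) as client:
            tasks = [ec._request_embedding_with_client(client, t) for t in texts]
            return list(await asyncio.gather(*tasks))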
@@ -37,10 +102,19 @@ def _cached_model():
 
 def embed_text(text: str) -> List[float]:
     """
-    Creates a 384-d vector (or per Settings.VECTOR_SIZE) for the given text.
+    LEGACY: creates a vector synchronously via Sentence-Transformers.
+    Used by the retriever and older CLI scripts, among others.
     """
     if not text or not text.strip():
-        raise ValueError("embed_text: empty text")
-    model = _cached_model()
-    vec = model.encode([text], normalize_embeddings=True)[0]
-    return vec.astype(float).tolist()
+        # For consistency with the new class, log a warning instead of raising
+        # raise ValueError("embed_text: empty text") -> deprecated
+        logger.warning("embed_text called with empty string")
+        return []
+
+    try:
+        model = _cached_model()
+        vec = model.encode([text], normalize_embeddings=True)[0]
+        return vec.astype(float).tolist()
+    except Exception as e:
+        logger.error(f"Legacy embed_text failed: {e}")
+        return []
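One behavioral note on this second hunk: embed_text used to raise ValueError on empty input but now returns [], including on model failure, so existing callers have to guard against empty vectors before handing them to the vector store. A caller-side sketch follows; safe_query_vector is a hypothetical helper, not code from this repository.

    from app.services.embeddings_client import embed_text


    def safe_query_vector(query: str) -> list[float] | None:
        # embed_text no longer raises: [] means empty input or a failed encode
        vec = embed_text(query)
        return vec if vec else None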