Dateien nach "app/core" hochladen
All checks were successful
Deploy mindnet to llm-node / deploy (push) Successful in 4s
All checks were successful
Deploy mindnet to llm-node / deploy (push) Successful in 4s
This commit is contained in:
parent
42216865e2
commit
a4548a7ee1
|
|
@ -217,3 +217,84 @@ def search_chunks_by_vector(client: QdrantClient, prefix: str, vector: List[floa
|
|||
for r in res:
|
||||
out.append((str(r.id), float(r.score), dict(r.payload or {})))
|
||||
return out
|
||||
|
||||
|
||||
# --- Edge retrieval helper ---
|
||||
|
||||
def get_edges_for_sources(
    client: QdrantClient,
    prefix: str,
    source_ids: Iterable[str],
    edge_types: Optional[Iterable[str]] = None,
    limit: int = 2048,
) -> List[Dict[str, Any]]:
    """Fetch edge payloads from the <prefix>_edges collection.

    Args:
        client: QdrantClient instance.
        prefix: Mindnet collection prefix (e.g. "mindnet").
        source_ids: Iterable of source_id values (typically chunk_ids or note_ids).
        edge_types: Optional iterable of edge kinds (e.g. ["references", "depends_on"]).
            If None (or empty), edges of every kind are returned.
        limit: Hard cap on the number of edge payloads returned.

    Returns:
        A list of edge payload dicts, e.g.:
            {
                "note_id": "...",
                "chunk_id": "...",
                "kind": "references" | "depends_on" | ...,
                "scope": "chunk",
                "source_id": "...",
                "target_id": "...",
                "rule_id": "...",
                "confidence": 0.7,
                ...
            }
    """
    wanted_sources = [str(s) for s in source_ids]
    if not wanted_sources or limit <= 0:
        return []

    # Resolve the edges collection name for this prefix.
    _, _, edges_collection = _names(prefix)

    # Mandatory filter: source_id IN wanted_sources; optional: kind IN edge_types.
    by_source = _filter_any("source_id", wanted_sources)
    by_kind = (
        _filter_any("kind", [str(k) for k in edge_types]) if edge_types else None
    )
    combined = _merge_filters(by_source, by_kind)

    payloads: List[Dict[str, Any]] = []
    cursor = None  # scroll offset returned by the previous page; None starts at the beginning
    budget = int(limit)

    # Paginate with the scroll API; vectors are not needed, only payloads.
    while budget > 0:
        points, cursor = client.scroll(
            collection_name=edges_collection,
            scroll_filter=combined,
            limit=min(256, budget),
            with_payload=True,
            with_vectors=False,
            offset=cursor,
        )
        if not points:
            break

        for point in points:
            payloads.append(dict(point.payload or {}))
            budget -= 1
            if budget <= 0:
                break

        # A None cursor means the collection is exhausted.
        if cursor is None:
            break

    return payloads
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user