scripts/ollama_tool_runner.py added

Lars 2025-10-07 13:39:17 +02:00
parent 104ddddb96
commit d797957abc


@@ -0,0 +1,119 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
scripts/ollama_tool_runner.py: minimal tool caller for Ollama + mindnet

Purpose:
    Runs the tool-call loop for mindnet_query / mindnet_subgraph.
    Uses /tools/ollama (schema) and your FastAPI app (/query, /graph).

ENV:
    OLLAMA=http://127.0.0.1:11434
    API_BASE=http://127.0.0.1:8000
    MODEL=llama3.1

Usage:
    python3 scripts/ollama_tool_runner.py "Question for the agent"

Version:
    0.1.0 (initial version)

Date:
    2025-10-07
"""
from __future__ import annotations
import os, sys, json, requests
OLLAMA = os.getenv("OLLAMA", "http://127.0.0.1:11434")
API_BASE = os.getenv("API_BASE", "http://127.0.0.1:8000")
MODEL = os.getenv("MODEL", "llama3.1")
def load_tools():
r = requests.get(f"{API_BASE}/tools/ollama", timeout=30)
r.raise_for_status()
return r.json()["tools"]
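
# The exact tool schema comes from the API's /tools/ollama endpoint. As a rough,
# unverified sketch (argument names taken from execute_tool below, everything
# else assumed), each entry follows Ollama's function-tool format:
#   {"type": "function",
#    "function": {"name": "mindnet_query",
#                 "description": "...",
#                 "parameters": {"type": "object",
#                                "properties": {"query": {"type": "string"},
#                                               "top_k": {"type": "integer"},
#                                               "expand_depth": {"type": "integer"},
#                                               "edge_types": {"type": "array"},
#                                               "filters": {"type": "object"}}}}}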
def call_model(messages, tools):
payload = {
"model": MODEL,
"messages": messages,
"tools": tools,
"stream": False,
"tool_choice": "auto",
}
r = requests.post(f"{OLLAMA}/api/chat", json=payload, timeout=180)
r.raise_for_status()
return r.json()
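
# When the model decides to call a tool, the /api/chat response carries it in
# message.tool_calls; roughly (illustrative values only):
#   {"message": {"role": "assistant", "content": "",
#                "tool_calls": [{"function": {"name": "mindnet_query",
#                                             "arguments": {"query": "...", "top_k": 10}}}]}}
# main() below inspects exactly these fields.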
def execute_tool(name: str, arguments: dict):
if name == "mindnet_query":
        # map expand_depth/edge_types onto the /query request body
body = {
"mode": "hybrid",
"top_k": int(arguments.get("top_k", 10)),
"expand": {
"depth": int(arguments.get("expand_depth", 1)),
"edge_types": arguments.get("edge_types") or ["references","belongs_to","prev","next"],
},
"filters": arguments.get("filters"),
}
if "query_vector" in arguments:
body["query_vector"] = arguments["query_vector"]
if "query" in arguments:
body["query"] = arguments["query"]
r = requests.post(f"{API_BASE}/query", json=body, timeout=120)
r.raise_for_status()
return r.json()
if name == "mindnet_subgraph":
params = {
"depth": int(arguments.get("depth", 1)),
}
edge_types = arguments.get("edge_types")
if edge_types:
            # requests expands a list value into repeated 'edge_types' query parameters
params["edge_types"] = edge_types
note_id = arguments["note_id"]
r = requests.get(f"{API_BASE}/graph/{note_id}", params=params, timeout=60)
r.raise_for_status()
return r.json()
    raise ValueError(f"Unknown tool: {name}")
def main():
if len(sys.argv) < 2:
print("Usage: python3 scripts/ollama_tool_runner.py \"Deine Frage\"")
return 2
user_prompt = sys.argv[1]
tools = load_tools()
messages = [{"role": "user", "content": user_prompt}]
while True:
resp = call_model(messages, tools)
msg = resp.get("message") or {}
tool_calls = msg.get("tool_calls") or []
if not tool_calls:
            # final answer, no further tool calls requested
            print("\n=== Answer ===\n")
            print(msg.get("content", "").strip())
return 0
        # Append the assistant turn (including its tool calls) to the history first,
        # then execute the tool calls one after another.
        messages.append(msg)
        for tc in tool_calls:
            name = tc["function"]["name"]
            args = tc["function"].get("arguments") or {}
            if isinstance(args, str):
                try:
                    args = json.loads(args)
                except Exception:
                    args = {"raw": args}
            print(f"[tool] {name}({args})")
            result = execute_tool(name, args)
            # hand the tool result back to the model
            messages.append({"role": "tool", "content": json.dumps(result), "name": name})
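        # At this point the history holds (shape only, values illustrative):
        #   user prompt -> assistant turn with tool_calls -> one "tool" message per call.
        # The next call_model() round sees these results and can either request
        # further tools or produce the final answer.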
if __name__ == "__main__":
sys.exit(main())