Trainer_LLM/.bash_history

358 lines
12 KiB
Bash
Raw Blame History

This file contains invisible Unicode characters

This file contains invisible Unicode characters that are indistinguishable to humans but may be processed differently by a computer. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

# --- Base system setup ---
# Disable cloud-init so it no longer reconfigures the host on every boot.
sudo systemctl stop cloud-init
sudo systemctl disable cloud-init
sudo systemctl mask cloud-init
sudo touch /etc/cloud/cloud-init.disabled
sudo reboot
# Hostname, timezone and locale.
hostnamectl
sudo hostnamectl set-hostname llm-node
timedatectl list-timezones | grep Berlin
sudo timedatectl set-timezone Europe/Berlin
sudo update-locale LANG=de_DE.UTF-8
# NOTE(review): the next two lines are sshd_config directives, not shell
# commands — they were apparently pasted at the prompt by mistake; the actual
# edit happens in the nano session below.
PermitRootLogin no
PasswordAuthentication yes
sudo nano /etc/ssh/sshd_config
sudo systemctl reload ssh
# Firewall: deny inbound by default, allow SSH only.
sudo apt install ufw -y
sudo ufw default deny incoming
sudo ufw default allow outgoing
sudo ufw allow OpenSSH
sudo ufw enable
sudo ufw status verbose
# --- Docker CE installation from the official Docker apt repository ---
sudo apt update
sudo apt install -y ca-certificates curl gnupg lsb-release
sudo install -m 0755 -d /etc/apt/keyrings
# Import Docker's GPG key and register the apt source for this Ubuntu release.
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg
echo "deb [arch=$(dpkg --print-architecture) \
signed-by=/etc/apt/keyrings/docker.gpg] \
https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt update
sudo apt install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
# Smoke tests.
docker --version
docker compose version
docker run hello-world
# NOTE(review): newgrp only helps once the user is in the 'docker' group;
# the 'usermod -aG docker' that grants it appears later in this history.
newgrp docker
docker run hello-world
# --- First API experiments, shell convenience, Ollama install ---
curl "http://localhost:8000/search?query=Mae-geri"
source /home/llmadmin/venvs/embedding/bin/activate
exit
# Auto-activate the venv in every new shell via .bash_aliases.
# NOTE(review): no alias named 'embedding' is defined anywhere here, so the
# bare 'embedding' call below presumably failed with "command not found".
echo 'source ~/venvs/embedding/bin/activate' >> ~/.bash_aliases
source ~/.bash_aliases
embedding
sudo nano /home/llmadmin/llm-api/app.py
ls
cd venvs
ls
cd embedding/
ls
cd /
ls
cd home
ls
cd llmadmin/
ls
# Create the FastAPI project directory and first app.py, then a manual run.
mkdir -p ~/llm-api
ls
cd llm-api/
nano app.py
mkdir logs
ls
uvicorn app:app --host 0.0.0.0 --port 8000
# Allow the current user to use Docker without sudo.
groups
sudo usermod -aG docker $USER
newgrp docker
# Install Ollama and run the mistral model.
curl -fsSL https://ollama.com/install.sh | sh
ollama --version
# NOTE(review): the next line is program OUTPUT ("ollama version is 0.10.1")
# pasted back into the prompt, not a command.
ollama version is 0.10.1
ollama run mistral
curl http://localhost:11434/api/tags
# --- Qdrant via docker compose, Python venv, embedding scripts, FastAPI ---
mkdir -p ~/docker/qdrant
cd ~/docker/qdrant
nano docker-compose.yml
docker compose up -d
docker ps
curl http://localhost:6333/collections
# Run Ollama as a systemd service so the model server survives reboots.
sudo systemctl enable ollama
sudo systemctl start ollama
# NOTE(review): --keep-alive expects a duration argument (e.g. "--keep-alive 5m");
# as written this likely errored.
ollama run mistral --keep-alive
sudo apt install python3 python3-pip -y
pip3 install --upgrade pip
# 1. Install venv (if not already present)
sudo apt install python3-venv -y
# 2. Create a new virtual environment, e.g. in the project directory
python3 -m venv ~/venvs/embedding
# 3. Activate the virtual environment
source ~/venvs/embedding/bin/activate
# 4. Upgrade pip (now allowed!)
pip install --upgrade pip
# 5. Install your embedding tools, e.g.
pip install sentence-transformers qdrant-client
# NOTE(review): 'sudo nano' creates root-owned files in the user's home
# directory; plain 'nano' would be sufficient and avoids permission issues.
sudo nano embed_to_qdrant.py
python embed_to_qdrant.py
sudo nano search_qdrant.py
python search_qdrant.py
sudo nano search_qdrant.py
python search_qdrant.py
sudo nano search_qdrant.py
python search_qdrant.py
pip install fastapi uvicorn
pip install requests
sudo nano app.py
uvicorn app:app --host 0.0.0.0 --port 8000
# --- API smoke tests and systemd unit for the FastAPI service ---
ls
rm app.py
ls
# POST two chunks to /embed, then query /search and /prompt.
curl -X POST http://localhost:8000/embed -H "Content-Type: application/json" -d '{"texts": ["Mae-geri ist ein gerader Tritt.", "Hikite stabilisiert die Technik."]}'
curl "http://localhost:8000/search?query=Wie tritt man Mae-geri?"
curl -X POST http://localhost:8000/prompt -H "Content-Type: application/json" -d '{"query": "Wie wird Mae-geri ausgeführt?"}'
curl "http://localhost:8000/search?query=Wie tritt man Mae-geri?"
# URL-encode the query — the unencoded space in the variant above breaks it.
curl "http://localhost:8000/search?query=Wie%20tritt%20man%20Mae-geri?"
curl --get http://localhost:8000/search --data-urlencode "query=Wie tritt man Mae-geri?"
# NOTE(review): the next two lines are bare paths typed at the prompt, not
# commands; executing a directory or a non-executable .py fails.
/home/llmadmin/llm-api/app.py
/home/llmadmin/venvs/embedding/
/home/llmadmin/venvs/embedding/bin/python --version
cd /
cd ~/llm-api
uvicorn app:app --host 0.0.0.0 --port 8000
# Install and enable the llm-api systemd service.
sudo nano /etc/systemd/system/llm-api.service
sudo systemctl daemon-reexec
sudo systemctl daemon-reload
sudo systemctl enable llm-api
sudo systemctl start llm-api
sudo systemctl status llm-api
deactivate
# --- Post-setup verification and reboot survival test ---
ls
cd /
ls
cd home
ls
cd llmadmin/
ls
cd docker/
ls
cd q
cd qdrant/
ls
nano docker-compose.yml
# NOTE(review): 'restart' is not a valid command — 'sudo reboot' (used a few
# lines below) was presumably intended.
sudo restart
sudo systemctl status llm-api
docker ps
curl http://localhost:8000/search?query=test
cd /
curl http://localhost:8000/search?query=test
sudo reboot
# Verify the service and the qdrant container come back up after reboot.
sudo systemctl status llm-api
docker ps
curl http://localhost:8000/search?query=test
sudo systemctl status llm-api
docker ps
curl http://localhost:8000/search?query=test
docker ps
curl http://localhost:8000/search?query=test
# NOTE(review): the following lines, down to and including the JSON search
# result, are terminal OUTPUT (systemctl status, uvicorn log, docker ps, curl
# response) that was pasted back into the shell — they are not commands.
● llm-api.service - FastAPI Embedding API (LLM)
Aug 04 13:41:14 llm-node systemd[1]: Started llm-api.service - FastAPI Embeddin>
Aug 04 13:41:23 llm-node uvicorn[1312]: INFO: Started server process [1312]
Aug 04 13:41:23 llm-node uvicorn[1312]: INFO: Waiting for application start>
Aug 04 13:41:23 llm-node uvicorn[1312]: INFO: Application startup complete.
Aug 04 13:41:23 llm-node uvicorn[1312]: INFO: Uvicorn running on http://0.0>
Aug 04 13:42:05 llm-node uvicorn[1312]: /home/llmadmin/venvs/embedding/lib/pyth>
Aug 04 13:42:05 llm-node uvicorn[1312]: return forward_call(*args, **kwargs)
Aug 04 13:42:05 llm-node uvicorn[1312]: INFO: 127.0.0.1:58988 - "GET /searc>
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
30dfd2e38f0b qdrant/qdrant "./entrypoint.sh" 2 hours ago Up About a minute 0.0.0.0:6333->6333/tcp, [::]:6333->6333/tcp, 6334/tcp qdrant
[{"score":0.105378054,"text":"Hikite stabilisiert die Technik."},{"score":0.067576095,"text":"Ein Kumite-Drill verbessert die Reaktionsgeschwindigkeit."},{"score":0.042029735,"text":"Mae-geri ist ein gerader Tritt."}](embedding) llmadmin@llm-node:~$
# NOTE(review): 'deaktivate' is a typo for 'deactivate' (corrected below);
# the line after it is pasted log output fused with a typed 'deactivate'.
deaktivate
Aug 04 13:41:23 llm-node uvicorn[1312]: INFO: Uvicorn running on http://0.0>deactivate
deaktivate
deactivate
# Service runs but port 8000 is unreachable from outside — check the
# listener, follow the journal, then open the port in ufw.
sudo systemctl status llm-api
docker ps
curl http://localhost:8000/search?query=test
sudo systemctl status llm-api
sudo ss -tuln | grep 8000
sudo journalctl -fu llm-api
sudo ufw status verbose
sudo ufw allow 8000/tcp
sudo ufw reload
sudo ufw status verbose
cd home
ls
cd llm-api
nano app.py
cd ~/llm-api
uvicorn app:app --host 0.0.0.0 --port 8000
source ~/venvs/embedding/bin/activate
cd ~/llm-api
uvicorn app:app --host 0.0.0.0 --port 8000
sudo reboot
# --- Debugging the uvicorn dev server: wrong module name + port conflicts ---
uvicorn app:app --host 0.0.0.0 --port 8000
cd llm-api/
uvicorn app:app --host 0.0.0.0 --port 8000
# NOTE(review): 'llm-api' cannot be a Python module name (hyphen); the
# working module is 'llm_api', used further below.
uvicorn llm-api:app --host 0.0.0.0 --port 8000
ls
./test_llm_api.sh
pkill -f "uvicorn llm_api:app"
uvicorn llm-api:app --host 0.0.0.0 --port 8000
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000 --log-level debug
uvicorn app:app --host 0.0.0.0 --port 8000
pkill -f "uvicorn llm_api:app"
# Find and kill whatever process still holds port 8000.
lsof -i TCP:8000 -sTCP:LISTEN
kill 52619
lsof -i TCP:8000 -sTCP:LISTEN
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
lsof -i TCP:8000 -sTCP:LISTEN
./test_llm_api.sh
pkill -f "uvicorn llm_api:app"
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
./test_llm_api.sh
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
./test_llm_api.sh
pkill -f "uvicorn llm_api:app"
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
lsof -i TCP:8000 -sTCP:LISTEN
kill 54019
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
pkill -f "uvicorn llm_api:app"
lsof -i TCP:8000 -sTCP:LISTEN
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
# Kill all processes listening on port 8000
lsof -ti TCP:8000 | xargs -r kill -9
lsof -i TCP:8000 -sTCP:LISTEN
# in the llm-api directory with the venv activated
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000 &
SERVER_PID=$!
echo "Uvicorn läuft als PID $SERVER_PID"
lsof -ti TCP:8000 | xargs -r kill -9
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000 &
# Install NLP/PDF dependencies and download the NLTK punkt tokenizer data.
pip install nltk
python - <<EOF
import nltk
nltk.download('punkt')
EOF
pip install pdfplumber
pip install pillow
# inside the virtual environment
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
# NOTE(review): the next four lines are a pasted shell prompt plus uvicorn
# output ("Address already in use"), not commands.
(embedding) llmadmin@llm-node:~$ # im virtuellen Environment
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
INFO: Will watch for changes in these directories: ['/home/llmadmin']
ERROR: [Errno 98] Address already in use
lsof -i TCP:8000 -sTCP:LISTEN
kill 27023
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
# --- More port cleanup, firewall, direct endpoint tests, test script ---
lsof -i TCP:8000 -sTCP:LISTEN
cd llm-api/
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
lsof -i TCP:8000 -sTCP:LISTEN
kill 48457
lsof -i TCP:8000 -sTCP:LISTEN
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
lsof -i TCP:8000 -sTCP:LISTEN
sudo ufw allow 8000/tcp
sudo ufw status
lsof -i TCP:8000 -sTCP:LISTEN
kill 48507
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
# Exercise the API endpoints directly: schema, /embed (two payload shapes),
# and /search with properly URL-encoded parameters.
curl -i http://127.0.0.1:8000/openapi.json
curl -X POST http://127.0.0.1:8000/embed -H "Content-Type: application/json" -d '{"text": "Test"}'
curl -i -X POST http://127.0.0.1:8000/embed -H "Content-Type: application/json" -d '{
"collection": "test",
"chunks": [
{
"text": "Das ist ein Testtext.",
"source": "unit-test"
}
]
}'
curl -G http://127.0.0.1:8000/search --data-urlencode "query=Test"
curl -i -G http://127.0.0.1:8000/search --data-urlencode "query=Test" -H "Accept: application/json"
curl -i -G http://127.0.0.1:8000/search --data-urlencode "query=Test" --data-urlencode "collection=test" -H "Accept: application/json"
lsof -i TCP:8000 -sTCP:LISTEN
kill 48633
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
lsof -i TCP:8000 -sTCP:LISTEN
kill 49437
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
lsof -i TCP:8000 -sTCP:LISTEN
# Make the test script executable and run it.
cd ~/llm-api
chmod +x test_llm_api.sh
./test_llm_api.sh
chmod +x ~/llm-api/test_llm_api.sh
cd ~/llm-api
./test_llm_api.sh
nano test_llm_api.sh
./test_llm_api.sh
# Make sure OLLAMA_URL is set correctly in the current shell environment (the default is already /api/generate).
pkill -f "uvicorn llm_api:app" # stop the old process
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
# --- Repeated restart/test cycles, then a direct Ollama generate call ---
cd ~/llm-api
./test_llm_api.sh
pkill -f "uvicorn llm_api:app"
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
pkill -f "uvicorn llm_api:app" # stop the old process
lsof -i TCP:8000 -sTCP:LISTEN
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000
pkill -f "uvicorn llm_api:app"
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000 --log-level debug
pkill -f "uvicorn llm_api:app"
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000 --log-level debug
./test_llm_api.sh
pkill -f "uvicorn llm_api:app"
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000 --log-level debug
./test_llm_api.sh
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000 --log-level debug
./test_llm_api.sh
pkill -f "uvicorn llm_api:app"
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000 --log-level debug
./test_llm_api.sh
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000 --log-level debug
./test_llm_api.sh
pkill -f "uvicorn llm_api:app"
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000 --log-level debug
./test_llm_api.sh
pkill -f "uvicorn llm_api:app" # stop the old process
lsof -i TCP:8000 -sTCP:LISTEN
# NOTE(review): "--p ort" (stray space) is not a valid flag — uvicorn rejects
# it; the typo is repeated once below before being corrected.
uvicorn llm_api:app --reload --host 0.0.0.0 --p ort 8000
deactivate
uvicorn llm_api:app --reload --host 0.0.0.0 --p ort 8000
./test_llm_api.sh
# Stop server if still running
lsof -ti TCP:8000 | xargs -r kill -9
# Restart the server
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000 &
# Wait briefly
sleep 2
# Tests
./test_llm_api.sh
lsof -i TCP:8000 -sTCP:LISTEN
lsof -ti TCP:8000 | xargs -r kill -9
lsof -i TCP:8000 -sTCP:LISTEN
uvicorn llm_api:app --reload --host 0.0.0.0 --port 8000 &
deactivate
# Call Ollama's generate endpoint directly, bypassing the FastAPI layer.
curl -i -X POST http://127.0.0.1:11434/api/generate -H "Content-Type: application/json" -d '{
"model": "mistral:latest",
"prompt": "Hallo",
"stream": false
}'
# --- Edit knowledge-base text files, then restart and watch the service ---
cd knowledge/
cd karatetrainer/
nano Lateral.txt
ls
cd _imported/
ls
cd ..
ls
# NOTE(review): 'Literal.txt' vs 'Lateral.txt' — one of the two names is
# probably a typo; verify which file the importer actually reads.
nano Literal.txt
nano Lateral.txt
cd llm-api/
sudo systemctl restart llm-api
sudo journalctl -fu llm-api
sudo systemctl status llm-api