Spaces: Building
Upload 22 files

Files changed:
- .dockerignore +11 -11
- .gitattributes +35 -35
- Dockerfile +21 -21
- app.py +44 -44
- chat_handler.py +85 -85
- controllers/admin_controller.py +113 -81
- controllers/chat_controller.py +33 -33
- controllers/health_controller.py +13 -13
- controllers/test_controller.py +12 -12
- core.py +7 -6
- intent_api.py +198 -197
- intent_test_runner.py +85 -85
- intent_utils.py +174 -64
- llm_model.py +65 -65
- log.py +10 -10
- parse_llm_blocks.py +25 -25
- service_config.json +113 -113
- service_config.py +62 -62
- session.py +28 -28
.dockerignore
CHANGED (full file re-uploaded with identical content; shown once below)
__pycache__/
*.pyc
*.pyo
*.pyd
.git/
.vscode/
*.log
*.sqlite3
*.db
.DS_Store
.env
*.zip

.gitattributes
CHANGED (full file re-uploaded with identical content; shown once below)
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text

Dockerfile
CHANGED (full file re-uploaded with identical content; shown once below, comments translated)
FROM python:3.10-slim

# === Hugging Face Spaces-specific directories
RUN mkdir -p /data/chunks /data/projects /data/tokenized_chunks /data/zip_temp /data/output /app/.cache /app/.torch_cache && chmod -R 777 /data /app

# === Environment variables
ENV HF_HOME=/app/.cache \
    HF_DATASETS_CACHE=/app/.cache \
    HF_HUB_CACHE=/app/.cache \
    TORCH_HOME=/app/.torch_cache

# Directory for the PyTorch kernel cache
ENV TORCH_HOME=/app/.torch_cache

# ✅ Copy the whole project folder
COPY . .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# ✅ Command to run
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]

app.py
CHANGED (full file re-uploaded with identical content; shown once below, comments translated)
from fastapi import FastAPI
from controllers import chat_controller, test_controller, admin_controller, health_controller
from core import service_config, session_store, llm_models
from llm_model import LLMModel
from log import log
import os
import warnings

warnings.simplefilter(action='ignore', category=FutureWarning)

app = FastAPI()

app.include_router(health_controller.router)
app.include_router(chat_controller.router)
app.include_router(test_controller.router)
app.include_router(admin_controller.router)

BASE_PROJECTS_DIR = "/data/projects"

def load_project(project_name, config, project_path):
    llm_config = config.get_project_llm_config(project_name)
    model_instance = LLMModel()
    model_instance.setup(config, llm_config, project_path)

    # ❌ The intent model is no longer used → this block was removed

    return model_instance

log("🌐 Servis başlatılıyor...")
service_config.load(is_reload=False)

for project_name in service_config.projects:
    project_path = os.path.join(BASE_PROJECTS_DIR, project_name)
    os.makedirs(project_path, exist_ok=True)
    os.makedirs(os.path.join(project_path, "llm", "base_model"), exist_ok=True)
    os.makedirs(os.path.join(project_path, "llm", "fine_tune"), exist_ok=True)

    model_instance = load_project(project_name, service_config, project_path)
    llm_models[project_name] = model_instance
    log(f"✅ '{project_name}' için tüm modeller yüklenip belleğe alındı.")

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)

chat_handler.py
CHANGED (full file re-uploaded with identical content; shown once below, comments translated)
from fastapi import Request
from fastapi.responses import JSONResponse
import traceback
import random
from llm_model import Message, LLMModel
from intent_api import execute_intent
from intent_utils import validate_variable_formats
from parse_llm_blocks import parse_llm_blocks
from log import log

async def handle_chat(msg: Message, request: Request, app, service_config, session, llm_model: LLMModel):
    try:
        user_input = msg.user_input.strip()
        log(f"💬 Kullanıcı input'u: '{user_input}'")

        project_name = session.project_name
        project_config = service_config.get_project_llm_config(project_name)
        system_prompt = service_config.system_prompt  # 👉 system_prompt is taken from here

        # Append the user message to the chat history
        session.chat_history.append({"role": "user", "content": user_input})

        # === LLM call
        llm_response = await llm_model.generate_response_with_messages(session.chat_history, project_config, system_prompt)
        log(f"🤖 LLM cevabı: {llm_response}")

        # === Parse the LLM response
        parsed = parse_llm_blocks(llm_response)
        intent = parsed["intent"]
        params = parsed["params"]
        missing = parsed["missing"]
        action_json = parsed["action_json"]

        # Append the assistant response to the chat history
        session.chat_history.append({"role": "assistant", "content": llm_response})

        # === No INTENT → small talk
        if intent == "NONE":
            session.awaiting_variable = None
            session.last_intent = None
            session.variables.clear()
            return {"response": llm_response}

        # === An INTENT was detected
        session.last_intent = intent
        session.variables.update(params)

        # If parameters are missing, ask for the first one
        if missing:
            session.awaiting_variable = missing[0]
            return {"response": f"Lütfen {', '.join(missing)} bilgisini belirtir misiniz?"}

        # === Make the API call
        intent_definitions = {i["name"]: i for i in service_config.get_project_intents(project_name)}
        data_formats = service_config.data_formats

        # Validate the parameters (backend safety layer)
        variable_format_map = intent_definitions.get(intent, {}).get("variable_formats", {})
        is_valid, validation_errors = validate_variable_formats(session.variables, variable_format_map, data_formats)

        if not is_valid:
            session.awaiting_variable = list(validation_errors.keys())[0]
            return {"response": list(validation_errors.values())[0]}

        log("🚀 execute_intent() çağrılıyor...")
        result = execute_intent(
            intent,
            user_input,
            session.__dict__,
            intent_definitions,
            data_formats,
            project_name,
            service_config
        )

        if "reply" in result:
            return {"reply": result["reply"]}
        elif "errors" in result:
            return {"response": list(result["errors"].values())[0]}
        else:
            return {"response": random.choice(project_config["fallback_answers"])}

    except Exception as e:
        traceback.print_exc()
        return JSONResponse(content={"error": str(e)}, status_code=500)

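Note on the parsed block: parse_llm_blocks.py is part of this commit but its source is not shown above. The keys that handle_chat() reads imply a result shape like the sketch below; the field names come from the handler itself, while the example values are invented.

# Hypothetical parse_llm_blocks() output for an exchange-rate turn (values invented).
parsed = {
    "intent": "doviz_kuru",          # or "NONE" for small talk; the intent name here is made up
    "params": {"currency": "dolar"}, # parameters the LLM already extracted
    "missing": [],                   # parameter names still to ask the user for
    "action_json": None,             # raw action block; handle_chat reads it but does not use it
}
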
controllers/admin_controller.py
CHANGED (new version below; comments translated)
from fastapi import APIRouter, Request
from core import service_config, llm_models
from llm_model import LLMModel
from intent_utils import background_training
from log import log
import json, os, shutil, threading

router = APIRouter()

@router.post("/reload_config")
async def reload_config(request: Request):
    body = await request.json()
    project_name = body.get("project_name")
    new_config_data = body.get("service_config")

    if not project_name or not new_config_data:
        return {"error": "project_name ve service_config gereklidir."}

    def background_reload():
        try:
            current_project = service_config.projects.get(project_name)
            incoming_project = new_config_data.get("projects", {}).get(project_name)

            if not incoming_project:
                log(f"❌ '{project_name}' yeni config içinde bulunamadı, işlem durduruldu.")
                return

            project_path = f"/data/projects/{project_name}"
            temp_path = f"/data/projects/{project_name}_temp"

            if os.path.exists(temp_path):
                shutil.rmtree(temp_path)
            os.makedirs(temp_path, exist_ok=True)

            llm_config = incoming_project["llm"]
            intents = incoming_project["intents"]

            temp_instance = LLMModel()

            # 🆕 A new project is being added
            if current_project is None:
                log(f"🆕 Yeni proje '{project_name}' tespit edildi, yükleme başlatılıyor...")

                temp_instance.setup(service_config, llm_config, temp_path)
                intent_model_path = os.path.join(temp_path, "intent", "trained_model")
                background_training(
                    project_name,
                    intents,
                    llm_config["intent_model_id"],
                    intent_model_path,
                    llm_config["train_confidence_treshold"]
                )
                temp_instance.load_intent_model(intent_model_path)

                if os.path.exists(project_path):
                    shutil.rmtree(project_path)
                shutil.copytree(temp_path, project_path)

                llm_models[project_name] = temp_instance
                service_config.projects[project_name] = incoming_project

                log(f"✅ Yeni proje '{project_name}' başarıyla yüklendi ve belleğe alındı.")
                return

            # 🔄 Update the existing project only if it changed
            if current_project == incoming_project:
                log(f"ℹ️ '{project_name}' için değişiklik bulunamadı, işlem atlandı.")
                return

            log(f"🔄 '{project_name}' güncellemesi tespit edildi, güncelleme başlatılıyor...")

            # Reload if the base model changed
            if current_project["llm"]["model_base"] != llm_config["model_base"]:
                temp_instance.setup(service_config, llm_config, temp_path)
            else:
                temp_instance.model = llm_models[project_name].model
                temp_instance.tokenizer = llm_models[project_name].tokenizer

            # Retrain if the intents changed
            if current_project["intents"] != intents:
                intent_model_path = os.path.join(temp_path, "intent", "trained_model")
                background_training(
                    project_name,
                    intents,
                    llm_config["intent_model_id"],
                    intent_model_path,
                    llm_config["train_confidence_treshold"]
                )
                temp_instance.load_intent_model(intent_model_path)
            else:
                temp_instance.intent_model = llm_models[project_name].intent_model
                temp_instance.intent_tokenizer = llm_models[project_name].intent_tokenizer
                temp_instance.intent_label2id = llm_models[project_name].intent_label2id

            if os.path.exists(project_path):
                shutil.rmtree(project_path)
            shutil.copytree(temp_path, project_path)

            llm_models[project_name] = temp_instance
            service_config.projects[project_name] = incoming_project

            log(f"✅ '{project_name}' güncellemesi tamamlandı ve belleğe alındı.")

        except Exception as e:
            log(f"❌ reload_config background hatası: {e}")

    # Start in the background
    threading.Thread(target=background_reload, daemon=True).start()

    return {
        "status": "accepted",
        "message": f"'{project_name}' için güncelleme arka planda başlatıldı. İşlem loglardan takip edilebilir."
    }

controllers/chat_controller.py
CHANGED (full file re-uploaded with identical content; shown once below)
from fastapi import APIRouter, Request
from llm_model import Message
from chat_handler import handle_chat
from core import service_config, session_store, llm_models
import uuid

router = APIRouter()

@router.post("/start_chat")
def start_chat(request: Request):
    project_name = request.query_params.get("project_name")
    if not project_name:
        return {"error": "project_name parametresi gereklidir."}

    session = session_store.create_session(project_name)
    return {"session_id": session.session_id}

@router.post("/chat")
async def chat_endpoint(msg: Message, request: Request):
    session_id = request.headers.get("X-Session-ID")
    if not session_id:
        return {"error": "Session ID eksik."}

    session = session_store.get_session(session_id)
    if not session:
        return {"error": "Geçersiz veya süresi dolmuş session."}

    project_name = session.project_name
    llm_model = llm_models.get(project_name)
    if llm_model is None:
        return {"error": f"{project_name} için model yüklenmemiş."}

    return await handle_chat(msg, request, None, service_config, session, llm_model)

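A minimal client sketch for these two endpoints, mirroring what intent_test_runner.py does (the base URL and project name are assumptions):

import requests

BASE_URL = "http://localhost:7860"  # assumption: the service runs locally on the default port

# 1) Open a session for a project.
resp = requests.post(f"{BASE_URL}/start_chat", params={"project_name": "project1"})
session_id = resp.json()["session_id"]

# 2) Send chat turns within that session via the X-Session-ID header.
reply = requests.post(
    f"{BASE_URL}/chat",
    json={"user_input": "dolar kuru nedir"},
    headers={"X-Session-ID": session_id},
).json()
print(reply)
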
controllers/health_controller.py
CHANGED (full file re-uploaded with identical content; shown once below)
from fastapi import APIRouter
from core import llm_models

router = APIRouter()

@router.get("/")
def health():
    return {"status": "ok"}

@router.get("/health/intents")
def health_intents():
    loaded_projects = [proj for proj, model in llm_models.items() if model.intent_model is not None]
    return {"status": "ok", "loaded_projects": loaded_projects}

controllers/test_controller.py
CHANGED (full file re-uploaded with identical content; shown once below)
from fastapi import APIRouter
import threading
import intent_test_runner
from log import log

router = APIRouter()

@router.post("/run_tests", status_code=202)
def run_tests():
    log("🚦 /run_tests çağrıldı. Testler başlatılıyor...")
    threading.Thread(target=intent_test_runner.run_all_tests, daemon=True).start()
    return {"status": "running", "message": "Test süreci başlatıldı."}

core.py
CHANGED
@@ -1,6 +1,7 @@
 from service_config import ServiceConfig
 from session import SessionStore

 service_config = ServiceConfig()
 session_store = SessionStore()
 llm_models = {}
+INTENT_MODELS = {}
intent_api.py
CHANGED (new version below; the old side of this diff did not survive capture intact, apart from matching fragments; comments translated)
import intent_utils as intent
import requests
import traceback
from log import log
from core import llm_models

def auth_token_handler(api_name, auth_config, session):
    try:
        token_info = session.get("auth_tokens", {}).get(api_name)
        if token_info and "token" in token_info:
            return token_info["token"], session

        auth_endpoint = auth_config.get("auth_endpoint")
        auth_body = auth_config.get("auth_body", {})
        token_path = auth_config.get("auth_token_path")

        if not auth_endpoint or not token_path:
            raise Exception("auth_endpoint veya token_path tanımsız")

        resolved_body = {
            k: intent.resolve_placeholders(str(v), session, session.get("variables", {})) for k, v in auth_body.items()
        }

        response = requests.post(auth_endpoint, json=resolved_body, timeout=5)
        response.raise_for_status()
        json_resp = response.json()

        token_parts = token_path.split(".")
        token = json_resp
        for part in token_parts:
            token = token.get(part)
            if token is None:
                raise Exception(f"Token path çözülemedi: {token_path}")

        refresh_token = json_resp.get("refresh_token")
        session.setdefault("auth_tokens", {})[api_name] = {
            "token": token,
            "refresh_token": refresh_token
        }

        return token, session

    except Exception as e:
        log(f"❌ Auth token alınamadı: {e}")
        traceback.print_exc()
        raise e

def refresh_auth_token(api_name, auth_config, session):
    try:
        refresh_endpoint = auth_config.get("auth_refresh_endpoint")
        refresh_body = auth_config.get("refresh_body", {})
        token_path = auth_config.get("auth_token_path")

        if not refresh_endpoint or not token_path:
            raise Exception("Refresh yapılandırması eksik")

        refresh_token = session.get("auth_tokens", {}).get(api_name, {}).get("refresh_token")
        if not refresh_token:
            raise Exception("Mevcut refresh token bulunamadı")

        resolved_body = {
            k: intent.resolve_placeholders(str(v), session, session.get("variables", {})) for k, v in refresh_body.items()
        }

        response = requests.post(refresh_endpoint, json=resolved_body, timeout=5)
        response.raise_for_status()
        json_resp = response.json()

        token_parts = token_path.split(".")
        token = json_resp
        for part in token_parts:
            token = token.get(part)
            if token is None:
                raise Exception(f"Token path çözülemedi: {token_path}")

        new_refresh_token = json_resp.get("refresh_token", refresh_token)

        session.setdefault("auth_tokens", {})[api_name] = {
            "token": token,
            "refresh_token": new_refresh_token
        }

        log(f"🔁 Token başarıyla yenilendi: {api_name}")
        return token, session

    except Exception as e:
        log(f"❌ Token yenileme başarısız: {e}")
        traceback.print_exc()
        raise e

def execute_intent(intent_name, user_input, session_dict, intent_definitions, data_formats, project_name, service_config):
    try:
        session = session_dict
        intent_def = intent_definitions[intent_name]
        action_api_name = intent_def.get("action")

        if not action_api_name:
            raise Exception(f"Intent '{intent_name}' için action tanımı eksik.")

        api_def = service_config.get_api_config(action_api_name)
        if not api_def:
            raise Exception(f"API '{action_api_name}' tanımı bulunamadı.")

        variables_raw = intent.extract_parameters(intent_def.get("variables", []), user_input)
        variables = {item["key"]: item["value"] for item in variables_raw}

        log(f"🚀 execute_intent('{intent_name}')")
        log(f"🔍 Çıkarılan parametreler: {variables}")

        variable_format_map = intent_def.get("variable_formats", {})
        is_valid, validation_errors = intent.validate_variable_formats(variables, variable_format_map, data_formats)
        if not is_valid:
            log(f"⚠️ Validasyon hatası: {validation_errors}")
            return {
                "errors": validation_errors,
                "awaiting_variable": list(validation_errors.keys())[0],
                "session": session
            }

        headers = api_def.get("headers", [])
        body = api_def.get("body", {})
        method = api_def.get("method", "POST")
        url = api_def["url"]
        timeout = api_def.get("timeout", 5)
        retry_count = api_def.get("retry_count", 0)
        auth_config = api_def.get("auth")
        tls = api_def.get("tls", {})
        verify = tls.get("verify", True)
        verify_path = tls.get("ca_bundle") if verify and tls.get("ca_bundle") else verify

        # ✅ Fixed auth call
        if auth_config:
            token, session = auth_token_handler(action_api_name, auth_config, session)
        else:
            token = None

        resolved_headers = {
            h["key"]: intent.resolve_placeholders(h["value"], session, variables)
            for h in headers
        }
        resolved_body = {
            k: intent.resolve_placeholders(str(v), session, variables)
            for k, v in body.items()
        }

        for attempt in range(retry_count + 1):
            try:
                response = requests.request(
                    method=method,
                    url=url,
                    headers=resolved_headers,
                    json=resolved_body,
                    timeout=timeout,
                    verify=verify_path
                )
                if response.status_code == 401 and auth_config and attempt < retry_count:
                    log("🔁 Token expired. Yenileniyor...")
                    token, session = refresh_auth_token(action_api_name, auth_config, session)
                    continue
                response.raise_for_status()
                break
            except requests.HTTPError as e:
                if response.status_code != 401 or attempt == retry_count:
                    raise e

        log("✅ API çağrısı başarılı")
        json_resp = response.json()

        field = api_def.get("response_parser", {}).get("field")
        value = json_resp.get(field) if field else json_resp
        template = api_def.get("reply_template", str(value))

        merged_variables = {**session.get("variables", {}), **variables}
        if field:
            merged_variables[field] = str(value)

        log(f"🧩 merged_variables: {merged_variables}")
        log(f"🧩 reply_template: {template}")

        reply = intent.resolve_placeholders(template, session, merged_variables)

        log(f"🛠 Final reply: {reply}")

        session.setdefault("variables", {}).update(merged_variables)
        session["last_intent"] = intent_name

        return {
            "reply": reply,
            "session": session
        }

    except Exception as e:
        log(f"❌ execute_intent() hatası: {e}")
        traceback.print_exc()
        return {
            "error": str(e),
            "session": session
        }

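Collected from the .get() calls in execute_intent(), auth_token_handler() and refresh_auth_token(), an API definition has roughly the shape sketched below. Every value is an invented example; the real entries live in service_config.json, which this commit also updates but which is not shown here.

# Hypothetical api_def entry; keys match what the code reads, values are made up.
api_def = {
    "url": "https://example.com/api/rates",   # required
    "method": "POST",                          # defaults to "POST"
    "headers": [{"key": "Authorization", "value": "Bearer {auth_tokens.rates_api.token}"}],
    "body": {"currency": "{variables.currency}"},
    "timeout": 5,                              # defaults to 5 seconds
    "retry_count": 1,                          # defaults to 0; must be > 0 for the 401 retry path
    "auth": {                                  # optional; omit for unauthenticated APIs
        "auth_endpoint": "https://example.com/api/login",
        "auth_body": {"username": "demo", "password": "demo"},
        "auth_token_path": "data.token",       # dotted path into the login response JSON
        "auth_refresh_endpoint": "https://example.com/api/refresh",
        "refresh_body": {"token": "{auth_tokens.rates_api.refresh_token}"},
    },
    "tls": {"verify": True},                   # an optional "ca_bundle" overrides the verify path
    "response_parser": {"field": "rate"},      # which response field to pull into the variables
    "reply_template": "{variables.currency} kuru şu an {variables.rate}",
}
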
intent_test_runner.py
CHANGED (full file re-uploaded with identical content; shown once below, comments translated)
import os
import json
import requests
from log import log

BASE_URL = os.environ.get("BASE_URL", "http://localhost:7860")
SERVICE_CONFIG_PATH = os.environ.get("SERVICE_CONFIG_PATH", "service_config.json")

with open(SERVICE_CONFIG_PATH, "r", encoding="utf-8") as f:
    service_config = json.load(f)

fallback_answers = service_config["projects"]["project1"]["llm"]["fallback_answers"]
currency_options = service_config["config"]["data_formats"]["currency_format"]["valid_options"]
city_options = service_config["config"]["data_formats"]["city_format"]["valid_options"]

test_results = []

def assert_test(name, actual, expected_substring, explanation=None):
    if explanation:
        log(f"🧪 TEST: {name} → {explanation}")
    actual_str = str(actual)
    passed = expected_substring in actual_str
    if passed:
        log(f"[TEST] {name:<45} ✅")
        test_results.append((name, True))
    else:
        log(f"[TEST] {name:<45} ❌ — Beklenen: {expected_substring}, Gelen: {actual_str[:100]}...")
        test_results.append((name, False))

def summarize_tests():
    total = len(test_results)
    success = sum(1 for _, ok in test_results if ok)
    fail = total - success
    log("🧾 TEST SONUCU ÖZETİ")
    log(f"🔢 Toplam Test : {total}")
    log(f"✅ Başarılı : {success}")
    log(f"❌ Başarısız : {fail}")

def run_all_tests():
    try:
        log("🚀 Test süreci başlatıldı.")

        response = requests.post(f"{BASE_URL}/start_chat?project_name=project1")
        if response.status_code != 200:
            raise Exception(f"Start chat başarısız: {response.status_code}, {response.text}")

        session_id = response.json().get("session_id")
        if not session_id:
            raise Exception("Session ID alınamadı.")

        headers = {"X-Session-ID": session_id}

        # === Test 1: LLM fallback
        r = requests.post(f"{BASE_URL}/chat", json={"user_input": "Mars'a bilet alabilir miyim?"}, headers=headers)
        assert_test("LLM fallback", r.json(), "")

        # === Test 2: exchange rate, successful
        valid_currency = currency_options[0]
        r = requests.post(f"{BASE_URL}/chat", json={"user_input": f"{valid_currency} kuru nedir"}, headers=headers)
        assert_test("Parametre tamamlandı — dolar", r.json(), f"{valid_currency} kuru şu an")

        # === Test 3: exchange rate, invalid parameter
        r = requests.post(f"{BASE_URL}/chat", json={"user_input": "yenidolar kuru nedir"}, headers=headers)
        assert_test("Geçersiz parametre — currency", r.json(), "Geçerli bir döviz cinsi")

        # === Test 4: road status, prompt for missing parameters
        r = requests.post(f"{BASE_URL}/chat", json={"user_input": "yol durumu"}, headers=headers)
        assert_test("Eksik parametre — from_location & to_location", r.json(), "Lütfen şu bilgileri sırayla belirtir misiniz")

        # === Test 4b: road status, complete missing parameters and verify
        r = requests.post(f"{BASE_URL}/chat", json={"user_input": f"{city_options[0]} {city_options[1]}"}, headers=headers)
        assert_test("Parametre tamamlandı — yol durumu", r.json(), "trafik açık")

        # === Test 5: weather, prompt for missing parameter
        r = requests.post(f"{BASE_URL}/chat", json={"user_input": "hava durumu"}, headers=headers)
        assert_test("Eksik parametre — city", r.json(), "Lütfen şu bilgileri sırayla belirtir misiniz")

        # === Test 5b: weather, complete parameter and verify
        r = requests.post(f"{BASE_URL}/chat", json={"user_input": f"{city_options[0]}"}, headers=headers)
        assert_test("Parametre tamamlandı — hava durumu", r.json(), f"{city_options[0]} için hava güneşli")

        summarize_tests()

    except Exception as e:
        log(f"❌ run_all_tests sırasında hata oluştu: {e}")

intent_utils.py
CHANGED (new version below; the old 64-line side of this diff did not survive capture; comments translated)
import os
import torch
import json
import shutil
import re
import traceback
from datasets import Dataset
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    Trainer,
    TrainingArguments,
    default_data_collator,
    AutoConfig,
)
from log import log
from core import llm_models

async def detect_intent(text, project_name):
    llm_model_instance = llm_models.get(project_name)
    if not llm_model_instance or not llm_model_instance.intent_model:
        raise Exception(f"'{project_name}' için intent modeli yüklenmemiş.")

    tokenizer = llm_model_instance.intent_tokenizer
    model = llm_model_instance.intent_model
    label2id = llm_model_instance.intent_label2id

    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    outputs = model(**inputs)
    predicted_id = outputs.logits.argmax(dim=-1).item()

    detected_intent = [k for k, v in label2id.items() if v == predicted_id][0]
    confidence = outputs.logits.softmax(dim=-1).max().item()

    return detected_intent, confidence

def background_training(project_name, intents, model_id, output_path, confidence_threshold):
    try:
        log(f"🔧 Intent eğitimi başlatıldı (proje: {project_name})")
        texts, labels, label2id = [], [], {}
        for idx, intent in enumerate(intents):
            label2id[intent["name"]] = idx
            for ex in intent["examples"]:
                texts.append(ex)
                labels.append(idx)

        dataset = Dataset.from_dict({"text": texts, "label": labels})
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        config = AutoConfig.from_pretrained(model_id)
        config.problem_type = "single_label_classification"
        config.num_labels = len(label2id)
        model = AutoModelForSequenceClassification.from_pretrained(model_id, config=config)

        tokenized_data = {"input_ids": [], "attention_mask": [], "label": []}
        for row in dataset:
            out = tokenizer(row["text"], truncation=True, padding="max_length", max_length=128)
            tokenized_data["input_ids"].append(out["input_ids"])
            tokenized_data["attention_mask"].append(out["attention_mask"])
            tokenized_data["label"].append(row["label"])

        tokenized = Dataset.from_dict(tokenized_data)
        tokenized.set_format(type="torch", columns=["input_ids", "attention_mask", "label"])

        if os.path.exists(output_path):
            shutil.rmtree(output_path)
        os.makedirs(output_path, exist_ok=True)

        trainer = Trainer(
            model=model,
            args=TrainingArguments(output_path, per_device_train_batch_size=4, num_train_epochs=3, logging_steps=10, save_strategy="no", report_to=[]),
            train_dataset=tokenized,
            data_collator=default_data_collator,
        )
        trainer.train()

        log("🔧 Başarı raporu üretiliyor...")
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model.to(device)
        input_ids_tensor = torch.tensor(tokenized["input_ids"]).to(device)
        attention_mask_tensor = torch.tensor(tokenized["attention_mask"]).to(device)

        with torch.no_grad():
            outputs = model(input_ids=input_ids_tensor, attention_mask=attention_mask_tensor)
            predictions = outputs.logits.argmax(dim=-1).tolist()

        actuals = tokenized["label"]
        counts, correct = {}, {}
        for pred, actual in zip(predictions, actuals):
            intent_name = list(label2id.keys())[list(label2id.values()).index(actual)]
            counts[intent_name] = counts.get(intent_name, 0) + 1
            if pred == actual:
                correct[intent_name] = correct.get(intent_name, 0) + 1
        for intent_name, total in counts.items():
            accuracy = correct.get(intent_name, 0) / total
            log(f"📊 Intent '{intent_name}' doğruluk: {accuracy:.2f} — {total} örnek")
            if accuracy < confidence_threshold or total < 5:
                log(f"⚠️ Yetersiz performanslı intent: '{intent_name}' — Doğruluk: {accuracy:.2f}, Örnek: {total}")

        # Save the model and tokenizer to disk after training
        model.save_pretrained(output_path)
        tokenizer.save_pretrained(output_path)
        with open(os.path.join(output_path, "label2id.json"), "w") as f:
            json.dump(label2id, f)

        log(f"✅ Intent eğitimi tamamlandı ve '{project_name}' için model disk üzerinde hazır.")

    except Exception as e:
        log(f"❌ Intent eğitimi hatası: {e}")
        traceback.print_exc()

def extract_parameters(variables_list, user_input):
    extracted_params = []
    for pattern in variables_list:
        # e.g.: from_location:{Ankara} to_location:{İstanbul}
        regex = re.sub(r"(\w+):\{(.+?)\}", r"(?P<\1>.+?)", pattern)
        match = re.match(regex, user_input)
        if match:
            extracted_params = [{"key": k, "value": v} for k, v in match.groupdict().items()]
            break

    # Extra simple fallback: if two cities are given at once, assign them in order
    if not extracted_params:
        city_pattern = r"(\bAnkara\b|\bİstanbul\b|\bİzmir\b)"
        cities = re.findall(city_pattern, user_input)
        if len(cities) >= 2:
            extracted_params = [
                {"key": "from_location", "value": cities[0]},
                {"key": "to_location", "value": cities[1]}
            ]
    return extracted_params

def resolve_placeholders(text: str, session: dict, variables: dict) -> str:
    def replacer(match):
        full = match.group(1)
        try:
            if full.startswith("variables."):
                key = full.split(".", 1)[1]
                return str(variables.get(key, f"{{{full}}}"))
            elif full.startswith("session."):
                key = full.split(".", 1)[1]
                return str(session.get("variables", {}).get(key, f"{{{full}}}"))
            elif full.startswith("auth_tokens."):
                parts = full.split(".")
                if len(parts) == 3:
                    intent, token_type = parts[1], parts[2]
                    return str(session.get("auth_tokens", {}).get(intent, {}).get(token_type, f"{{{full}}}"))
                else:
                    return f"{{{full}}}"
            else:
                return f"{{{full}}}"
        except Exception:
            return f"{{{full}}}"

    return re.sub(r"\{([^{}]+)\}", replacer, text)

def validate_variable_formats(variables, variable_format_map, data_formats):
    errors = {}
    for var_name, format_name in variable_format_map.items():
        value = variables.get(var_name)
        if value is None:
            continue

        format_def = data_formats.get(format_name)
        if not format_def:
            continue

        if "valid_options" in format_def:
            if value not in format_def["valid_options"]:
                errors[var_name] = format_def.get("error_message", f"{var_name} değeri geçersiz.")
        elif "pattern" in format_def:
            if not re.fullmatch(format_def["pattern"], value):
                errors[var_name] = format_def.get("error_message", f"{var_name} formatı geçersiz.")

    return len(errors) == 0, errors

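A small usage sketch for the two pure helpers above; the session contents and format definition are invented for illustration (real ones come from the session store and service_config.json):

session = {"variables": {"city": "Ankara"}, "auth_tokens": {"rates_api": {"token": "abc123"}}}
variables = {"currency": "USD"}

text = "Kur: {variables.currency}, şehir: {session.city}, token: {auth_tokens.rates_api.token}"
print(resolve_placeholders(text, session, variables))
# -> Kur: USD, şehir: Ankara, token: abc123

data_formats = {"currency_format": {"valid_options": ["USD", "EUR"], "error_message": "Geçerli bir döviz cinsi girin."}}
is_valid, errors = validate_variable_formats({"currency": "TRY"}, {"currency": "currency_format"}, data_formats)
print(is_valid, errors)
# -> False {'currency': 'Geçerli bir döviz cinsi girin.'}
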
llm_model.py
CHANGED
@@ -1,65 +1,65 @@
import torch
import traceback
from transformers import AutoTokenizer, AutoModelForCausalLM
from log import log
from pydantic import BaseModel
import os
import json

class Message(BaseModel):
    user_input: str

class LLMModel:
    def __init__(self):
        self.model = None
        self.tokenizer = None
        self.eos_token_id = None

    def setup(self, s_config, project_config, project_path):
        try:
            log("🧠 LLMModel setup() başladı")
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            log(f"📡 Kullanılan cihaz: {device}")

            model_base = project_config["model_base"]
            token = s_config.get_auth_token()

            if s_config.work_mode == "hfcloud":
                log(f"📦 Hugging Face cloud modeli yükleniyor: {model_base}")
                self.tokenizer = AutoTokenizer.from_pretrained(model_base, token=token, use_fast=False)
                self.model = AutoModelForCausalLM.from_pretrained(model_base, token=token, torch_dtype=torch.float32).to(device)
            else:
                log(f"📦 Model indiriliyor veya yükleniyor: {model_base}")
                self.tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
                self.model = AutoModelForCausalLM.from_pretrained(model_base, torch_dtype=torch.float32).to(device)

            self.tokenizer.pad_token = self.tokenizer.pad_token or self.tokenizer.eos_token
            self.model.config.pad_token_id = self.tokenizer.pad_token_id
            self.eos_token_id = self.tokenizer("<|im_end|>", add_special_tokens=False)["input_ids"][0]
            self.model.eval()

            log("✅ LLMModel setup() başarıyla tamamlandı.")
        except Exception as e:
            log(f"❌ LLMModel setup() hatası: {e}")
            traceback.print_exc()

    async def generate_response_with_messages(self, messages, project_config, system_prompt):
        all_messages = [{"role": "system", "content": system_prompt}] + messages
        encodeds = self.tokenizer.apply_chat_template(all_messages, return_tensors="pt", add_generation_prompt=True)
        input_ids = encodeds.to(self.model.device)
        attention_mask = (input_ids != self.tokenizer.pad_token_id).long()

        with torch.no_grad():
            output = self.model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_new_tokens=256,
                do_sample=project_config["use_sampling"],
                eos_token_id=self.eos_token_id,
                pad_token_id=self.tokenizer.pad_token_id,
                return_dict_in_generate=True,
                output_scores=True
            )

        # Decode only the newly generated tokens; decoding the full sequence would
        # echo the prompt (which itself contains #INTENT/#PARAMS markers) back to the parser.
        new_tokens = output.sequences[0][input_ids.shape[-1]:]
        decoded = self.tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
        return decoded
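
A minimal usage sketch, assuming ServiceConfig has been loaded as in service_config.py below; the asyncio.run wrapper is illustrative, not necessarily how the request handlers invoke it:

import asyncio
from service_config import ServiceConfig
from llm_model import LLMModel

s_config = ServiceConfig()
s_config.load()
project_config = s_config.get_project_llm_config("project1")

llm = LLMModel()
llm.setup(s_config, project_config, project_path="/data/projects/project1")

reply = asyncio.run(llm.generate_response_with_messages(
    messages=[{"role": "user", "content": "İstanbul hava durumu nedir?"}],
    project_config=project_config,
    system_prompt=s_config.system_prompt,
))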
log.py
CHANGED
@@ -1,11 +1,11 @@
from datetime import datetime

def log(message):
    timestamp = datetime.now().strftime("%H:%M:%S")
    line = f"[{timestamp}] {message}"
    print(line, flush=True)
    try:
        with open("/tmp/logs.txt", "a", encoding="utf-8") as f:
            f.write(line + "\n")
    except Exception:
        pass  # file access errors are silently ignored
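
Each call prints a timestamped line and mirrors it to /tmp/logs.txt, for example (timestamp illustrative):

log("🚀 Servis başlatıldı")
# stdout and /tmp/logs.txt both receive: [14:05:32] 🚀 Servis başlatıldı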
parse_llm_blocks.py
CHANGED
@@ -1,25 +1,25 @@
import re
import json

def parse_llm_blocks(response_text):
    blocks = {
        "intent": "NONE",
        "params": {},
        "missing": [],
        "action_json": {}
    }
    # Note: the non-greedy {.*?} patterns only capture flat (non-nested) JSON objects.
    intent_match = re.search(r"#INTENT:\s*(.+)", response_text)
    params_match = re.search(r"#PARAMS:\s*(\{.*?\})", response_text)
    missing_match = re.search(r"#MISSING:\s*(\[[^\]]*\])", response_text)
    action_match = re.search(r"#ACTION_JSON:\s*(\{.*?\})", response_text)

    if intent_match:
        blocks["intent"] = intent_match.group(1).strip()
    try:
        if params_match:
            blocks["params"] = json.loads(params_match.group(1))
        if missing_match:
            blocks["missing"] = json.loads(missing_match.group(1))
        if action_match:
            blocks["action_json"] = json.loads(action_match.group(1))
    except json.JSONDecodeError:
        # Malformed JSON from the model falls back to the defaults above.
        pass

    return blocks
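
For example, on a well-formed reply (the sample text is invented; each marker must sit on its own line, since #INTENT captures to end of line):

response_text = """Elbette, hemen bakıyorum!
#INTENT: hava-durumu-intent
#PARAMS: {"city": "Ankara"}
#MISSING: []
#ACTION_JSON: {"api": "weather_api"}"""

parse_llm_blocks(response_text)
# -> {"intent": "hava-durumu-intent", "params": {"city": "Ankara"},
#     "missing": [], "action_json": {"api": "weather_api"}}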
service_config.json
CHANGED
@@ -1,113 +1,113 @@
{
  "config": {
    "work_mode": "hfcloud",
    "cloud_token": "",
    "system_prompt": "Sen bir görev tabanlı asistan botsun ve kullanıcıyla doğal dilde sohbet edebilirsin. Ancak kullanıcı mesajlarında bir görev (intent) algılarsan, önce intent’i tespit et, sonra eksik parametreleri ve aksiyon json’unu hazırla. Eğer yeterli bilgi yoksa eksik parametreleri listele. Eğer konu değiştiyse veya yeni bir sohbet başladıysa, beklenen parametre modunu bırak. Cevaplarının içinde her zaman aşağıdaki işaretli blokları kullan: #INTENT: <intent_adı> (veya NONE) #PARAMS: {parametreler JSON} #MISSING: [eksik_parametreler listesi] #ACTION_JSON: {api çağrısı için JSON} Mevcut intent’ler: - doviz-kuru-intent → parametre: currency (\"dolar\", \"euro\", \"TL\") - yol-durumu-intent → parametreler: from_location, to_location (\"Ankara\", \"İstanbul\", \"İzmir\") - hava-durumu-intent → parametre: city (\"Ankara\", \"İstanbul\", \"İzmir\") Unutma: Kullanıcı mesajlarını doğal ve insani tut. Eğer bir görev varsa, üstteki blokları eksiksiz döndür. Eğer bir görev yoksa sadece #INTENT: NONE bloklarını döndür. Action JSON yalnızca tüm parametreler tamamlandığında dolu olur.",
    "data_formats": {
      "currency_format": {
        "valid_options": ["dolar", "euro", "TL"],
        "error_message": "Geçerli bir döviz cinsi belirtmelisiniz."
      },
      "city_format": {
        "valid_options": ["Ankara", "İstanbul", "İzmir"],
        "error_message": "Geçerli bir şehir adı belirtmelisiniz."
      }
    },
    "apis": {
      "currency_api": {
        "url": "https://b9e2-176-88-34-20.ngrok-free.app/doviz",
        "method": "POST",
        "headers": [
          { "key": "Authorization", "value": "Bearer {auth_tokens.currency_api.token}" }
        ],
        "body": {
          "currency": "{variables.currency}"
        },
        "timeout": 5,
        "retry_count": 1,
        "auth": {
          "auth_endpoint": "https://b9e2-176-88-34-20.ngrok-free.app/auth",
          "auth_body": { "username": "user", "password": "pass" },
          "auth_token_path": "token",
          "auth_refresh_endpoint": "https://b9e2-176-88-34-20.ngrok-free.app/refresh",
          "refresh_body": { "refresh_token": "{auth_tokens.currency_api.token}" }
        },
        "response_parser": {
          "field": "rate",
          "format": "{variables.currency} kuru: {rate} TL"
        },
        "reply_template": "{variables.currency} kuru şu an {rate} TL."
      },
      "traffic_api": {
        "url": "https://b9e2-176-88-34-20.ngrok-free.app/yol",
        "method": "POST",
        "headers": [
          { "key": "Authorization", "value": "Bearer {auth_tokens.traffic_api.token}" }
        ],
        "body": {
          "from_location": "{variables.from_location}",
          "to_location": "{variables.to_location}"
        },
        "timeout": 5,
        "retry_count": 1,
        "auth": {
          "auth_endpoint": "https://b9e2-176-88-34-20.ngrok-free.app/auth",
          "auth_body": { "username": "user", "password": "pass" },
          "auth_token_path": "token",
          "auth_refresh_endpoint": "https://b9e2-176-88-34-20.ngrok-free.app/refresh",
          "refresh_body": { "refresh_token": "{auth_tokens.traffic_api.token}" }
        },
        "response_parser": {
          "field": "status",
          "format": "{from_location} → {to_location} arası: {status}"
        },
        "reply_template": "{from_location} ile {to_location} arasındaki trafik durumu: {status}"
      },
      "weather_api": {
        "url": "https://b9e2-176-88-34-20.ngrok-free.app/hava",
        "method": "POST",
        "headers": [
          { "key": "Authorization", "value": "Bearer {auth_tokens.weather_api.token}" }
        ],
        "body": {
          "city": "{variables.city}"
        },
        "timeout": 5,
        "retry_count": 1,
        "auth": {
          "auth_endpoint": "https://b9e2-176-88-34-20.ngrok-free.app/auth",
          "auth_body": { "username": "user", "password": "pass" },
          "auth_token_path": "token",
          "auth_refresh_endpoint": "https://b9e2-176-88-34-20.ngrok-free.app/refresh",
          "refresh_body": { "refresh_token": "{auth_tokens.weather_api.token}" }
        },
        "response_parser": {
          "field": "status",
          "format": "{city} hava durumu: {status}"
        },
        "reply_template": "{city} için hava durumu: {status}"
      }
    }
  },
  "projects": {
    "project1": {
      "llm": {
        "model_base": "TURKCELL/Turkcell-LLM-7b-v1",
        "use_fine_tune": false,
        "fine_tune_repo": "UcsTurkey/trained-zips",
        "fine_tune_zip": "trained_model_000_009.zip",
        "use_sampling": false,
        "intent_confidence_treshold": 0.3,
        "llm_confidence_treshold": 0.2,
        "train_confidence_treshold": 0.7,
        "intent_model_path": "/data/projects/project1/intent/trained_model",
        "intent_model_id": "dbmdz/bert-base-turkish-cased",
        "fallback_answers": [
          "Bu konuda maalesef bilgim yok.",
          "Ne demek istediğinizi tam anlayamadım.",
          "Bu soruya şu an yanıt veremiyorum."
        ]
      }
    }
  }
}
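
Each API entry above is a request template: the {variables.*} and {auth_tokens.*} placeholders are meant to be filled from the session before the HTTP call. A sketch of that substitution using resolve_placeholders from intent_utils.py (the token and currency values are made up; the actual wiring lives in the chat flow and is not shown here):

import json
from intent_utils import resolve_placeholders

api = s_config.get_api_config("currency_api")
session = {"auth_tokens": {"currency_api": {"token": "abc123"}}}
variables = {"currency": "euro"}

body = json.loads(resolve_placeholders(json.dumps(api["body"]), session, variables))
# -> {"currency": "euro"}
headers = {h["key"]: resolve_placeholders(h["value"], session, variables)
           for h in api["headers"]}
# -> {"Authorization": "Bearer abc123"}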
service_config.py
CHANGED
@@ -1,62 +1,62 @@
import os
import json
from log import log

class ServiceConfig:
    def __init__(self):
        self.work_mode = "cloud"
        self.cloud_token = None
        self.projects = {}
        self.data_formats = {}
        self.apis = {}
        self.system_prompt = ""  # ✅ newly added

    def load(self, is_reload=False):
        try:
            log("📥 service_config.json yükleniyor...")
            with open("service_config.json", "r", encoding="utf-8") as f:
                config_data = json.load(f)

            general_config = config_data.get("config", {})
            self.work_mode = general_config.get("work_mode", "cloud")
            self.cloud_token = general_config.get("cloud_token")
            self.system_prompt = general_config.get("system_prompt", "")  # ✅ newly added
            self.data_formats = general_config.get("data_formats", {})
            self.apis = general_config.get("apis", {})

            projects = config_data.get("projects", {})
            for project_name, project_info in projects.items():
                llm_config = project_info.get("llm", {})

                if project_name not in self.projects:
                    self.projects[project_name] = {}

                self.projects[project_name]["llm"] = llm_config

                self._initialize_project(project_name, llm_config, is_reload)

            log(f"✅ service_config.json yüklendi. Work mode: {self.work_mode}")

        except Exception as e:
            log(f"❌ ServiceConfig.load() hatası: {e}")
            raise

    def _initialize_project(self, project_name, llm_config, is_reload):
        log(f"🔧 Proje '{project_name}' başlatılıyor (reload={is_reload})")

    def get_project_llm_config(self, project_name):
        return self.projects.get(project_name, {}).get("llm")

    def get_data_format(self, format_name):
        return self.data_formats.get(format_name)

    def get_api_config(self, api_name):
        return self.apis.get(api_name)

    def get_auth_token(self):
        if self.work_mode == "hfcloud":
            return os.getenv("HF_TOKEN")
        elif self.work_mode == "cloud":
            return self.cloud_token
        else:
            return None
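
Typical read-side usage, with names taken from service_config.json above:

s_config = ServiceConfig()
s_config.load()

llm_config = s_config.get_project_llm_config("project1")  # e.g. llm_config["model_base"]
city_fmt = s_config.get_data_format("city_format")
weather = s_config.get_api_config("weather_api")
token = s_config.get_auth_token()  # reads the HF_TOKEN env var in "hfcloud" mode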
session.py
CHANGED
@@ -1,28 +1,28 @@
import uuid

class Session:
    def __init__(self, project_name):
        self.session_id = str(uuid.uuid4())
        self.project_name = project_name
        self.variables = {}
        self.auth_tokens = {}
        self.last_intent = None
        self.awaiting_variable = None
        self.chat_history = []

class SessionStore:
    def __init__(self):
        self.sessions = {}

    def create_session(self, project_name):
        session = Session(project_name)
        self.sessions[session.session_id] = session
        return session

    def get_session(self, session_id):
        return self.sessions.get(session_id)

    def remove_session(self, session_id):
        if session_id in self.sessions:
            del self.sessions[session_id]
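
A short usage sketch; note the store is a plain in-process dict, so sessions are not shared across workers and vanish on restart:

store = SessionStore()
session = store.create_session("project1")

session.variables["city"] = "İzmir"
session.last_intent = "hava-durumu-intent"

assert store.get_session(session.session_id) is session
store.remove_session(session.session_id)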