Update main.py
main.py CHANGED
@@ -5,31 +5,48 @@ from gradio_client import Client
 import uvicorn
 import time
 import uuid
+import logging
+import json

-#
-
+# === Logger setup ===
+logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s")
+logger = logging.getLogger(__name__)

-#
+# === Connect to the Gradio Space directly by URL ===
+try:
+    gr_client = Client("https://nymbo-serverless-textgen-hub.hf.space")
+except Exception as e:
+    logger.error(f"Error connecting to the Gradio Client: {e}")
+    gr_client = None
+
+# === Calling the model ===
 def ask(user_prompt, system_prompt, model):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if not gr_client:
+        return "[Error: Gradio Client is not initialized]"
+
+    try:
+        result = gr_client.predict(
+            history=[[user_prompt, None]],
+            system_msg=system_prompt,
+            max_tokens=512,
+            temperature=0.7,
+            top_p=0.95,
+            freq_penalty=0,
+            seed=-1,
+            custom_model=model,
+            search_term="",
+            selected_model=model,
+            api_name="/bot"
+        )
+        return result
+    except Exception as e:
+        logger.error(f"Error calling Gradio predict: {e}")
+        return f"[Error: {str(e)}]"

-#
+# === FastAPI initialization ===
 app = FastAPI()

-#
+# === Pydantic models ===
 class Message(BaseModel):
     role: Literal["user", "assistant", "system"]
     content: str
@@ -40,26 +57,39 @@ class ChatRequest(BaseModel):
     temperature: Optional[float] = 0.7
     top_p: Optional[float] = 0.95
     max_tokens: Optional[int] = 512
-    # other parameters can be added if needed

+# === Main route ===
 @app.post("/v1/chat/completions")
-async def chat_completion(request: ChatRequest):
-    #
-
-
+async def chat_completion(request: Request):
+    # Log the request headers and body
+    headers = dict(request.headers)
+    body = await request.body()
+    logger.info("== Incoming request ==")
+    logger.info(f"Headers: {headers}")
+    logger.info(f"Body: {body.decode('utf-8')}")
+
+    try:
+        data = await request.json()
+        chat_request = ChatRequest(**data)
+    except Exception as e:
+        logger.error(f"Request parsing error: {e}")
+        return {"error": "Invalid JSON"}
+
+    # Extract the messages
+    user_msg = next((m.content for m in reversed(chat_request.messages) if m.role == "user"), None)
+    system_msg = next((m.content for m in chat_request.messages if m.role == "system"), "You are a helpful AI assistant.")

     if not user_msg:
         return {"error": "User message not found."}

-    #
-    assistant_reply = ask(user_msg, system_msg,
+    # Reply from the model
+    assistant_reply = ask(user_msg, system_msg, chat_request.model)

-    # Build the response in OpenAI API style
     response = {
         "id": f"chatcmpl-{uuid.uuid4().hex[:12]}",
         "object": "chat.completion",
         "created": int(time.time()),
-        "model":
+        "model": chat_request.model,
         "choices": [
             {
                 "index": 0,
@@ -71,7 +101,7 @@ async def chat_completion(request: ChatRequest):
             }
         ],
         "usage": {
-            "prompt_tokens": 0,
+            "prompt_tokens": 0,
             "completion_tokens": 0,
             "total_tokens": 0
         }
@@ -79,6 +109,6 @@ async def chat_completion(request: ChatRequest):

     return response

-#
+# === Server startup ===
 if __name__ == "__main__":
     uvicorn.run("local_openai_server:app", host="0.0.0.0", port=7860, reload=True)
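After this change the server exposes an OpenAI-style /v1/chat/completions route backed by the Gradio Space. Below is a minimal client sketch, not part of this commit, showing how the route could be exercised once the server is running on port 7860 as configured above; the requests dependency and the model id are illustrative assumptions, not values taken from the diff.

# Client sketch (not part of this commit): POST an OpenAI-style chat request
# to the local server started by main.py.
# Assumptions: server reachable at http://localhost:7860, `requests` installed,
# and the model id below is only a placeholder for whatever the Space accepts.
import requests

payload = {
    "model": "some-model-id",  # hypothetical; substitute a model the Space supports
    "messages": [
        {"role": "system", "content": "You are a helpful AI assistant."},
        {"role": "user", "content": "Say hello in one sentence."},
    ],
    "temperature": 0.7,
    "top_p": 0.95,
    "max_tokens": 512,
}

resp = requests.post("http://localhost:7860/v1/chat/completions", json=payload, timeout=120)
resp.raise_for_status()
print(resp.json())  # OpenAI-style dict: id, object, created, model, choices, usage

If the choices entry elided from this diff (lines 96-100) fills in the assistant message the way the OpenAI schema expects, an OpenAI SDK client pointed at base_url "http://localhost:7860/v1" should also be able to consume the response.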