Spaces:
Sleeping
GoofyGoof committed
Commit · f280e4a
1 Parent(s): 13f9fc2
Update app.py
app.py CHANGED
@@ -0,0 +1,37 @@
+import gradio as gr
+from groq import Groq
+
+# Initialize the Groq client
+client = Groq()
+
+# Function for querying the LLM API
+def query_llm(prompt):
+    completion = client.chat.completions.create(
+        model="llama-3.3-70b-versatile",
+        messages=[{"role": "user", "content": prompt}],
+        temperature=1,
+        max_tokens=1024,
+        top_p=1,
+        stream=True,
+        stop=None,
+    )
+
+    response = ""
+    for chunk in completion:
+        response += chunk.choices[0].delta.content or ""
+    return response
+
+# Gradio interface
+def chat_with_llm(prompt):
+    response = query_llm(prompt)
+    return response
+
+interface = gr.Interface(
+    fn=chat_with_llm,
+    inputs="text",
+    outputs="text",
+    title="Chat with LLM",
+    description="Enter text to send a request to the LLM."
+)
+
+interface.launch()
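Note: client = Groq() is created with no arguments, so the script relies on the client finding its API key in the environment; on a Space that key would normally be stored as a repository secret. A minimal sketch of an explicit initialization, assuming the secret is exposed as an environment variable named GROQ_API_KEY (the variable name is an assumption, not taken from the diff):

import os
from groq import Groq

# Sketch only: read an assumed GROQ_API_KEY environment variable and pass it
# explicitly instead of relying on the client's default key lookup.
client = Groq(api_key=os.environ["GROQ_API_KEY"])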
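The request is sent with stream=True, yet query_llm concatenates every chunk before returning, so the Gradio textbox only updates once the whole completion has arrived. A minimal sketch of a streaming variant, assuming the same client and model as in the diff above; Gradio can consume a generator function and refresh the output on each yield.

def stream_llm(prompt):
    # Same request as query_llm, but partial text is yielded as chunks arrive.
    completion = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[{"role": "user", "content": prompt}],
        stream=True,
    )
    partial = ""
    for chunk in completion:
        partial += chunk.choices[0].delta.content or ""
        yield partial

# Hypothetical wiring: pass the generator in place of chat_with_llm.
# interface = gr.Interface(fn=stream_llm, inputs="text", outputs="text")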