Update app.py
app.py
CHANGED
@@ -1,23 +1,20 @@
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer
 import gradio as gr
 
-#
+# Load the model and tokenizer
+model_id = "microsoft/phi-2"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32)
+model.to("cpu")
+streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
 
-# Function
-def
+# Response generation function
+def chat_fn(prompt):
+    inputs = tokenizer(prompt, return_tensors="pt").to("cpu")
+    outputs = model.generate(**inputs, max_new_tokens=100, streamer=streamer)
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return response[len(prompt):].strip()
 
 # Gradio interface
-iface = gr.Interface(
-    fn=chat,
-    inputs=gr.Textbox(lines=2, placeholder="Type a message..."),
-    outputs="text",
-    title="FlareGPT Light",
-    description="Chat with a lightweight model on a Hugging Face Space"
-)
-
-# REQUIRED
-if __name__ == "__main__":
-    iface.launch()
+gr.Interface(fn=chat_fn, inputs="text", outputs="text", title="💬 Flare GPT — on Phi-2").launch()
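
Note that model.generate(..., streamer=streamer) with a plain TextStreamer prints tokens to stdout (the Space's container logs), while the Gradio textbox still only receives the fully decoded reply. If token-by-token output in the UI were wanted instead, the usual pattern is TextIteratorStreamer plus a generator function. A minimal sketch under that assumption, reusing the tokenizer and model objects defined above; chat_stream is a hypothetical helper name, not part of this commit:

from threading import Thread
from transformers import TextIteratorStreamer

def chat_stream(prompt):
    # Hypothetical streaming variant of chat_fn: yields the partial reply as tokens arrive.
    inputs = tokenizer(prompt, return_tensors="pt").to("cpu")
    iter_streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    # Run generation in a background thread so this function can consume the stream.
    Thread(target=model.generate,
           kwargs=dict(**inputs, max_new_tokens=100, streamer=iter_streamer)).start()
    reply = ""
    for chunk in iter_streamer:
        reply += chunk
        yield reply  # Gradio updates the output textbox on every yield from a generator fn

# gr.Interface(fn=chat_stream, inputs="text", outputs="text").launch()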
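
The rewrite also drops the if __name__ == "__main__": guard that the previous version had, so launch() now runs at import time. That works when the Space executes app.py directly, but keeping the guard avoids starting the server as a side effect if the module is ever imported (for example, to test chat_fn). A sketch of that variant, an assumption rather than what the commit does:

# Same interface as in the new app.py, bound to a name and launched behind a main guard.
demo = gr.Interface(fn=chat_fn, inputs="text", outputs="text", title="💬 Flare GPT — on Phi-2")

if __name__ == "__main__":
    demo.launch()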