# Hugging Face Spaces page residue (status banner: "Spaces: Sleeping") —
# not part of the application code.
# app.py
# =============
# This is a complete app.py file for deploying the MTSAIR/Cotype-Nano model using Gradio and Hugging Face Transformers with chat and token streaming functionality.
import gradio as gr
from transformers import pipeline

# --- Model setup -----------------------------------------------------------
# Load the text-generation pipeline for the Cotype-Nano chat model on CPU.
# NOTE(review): the " | |" extraction artifacts have been stripped from every
# line — the file did not parse with them present.
model_name = "MTSAIR/Cotype-Nano"
pipe = pipeline("text-generation", model=model_name, device="cpu")

# System prompt (Russian): "You are an AI assistant. You are given a task:
# you must generate a detailed and comprehensive answer."
system_prompt = {"role": "system", "content": "Ты — ИИ-помощник. Тебе дано задание: необходимо сгенерировать подробный и развернутый ответ."}
# Define the Gradio interface | |
def generate_response(history, user_input):
    """Run one chat turn and append it to the conversation history.

    Args:
        history: List of ``{"role", "content"}`` message dicts accumulated
            so far (the Chatbot component's state).
        user_input: The new user message text from the textbox.

    Returns:
        A ``(updated_history, "")`` pair; the empty string clears the
        input textbox via the second output binding.
    """
    messages = [system_prompt] + history + [{"role": "user", "content": user_input}]
    # max_new_tokens bounds only the generated reply. The original
    # max_length=1024 also counts the prompt, so replies would be silently
    # truncated as the chat history grew. return_full_text=False makes the
    # pipeline return just the assistant's reply, not the echoed prompt.
    response = pipe(messages, max_new_tokens=1024, return_full_text=False)
    generated_text = response[0]['generated_text']
    history.append({"role": "user", "content": user_input})
    history.append({"role": "assistant", "content": generated_text})
    return history, ""
# --- Gradio UI ---------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## Cotype-Nano Text Generation Chat")
    # type="messages" makes the Chatbot accept the {"role", "content"} dicts
    # that generate_response appends; the default tuple format would fail to
    # render them.
    chatbot = gr.Chatbot([], elem_id="chatbot", type="messages")
    with gr.Row():
        # container=False is passed to the constructor — the chained
        # .style(container=False) helper was removed in Gradio 4.x.
        txt = gr.Textbox(
            show_label=False,
            placeholder="Введите ваш запрос здесь...",
            container=False,
        )
    # Submitting the textbox feeds (history, text) in and writes the updated
    # history back to the chatbot; the returned "" clears the textbox.
    txt.submit(generate_response, [chatbot, txt], [chatbot, txt])

# Launch the app only when executed as a script.
if __name__ == "__main__":
    demo.launch()