File size: 1,540 Bytes
2df152f
 
87947e6
2df152f
 
 
 
 
 
 
 
 
 
 
 
87947e6
 
 
 
 
 
 
2df152f
 
87947e6
 
 
 
 
 
 
 
 
 
 
 
2df152f
 
 
87947e6
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
# app.py
# =============
# A complete app.py for deploying the MTSAIR/Cotype-Nano model as a chat interface
# using Gradio and Hugging Face Transformers. Note: replies are returned in full
# once generation finishes; token-by-token streaming is not implemented here.

import gradio as gr
from transformers import pipeline

# Load the model and pipeline.
# device="cpu" forces CPU inference; pass device=0 (or "cuda") to use a GPU.
model_name = "MTSAIR/Cotype-Nano"
pipe = pipeline("text-generation", model=model_name, device="cpu")

# Define the system prompt prepended to every conversation.
# The content (in Russian) roughly reads: "You are an AI assistant. Your task:
# generate a detailed and elaborate answer."
system_prompt = {"role": "system", "content": "Ты — ИИ-помощник. Тебе дано задание: необходимо сгенерировать подробный и развернутый ответ."}

# Define the Gradio chat callback
def generate_response(history, user_input):
    """Generate an assistant reply and append the exchange to the chat history.

    Args:
        history: List of ``{"role", "content"}`` message dicts (chat so far).
        user_input: The user's new message text.

    Returns:
        Tuple of ``(updated_history, "")`` — the empty string clears the textbox.
    """
    messages = [system_prompt] + history + [{"role": "user", "content": user_input}]
    # max_new_tokens bounds only the generated reply. The previous
    # max_length=1024 counted prompt tokens as well, so a long chat history
    # could leave little or no room for the model to generate anything.
    response = pipe(messages, max_new_tokens=1024, return_full_text=False)
    generated_text = response[0]['generated_text']
    history.append({"role": "user", "content": user_input})
    history.append({"role": "assistant", "content": generated_text})
    return history, ""

# Create the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## Cotype-Nano Text Generation Chat")

    # type="messages" makes the Chatbot accept the {"role", "content"} dicts
    # that generate_response produces; the legacy tuple format would not
    # render this history shape.
    chatbot = gr.Chatbot([], elem_id="chatbot", type="messages")

    with gr.Row():
        # container=False is a constructor argument in current Gradio; the
        # .style() method was removed in Gradio 4.x and raises AttributeError.
        txt = gr.Textbox(
            show_label=False,
            placeholder="Введите ваш запрос здесь...",
            container=False,
        )

    # On Enter: send (history, text) to the callback; it returns the updated
    # history and an empty string that clears the textbox.
    txt.submit(generate_response, [chatbot, txt], [chatbot, txt])

# Launch the interface only when this file is executed directly
# (not when it is imported as a module).
if __name__ == "__main__":
    demo.launch()