File size: 2,084 Bytes
2df152f
 
19ced57
2df152f
 
 
 
 
 
 
 
 
19ced57
2df152f
 
19ced57
87947e6
19ced57
87947e6
 
 
 
2df152f
19ced57
 
 
 
2df152f
87947e6
 
 
c0ff2ab
87947e6
 
 
 
19ced57
c0ff2ab
87947e6
19ced57
 
 
 
 
 
 
 
 
 
 
 
2df152f
 
 
87947e6
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
# app.py
# =============
# Gradio chat app for the MTSAIR/Cotype-Nano model (via Hugging Face Transformers):
# a chat interface with adjustable generation settings (temperature, max tokens).

import gradio as gr
from transformers import pipeline

# Load the model and pipeline.
# NOTE: this runs at import time — weights are fetched from the Hugging Face
# Hub on first run, which can take a while. Inference is pinned to CPU.
model_name = "MTSAIR/Cotype-Nano"
pipe = pipeline("text-generation", model=model_name, device="cpu")

# Define the system prompt.
# Prepended to every conversation in generate_response; it is never shown in
# the Chatbot widget because only user/assistant turns are appended to history.
system_prompt = {"role": "system", "content": "You are an AI assistant. Your task is to generate a detailed and comprehensive response."}

# Chat handler wired to the Send button / textbox submit below.
def generate_response(history, user_input, temperature, max_tokens):
    """Generate an assistant reply and append the exchange to the chat history.

    Args:
        history: List of ``{"role", "content"}`` message dicts shown so far.
        user_input: The user's new message text.
        temperature: Sampling temperature; 0 falls back to greedy decoding.
        max_tokens: Maximum number of NEW tokens to generate.

    Returns:
        Tuple of (updated history, "") — the empty string clears the textbox.
    """
    # Ignore empty/whitespace submissions so a stray Enter doesn't trigger
    # a full model generation.
    if not user_input or not user_input.strip():
        return history, ""

    messages = [system_prompt] + history + [{"role": "user", "content": user_input}]
    # max_new_tokens bounds only the generated portion; the previous
    # max_length counted the prompt too, so long conversations could be
    # truncated or raise. do_sample must be enabled for temperature to have
    # any effect — greedy decoding silently ignores it.
    response = pipe(
        messages,
        max_new_tokens=max_tokens,
        temperature=temperature,
        do_sample=temperature > 0,
        return_full_text=False,
    )
    generated_text = response[0]['generated_text']
    history.append({"role": "user", "content": user_input})
    history.append({"role": "assistant", "content": generated_text})
    return history, ""

# Handler for the "Clear Chat" button.
def clear_chat():
    """Reset the conversation: empty message history, blank input box."""
    fresh_history = []
    blank_textbox = ""
    return fresh_history, blank_textbox

# Create the Gradio interface.
# Built at import time; the app only serves when launch() runs below.
with gr.Blocks() as demo:
    gr.Markdown("## Cotype-Nano Text Generation Chat")

    # type='messages' makes the widget consume the same
    # {"role", "content"} dicts that generate_response produces.
    chatbot = gr.Chatbot([], elem_id="chatbot", type='messages')

    with gr.Row():
        # Message entry; label hidden since the placeholder is self-explanatory.
        txt = gr.Textbox(
            show_label=False,
            placeholder="Type your message here...",
        )

        send_btn = gr.Button("Send")

    with gr.Row():
        clear_btn = gr.Button("Clear Chat")

    with gr.Row():
        # Generation controls passed through to generate_response on each send.
        temperature_slider = gr.Slider(0, 1, 0.7, step=0.1, label="Temperature")
        max_tokens_slider = gr.Slider(1, 1000, 100, step=1, label="Max Tokens")

    # Button click and textbox Enter both trigger the same handler; the
    # handler's second return value ("") clears the textbox after sending.
    send_btn.click(generate_response, [chatbot, txt, temperature_slider, max_tokens_slider], [chatbot, txt])
    txt.submit(generate_response, [chatbot, txt, temperature_slider, max_tokens_slider], [chatbot, txt])
    clear_btn.click(clear_chat, outputs=[chatbot, txt])

# Launch the interface only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()