from huggingface_hub import InferenceClient
import gradio as gr
import pandas as pd

# Inference client initialization
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

# Function to format the prompt
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
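
# A quick sanity check of the template above; a minimal sketch that is safe to
# run at import time since it touches no network resources.
_example = format_prompt("How are you?", [("Hi", "Hello!")])
assert _example == "<s>[INST] Hi [/INST] Hello!</s> [INST] How are you? [/INST]"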

# Generate a streamed completion from the prompt, chat history, and optional upload.
# The parameters after (prompt, history) must match the order of additional_inputs below.
def generate(prompt, history, system_prompt, file, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    # Fold any uploaded file content into the user prompt
    file_content = read_file(file)
    if file_content is not None:
        prompt = f"{prompt}\n\nFile content:\n{file_content}"

    # Format the prompt, prepending the system prompt to the current turn
    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)

    # Generate text using InferenceClient
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""

    # Stream partial output back to the UI token by token
    for response in stream:
        output += response.token.text
        yield output
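
# A hedged smoke test for generate(); left commented out because it performs a
# real network call and assumes a valid HF token is configured in the environment:
#
#   for partial in generate("Say hi in one word.", [], "You are terse.", None):
#       print(partial)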

# Additional input components for the Gradio chat interface.
# Their order must match the parameters of generate() after (prompt, history).
additional_inputs = [
    gr.Textbox(label="System Prompt", placeholder="Optional system instructions"),
    gr.File(label="Upload CSV or Document", type="filepath"),  # Max file size is 2 GB
    gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
    gr.Slider(label="Max new tokens", value=256, minimum=0, maximum=5120, step=64, interactive=True, info="The maximum number of new tokens"),
    gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
    gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens"),
]

# Read an uploaded CSV or plain-text document from its file path
def read_file(file):
    if file is None:
        return None
    if file.endswith('.csv'):
        # Render the CSV as text so it can be embedded in the prompt
        return pd.read_csv(file).to_string(index=False)
    if file.endswith('.txt'):
        with open(file, 'r') as f:
            return f.read()
    return None
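
# Illustrative usage of read_file(), assuming the working directory is
# writable; the path here is hypothetical:
#
#   with open("notes.txt", "w") as f:
#       f.write("hello")
#   print(read_file("notes.txt"))  # -> "hello"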

# Gradio chat interface. ChatInterface supplies the message box, chatbot, and
# history itself, so only the extra inputs are declared here. Each example row
# is the message followed by values for every additional input, in order.
gr.ChatInterface(
    fn=generate,
    title="Synthetic-data-generation-aze",
    additional_inputs=additional_inputs,
    examples=[
        ["What is the capital of France?", "You are a helpful assistant.", None, 0.9, 256, 0.95, 1.0],
        ["How are you?", "You are a helpful assistant.", None, 0.9, 256, 0.95, 1.0],
    ],
).launch()