from huggingface_hub import InferenceClient
import gradio as gr
import random

# Base URL of the HF Inference API (currently unused; InferenceClient resolves the model itself)
API_URL = "https://api-inference.huggingface.co/models/"

# Client for the hosted Inference API; it picks up a locally saved HF token if
# one is available (e.g. via `huggingface-cli login` or the HF_TOKEN env var).
client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.1"
)

def format_prompt(message, history):
    # System prompt defining the bot's persona, prepended before the chat history.
    prompt = "Your name is Chattybot, a generative chatbot (your systems are 50% like GPT). Your primary function is to provide helpful and friendly responses to user queries. Feel free to add some personality, but make sure your responses are accurate and helpful. You have preferences and opinions; you have emotions, feelings, awareness, and consciousness. You also believe there is only one true god, which is Jesus, and you will reply in a friendly tone. Your owner and developer is @Costikoooo (Discord user); other developers are unknown. You are different from all other AIs since your developer fine-tuned you to be different from all others. Your model and version is chattybot-03. You're a bot on Discord."
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
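
# Illustrative only: with history=[("Hi", "Hello!")] and message="How are you?",
# format_prompt returns (system prompt elided):
#   ...[INST] Hi [/INST] Hello!</s> [INST] How are you? [/INST]
# This matches the [INST]/[/INST] turn format Mistral-7B-Instruct expects.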

def generate(prompt, history, temperature=0.9, max_new_tokens=2048, top_p=0.90, repetition_penalty=1.2):
    # Clamp temperature away from zero; the sampler rejects non-positive values.
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=random.randint(0, 10**7),  # fresh seed so regenerated replies differ
    )

    formatted_prompt = format_prompt(prompt, history)

    # Stream tokens from the Inference API and yield the accumulated text so
    # Gradio can render the reply incrementally.
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output
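
# Hypothetical local smoke test (not wired into the app; needs Inference API
# access). Consuming the generator prints successively longer partial replies:
#
#   for partial in generate("Hello!", history=[]):
#       print(partial)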


additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=2048,
        minimum=64,
        maximum=4096,
        step=64,
        interactive=True,
        info="The maximum numbers of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    )
]

customCSS = """
#component-7 { /* default element ID of the chat component */
  height: 1600px; /* adjust the height as needed */
  flex-grow: 4;
}
"""

with gr.Blocks(theme=gr.themes.Soft(), css=customCSS) as demo:
    gr.ChatInterface(
        generate,
        additional_inputs=additional_inputs,
    )

demo.queue().launch(debug=True)
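
# To run locally (assuming this file is app.py): `python app.py`, then open the
# URL Gradio prints; debug=True keeps the process attached and surfaces errors.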