import gradio as gr
from gradio_client import Client
from huggingface_hub import InferenceClient
import random  # used for random seed generation

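# Client for an external Space that renders a chat transcript to a screenshot image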
ss_client = Client("https://omnibus-html-image-current-tab.hf.space/")

models = [
    "google/gemma-7b",
    "google/gemma-7b-it",
    "google/gemma-2b",
    "google/gemma-2b-it",
]
# One InferenceClient per model, indexed in step with `models`
clients = [InferenceClient(model) for model in models]

VERBOSE = False  # set True to print debugging output

def load_models(inp):
    # Relabel the chatbot with the newly selected model name
    if VERBOSE:
        print(type(inp))
        print(inp)
        print(models[inp])
    return gr.update(label=models[inp])

def format_prompt(message, history, cust_p):
    # Build a Gemma-style prompt; Gemma's chat template puts a newline
    # after each role tag ("<start_of_turn>user\n...<end_of_turn>\n")
    prompt = ""
    if history:
        for user_prompt, bot_response in history:
            prompt += f"<start_of_turn>user\n{user_prompt}<end_of_turn>\n"
            prompt += f"<start_of_turn>model\n{bot_response}<end_of_turn>\n"
            if VERBOSE:
                print(prompt)
    prompt += cust_p.replace("USER_INPUT", message)
    return prompt

def chat_inf(system_prompt, prompt, history, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, cust_p):
    # The dropdown uses type='index', so client_choice is already a 0-based index
    client = clients[int(client_choice)]
    if not history:
        history = []
    if not memory:
        memory = []
    # Only the last `chat_mem` exchanges are sent back to the model
    mem_window = memory[-int(chat_mem):]
    hist_len = sum(len(str(ea)) for ea in mem_window)
    # Rough character-based guard against overrunning the context window
    in_len = len(system_prompt + prompt) + hist_len

    if (in_len + tokens) > 8000:
        history.append((prompt, "That's too many tokens: please reduce the 'Chat Memory' value or the 'Max new tokens' value"))
        yield history, memory
    else:
        generate_kwargs = dict(
            temperature=temp,
            max_new_tokens=int(tokens),  # cast in case the slider delivers a float
            top_p=top_p,
            repetition_penalty=rep_p,
            do_sample=True,
            seed=int(seed),
        )
        if system_prompt:
            formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", mem_window, cust_p)
        else:
            formatted_prompt = format_prompt(prompt, mem_window, cust_p)
        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
        output = ""
        for response in stream:
            output += response.token.text
            # Keep earlier turns visible while the new response streams in
            yield history + [(prompt, output)], memory
        history.append((prompt, output))
        memory.append((prompt, output))
        yield history, memory

    if VERBOSE:
        print("\n######### INPUT LEN " + str(in_len))
        print("\n######### TOKENS " + str(tokens))

def get_screenshot(chat: list, height=5000, width=600, chatblock=[], theme="light", wait=3000, header=True):
    # Render the chat transcript to an image via the screenshot Space
    # (defined for optional use; not wired into the UI below).
    # The Space returns several files; index 3 holds the image when
    # specific chat blocks are requested, index 0 otherwise.
    tog = 0
    if chatblock:
        tog = 3
    result = ss_client.predict(str(chat), height, width, chatblock, header, theme, wait, api_name="/run_script")
    out = f'https://omnibus-html-image-current-tab.hf.space/file={result[tog]}'
    return out

def clear_fn():
    # Reset prompt, system prompt, chatbot, and memory (not wired into the UI below)
    return None, None, None, None

rand_val = random.randint(1, 1111111111111111)

def check_rand(inp, val):
    # Return a fresh random seed when "Random Seed" is checked, else the manual value
    if inp:
        return random.randint(1, 1111111111111111)
    else:
        return int(val)
    
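# Build the UI: chatbot, prompt inputs, and generation controls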
with gr.Blocks() as app:
    memory=gr.State()
    chat_b = gr.Chatbot(height=500)
    with gr.Group():
        with gr.Row():
            with gr.Column(scale=3):
                inp = gr.Textbox(label="Prompt")
                sys_inp = gr.Textbox(label="System Prompt (optional)")
                custom_prompt=gr.Textbox(label="Modify Prompt Format", info="For testing purposes. 'USER_INPUT' is where 'SYSTEM_PROMPT, PROMPT' will be placed", lines=3,value="<start_of_turn>user\nUSER_INPUT<end_of_turn>\n<start_of_turn>model\n")
                btn = gr.Button("Chat")
            with gr.Column(scale=1):
                with gr.Group():
                    rand = gr.Checkbox(label="Random Seed", value=True)
                    seed=gr.Slider(label="Seed", minimum=1, maximum=1111111111111111,step=1, value=rand_val)
                    tokens = gr.Slider(label="Max new tokens",value=1600,minimum=0,maximum=8000,step=64,interactive=True, visible=True,info="Maximum number of new tokens to generate")
                    temp=gr.Slider(label="Temperature",step=0.01, minimum=0.01, maximum=1.0, value=0.49)
                    top_p=gr.Slider(label="Top-P",step=0.01, minimum=0.01, maximum=1.0, value=0.49)
                    rep_p=gr.Slider(label="Repetition Penalty",step=0.01, minimum=0.1, maximum=2.0, value=0.99)
                    chat_mem=gr.Number(label="Chat Memory", info="Number of previous exchanges to retain",value=4,precision=0)  # precision=0 keeps the value an int for slicing

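    # Model selector; type='index' makes handlers receive a 0-based index into `models`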
    client_choice=gr.Dropdown(label="Models",type='index',choices=models,value=models[0],interactive=True)
    client_choice.change(load_models,client_choice,[chat_b])
    app.load(load_models,client_choice,[chat_b])
    
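    # Resolve the seed first, then stream the chat response (Enter key and button share the chain)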
    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
    go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])

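    # Allow up to 10 concurrent runs of each event handler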
    app.queue(default_concurrency_limit=10).launch()