Update app.py
app.py CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 from gradio_client import Client
 from huggingface_hub import InferenceClient
-import random
+import random
 
 ss_client = Client("https://omnibus-html-image-current-tab.hf.space/")
 
@@ -27,7 +27,7 @@ def load_models(inp):
     print(models[inp])
     return gr.update(label=models[inp])
 
-def format_prompt(message, history, cust_p):
+def format_prompt(message, history):
     prompt = ""
     if history:
         for user_prompt, bot_response in history:
@@ -35,10 +35,10 @@ def format_prompt(message, history, cust_p):
         prompt += f"<start_of_turn>model{bot_response}<end_of_turn>"
     if VERBOSE==True:
         print(prompt)
-    prompt+=cust_p.replace("USER_INPUT",message)
+    prompt += message
     return prompt
 
-def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p):
+def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem):
     hist_len=0
     client=clients[int(client_choice)-1]
     if not history:
@@ -64,10 +64,7 @@ def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,
         do_sample=True,
         seed=seed,
     )
-    if system_prompt:
-        formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[0-chat_mem:],cust_p)
-    else:
-        formatted_prompt = format_prompt(prompt, memory[0-chat_mem:],cust_p)
+    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[0-chat_mem:])
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
     output = ""
     for response in stream:
@@ -106,8 +103,6 @@ with gr.Blocks() as app:
     with gr.Row():
         with gr.Column(scale=3):
             inp = gr.Textbox(label="Prompt")
-            sys_inp = gr.Textbox(label="System Prompt (optional)")
-            custom_prompt=gr.Textbox(label="Modify Prompt Format", info="For testing purposes. 'USER_INPUT' is where 'SYSTEM_PROMPT, PROMPT' will be placed", lines=3,value="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model")
             btn = gr.Button("Chat")
         with gr.Column(scale=1):
             with gr.Group():
@@ -123,7 +118,7 @@ with gr.Blocks() as app:
     client_choice.change(load_models,client_choice,[chat_b])
     app.load(load_models,client_choice,[chat_b])
 
-    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
-    go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
+    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem],[chat_b,memory])
+    go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem],[chat_b,memory])
 
 app.queue(default_concurrency_limit=10).launch()
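In short: the commit drops the "System Prompt (optional)" box and the editable prompt template, so format_prompt now hard-codes the Gemma turn markers and appends the raw message rather than splicing it into cust_p. A minimal sketch of the post-commit builder; the user-turn line inside the loop is an assumption (the diff only shows the model-turn line), and the sample history is invented:

    VERBOSE = False  # module-level flag, per the diff's `if VERBOSE==True` check

    def format_prompt(message, history):
        prompt = ""
        if history:
            for user_prompt, bot_response in history:
                # Assumed user-turn line; only the model-turn line appears in the diff.
                prompt += f"<start_of_turn>user{user_prompt}<end_of_turn>"
                prompt += f"<start_of_turn>model{bot_response}<end_of_turn>"
        if VERBOSE == True:
            print(prompt)
        prompt += message  # raw append; the cust_p template substitution is gone
        return prompt

    # chat_inf now always sends f"{system_prompt}, {prompt}" as the message:
    print(format_prompt("You are terse, Hi there", [("Hello", "Hi!")]))
    # -> <start_of_turn>userHello<end_of_turn><start_of_turn>modelHi!<end_of_turn>You are terse, Hi there

Note that the rewired .submit/.click events pass inp twice, as both system_prompt and prompt, so after the f"{system_prompt}, {prompt}" join the user's text reaches the model twice, and the current message is no longer wrapped in its own <start_of_turn>user turn.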
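One detail worth flagging when reading chat_inf: memory[0-chat_mem:] is just memory[-chat_mem:], a window of the last chat_mem exchanges, except that chat_mem=0 degenerates to memory[0:], i.e. the whole history rather than none of it. A quick illustration (variable names from the diff, data invented):

    memory = [("q1", "a1"), ("q2", "a2"), ("q3", "a3")]

    chat_mem = 2
    print(memory[0-chat_mem:])  # [('q2', 'a2'), ('q3', 'a3')]: the last two exchanges

    chat_mem = 0
    print(memory[0-chat_mem:])  # memory[0:] is the full list, not an empty window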