Update app.py
app.py
CHANGED
@@ -30,30 +30,27 @@ from deep_translator import (GoogleTranslator,
                             QcriTranslator,
                             single_detection,
                             batch_detection)
-
-
-
-
-
-
+from pyaspeller import YandexSpeller
+def error_correct_pyspeller(sample_text):
+    """Grammar correction of the input text."""
+    speller = YandexSpeller()
+    fixed = speller.spelled(sample_text)
+    return fixed

-
-
-
-
-
-
+def postprocerssing(inp_text: str):
+    """Post-processing of the LLM response."""
+    inp_text = re.sub('<[^>]+>', '', inp_text)
+    inp_text = inp_text.split('##', 1)[0]
+    inp_text = error_correct_pyspeller(inp_text)
+    return inp_text


-# streamer = TextStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens = True)

-
-
-def generate_text(prompt, max_length, top_p, top_k):
-    global messages
+def llm_run(prompt, max_length, top_p, temprature, top_k, messages):
+
    lang = single_detection(prompt, api_key='4ab77f25578d450f0902fb42c66d5e11')
-
-
+    if lang == 'en':
+        prompt = error_correct_pyspeller(prompt)
    en_translated = GoogleTranslator(source='auto', target='en').translate(prompt)
    messages.append({"role": "user", "content": en_translated})
    # messages.append({"role": "user", "content": prompt})
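The new helpers chain three third-party calls: pyaspeller's YandexSpeller for spell correction, deep_translator's single_detection for language detection, and GoogleTranslator to normalize everything to English before generation. A minimal standalone sketch of that input pipeline under the same libraries; normalize_prompt is a hypothetical name, and the key is a placeholder for a detectlanguage.com API key:

from deep_translator import GoogleTranslator, single_detection
from pyaspeller import YandexSpeller

def normalize_prompt(prompt: str, api_key: str) -> str:
    # Detect the input language (single_detection calls detectlanguage.com).
    lang = single_detection(prompt, api_key=api_key)
    # Spell-check English input before it reaches the model.
    if lang == 'en':
        prompt = YandexSpeller().spelled(prompt)
    # Whatever the source language, hand the model English text.
    return GoogleTranslator(source='auto', target='en').translate(prompt)

print(normalize_prompt("Ths is a smal tst", api_key="YOUR_DETECTLANGUAGE_KEY"))

postprocerssing then cleans the model output on the way back: it strips HTML-like tags, truncates at the first '##', and spell-checks the result.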
@@ -69,16 +66,10 @@ def generate_text(prompt, max_length, top_p, top_k):
    )
    generate_kwargs = dict(
        max_length=int(max_length),top_p=float(top_p), do_sample=True,
-        top_k=int(top_k), streamer=streamer, temperature=
+        top_k=int(top_k), streamer=streamer, temperature=float(temprature), repetition_penalty=1.2

    )

-    # _ = model.generate(input_ids, streamer = streamer, max_new_tokens = int(max_length), pad_token_id = tokenizer.eos_token_id,
-    #                    temperature=0.6, # Adjust this value
-    #                    top_k=int(top_k), # Adjust this value
-    #                    top_p=float(top_p), # Adjust this value
-    #                    repetition_penalty=1.2
-    # )
    t = Thread(target=model.generate, args=(input_ids,), kwargs=generate_kwargs)
    t.start()

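llm_run keeps the old threading pattern: model.generate runs on a worker Thread while the streamer is drained on the caller's side. A minimal sketch of that producer/consumer loop, assuming a TextIteratorStreamer (the iterable variant; plain TextStreamer only prints to stdout) and an already loaded causal LM; gpt2 and stream_reply are stand-ins, not names from the diff:

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

def stream_reply(prompt, max_length=500, top_p=0.9, temprature=0.65, top_k=50):
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        inputs=input_ids, streamer=streamer, max_length=int(max_length),
        do_sample=True, top_p=float(top_p), top_k=int(top_k),
        temperature=float(temprature), repetition_penalty=1.2,
    )
    Thread(target=model.generate, kwargs=generate_kwargs).start()
    pieces = []
    for new_text in streamer:        # blocks until the worker emits more text
        pieces.append(new_text)
        yield "".join(pieces)        # each partial string can refresh the UI

The float cast on temperature matters here: the UI slider supplies values like 0.65, and an int cast would truncate them to 0 and break sampling.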
@@ -92,17 +83,32 @@ def generate_text(prompt, max_length, top_p, top_k):

    messages.append({"role": "assistant", "content": "".join(generated_text)})

-
-
-""
-
-    gr.Textbox(label="Prompt text", lines=5),
-    gr.Textbox(label="max-lenth generation", value=100),
-    gr.Slider(0.0, 1.0, label="top-p value", value=0.95),
-    gr.Textbox(label="top-k", value=50,),
-]
-outputs = [gr.Textbox(label="Generated Text", lines= 10)]
-
-demo = gr.Interface(fn=generate_text, inputs=inputs, outputs=outputs, description=description)
+def clear_memory(messages):
+    messages.clear()
+    return "Memory cleaned."
+

+with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.orange, secondary_hue=gr.themes.colors.pink)) as demo:
+    stored_message = gr.State([])
+    with gr.Row():
+        with gr.Column(scale=2):
+            prompt_text = gr.Textbox(lines=7, label="Prompt", scale=2)
+            with gr.Row():
+                btn1 = gr.Button("Submit", scale=1)
+                btn2 = gr.Button("Clear", scale=1)
+                btn3 = gr.Button("Clean Memory", scale=2)
+        with gr.Column(scale=2):
+            out_text = gr.Text(lines=15, label="Output", scale=2)
+    btn1.click(fn=llm_run, inputs=[
+        prompt_text,
+        gr.Textbox(label="Max-length generation", value=500),
+        gr.Slider(0.0, 1.0, label="Top-P value", value=0.90),
+        gr.Slider(0.0, 1.0, label="Temperature value", value=0.65),
+        gr.Textbox(label="Top-K", value=50),
+        stored_message
+    ], outputs=out_text)
+    btn2.click(lambda: [None, None], outputs=[prompt_text, out_text])
+    btn3.click(fn=clear_memory, inputs=[stored_message], outputs=[out_text])
+
+# demo = gr.Interface(fn=llm_run, inputs=["text"], outputs="text")
demo.launch(debug=True, share=True)
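gr.State is what carries the conversation between clicks: stored_message holds a per-session Python list that llm_run appends to and clear_memory empties. A stripped-down sketch of the same wiring, with echo_fn standing in for llm_run:

import gradio as gr

def echo_fn(prompt, messages):
    messages.append(prompt)              # state list persists across clicks
    return " | ".join(messages)

def clear_memory(messages):
    messages.clear()
    return "Memory cleaned."

with gr.Blocks() as demo:
    stored_message = gr.State([])        # fresh list for every user session
    prompt_text = gr.Textbox(label="Prompt")
    out_text = gr.Text(label="Output")
    gr.Button("Submit").click(echo_fn, inputs=[prompt_text, stored_message], outputs=out_text)
    gr.Button("Clean Memory").click(clear_memory, inputs=[stored_message], outputs=out_text)

demo.launch()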