Spaces:
Build error
Build error
update app
Browse files
app.py
CHANGED
@@ -149,7 +149,14 @@ def clear_prompt():
|
|
149 |
return "","",""
|
150 |
|
151 |
with gr.Blocks() as demo:
|
152 |
-
gr.Markdown("
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
153 |
with gr.Row():
|
154 |
with gr.Column():
|
155 |
prompt = gr.Textbox(lines=17,label="Prompt",placeholder="Enter Prompt", interactive=True)
|
@@ -173,5 +180,5 @@ with gr.Blocks() as demo:
|
|
173 |
btn_stop.click(stop_threads,cancels=click_run)
|
174 |
gr.Examples(examples, inputs=[prompt, chk_boxes, max_tokens, temperature, top_p, rep_penalty, stop])
|
175 |
|
176 |
-
demo.queue(concurrency_count=
|
177 |
demo.launch()
|
|
|
149 |
return "","",""
|
150 |
|
151 |
with gr.Blocks() as demo:
|
152 |
+
gr.Markdown("# <p style='text-align: center;'>BLOOM vs BLOOMZ Comparison</p>")
|
153 |
+
gr.Markdown("")
|
154 |
+
gr.Markdown("Test Inference on the [BLOOM](https://huggingface.co/bigscience/bloom) and [BLOOMZ](https://huggingface.co/bigscience/bloomz) 176 Billion Parameter models using Petals. \
|
155 |
+
Please consider contributing your unused GPU cycles to the [Petals Swarm](https://github.com/bigscience-workshop/petals) to speed up inference. <br />\n \
|
156 |
+
Due to heavy resource requirements of these large models, token generation can take upwards of 3-5 seconds per token. Try to keep Max Tokens to a minimum.")
|
157 |
+
gr.Markdown("")
|
158 |
+
gr.Markdown("Special thanks to [RFT Capital](https://www.rftcapital.com/) for supporting our experiments with compute time donations.")
|
159 |
+
gr.Markdown("Type a Prompt and then click **Run** to see the output.")
|
160 |
with gr.Row():
|
161 |
with gr.Column():
|
162 |
prompt = gr.Textbox(lines=17,label="Prompt",placeholder="Enter Prompt", interactive=True)
|
|
|
180 |
btn_stop.click(stop_threads,cancels=click_run)
|
181 |
gr.Examples(examples, inputs=[prompt, chk_boxes, max_tokens, temperature, top_p, rep_penalty, stop])
|
182 |
|
183 |
+
demo.queue(concurrency_count=1)
|
184 |
demo.launch()
|