added examples and caching
Browse files
app.py
CHANGED
@@ -13,6 +13,13 @@ system_message = "\nYou are a helpful, respectful and honest assistant. Always a
|
|
# UI copy for the Gradio Space: window title, header markdown, CSS tweaks.
title = "Llama2 70B Chatbot"

# Shown above the chat widget; the markdown links point at the model card
# and at the Inference Endpoints UI for users who want a dedicated deployment.
description = """This Space demonstrates model [Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) by Meta, running on Inference Endpoints using text-generation-inference. To have your own dedicated endpoint, you can [deploy it on Inference Endpoints](https://ui.endpoints.huggingface.co/). """

# Hide Gradio's toast notification pop-ups.
css = """.toast-wrap { display: none !important } """
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
|
17 |
def predict(message, chatbot):
|
18 |
|
@@ -60,4 +67,4 @@ def predict(message, chatbot):
|
|
60 |
gr.Warning(f"KeyError: {e} occurred for JSON object: {json_obj}")
|
61 |
continue
|
62 |
|
# Wire the chat UI to predict() and serve it; queue() enables request
# queueing so up to 75 requests can be processed concurrently.
# NOTE(review): `concurrency_count` was removed from queue() in Gradio 4.x
# (replaced by event-level `concurrency_limit`) — confirm the pinned version.
demo = gr.ChatInterface(predict, title=title, description=description, css=css)
demo.queue(concurrency_count=75).launch()
|
|
|
# UI copy for the Gradio Space: window title, header markdown, CSS tweaks.
title = "Llama2 70B Chatbot"

# Shown above the chat widget; the markdown links point at the model card
# and at the Inference Endpoints UI for users who want a dedicated deployment.
description = """This Space demonstrates model [Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) by Meta, running on Inference Endpoints using text-generation-inference. To have your own dedicated endpoint, you can [deploy it on Inference Endpoints](https://ui.endpoints.huggingface.co/). """

# Hide Gradio's toast notification pop-ups.
css = """.toast-wrap { display: none !important } """

# Starter prompts offered in the chat UI; order here is display order.
examples = [
    'Hello there! How are you doing?',
    'Can you explain to me briefly what is Python programming language?',
    'Explain the plot of Cinderella in a sentence.',
    'How many hours does it take a man to eat a Helicopter?',
    "Write a 100-word article on 'Benefits of Open-Source in AI research'",
]
|
23 |
|
24 |
def predict(message, chatbot):
|
25 |
|
|
|
67 |
gr.Warning(f"KeyError: {e} occurred for JSON object: {json_obj}")
|
68 |
continue
|
69 |
|
# Build the chat UI: predict() handles generation; `examples` seeds the UI
# with starter prompts and `cache_examples=True` precomputes their replies
# once at startup so clicking an example responds instantly.
demo = gr.ChatInterface(
    predict,
    title=title,
    description=description,
    css=css,
    examples=examples,
    cache_examples=True,
)
# NOTE(review): `concurrency_count` was removed from queue() in Gradio 4.x
# (replaced by event-level `concurrency_limit`) — confirm the pinned version.
demo.queue(concurrency_count=75).launch()
|