Update app.py
app.py CHANGED
@@ -1,8 +1,9 @@
 from huggingface_hub import InferenceClient
 import gradio as gr
 
+# Upgraded to Mistral-7B-v0.3
 client = InferenceClient(
-    "mistralai/Mistral-7B-Instruct-v0.
+    "mistralai/Mistral-7B-Instruct-v0.3"
 )
 
 
@@ -92,7 +93,7 @@ css = """
 with gr.Blocks(css=css) as ai_chat:
     gr.HTML("<h1><center>AI Conversation</center></h1>")
     gr.HTML("<h3><center>How can I help you? You can converse with me and say more 💬</center></h3>")
-    gr.HTML("<h3><center>To try, select
+    gr.HTML("<h3><center>To try, select a prompt from below and hit submit</center></h3>")
     gr.HTML("<h3><center>Have a wonderful day! 📚</center></h3>")
     gr.ChatInterface(
         generate,
@@ -100,6 +101,15 @@ with gr.Blocks(css=css) as ai_chat:
         examples=[["List fun activities in Boston."], ["How to spend a weekend in San Francisco?"], ["What is the secret to life?"], ["Write me a recipe for a quick vegetarian breakfast."], ["What is the future for software developers?"],
                   ["Create a plan for daily healthy habits."], ["What is optogenetic stimulation?"], ["How to conduct a neuroscience experiment using holography?"], ["Tell me about the lifestyle of people living in Auckland, NZ."], ["Make a tour plan for the Los Angeles metro area."]]
     )
-
+'''
+By enabling the queue you can control when users know their position in the queue, and set a limit on the maximum number of events allowed.
+Parameters:
+status_update_rate: If "auto", the Queue will send status estimations to all clients whenever a job is finished. Otherwise, the Queue will send status at regular intervals set by this parameter as a number of seconds.
+api_open: If True, the REST routes of the backend will be open, allowing requests made directly to those endpoints to skip the queue.
+max_size: The maximum number of events the queue will store at any given moment. If the queue is full, new events will not be added and the user will receive a message saying that the queue is full. If None, the queue size will be unlimited.
+concurrency_count: Deprecated. Set the concurrency_limit directly on event listeners, e.g. btn.click(fn, ..., concurrency_limit=10) or gr.Interface(concurrency_limit=10). If necessary, the total number of workers can be configured via `max_threads` in launch().
+default_concurrency_limit: The default value of `concurrency_limit` to use for event listeners that don't specify a value. Can be set by the environment variable GRADIO_DEFAULT_CONCURRENCY_LIMIT. Defaults to 1 if not otherwise set.
+Replaced the deprecated concurrency_count with concurrency_limit.
+'''
 #ai_chat.queue(concurrency_limit=None, max_size=250).launch(debug=True)
 ai_chat.queue(max_size=250).launch(debug=True)
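
The diff passes a `generate` function to `gr.ChatInterface` but does not show its body. For context, a plausible minimal sketch of how such a handler might call the upgraded model through `InferenceClient` is below; the prompt template and generation parameters are assumptions, not taken from the Space.

```python
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")

def generate(message, history):
    # Mistral-Instruct models expect [INST] ... [/INST] wrapping; this exact
    # template and the max_new_tokens value are assumptions for illustration.
    prompt = f"<s>[INST] {message} [/INST]"
    partial = ""
    # stream=True yields tokens incrementally, so gr.ChatInterface can render
    # the reply as it is generated.
    for token in client.text_generation(prompt, max_new_tokens=512, stream=True):
        partial += token
        yield partial
```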
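The docstring pasted into the diff describes gradio's `queue()` parameters: the deprecated `concurrency_count` is replaced by a per-event `concurrency_limit` or a queue-level `default_concurrency_limit`. A minimal sketch of that pattern, assuming Gradio 4.x (the handler name and limit values here are placeholders):

```python
import gradio as gr

def echo(message, history):
    # Stand-in for the app's generate() handler.
    return message

with gr.Blocks() as demo:
    gr.ChatInterface(
        echo,
        concurrency_limit=10,  # per-event limit; replaces queue(concurrency_count=...)
    )

# default_concurrency_limit covers events that don't set their own limit;
# max_size caps how many requests may wait in the queue at once.
demo.queue(max_size=250, default_concurrency_limit=2).launch()
```

Note that the commit's own fix, `ai_chat.queue(max_size=250)`, simply drops the deprecated argument; per the docstring above, concurrency then defaults to 1 worker unless GRADIO_DEFAULT_CONCURRENCY_LIMIT is set.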