Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -1,9 +1,10 @@ / @@ -11,40 +12,50 @@
Removed lines (only fragments are recoverable from this view): the earlier placement of title = "Llama2 70B Chatbot", a commented-out block ending in # ], a transcribe() that wrote the recorded audio to a file handle (f.write(audio[0])) before calling the Whisper Space, and a chat call passing max_new_tokens, 0.3, 1 with api_name="/chat_1", followed by a bare demo. The updated app.py follows.
import gradio as gr
from gradio_client import Client


title = "Llama2 70B Chatbot"
description = """
This Space demonstrates model [Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) by Meta, a Llama 2 model with 70B parameters fine-tuned for chat instructions.
| Model | Llama2 | Llama2-hf | Llama2-chat | Llama2-chat-hf |
|---|---|---|---|---|
| 70B | [Link](https://huggingface.co/meta-llama/Llama-2-70b) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) |
"""
css = """.toast-wrap { display: none !important } """
examples = [
    ['Hello there! How are you doing?'],
    ['Can you explain to me briefly what is Python programming language?'],
    ['Explain the plot of Cinderella in a sentence.'],
    ['How many hours does it take a man to eat a Helicopter?'],
    ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
]


# Stream text
def predict(message, chatbot, system_prompt="", temperature=0.9, max_new_tokens=4096):
    client = Client("https://ysharma-explore-llamav2-with-tgi.hf.space/")
    return client.predict(
        message,         # str in 'Message' Textbox component
        system_prompt,   # str in 'Optional system prompt' Textbox component
        temperature,     # int | float (numeric value between 0.0 and 1.0)
        max_new_tokens,  # int | float (numeric value between 0 and 4096)
        0.3,             # int | float (numeric value between 0.0 and 1)
        1,               # int | float (numeric value between 1.0 and 2.0)
        api_name="/chat"
    )
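
# A hedged sketch, not part of this commit: despite the "Stream text" comment,
# predict() above returns a single final string. If the upstream /chat endpoint
# yields partial results, gradio_client's submit() can surface them as they
# arrive; predict_stream below is a hypothetical variant for illustration only.
import time

def predict_stream(message, chatbot, system_prompt="", temperature=0.9, max_new_tokens=4096):
    client = Client("https://ysharma-explore-llamav2-with-tgi.hf.space/")
    job = client.submit(
        message, system_prompt, temperature, max_new_tokens, 0.3, 1,
        api_name="/chat",
    )
    while not job.done():
        outputs = job.outputs()   # partial outputs the endpoint has produced so far
        if outputs:
            yield outputs[-1]     # gr.ChatInterface renders generator yields incrementally
        time.sleep(0.1)
    yield job.result()            # the final, complete response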


def transcribe(audio):
    whisper_client = Client("https://sanchit-gandhi-whisper-large-v2.hf.space/")
    return whisper_client.predict(
        audio,         # str (filepath or URL to file) in 'inputs' Audio component
        "transcribe",  # str in 'Task' Radio component
        api_name="/predict"
    )
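
# Hypothetical usage, not part of this commit: transcribe() simply forwards the
# recorded file to the Whisper Space and returns the transcription string, so it
# can be exercised directly with any local audio path ("sample.wav" is a placeholder):
#   print(transcribe("sample.wav"))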


# Gradio Demo
with gr.Blocks(theme=gr.themes.Base()) as demo:
    gr.DuplicateButton()
    text = gr.Textbox()
    micro = gr.Microphone(type="filepath")  # record from the mic as a file path, which transcribe() forwards to the Whisper Space
    micro.stop_recording(transcribe, [micro], [text])
    gr.ChatInterface(predict, textbox=text, title=title, description=description, css=css, examples=examples)

demo.queue().launch(debug=True)
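
Once the Space is running, other scripts can drive it with gradio_client exactly the way app.py drives the Llama 2 and Whisper Spaces above. A minimal sketch, assuming a placeholder Space URL that is not taken from this commit:

from gradio_client import Client

caller = Client("https://<your-username>-llama2-70b-chatbot.hf.space/")
caller.view_api()  # prints the endpoints this Blocks app exposes and their expected arguments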