Update app.py
app.py
CHANGED
@@ -7,19 +7,22 @@ from transformers import pipeline
 api_key = os.getenv("LLAMA")
 login(api_key)
 
-# Load the
+# Load the model using the Hugging Face Hub
 llama_model = gr.load("models/meta-llama/Llama-3.1-8B-Instruct")
 
-# Function to handle
+# Function to handle input and output
 def chat_with_llama(user_input):
-    #
-
-
-
-
+    # Ensure user_input is correctly formatted as a string
+    if isinstance(user_input, str):
+        response = llama_model([user_input])  # Pass input as a list
+        return response[0]  # Return the first element of the output list
+    else:
+        return "Invalid input. Please provide a valid text."
+
+# Customize the Gradio interface
 with gr.Blocks(css=".title {font-size: 3em; font-weight: bold; text-align: center; color: #4A90E2;}") as demo:
 
-    # Header
+    # Header
     gr.Markdown(
         """
         <div class="title">🦙 Llama 3.1 Chatbot 🦙</div>
@@ -28,20 +31,20 @@ with gr.Blocks(css=".title {font-size: 3em; font-weight: bold; text-align: cente
         elem_classes="header"
     )
 
-    # Main
+    # Main Input/Output Section
     with gr.Row():
         with gr.Column(scale=1):
             user_input = gr.Textbox(label="Your question", placeholder="Type your question here...", lines=4)
         with gr.Column(scale=1):
             response_output = gr.Textbox(label="Llama's response", lines=4)
 
-    # Button to submit
+    # Button to submit input
     submit_button = gr.Button("Submit", variant="primary")
 
     # Link the input and output
     submit_button.click(fn=chat_with_llama, inputs=user_input, outputs=response_output)
 
-    # Footer
+    # Footer
     gr.Markdown(
         """
         <div style="text-align:center; font-size:0.8em; color:gray;">Developed with ❤️ using Llama 3.1 and Gradio</div>
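For reference, below is a sketch of how app.py reads after this change, assembled from the two hunks above. It is not the verbatim file: the import lines, the lines between the hunks that close the header markdown string, and the final demo.launch() call are not shown in the diff and are assumed here.

import os

import gradio as gr
from huggingface_hub import login  # assumed import form for login()
from transformers import pipeline  # present in the file per the first hunk header

# Read the Hub token from the LLAMA secret and authenticate
api_key = os.getenv("LLAMA")
login(api_key)

# Load the model using the Hugging Face Hub
llama_model = gr.load("models/meta-llama/Llama-3.1-8B-Instruct")

# Function to handle input and output
def chat_with_llama(user_input):
    # Ensure user_input is correctly formatted as a string
    if isinstance(user_input, str):
        response = llama_model([user_input])  # Pass input as a list
        return response[0]  # Return the first element of the output list
    else:
        return "Invalid input. Please provide a valid text."

# Customize the Gradio interface
with gr.Blocks(css=".title {font-size: 3em; font-weight: bold; text-align: center; color: #4A90E2;}") as demo:

    # Header
    gr.Markdown(
        """
        <div class="title">🦙 Llama 3.1 Chatbot 🦙</div>
        """,  # the diff omits two lines here before elem_classes; the string is assumed to close like this
        elem_classes="header"
    )

    # Main Input/Output Section
    with gr.Row():
        with gr.Column(scale=1):
            user_input = gr.Textbox(label="Your question", placeholder="Type your question here...", lines=4)
        with gr.Column(scale=1):
            response_output = gr.Textbox(label="Llama's response", lines=4)

    # Button to submit input
    submit_button = gr.Button("Submit", variant="primary")

    # Link the input and output
    submit_button.click(fn=chat_with_llama, inputs=user_input, outputs=response_output)

    # Footer
    gr.Markdown(
        """
        <div style="text-align:center; font-size:0.8em; color:gray;">Developed with ❤️ using Llama 3.1 and Gradio</div>
        """
    )

# Launch the Space (assumed; not shown in the diff)
demo.launch()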