Spaces: Runtime error

Update app.py
app.py CHANGED

@@ -1,15 +1,21 @@
 import gradio as gr
-from transformers import pipeline
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
+from huggingface_hub import login

-#
-
+# Login to Hugging Face (ensure you've set up the token)
+login(token='your_huggingface_token_here')
+
+# Initialize the tokenizer and model with authentication
+tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
+model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
+
+# Initialize the text generation pipeline
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

 # Define the function to handle chat
 def chat(message):
-    # Prepare the input format
-    messages = [{"role": "user", "content": message}]
     # Generate the response using the model
-    response = pipe(
+    response = pipe(message, max_length=50)
     # Extract and return the generated text
     return response[0]['generated_text']

@@ -18,7 +24,7 @@ interface = gr.Interface(
     fn=chat,
     inputs=gr.inputs.Textbox(label="Enter your message"),
     outputs="text",
-    title="
+    title="Text Generation Bot",
     description="Chat with the Mistral-7B-Instruct model to get responses to your queries."
 )
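A note on the login step above: committing a literal token in app.py exposes it in the Space's public file listing. A safer pattern, sketched below under the assumption that the token is stored as a Space secret named HF_TOKEN (Settings -> Variables and secrets), reads it from the environment instead; the variable name is an assumption, not something this commit defines.

import os

from huggingface_hub import login

# Assumed setup: HF_TOKEN is configured as a Space secret rather than
# hardcoded in the source file.
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    # Authenticates this process so gated checkpoints such as
    # mistralai/Mistral-7B-Instruct-v0.3 can be downloaded.
    login(token=hf_token)
else:
    raise RuntimeError("HF_TOKEN is not set; add it as a Space secret.")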
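The new code loads the checkpoint with default settings, which for a 7B-parameter model means roughly 28 GB of fp32 weights, more than the 16 GB of RAM on the free Spaces CPU tier and a plausible cause of the runtime error. A lighter-weight load might look like the following sketch; torch_dtype and device_map are standard from_pretrained arguments, though device_map="auto" additionally requires the accelerate package to be installed.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "mistralai/Mistral-7B-Instruct-v0.3"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # halves memory versus the fp32 default
    device_map="auto",          # places weights on GPU when one is available
)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)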
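The commit also removes the messages = [{"role": "user", "content": message}] wrapping and passes the raw string straight to the pipeline. Mistral-7B-Instruct is tuned on a chat format, so a variant of chat that keeps the old wrapping and renders it with the model's own template could look like the sketch below; max_new_tokens and return_full_text=False stand in for max_length=50, which counts prompt tokens toward the limit and echoes the prompt back in the output. The tokenizer and pipe names refer to the objects created earlier in app.py.

def chat(message):
    # Restore the chat-style wrapping the previous version used.
    messages = [{"role": "user", "content": message}]
    # Render the messages with the model's own [INST] ... [/INST] template.
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    # max_new_tokens bounds only the reply; return_full_text=False strips
    # the echoed prompt from the pipeline output.
    response = pipe(prompt, max_new_tokens=256, return_full_text=False)
    return response[0]["generated_text"]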
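Finally, inputs=gr.inputs.Textbox(...) is untouched by this commit, but the gr.inputs namespace was deprecated in Gradio 3 and removed in Gradio 4, so on a current Gradio runtime that line raises AttributeError at startup, which is another plausible way for the Space to end up in a runtime-error state. A sketch against the current API, with a stand-in chat so it runs on its own:

import gradio as gr

def chat(message):
    # Stand-in for the pipeline-backed chat() defined in app.py.
    return f"Echo: {message}"

interface = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(label="Enter your message"),  # components are passed directly now
    outputs="text",
    title="Text Generation Bot",
    description="Chat with the Mistral-7B-Instruct model to get responses to your queries.",
)

if __name__ == "__main__":
    interface.launch()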