Spaces:
Runtime error
Runtime error
File size: 1,046 Bytes
3d089cc f5e33dc 3d089cc ea18f0f f5e33dc ea18f0f f5e33dc 3d089cc 7bc4b50 f5e33dc 7bc4b50 6ed91c2 3d089cc 6ed91c2 7bc4b50 6ed91c2 f5e33dc 7bc4b50 6ed91c2 3d089cc 7bc4b50 3d089cc |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 |
import os

import gradio as gr
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Authenticate with the Hugging Face Hub.
# FIX: the token was hard-coded in source (a leaked secret); read it from the
# HF_TOKEN environment variable instead. The placeholder fallback preserves
# the original "fill in your token here" behavior for local edits.
token = os.environ.get("HF_TOKEN", "hf_your_actual_token_here")
login(token=token)

# Load the instruction-tuned Mistral model and build a text-generation
# pipeline from the explicit tokenizer/model pair.
# NOTE(review): loading a 7B model this way needs substantial RAM/VRAM —
# confirm the host has enough memory.
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
# Define the function that handles a single chat turn.
def chat(message):
    """Generate a model response for *message*.

    Args:
        message: The user's input prompt (plain text).

    Returns:
        The pipeline's ``generated_text`` string. Note: with the pipeline's
        default settings this includes the original prompt at the start.
    """
    # FIX: the original used max_length=50, which bounds prompt + generation
    # together — any prompt near 50 tokens left no room for a reply (or
    # raised an error). max_new_tokens bounds only the generated tokens.
    response = pipe(message, max_new_tokens=50)
    # Extract and return the generated text from the first (only) candidate.
    return response[0]['generated_text']
# Create the Gradio interface.
# FIX: `gr.inputs.Textbox` was deprecated in Gradio 3.x and removed in 4.x,
# which crashes the app at import time on current Gradio — use the top-level
# `gr.Textbox` component instead. Also removed a stray trailing "|" that made
# the launch line a syntax error.
interface = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(label="Enter your message"),
    outputs="text",
    title="Text Generation Bot",
    description="Chat with the Mistral-7B-Instruct model to get responses to your queries."
)

# Launch the Gradio web server (blocks until shut down).
interface.launch()