import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the Llama 3.2 3B Instruct model and tokenizer
model_name = "meta-llama/Llama-3.2-3B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Define chatbot function
def chat_with_llama(user_input):
    # Tokenize the prompt; passing the attention_mask along avoids generate() warnings
    inputs = tokenizer(user_input, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.7,
    )
    # Decode only the newly generated tokens, not the echoed prompt
    response = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[-1]:],
        skip_special_tokens=True,
    )
    return response

# Gradio interface
interface = gr.Interface(
    fn=chat_with_llama,
    inputs=gr.Textbox(lines=2, placeholder="Ask me anything!"),
    outputs=gr.Textbox(),
    title="Llama 3.2 3B Chatbot",
    description="A simple chatbot powered by Llama 3.2 3B.",
)

# Launch the app
interface.launch()
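
A possible refinement (a sketch, not part of the original app): Llama 3.2 3B Instruct is a chat-tuned model, so wrapping the user message with the tokenizer's chat template usually produces better replies than passing raw text. The variant below reuses the tokenizer and model loaded above; the function name and single-turn messages list are illustrative.

def chat_with_llama_templated(user_input):
    # Format the message the way the instruct model expects
    messages = [{"role": "user", "content": user_input}]
    prompt_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
    outputs = model.generate(prompt_ids, max_new_tokens=100, do_sample=True, temperature=0.7)
    # Return only the assistant's reply, skipping the prompt tokens
    return tokenizer.decode(outputs[0][prompt_ids.shape[-1]:], skip_special_tokens=True)

Running this on a Space also assumes a requirements.txt listing gradio, transformers, and torch, plus access to the gated meta-llama/Llama-3.2-3B-Instruct repo (for example, a Hugging Face token added as a Space secret).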