Hugging Face Space: "Niti" chatbot (Space status at capture time: Sleeping).
import functools

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load pre-trained model and tokenizer
@functools.lru_cache(maxsize=1)
def load_model(model_name):
    """Load and cache the tokenizer and model for *model_name*.

    The result is memoized with ``lru_cache`` so repeated calls (e.g. one
    per chat message) reuse the already-loaded weights instead of
    re-instantiating the model from disk/network every time.

    Args:
        model_name: Hugging Face Hub model identifier.

    Returns:
        A ``(tokenizer, model, device)`` tuple, with the model moved to
        CUDA when available, else CPU.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)
    return tokenizer, model, device
# Function to generate chat responses
def chat_with_niti(message, history):
    """Generate one chat reply for the Gradio ChatInterface.

    Args:
        message: The user's latest message (str).
        history: Prior conversation turns supplied by Gradio (currently
            unused — each reply is generated from *message* alone).

    Returns:
        The decoded model output as a plain string.
    """
    # NOTE(review): facebook/mbart-large-50 is a seq2seq (encoder-decoder)
    # model; loading it via AutoModelForCausalLM is likely incorrect —
    # confirm the intended model/auto-class.
    tokenizer, model, device = load_model("facebook/mbart-large-50")
    input_ids = tokenizer.encode(message, return_tensors="pt").to(device)
    output = model.generate(
        input_ids,
        max_length=100,
        do_sample=True,  # without sampling, `temperature` is ignored by generate()
        temperature=0.7,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response
# Create Gradio chat interface
demo = gr.ChatInterface(
    fn=chat_with_niti,
    title="Niti - Your AI Chatbot",
    description="Ask Niti anything in Hindi, Hinglish, or English!"
)

# Launch the interface only when run as a script; importing this module
# (e.g. from tests) should not start a web server. Hugging Face Spaces
# executes the file directly, so deployed behavior is unchanged.
if __name__ == "__main__":
    demo.launch()