import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments
from datasets import load_dataset
# Load Dataset
dataset_repo = "tahiryaqoob/BISELahore"  # Replace with your dataset repository id
dataset = load_dataset(dataset_repo, split="train")
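# Sanity check (sketch): the preprocessing below assumes the dataset exposes
# 'question' and 'answer' text columns; printing one row confirms the schema.
print(dataset[0])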
# Load Pretrained Model and Tokenizer
model_name = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Assign Padding Token
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # Use EOS token as padding token
# Preprocessing Function: tokenize questions as inputs and answers as labels
def preprocess_data(example):
    inputs = tokenizer(example['question'], truncation=True, padding="max_length", max_length=128)
    outputs = tokenizer(example['answer'], truncation=True, padding="max_length", max_length=128)
    inputs['labels'] = outputs['input_ids']
    return inputs
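# Note (sketch, not applied here): with this setup the loss is also computed on
# padded label positions; a common refinement is to replace pad token ids in the
# labels with -100 so the loss ignores them, e.g. for a batched example:
#   inputs['labels'] = [[t if t != tokenizer.pad_token_id else -100 for t in seq]
#                       for seq in outputs['input_ids']]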
# Tokenize Dataset
tokenized_dataset = dataset.map(preprocess_data, batched=True)
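# Quick check (sketch): each tokenized row should now carry input_ids,
# attention_mask, and labels, each padded/truncated to 128 tokens.
print({k: len(v) for k, v in tokenized_dataset[0].items() if isinstance(v, list)})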
# Fine-Tune the Model
training_args = TrainingArguments(
    output_dir="./results",
    num_train_epochs=1,
    per_device_train_batch_size=2,
    save_steps=500,
    save_total_limit=2,
)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset,
)
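# Note: the leftover string columns ('question', 'answer') are dropped before
# batching because TrainingArguments' remove_unused_columns defaults to True.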
# Train the Model
trainer.train()
# Save the Fine-Tuned Model
model.save_pretrained("./bise_chatbot_model")
tokenizer.save_pretrained("./bise_chatbot_model")
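# The saved directory can later be reloaded like the base checkpoint (sketch):
#   model = AutoModelForCausalLM.from_pretrained("./bise_chatbot_model")
#   tokenizer = AutoTokenizer.from_pretrained("./bise_chatbot_model")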
# Define Chatbot Function
def chatbot_response(user_input):
    # Tokenize the user input and move it to the same device as the model
    inputs = tokenizer.encode(user_input, return_tensors="pt").to(model.device)
    outputs = model.generate(inputs, max_length=100, num_return_sequences=1,
                             do_sample=True, pad_token_id=tokenizer.eos_token_id)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
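# Quick local smoke test (sketch, hypothetical question) before launching the UI:
#   print(chatbot_response("How do I apply for rechecking of my matric result?"))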
# Create Gradio Interface
iface = gr.Interface(
    fn=chatbot_response,
    inputs="text",
    outputs="text",
    title="BISE Lahore Chatbot",
    description="Ask your questions about BISE Lahore services.",
)
iface.launch()