# app.py — minimal retrieval chatbot: a bi-encoder sentence transformer
# embeds the user message and a fixed pool of canned responses, and the
# most cosine-similar response is returned via a Gradio text interface.
import gradio as gr
from transformers import AutoTokenizer, AutoModel
import torch
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
# Load the model and tokenizer
# Hugging Face hub id of the NASA SMD sentence-transformer bi-encoder.
# Both objects are loaded once at import time and shared by all requests.
model_name = "nasa-impact/nasa-smd-ibm-st-v2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)
def encode_text(text):
    """Embed *text* by mean-pooling the model's last hidden state.

    Args:
        text: A string (or list of strings) to embed.

    Returns:
        A numpy array of shape (batch, hidden_size) — (1, hidden_size)
        for a single string.
    """
    inputs = tokenizer(text, return_tensors='pt', padding=True, truncation=True)
    # Inference only: no_grad() skips autograd-graph construction, saving
    # memory and time; it also makes the trailing .detach() unnecessary.
    with torch.no_grad():
        outputs = model(**inputs)
    # Mean over the sequence dimension gives one fixed-size sentence vector.
    return outputs.last_hidden_state.mean(dim=1).numpy()
def find_best_response(user_input, response_pool):
    """Return the response from *response_pool* most similar to *user_input*.

    Args:
        user_input: The user's message (string).
        response_pool: Non-empty sequence of candidate response strings.

    Returns:
        The candidate string with the highest cosine similarity to the
        user's embedded message.
    """
    user_embedding = encode_text(user_input)
    # Each encode_text() call returns shape (1, hidden). Wrapping the list
    # in np.array() would produce a 3-D (n, 1, hidden) array, which
    # cosine_similarity rejects; vstack yields the required (n, hidden).
    response_embeddings = np.vstack([encode_text(resp) for resp in response_pool])
    similarities = cosine_similarity(user_embedding, response_embeddings).flatten()
    best_response_index = int(np.argmax(similarities))
    return response_pool[best_response_index]
# Define some example responses for the chatbot to choose from
# The bot always answers with one of these fixed strings; selection is by
# embedding similarity, so the pool fully bounds the bot's vocabulary.
response_pool = [
    "Hello! How can I help you today?",
    "I'm here to assist you with any questions you have.",
    "What would you like to know more about?",
    "Can you please provide more details?",
    "I'm not sure about that. Could you clarify?"
]
def chatbot(user_input):
    """Gradio callback: pick the pooled response closest to *user_input*."""
    return find_best_response(user_input, response_pool)
# Create the Gradio interface
# Single-textbox UI: the chatbot() callback maps one input string to one
# output string.
iface = gr.Interface(
    fn=chatbot,
    inputs=gr.Textbox(lines=2, placeholder="Enter your message here..."),
    outputs="text",
    title="Bi-encoder Chatbot",
    description="A simple chatbot using a bi-encoder model to find the best response."
)
# Launch the interface
# NOTE(review): launch() blocks and serves on the default local port;
# runs at import time with no __main__ guard — intentional for a Spaces app.
iface.launch()