import os

import gradio as gr
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.llms import OpenAI
from langchain_community.vectorstores import FAISS
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Read the OpenAI API key from the environment (e.g. a Space secret) instead of
# hardcoding it in source.
openai_api_key = os.environ["OPENAI_API_KEY"]

# gpt-3.5-turbo-instruct is a completion model supported by the OpenAI LLM wrapper.
llm = OpenAI(openai_api_key=openai_api_key, model_name="gpt-3.5-turbo-instruct")
openai_embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
# Load the saved FAISS index from disk. allow_dangerous_deserialization is required
# because load_local unpickles data; only enable it for indexes you built yourself.
db = FAISS.load_local("faiss_index", openai_embeddings, allow_dangerous_deserialization=True)
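# Note: the "faiss_index" directory must already exist on disk. A minimal sketch of
# how such an index could be built (the source file name and chunk sizes here are
# illustrative assumptions, not part of this app):
#
#     from langchain_community.document_loaders import TextLoader
#     from langchain.text_splitter import RecursiveCharacterTextSplitter
#
#     raw_docs = TextLoader("knowledge_base.txt").load()  # hypothetical source file
#     chunks = RecursiveCharacterTextSplitter(
#         chunk_size=1000, chunk_overlap=100
#     ).split_documents(raw_docs)
#     FAISS.from_documents(chunks, openai_embeddings).save_local("faiss_index")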
parser = StrOutputParser()
# Prompt template that restricts the model to the retrieved context.
template = """
Answer the question based on the context below. If the context does not
contain the answer, reply with "I don't know".

Context: {context}

Question: {question}
"""
prompt = PromptTemplate.from_template(template)
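# PromptTemplate.from_template infers the input variables ({context} and
# {question}) from the braces in the template string, so for illustration a call like
#
#     prompt.format(context="FAISS is a similarity-search library.",
#                   question="What is FAISS?")
#
# returns the template text with both slots filled in.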
# Handle a question: retrieve relevant context from FAISS, then ask the LLM.
def generate_response(question):
    try:
        # Step 1: Embed the question with the same embedding model used to build the index.
        embedding_vector = openai_embeddings.embed_query(question)

        # Step 2: Retrieve the most similar documents from the FAISS vectorstore.
        docs = db.similarity_search_by_vector(embedding_vector)

        # Step 3: Join the retrieved page contents into a plain-text context block
        # (formatting the raw Document list would leak object reprs into the prompt).
        context = "\n\n".join(doc.page_content for doc in docs)
        formatted_prompt = prompt.format(context=context, question=question)

        # Step 4: Invoke the model and parse the completion into a plain string.
        response_from_model = llm.invoke(formatted_prompt)
        parsed_response = parser.parse(response_from_model)

        # Step 5: Return the parsed response.
        return parsed_response
    except Exception as e:
        print("Error in generate_response:", e)
        return "An error occurred. Please try again."
# Create the Gradio interface.
iface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=2, placeholder="Enter your question here..."),
    outputs="text",
    title="Question Answering App with Contextual Retrieval",
)
# Launch the app
iface.launch()
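# When running locally, a temporary public link can be created with
# iface.launch(share=True); on Hugging Face Spaces the plain launch() is sufficient.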