# datak/app.py
from langchain.embeddings import OpenAIEmbeddings  # for creating embeddings
from langchain.vectorstores import Chroma  # vector store backing the retriever
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI  # the LLM we'll use (ChatGPT)
import gradio as gr
max_sources = 4
embedding = OpenAIEmbeddings()
vectordb = Chroma(persist_directory="/chroma", embedding_function=embedding)
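# The /chroma directory is assumed to hold a pre-built index. A minimal
# ingestion sketch (not part of this app; the file name and chunk sizes
# below are illustrative assumptions) might look like:
#
#   from langchain.document_loaders import PyPDFLoader
#   from langchain.text_splitter import RecursiveCharacterTextSplitter
#
#   docs = PyPDFLoader("fulgoroidea.pdf").load()  # hypothetical source PDF
#   splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
#   chunks = splitter.split_documents(docs)
#   Chroma.from_documents(chunks, embedding, persist_directory="/chroma").persist()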
pdf_qa = ConversationalRetrievalChain.from_llm(
    ChatOpenAI(temperature=0.9, model_name="gpt-3.5-turbo"),  # gpt-3.5-turbo is a chat model, so use ChatOpenAI
    vectordb.as_retriever(),
    return_source_documents=True,
)
def chat_pdf(query, chat_history=None):
    # ConversationalRetrievalChain expects chat_history as a list of
    # (human, ai) message tuples, so default to an empty list.
    result = pdf_qa({"question": query, "chat_history": chat_history or []})
    answer = result["answer"]
    source_docs = result["source_documents"]
    print("source_docs", len(source_docs))

    # Prefix each source document with its metadata, keeping at most
    # max_sources documents so the padding below can never go negative.
    cleaned_docs = []
    for doc in source_docs[:max_sources]:
        metadata_info = f"Metadata: {doc.metadata}\n"
        cleaned_docs.append(metadata_info + doc.page_content)

    # Pad the outputs to match the number of output components in the Gradio interface.
    return [answer] + cleaned_docs + [""] * (max_sources - len(cleaned_docs))
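# Example call (illustrative; the history pair below is an assumption, not real data):
#
#   outputs = chat_pdf(
#       "Give 2 species of fulgoroidea",
#       chat_history=[("What are fulgoroidea?", "Fulgoroidea are planthoppers...")],
#   )
#   # outputs[0] is the answer; outputs[1:] are up to max_sources source documents.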
def create_outputs(num_sources):
    # One textbox for the answer, plus one per source document; elem_classes
    # match the CSS selectors passed to gr.Interface below.
    outputs = [gr.Textbox(label="Answer", elem_classes=["answer"])]
    for i in range(1, num_sources + 1):
        outputs.append(gr.Textbox(label=f"Source Document {i}", elem_classes=["source_documents"]))
    return outputs
iface = gr.Interface(
    fn=chat_pdf,
    inputs=[gr.Textbox(label="Query")],
    outputs=create_outputs(max_sources),
    examples=[
        ["Give 2 species of fulgoroidea"],
        ["What colors are found among fulgoroidea?"],
        ["Why are fulgoroidea so cute?"],
        # Add more example queries if desired
    ],
    css=".answer, .source_documents {width: 45%; float: left; margin-right: 20px;}",
)
iface.launch(debug=True)