# NOTE(review): the lines below were non-code artifacts left over from a
# web-page scrape (status text, file size, commit hash, line-number gutter);
# commented out so the module parses.
# Spaces: Sleeping | File size: 3,996 Bytes | commit d101a5e
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import TextLoader
from langchain.document_loaders import PyPDFLoader
from langchain.vectorstores.faiss import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
import panel as pn
import os
import tempfile
from langchain.chains import RetrievalQA
# --- Module-level UI widgets (referenced by the callbacks below) ---

# Upload control for the source PDF; raw bytes land in file_input.value.
file_input = pn.widgets.FileInput(width=300)
# Masked field for the user's OpenAI API key (read in qa_result).
openaikey = pn.widgets.PasswordInput(
    value="", placeholder="Enter your OpenAI API Key here...", width=300
)
# Rich-text box where the user types the question.
prompt = pn.widgets.TextEditor(
    value="", placeholder="Enter your questions here...", height=160, toolbar=False
)
# Button that triggers qa_result via pn.bind (see below).
run_button = pn.widgets.Button(name="Run!")
# k: how many top-similarity chunks the retriever returns (passed to qa()).
select_k = pn.widgets.IntSlider(
    name="Number of relevant chunks", start=1, end=5, step=1, value=2
)
# LangChain combine-documents strategy (passed to qa() as chain_type).
select_chain_type = pn.widgets.RadioButtonGroup(
    name='Chain type',
    options=['stuff', 'map_reduce', "refine", "map_rerank"]
)
# Row grouping the prompt/run controls with the advanced-settings card.
widgets = pn.Row(
    pn.Column(prompt, run_button, margin=5),
    pn.Card(
        "Chain type:",
        pn.Column(select_chain_type, select_k),
        title="Advanced settings", margin=10
    ), width=600
)
def qa(file, query, chain_type, k):
    """Answer *query* from the PDF at path *file* with a RetrievalQA chain.

    Parameters
    ----------
    file : str
        Filesystem path to a PDF document.
    query : str
        The user's natural-language question.
    chain_type : str
        LangChain combine-documents strategy: 'stuff', 'map_reduce',
        'refine', or 'map_rerank'.
    k : int
        Number of top-similarity chunks to retrieve for context.

    Returns
    -------
    str
        The model's answer text.
    """
    # Load the PDF and split it into ~1000-character chunks (no overlap).
    documents = PyPDFLoader(file).load()
    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    chunks = splitter.split_documents(documents)
    # Embed the chunks and index them in an in-memory FAISS store.
    index = FAISS.from_documents(chunks, OpenAIEmbeddings())
    retriever = index.as_retriever(
        search_type="similarity", search_kwargs={"k": k})
    # Name the chain 'chain', not 'qa': the original local shadowed this
    # function's own name. Dead commented-out variants removed.
    chain = RetrievalQA.from_chain_type(
        ChatOpenAI(model='gpt-3.5-turbo'),
        chain_type=chain_type,
        retriever=retriever,
        return_source_documents=False,
    )
    return chain({"query": query})['result']
# Accumulated conversation rows (question/answer pn.Row pairs); persists
# across callbacks so the full history is re-rendered on every run.
convos = []  # store all panel objects in a list
def temfile_create(file_input):
    """Write uploaded PDF bytes to a named temporary file and return its path.

    Parameters
    ----------
    file_input : pn.widgets.FileInput
        Widget (or any object) whose ``.value`` attribute holds the raw
        bytes of the uploaded PDF.

    Returns
    -------
    str
        Path of the temporary ``.pdf`` file. It is created with
        ``delete=False``, so the *caller* is responsible for removing it
        when finished.
    """
    with tempfile.NamedTemporaryFile(suffix='.pdf', delete=False) as temp_file:
        # Exiting the with-block closes (and therefore flushes) the file;
        # the original's explicit flush()/seek(0) were redundant since the
        # handle is never read before close.
        temp_file.write(file_input.value)
        return temp_file.name
def qa_result(_):
    """Callback bound to run_button: run QA over the uploaded PDF.

    Reads the API key, uploaded file, and question from the module-level
    widgets, appends a question/answer pn.Row pair to ``convos``, and
    returns a Column rendering the whole conversation so far.

    Parameters
    ----------
    _ :
        The button event from pn.bind; unused.

    Returns
    -------
    pn.Column
        The accumulated conversation history.
    """
    # The OpenAI client libraries read the key from the environment.
    os.environ["OPENAI_API_KEY"] = openaikey.value
    if file_input.value is not None:
        pdf_file = temfile_create(file_input)
        try:
            prompt_text = prompt.value
            if prompt_text:
                result = qa(file=pdf_file, query=prompt_text,
                            chain_type=select_chain_type.value,
                            k=select_k.value)
                convos.extend([
                    pn.Row(
                        pn.panel("\U0001F60A", width=10),
                        prompt_text,
                        width=600
                    ),
                    pn.Row(
                        pn.panel("\U0001F916", width=10),
                        result,
                        width=600
                    )
                ])
        finally:
            # temfile_create uses delete=False; without this cleanup the
            # original leaked one temp PDF copy per query.
            os.remove(pdf_file)
    return pn.Column(*convos, margin=15, width=575, min_height=400)
# Bind the callback to the Run button; Panel re-renders the result each
# click and shows a loading spinner while qa_result runs.
qa_interactive = pn.panel(
    pn.bind(qa_result, run_button),
    loading_indicator=True,
)
# Scrollable box holding the conversation output.
output = pn.WidgetBox('*Output will show up here:*',
                      qa_interactive, width=630, scroll=True)
# layout
# try:
pn.Column(
    pn.pane.Markdown("""
## \U0001F60A! Question Answering with your PDF file
Step 1: Upload a PDF file \n
Step 2: Enter your OpenAI API key. This costs $$. You will need to set up billing info at [OpenAI](https://platform.openai.com/account). \n
Step 3: Type your question at the bottom and click "Run" \n
"""),
    pn.Row(file_input, openaikey),
    output,
    widgets
).servable()  # exposes the app when run with `panel serve`
# except Exception as ex:
# pass
# (stray '|' scrape artifact removed)