Update app.py
Browse files
app.py
CHANGED
@@ -13,12 +13,11 @@ from langchain.memory import ConversationBufferMemory
|
|
13 |
import streamlit as st
|
14 |
import os
|
15 |
|
16 |
-
|
17 |
-
|
18 |
-
def db(texts,text_splitter,name):
|
19 |
chunks = text_splitter.split_text(texts)
|
20 |
embeddings = OpenAIEmbeddings()
|
21 |
-
db = Chroma.from_texts(chunks, embeddings
|
22 |
retriever = db.as_retriever(search_type="similarity", search_kwargs={"k":2})
|
23 |
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
|
24 |
qa = ConversationalRetrievalChain.from_llm(llm=OpenAI(temperature=0), retriever=retriever, chain_type="refine",memory=memory)
|
@@ -32,11 +31,20 @@ def ai(prompt):
|
|
32 |
#prompt=system_prompt+str(": question is :")+prompt
|
33 |
result = qa({"question": prompt, "chat_history": chat_history})
|
34 |
return result["answer"]
|
35 |
-
|
|
|
|
|
36 |
def main():
|
37 |
global qa, chat_history
|
38 |
placeholder=st.empty()
|
39 |
placeholder.empty()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
40 |
placeholder.title("Upload or Chat PDF")
|
41 |
#st.header("PDF/URL QA")
|
42 |
#global system_prompt
|
@@ -47,8 +55,6 @@ def main():
|
|
47 |
pdf = st.file_uploader("Upload your PDF", type="pdf")
|
48 |
if pdf is not None:
|
49 |
#print(pdf)
|
50 |
-
name=st.text_input("Name of your pdf")
|
51 |
-
name=name.lower()
|
52 |
pdf_reader = PdfReader(pdf)
|
53 |
texts = ""
|
54 |
for page in pdf_reader.pages:
|
@@ -58,7 +64,7 @@ def main():
|
|
58 |
chunk_size = 1000,
|
59 |
chunk_overlap = 0
|
60 |
)
|
61 |
-
qa=db(texts,text_splitter,
|
62 |
chat_history = []
|
63 |
st.header("PDF/URL QA")
|
64 |
query = st.text_input("Ask a question in PDF")
|
@@ -70,14 +76,15 @@ def main():
|
|
70 |
elif page == 'Random talk':
|
71 |
chat_history=[]
|
72 |
st.header("Start Chatting")
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
|
|
81 |
|
82 |
|
83 |
if __name__ == "__main__":
|
|
|
13 |
import streamlit as st
|
14 |
import os
|
15 |
|
16 |
+
def db(texts,text_splitter,api):
|
17 |
+
|
|
|
18 |
chunks = text_splitter.split_text(texts)
|
19 |
embeddings = OpenAIEmbeddings()
|
20 |
+
db = Chroma.from_texts(chunks, embeddings)
|
21 |
retriever = db.as_retriever(search_type="similarity", search_kwargs={"k":2})
|
22 |
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
|
23 |
qa = ConversationalRetrievalChain.from_llm(llm=OpenAI(temperature=0), retriever=retriever, chain_type="refine",memory=memory)
|
|
|
31 |
#prompt=system_prompt+str(": question is :")+prompt
|
32 |
result = qa({"question": prompt, "chat_history": chat_history})
|
33 |
return result["answer"]
|
34 |
+
def set_environment_variable(api_key):
    """Publish *api_key* as the OPENAI_API_KEY environment variable.

    The rest of this app (LangChain / OpenAI calls) picks its credential
    up from the process environment, so this is the single place where
    the user-supplied key is installed.
    """
    os.environ.update({"OPENAI_API_KEY": api_key})
|
37 |
def main():
|
38 |
global qa, chat_history
|
39 |
placeholder=st.empty()
|
40 |
placeholder.empty()
|
41 |
+
placeholder.title("your openai api key")
|
42 |
+
global api
|
43 |
+
api=st.text_input("enter here")
|
44 |
+
if st.button("Load API Key"):
|
45 |
+
# Set the environment variable with the provided API key
|
46 |
+
set_environment_variable(api)
|
47 |
+
st.success("API key loaded successfully!")
|
48 |
placeholder.title("Upload or Chat PDF")
|
49 |
#st.header("PDF/URL QA")
|
50 |
#global system_prompt
|
|
|
55 |
pdf = st.file_uploader("Upload your PDF", type="pdf")
|
56 |
if pdf is not None:
|
57 |
#print(pdf)
|
|
|
|
|
58 |
pdf_reader = PdfReader(pdf)
|
59 |
texts = ""
|
60 |
for page in pdf_reader.pages:
|
|
|
64 |
chunk_size = 1000,
|
65 |
chunk_overlap = 0
|
66 |
)
|
67 |
+
qa=db(texts,text_splitter,api)
|
68 |
chat_history = []
|
69 |
st.header("PDF/URL QA")
|
70 |
query = st.text_input("Ask a question in PDF")
|
|
|
76 |
elif page == 'Random talk':
|
77 |
chat_history=[]
|
78 |
st.header("Start Chatting")
|
79 |
+
message=st.text_input("Your message")
|
80 |
+
if st.button('reply'):
|
81 |
+
prompt = "\"Act like a personal assistant. You can respond to questions, translate sentences, summarize news, and give recommendations. " + message + "\""
|
82 |
+
# Call the OpenAI Api to process our prompt
|
83 |
+
openai_response = openai.Completion.create(model="text-davinci-003", prompt=prompt,max_tokens=4000)
|
84 |
+
print("openai response:", openai_response)
|
85 |
+
# Parse the response to get the response text for our prompt
|
86 |
+
response_text = openai_response.choices[0].text
|
87 |
+
st.write( response_text)
|
88 |
|
89 |
|
90 |
if __name__ == "__main__":
|