shukdevdatta123 committed on
Commit
c1432d5
·
verified ·
1 Parent(s): 3a7df09

Upload 3 files

Browse files
Files changed (3) hide show
  1. .env +1 -0
  2. app4.py +165 -0
  3. conversations.json +1 -0
.env ADDED
@@ -0,0 +1 @@
 
 
1
+ OPENAI_API_KEY=your-openai-api-key-here  # SECURITY: a live OpenAI secret key was committed on this line — revoke it immediately and never commit .env files; load secrets from an untracked file or a secrets manager
app4.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from llama_index.core import VectorStoreIndex, Document
3
+ from llama_index.llms.openai import OpenAI
4
+ from llama_index.core import Settings
5
+ import os
6
+ import pdfplumber
7
+ from docx import Document as DocxDocument
8
+ from dotenv import load_dotenv
9
+ import json
10
+
11
# Load environment variables from a local .env file (may provide a default
# OPENAI_API_KEY for development).
load_dotenv()

st.header("Chat with the Streamlit docs πŸ’¬ πŸ“š")

# Sidebar for OpenAI API Key
if 'openai_api_key' not in st.session_state:
    st.session_state.openai_api_key = ""

# Input for OpenAI API Key
st.session_state.openai_api_key = st.sidebar.text_input("Enter your OpenAI API Key:",
                                                        type="password",
                                                        value=st.session_state.openai_api_key)

# Fix: the key typed in the sidebar was collected but never used anywhere —
# the llama_index OpenAI client only reads the OPENAI_API_KEY environment
# variable. Export the user-supplied key so it actually takes effect
# (overriding any .env default).
if st.session_state.openai_api_key:
    os.environ["OPENAI_API_KEY"] = st.session_state.openai_api_key

# Initialize session state for messages
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "assistant", "content": "Ask me a question about the documents you uploaded!"}
    ]
30
+
31
# Function to read PDF files
def read_pdf(file):
    """Extract plain text from a PDF file-like object.

    Returns the concatenated text of all pages, one newline appended per page.

    Fix: pdfplumber's Page.extract_text() returns None for pages with no
    extractable text layer (e.g. scanned images); the original code then
    raised TypeError on `None + '\\n'`. Coerce None to an empty string.
    """
    with pdfplumber.open(file) as pdf:
        text = ''
        for page in pdf.pages:
            text += (page.extract_text() or '') + '\n'
        return text
38
+
39
# Function to read DOCX files
def read_docx(file):
    """Return the full text of a .docx file, one newline-terminated line per paragraph."""
    document = DocxDocument(file)
    paragraph_lines = [paragraph.text + '\n' for paragraph in document.paragraphs]
    return ''.join(paragraph_lines)
46
+
47
@st.cache_resource(show_spinner=False)
def load_data(uploaded_files):
    """Build a VectorStoreIndex over the uploaded PDF/DOCX files.

    Cached with st.cache_resource so the expensive extraction + embedding
    step runs once per distinct set of uploads. Files whose MIME type is
    neither PDF nor DOCX are silently skipped.
    """
    with st.spinner("Loading and indexing the documents – hang tight! This should take 1-2 minutes."):
        docs = []
        for uploaded_file in uploaded_files:
            if uploaded_file.type == "application/pdf":
                docs.append(Document(text=read_pdf(uploaded_file)))
            elif uploaded_file.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
                docs.append(Document(text=read_docx(uploaded_file)))

        # Configure the LLM globally; llama_index components read Settings.llm.
        Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.5,
                              system_prompt="You are an expert on the Streamlit Python library and your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. Keep your answers technical and based on facts – do not hallucinate features.")

        # Fix: from_documents() has no `settings=` parameter — the original
        # passed `settings=Settings.llm`, which is not part of the documented
        # signature. The index picks the LLM up from the global Settings set
        # above.
        return VectorStoreIndex.from_documents(docs)
64
+
65
# Function to save the conversation
def save_conversation():
    """Append the current chat transcript to conversations.json (one JSON list per line)."""
    with open("conversations.json", "a") as f:
        f.write(json.dumps(st.session_state.messages))
        f.write("\n")
70
+
71
# Function to load previous conversations
def load_conversations():
    """Load all saved conversations from conversations.json.

    Returns a list of conversations, each itself a list of message dicts;
    returns [] when the file does not exist yet.

    Fix: skip blank lines — a stray empty line in the append-only log made
    json.loads raise JSONDecodeError and lose access to every conversation.
    """
    if not os.path.exists("conversations.json"):
        return []
    with open("conversations.json", "r") as f:
        return [json.loads(line) for line in f if line.strip()]
78
+
79
# Function to delete selected conversations
def delete_selected_conversations(selected_indices):
    """Rewrite conversations.json, dropping the conversations at the given positions.

    selected_indices: iterable of 0-based indices into load_conversations().
    """
    # Set membership makes the filter O(1) per conversation instead of O(k)
    # list scans, and also tolerates duplicate indices.
    selected = set(selected_indices)
    conversations = load_conversations()
    remaining = [conv for i, conv in enumerate(conversations) if i not in selected]
    with open("conversations.json", "w") as f:
        for conv in remaining:
            json.dump(conv, f)
            f.write("\n")
87
+
88
# File uploader for multiple PDF and DOCX files
uploaded_files = st.file_uploader("Upload PDF or DOCX files", type=["pdf", "docx"], accept_multiple_files=True)

if uploaded_files and st.session_state.openai_api_key:
    index = load_data(uploaded_files)
    chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)

    # User input for questions. The walrus assigns `prompt` (None when no new
    # input was submitted this rerun) and records the user turn immediately.
    if prompt := st.chat_input("Your question"):
        st.session_state.messages.append({"role": "user", "content": prompt})

    # Replay the full history so it survives Streamlit reruns.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.write(message["content"])

    # Generate a reply only when a fresh user prompt was entered this rerun.
    # Fix: the original branch keyed only on the last message's role, so a
    # rerun triggered by e.g. a button click while the last message was a
    # user turn would call chat_engine.chat(None).
    if prompt and st.session_state.messages and st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                response = chat_engine.chat(prompt)
                st.write(response.response)
                st.session_state.messages.append({"role": "assistant", "content": response.response})

    # Saving is a two-step confirm: the first click arms `confirm_save`, the
    # Yes/No buttons on the next rerun complete or cancel it.
    if st.button("Save Conversation"):
        if st.session_state.messages:
            st.session_state.confirm_save = True

    if st.session_state.get('confirm_save', False):
        st.warning("Do you want to save the conversation?")
        col1, col2 = st.columns(2)
        with col1:
            if st.button("Yes"):
                save_conversation()
                st.success("Conversation saved!")
                st.session_state.confirm_save = False
        with col2:
            if st.button("No"):
                st.session_state.confirm_save = False

    if st.button("End Conversation"):
        st.session_state.messages = []
        st.success("Conversation ended. You can start a new one!")

else:
    st.sidebar.warning("Please enter your OpenAI API key and upload PDF or DOCX files to proceed.")
133
+
134
# Sidebar to toggle visibility of previous conversations
if 'show_conversations' not in st.session_state:
    st.session_state.show_conversations = False

if st.sidebar.button("Toggle Previous Conversations"):
    st.session_state.show_conversations = not st.session_state.show_conversations

# Show previous conversations if the toggle is enabled
if st.session_state.show_conversations:
    st.sidebar.subheader("Previous Conversations")
    conversations = load_conversations()

    if not conversations:
        st.sidebar.write("No previous conversations found.")
    else:
        # Render each saved conversation and collect the 0-based positions
        # the user ticked for deletion.
        selected_indices = []
        for pos, conversation in enumerate(conversations):
            st.sidebar.write(f"Conversation {pos + 1}:")
            for message in conversation:
                st.sidebar.write(f"{message['role']}: {message['content']}")
            # Checkbox for selecting conversation to delete
            if st.sidebar.checkbox(f"Select Conversation {pos + 1} for Deletion", key=f"delete_checkbox_{pos}"):
                selected_indices.append(pos)

        if st.sidebar.button("Delete Selected Conversations"):
            if selected_indices:
                delete_selected_conversations(selected_indices)
                st.success("Selected conversations deleted. Please Refresh to See the Effect!")
                st.session_state.messages = []  # Optional: reset messages for a fresh start
else:
    st.sidebar.write("Previous conversations are hidden. Click 'Toggle Previous Conversations' to show.")
conversations.json ADDED
@@ -0,0 +1 @@
 
 
1
+ [{"role": "assistant", "content": "Ask me a question about Streamlit's open-source Python library!"}, {"role": "user", "content": "Pubface: Celebrity face identification based on deep learning\n\nexplain what this paper is about"}, {"role": "assistant", "content": "The paper \"Pubface: Celebrity face identification based on deep learning\" describes a real-time application called PubFace that focuses on recognizing celebrities in public spaces. The application employs a new pose invariant face recognition deep neural network algorithm with a very low error rate. To achieve this, the authors built a novel dataset with over five million labeled faces, fine-tuned the deep convolutional neural network (CNN) VGG-16 architecture on this dataset, and deployed the model on a Raspberry Pi 3 using the OpenCV dnn module. The paper discusses the importance of facial recognition technology, the use of deep learning techniques for face recognition systems, and the challenges associated with collecting large face datasets for training purposes."}]