Muhammad Adnan committed on
Commit
b7013d9
·
1 Parent(s): 6356586

Add application file

Files changed (2)
  1. app.py +170 -0
  2. requirements.txt +15 -0
app.py ADDED
@@ -0,0 +1,170 @@
+ import streamlit as st
+ from transformers import pipeline
+ import pdfplumber
+ import logging
+ import pandas as pd
+ import docx
+ import pickle
+ import os
+ from hashlib import sha256
+
+ # Set up logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ # Initialize QA pipeline with a pre-trained RoBERTa QA model
+ @st.cache_resource
+ def init_qa_model():
+     try:
+         logger.info("Initializing QA model...")
+         qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2")
+         logger.info("QA model loaded successfully.")
+         return qa_pipeline
+     except Exception as e:
+         logger.error(f"Error loading QA model: {e}")
+         st.error(f"Error loading the QA model: {e}")
+         return None
+
+ # Function to extract text from PDF
+ def extract_text_from_pdf(pdf_file):
+     try:
+         with pdfplumber.open(pdf_file) as pdf:
+             text = ''
+             for page in pdf.pages:
+                 page_text = page.extract_text()
+                 if page_text:
+                     text += page_text
+             return text or "No text found in the PDF."
+     except Exception as e:
+         logger.error(f"Error extracting text from PDF: {e}")
+         return "Error extracting text from PDF."
+
+ # Function to extract text from TXT files
+ def extract_text_from_txt(txt_file):
+     try:
+         return txt_file.getvalue().decode("utf-8") or "No text found in the TXT file."
+     except Exception as e:
+         logger.error(f"Error extracting text from TXT file: {e}")
+         return "Error extracting text from TXT file."
+
+ # Function to extract text from CSV files
+ def extract_text_from_csv(csv_file):
+     try:
+         df = pd.read_csv(csv_file)
+         return df.to_string(index=False) or "No text found in the CSV file."
+     except Exception as e:
+         logger.error(f"Error extracting text from CSV file: {e}")
+         return "Error extracting text from CSV file."
+
+ # Function to extract text from DOCX files
+ def extract_text_from_docx(docx_file):
+     try:
+         doc = docx.Document(docx_file)
+         return "\n".join([para.text for para in doc.paragraphs]) or "No text found in the DOCX file."
+     except Exception as e:
+         logger.error(f"Error extracting text from DOCX file: {e}")
+         return "Error extracting text from DOCX file."
+
+ # Function to create a unique cache key for the document
+ def generate_cache_key(text):
+     return sha256(text.encode('utf-8')).hexdigest()
+
+ # Function to cache embeddings
+ def cache_embeddings(embeddings, cache_key):
+     try:
+         cache_path = f"embeddings_cache/{cache_key}.pkl"
+         os.makedirs("embeddings_cache", exist_ok=True)  # same directory used by cache_path
+         with open(cache_path, 'wb') as f:
+             pickle.dump(embeddings, f)
+         logger.info(f"Embeddings cached successfully with key {cache_key}")
+     except Exception as e:
+         logger.error(f"Error caching embeddings: {e}")
+
+ # Function to load cached embeddings
+ def load_cached_embeddings(cache_key):
+     try:
+         cache_path = f"embeddings_cache/{cache_key}.pkl"
+         if os.path.exists(cache_path):
+             with open(cache_path, 'rb') as f:
+                 embeddings = pickle.load(f)
+             logger.info(f"Embeddings loaded from cache with key {cache_key}")
+             return embeddings
+         return None
+     except Exception as e:
+         logger.error(f"Error loading cached embeddings: {e}")
+         return None
+
+ # Main function for the app
+ def main():
+     st.title("Adnan AI Labs QA System")
+     st.markdown("Upload documents (PDF, TXT, CSV, or DOCX) or add context manually, and ask questions.")
+
+     uploaded_files = st.file_uploader("Upload Documents", type=["pdf", "txt", "csv", "docx"], accept_multiple_files=True)
+     extracted_text_box = st.text_area("Manually add extra context for answering questions", height=200)
+
+     # Initialize QA model
+     qa_pipeline = init_qa_model()
+     document_texts = []
+
+     # Extract text from each uploaded file
+     if uploaded_files:
+         for uploaded_file in uploaded_files:
+             if uploaded_file.type == "application/pdf":
+                 document_texts.append(extract_text_from_pdf(uploaded_file))
+             elif uploaded_file.type == "text/plain":
+                 document_texts.append(extract_text_from_txt(uploaded_file))
+             elif uploaded_file.type in ["application/vnd.ms-excel", "text/csv"]:
+                 document_texts.append(extract_text_from_csv(uploaded_file))
+             elif uploaded_file.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
+                 document_texts.append(extract_text_from_docx(uploaded_file))
+
+     # Combine all extracted texts and manual context
+     combined_context = "\n".join(document_texts) + "\n" + extracted_text_box
+
+     # Check if any content is available to answer questions
+     user_question = st.text_input("Ask a question:")
+     if user_question and combined_context.strip():
+         if st.button("Get Answer"):
+             if qa_pipeline is None:
+                 st.error("The QA model is not available, so questions cannot be answered right now.")
+                 return
+             with st.spinner('Processing your question...'):
+                 # Generate a unique cache key for the combined context
+                 cache_key = generate_cache_key(combined_context)
+
+                 # Check for cached embeddings
+                 cached_embeddings = load_cached_embeddings(cache_key)
+                 if cached_embeddings is None:
+                     # Embedding generation is not wired in yet; once an embedding
+                     # model is added, encode the context here and cache the result.
+                     logger.info("Generating new embeddings...")
+                     # embeddings = model.encode(combined_context)
+                     # cache_embeddings(embeddings, cache_key)
+
+                 # Use the QA pipeline to answer the question
+                 answer = qa_pipeline(question=user_question, context=combined_context)
+                 if answer['answer']:
+                     st.write("Answer:", answer['answer'])
+                 else:
+                     st.warning("No suitable answer found. Please rephrase your question.")
+     else:
+         if not user_question:
+             st.info("Please enter a question to get an answer.")
+         elif not combined_context.strip():
+             st.info("Please upload a document or add context manually.")
+
+     # Display Buy Me a Coffee button
+     st.markdown("""
+         <div style="text-align: center;">
+             <p>If you find this project useful, consider buying me a coffee to support further development! ☕️</p>
+             <a href="https://buymeacoffee.com/adnanailabs">
+                 <img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me a Coffee" style="height: 50px;">
+             </a>
+         </div>
+     """, unsafe_allow_html=True)
+
+ if __name__ == "__main__":
+     try:
+         main()
+     except Exception as e:
+         logger.critical(f"Critical error: {e}")
+         st.error(f"A critical error occurred: {e}")
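For reference, the transformers question-answering pipeline used above returns a dict with answer, score, start, and end keys. With deepset/roberta-base-squad2 the returned answer string is usually non-empty even when the context does not really contain the answer (the pipeline picks the best span by default), so the truthiness check on answer['answer'] rarely reaches the warning branch; filtering on the score is one way to hedge. A minimal sketch, assuming an arbitrary 0.3 cutoff that is not part of this commit:

    from transformers import pipeline

    qa = pipeline("question-answering", model="deepset/roberta-base-squad2")
    result = qa(
        question="Who wrote the report?",
        context="The report was written by Adnan in 2024.",
    )
    # Example result shape (score will vary): {'score': 0.9, 'start': 26, 'end': 31, 'answer': 'Adnan'}
    if result["score"] >= 0.3:  # arbitrary confidence threshold, tune as needed
        print("Answer:", result["answer"])
    else:
        print("No confident answer found.")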
requirements.txt ADDED
@@ -0,0 +1,15 @@
+ huggingface-hub==0.26.2
+ sentence-transformers==3.2.1
+ torch==2.5.1
+ transformers==4.46.2
+ streamlit==1.40.0
+ scikit-learn==1.5.2
+ spacy==3.8.2
+ requests==2.32.3
+ numpy==2.0.2
+ pandas==2.2.3
+ pydantic==2.9.2
+ beautifulsoup4==4.12.3
+
+ # spaCy language model
+ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.8.0/en_core_web_sm-3.8.0.tar.gz
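A note on the pins above: app.py imports pdfplumber and docx (the python-docx package), neither of which is listed in this requirements.txt, so they would need to be installed separately for the app to start. Conversely, sentence-transformers is pinned but not yet used; the commented-out embeddings = model.encode(...) line in app.py suggests it is meant to feed the pickle-based caching helpers. A minimal sketch of that wiring, assuming the helpers from app.py and an arbitrary model choice (all-MiniLM-L6-v2) that is not part of this commit:

    from sentence_transformers import SentenceTransformer

    from app import cache_embeddings, generate_cache_key, load_cached_embeddings

    embedder = SentenceTransformer("all-MiniLM-L6-v2")  # hypothetical model choice

    def get_or_build_embeddings(combined_context: str):
        # Hash the combined context, reuse a cached embedding if one exists,
        # otherwise encode the text and store it for the next question.
        cache_key = generate_cache_key(combined_context)
        embeddings = load_cached_embeddings(cache_key)
        if embeddings is None:
            embeddings = embedder.encode(combined_context)
            cache_embeddings(embeddings, cache_key)
        return embeddings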