acecalisto3 committed 686de7f (verified) · Parent(s): 4d93cec

Update app.py

Files changed (1): app.py (+282 / -106)
app.py CHANGED
@@ -1,183 +1,359 @@
--- app.py (before)

  import gradio as gr
  import logging
  import time
  import random
  import nltk
- import numpy as np
- from typing import List
- from datasets import load_dataset
  from transformers import AutoTokenizer, AutoModelForCausalLM
  from sentence_transformers import SentenceTransformer
  from sklearn.metrics.pairwise import cosine_similarity

  # Configure logging
  logging.basicConfig(
-     level=logging.INFO,
-     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
  )
- logger = logging.getLogger(__name__)

- # Download NLTK data
- try:
-     nltk.download('punkt', quiet=True)
- except Exception as e:
-     logger.warning(f"Failed to download NLTK data: {e}")

  class Config:
      MODEL_NAME = "microsoft/DialoGPT-medium"
      EMBEDDING_MODEL = "sentence-transformers/all-MiniLM-L6-v2"
-     MAX_TOKENS = 1000
-     SIMILARITY_THRESHOLD = 0.5
-     CHUNK_SIZE = 512

  class ResourceItem:
-     def __init__(self, content: str):
          self.content = content
-         self.embedding = None
          self.chunks = []
          self.chunk_embeddings = []

      def create_chunks(self, chunk_size=Config.CHUNK_SIZE):
-         """Split content into overlapping chunks for better context preservation"""
-         words = self.content.split()
-         overlap = chunk_size // 4  # 25% overlap
-
-         for i in range(0, len(words), chunk_size - overlap):
-             chunk = ' '.join(words[i:i + chunk_size])
-             if chunk:
-                 self.chunks.append(chunk)

  class SchoolChatbot:
      def __init__(self):
          logger.info("Initializing SchoolChatbot...")
          self.setup_models()
-         self.resources = []
-         self.load_and_index_resources()

      def setup_models(self):
          try:
              logger.info("Setting up models...")
              self.tokenizer = AutoTokenizer.from_pretrained(Config.MODEL_NAME)
              self.model = AutoModelForCausalLM.from_pretrained(Config.MODEL_NAME)
              self.embedding_model = SentenceTransformer(Config.EMBEDDING_MODEL)
              logger.info("Models setup completed successfully.")
          except Exception as e:
              logger.error(f"Failed to setup models: {e}")
-             raise RuntimeError("Failed to initialize required models")
-
-     def load_and_index_resources(self):
-         logger.info("Loading dataset and indexing resources...")
-         dataset = load_dataset("acecalisto3/sspnc")
-
-         for item in dataset['train']:
-             content = item['text']  # Adjust this based on the actual structure of your dataset
-             resource_item = ResourceItem(content)
-             resource_item.create_chunks()
-             resource_item.chunk_embeddings = [
-                 self.embedding_model.encode(chunk)
-                 for chunk in resource_item.chunks
-             ]
-             if resource_item.chunk_embeddings:
-                 resource_item.embedding = np.mean(resource_item.chunk_embeddings, axis=0)
-                 self.resources.append(resource_item)
-
-         logger.info(f"Loaded and indexed {len(self.resources)} resources from the dataset.")
-
-     def find_best_matching_chunks(self, query, n_chunks=3):
          if not self.resources:
              return []
-
          try:
              query_embedding = self.embedding_model.encode(query)
-             all_chunks = []
-
              for resource in self.resources:
-                 for chunk, embedding in zip(resource.chunks, resource.chunk_embeddings):
-                     score = cosine_similarity([query_embedding], [embedding])[0][0]
                      if score > Config.SIMILARITY_THRESHOLD:
-                         all_chunks.append((chunk, score))
-
-             # Sort by similarity score and get top n chunks
-             all_chunks.sort(key=lambda x: x[1], reverse=True)
-             return all_chunks[:n_chunks]
-
          except Exception as e:
              logger.error(f"Error finding matching chunks: {e}")
              return []

-     def generate_response(self, user_input):
          try:
-             # Find best matching chunks
-             best_chunks = self.find_best_matching_chunks(user_input)
-
              if not best_chunks:
-                 return "I apologize, but I couldn't find any relevant information in my knowledge base. Could you please rephrase your question or ask about something else?"
-
-             # Prepare context from best matching chunks
              context = "\n".join([chunk[0] for chunk in best_chunks])
-
-             # Prepare conversation history
-             conversation = f"Context: {context}\n:User {user_input}\nAssistant:"
-
-             # Generate response
-             input_ids = self.tokenizer.encode(conversation, return_tensors='pt')
-             response_ids = self.model.generate(
-                 input_ids,
-                 max_length=Config.MAX_TOKENS,
-                 pad_token_id=self.tokenizer.eos_token_id,
                  temperature=0.7,
                  top_p=0.9,
-                 do_sample=True
              )
-
-             response = self.tokenizer.decode(
-                 response_ids[:, input_ids.shape[-1]:][0],
-                 skip_special_tokens=True
-             )
-
-             return response
-
          except Exception as e:
-             logger.error(f"Error generating response: {e}")
-             return "I apologize, but I encountered an error while processing your question. Please try again."

- def create_gradio_interface(chatbot):
-     def respond(user_input):
          return chatbot.generate_response(user_input)
-
      interface = gr.Interface(
          fn=respond,
          inputs=gr.Textbox(
              label="Ask a Question",
-             placeholder="Type your question here...",
-             lines=2
          ),
          outputs=gr.Textbox(
              label="Answer",
              placeholder="Response will appear here...",
-             lines=5
          ),
-         title="School Information Chatbot",
-         description="Ask about school events, policies, or other information. The chatbot will provide answers based on available school documents and resources.",
-         examples=[
-             ["What events are happening this week?"],
-             ["When is the next board meeting?"],
-             ["What is the school's attendance policy?"]
          ],
          theme=gr.themes.Soft(),
-         flagging_mode="never"
      )
      return interface

  if __name__ == "__main__":
      try:
-         chatbot = SchoolChatbot()
-         interface = create_gradio_interface(chatbot)
-         interface.launch(
-             server_name="0.0.0.0",
              server_port=7860,
-             share=False,
-             debug=True
          )
      except Exception as e:
-         logger.error(f"Failed to start application: {e}")
+++ app.py (after)

  import gradio as gr
  import logging
  import time
+ from datetime import datetime
+ from typing import List, Optional, Tuple
  import random
  import nltk
+ # nltk.download('punkt')  # Ensure punkt is downloaded if needed
+ from nltk.tokenize import sent_tokenize
+ import io
+ # from joblib import dump, load  # Not used currently, commented out
+
+ # Import Hugging Face libraries
  from transformers import AutoTokenizer, AutoModelForCausalLM
  from sentence_transformers import SentenceTransformer
+ from datasets import load_dataset  # Added for dataset loading
+
+ # Import ML/Data libraries
  from sklearn.metrics.pairwise import cosine_similarity
+ import numpy as np
+
+ # Standard libraries
+ from concurrent.futures import ThreadPoolExecutor  # Still useful for embedding generation

  # Configure logging
  logging.basicConfig(
+     level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
  )
+ logger = logging.getLogger(__name__)  # Use __name__ for logger

+ # Download NLTK data (optional, might not be strictly needed depending on chunking)
+ # try:
+ #     nltk.download('punkt', quiet=True)
+ # except Exception as e:
+ #     logger.warning(f"Failed to download NLTK data: {e}")

+ # --- Configuration ---
  class Config:
      MODEL_NAME = "microsoft/DialoGPT-medium"
      EMBEDDING_MODEL = "sentence-transformers/all-MiniLM-L6-v2"
+     MAX_TOKENS_RESPONSE = 150  # Max tokens for the generated response part
+     MAX_TOKENS_INPUT = 800  # Max tokens allowed for context + query (adjust based on model limits)
+     SIMILARITY_THRESHOLD = 0.3  # Adjusted threshold, tune as needed
+     CHUNK_SIZE = 300  # Smaller chunk size might be better for dataset entries
+     MAX_WORKERS = 5  # For parallel embedding generation
+     DATASET_NAME = "acecalisto3/sspnc"  # Hugging Face Dataset ID
+     DATASET_SPLIT = "train"  # Which split of the dataset to use
+     TEXT_COLUMNS = ["Subject", "Body"]  # Columns containing text to index
+     SOURCE_INFO_COLUMNS = ["Subject", "Date"]  # Columns to use for source attribution

+ # --- Data Structures ---
  class ResourceItem:
+     def __init__(self, source_id: str, content: str, resource_type: str):
+         self.source_id = source_id  # Changed 'url' to 'source_id' for clarity
          self.content = content
+         self.type = resource_type
+         self.embedding = None  # Overall embedding (optional now, as we use chunk embeddings)
          self.chunks = []
          self.chunk_embeddings = []

+     def __str__(self):
+         return f"ResourceItem(type={self.type}, source_id={self.source_id}, content_length={len(self.content)})"
+
      def create_chunks(self, chunk_size=Config.CHUNK_SIZE):
+         """Split content into overlapping chunks using sentence tokenization for better boundaries"""
+         if not self.content:
+             logger.warning(f"Content is empty for source_id: {self.source_id}. Skipping chunk creation.")
+             return
+
+         try:
+             sentences = sent_tokenize(self.content)
+         except LookupError:
+             logger.warning("NLTK 'punkt' tokenizer not found. Falling back to simple whitespace splitting. Consider running nltk.download('punkt')")
+             # Fallback to word splitting if sentence tokenization fails
+             words = self.content.split()
+             overlap = chunk_size // 4
+             for i in range(0, len(words), chunk_size - overlap):
+                 chunk = ' '.join(words[i : i + chunk_size])
+                 if chunk:
+                     self.chunks.append(chunk)
+             return
+         except Exception as e:
+             logger.error(f"Error during sentence tokenization for {self.source_id}: {e}. Skipping chunk creation.")
+             return
+
+         current_chunk = ""
+         overlap_sentences = max(1, chunk_size // 100)  # Overlap a few sentences
+         last_sentences = []
+
+         for sentence in sentences:
+             # If adding the next sentence exceeds chunk size (considering words approx)
+             if len((current_chunk + " " + sentence).split()) > chunk_size:
+                 if current_chunk:  # Add the completed chunk
+                     self.chunks.append(current_chunk.strip())
+                 # Start new chunk with overlap
+                 current_chunk = " ".join(last_sentences) + " " + sentence
+             else:
+                 current_chunk += " " + sentence
+
+             # Keep track of last sentences for overlap
+             last_sentences.append(sentence)
+             if len(last_sentences) > overlap_sentences:
+                 last_sentences.pop(0)
+
+         # Add the last remaining chunk
+         if current_chunk.strip():
+             self.chunks.append(current_chunk.strip())
+
+         if not self.chunks:
+             logger.warning(f"No chunks created for source_id: {self.source_id}. Content might be too short or tokenization failed.")

+ # --- Chatbot Core Logic ---
  class SchoolChatbot:
      def __init__(self):
          logger.info("Initializing SchoolChatbot...")
          self.setup_models()
+         self.resources: List[ResourceItem] = []
+         self.load_and_index_dataset()  # Replaces load_and_index_resources

      def setup_models(self):
          try:
              logger.info("Setting up models...")
+             # Consider adding device mapping if GPU is available: device_map="auto"
              self.tokenizer = AutoTokenizer.from_pretrained(Config.MODEL_NAME)
              self.model = AutoModelForCausalLM.from_pretrained(Config.MODEL_NAME)
              self.embedding_model = SentenceTransformer(Config.EMBEDDING_MODEL)
+             # Ensure tokenizer has a padding token
+             if self.tokenizer.pad_token is None:
+                 self.tokenizer.pad_token = self.tokenizer.eos_token
+                 self.model.config.pad_token_id = self.model.config.eos_token_id
              logger.info("Models setup completed successfully.")
          except Exception as e:
              logger.error(f"Failed to setup models: {e}")
+             raise RuntimeError("Failed to initialize required models") from e
+
+     def load_and_index_dataset(self):
+         logger.info(f"Loading dataset: {Config.DATASET_NAME}, split: {Config.DATASET_SPLIT}")
+         try:
+             # Load the dataset
+             dataset = load_dataset(Config.DATASET_NAME, split=Config.DATASET_SPLIT)
+             logger.info(f"Dataset loaded successfully. Number of rows: {len(dataset)}")
+
+             # Process dataset rows in parallel (for embedding generation)
+             with ThreadPoolExecutor(max_workers=Config.MAX_WORKERS) as executor:
+                 futures = []
+                 for i, row in enumerate(dataset):
+                     # Combine text from specified columns
+                     text_content = " ".join([str(row[col]) for col in Config.TEXT_COLUMNS if row.get(col)])
+                     text_content = text_content.strip()  # Remove leading/trailing whitespace
+
+                     # Create a source identifier
+                     source_parts = [f"{col}: {row[col]}" for col in Config.SOURCE_INFO_COLUMNS if row.get(col)]
+                     source_id = f"Dataset Entry {i} ({'; '.join(source_parts)})"  # More informative ID
+
+                     if not text_content:
+                         logger.warning(f"Row {i} has no content in specified columns. Skipping.")
+                         continue
+
+                     # Submit the processing task
+                     futures.append(executor.submit(self.process_and_store_resource, source_id, text_content, 'dataset_entry'))
+
+                 # Wait for all futures to complete and collect results
+                 for future in futures:
+                     try:
+                         result_item = future.result()
+                         if result_item:
+                             self.resources.append(result_item)
+                     except Exception as e:
+                         logger.error(f"Error processing dataset entry in thread: {e}")
+
+             logger.info(f"Dataset processing completed. Indexed {len(self.resources)} resources.")
+
+         except Exception as e:
+             logger.error(f"Failed to load or process dataset {Config.DATASET_NAME}: {e}")
+             # Decide if the app should continue without data or raise an error
+             # raise RuntimeError("Failed to load data") from e  # Option: halt if data fails
+
+     def process_and_store_resource(self, source_id: str, text_data: str, resource_type: str) -> Optional[ResourceItem]:
+         """Creates ResourceItem, chunks, and generates embeddings for a single data entry."""
+         try:
+             # Create resource item and split into chunks
+             item = ResourceItem(source_id, text_data, resource_type)
+             item.create_chunks()
+
+             if not item.chunks:
+                 logger.warning(f"No chunks generated for {source_id}. Skipping storage.")
+                 return None
+
+             # Generate embeddings for chunks (can be slow, hence the thread pool)
+             chunk_embeddings_list = self.embedding_model.encode(item.chunks, show_progress_bar=False)  # Batch encode
+             item.chunk_embeddings = chunk_embeddings_list
+
+             # Calculate average embedding (optional, might not be needed if only using chunk search)
+             # if item.chunk_embeddings:
+             #     item.embedding = np.mean(item.chunk_embeddings, axis=0)
+
+             logger.debug(f"Processed resource: {source_id} (type={resource_type}), {len(item.chunks)} chunks.")
+             return item  # Return the processed item
+
+         except Exception as e:
+             logger.error(f"Error processing/storing resource {source_id}: {e}")
+             return None  # Return None on error
+
+     # store_resource is now process_and_store_resource and called within the thread pool
+
+     def find_best_matching_chunks(self, query: str, n_chunks: int = 3) -> List[Tuple[str, float, str]]:
+         """Finds the most relevant text chunks based on semantic similarity."""
          if not self.resources:
+             logger.warning("No resources loaded or indexed. Cannot find matches.")
              return []
+
          try:
              query_embedding = self.embedding_model.encode(query)
+             all_chunks_with_scores = []
+
              for resource in self.resources:
+                 if not resource.chunks or len(resource.chunk_embeddings) == 0:  # len() avoids the ambiguous truth value of a numpy array
+                     continue  # Skip resources with no chunks/embeddings
+
+                 # Calculate similarity between query and all chunks of the current resource
+                 similarities = cosine_similarity([query_embedding], resource.chunk_embeddings)[0]
+
+                 for chunk, score in zip(resource.chunks, similarities):
                      if score > Config.SIMILARITY_THRESHOLD:
+                         all_chunks_with_scores.append((chunk, float(score), resource.source_id))  # Use source_id
+
+             # Sort by similarity score (descending) and return top n
+             all_chunks_with_scores.sort(key=lambda x: x[1], reverse=True)
+             return all_chunks_with_scores[:n_chunks]
+
          except Exception as e:
              logger.error(f"Error finding matching chunks: {e}")
              return []

+     def generate_response(self, user_input: str) -> str:
+         """Generates a response based on user input and retrieved context."""
          try:
+             # 1. Find relevant context chunks
+             best_chunks = self.find_best_matching_chunks(user_input)
+
              if not best_chunks:
+                 logger.info(f"No relevant chunks found for query: '{user_input}'")
+                 return "I couldn't find specific information related to your question in the provided documents. Could you please rephrase or ask about a different topic?"
+
+             # 2. Prepare context and source attribution
              context = "\n".join([chunk[0] for chunk in best_chunks])
+             # Use source_id from the chunk tuple (index 2)
+             source_ids = sorted(list(set(chunk[2] for chunk in best_chunks)))
+             sources_text = "\n\nSources:\n" + "\n".join([f"- {sid}" for sid in source_ids])
+
+             # 3. Prepare input for the language model
+             # Ensure the input doesn't exceed model limits
+             prompt_template = f"Based on the following information:\n{context}\n\nAnswer the question: {user_input}\nAnswer:"
+             # prompt_template = f"Context: {context}\nUser: {user_input}\nAssistant:"  # Alternative simpler prompt
+
+             # 4. Tokenize and truncate if necessary
+             input_ids = self.tokenizer.encode(prompt_template, return_tensors='pt', max_length=Config.MAX_TOKENS_INPUT, truncation=True)
+
+             # 5. Generate response using the language model
+             logger.info("Generating response with LLM...")
+             output_sequences = self.model.generate(
+                 input_ids=input_ids,
+                 max_new_tokens=Config.MAX_TOKENS_RESPONSE,  # Control length of *new* tokens
+                 pad_token_id=self.tokenizer.pad_token_id,
+                 eos_token_id=self.tokenizer.eos_token_id,
                  temperature=0.7,
                  top_p=0.9,
+                 do_sample=True,
+                 num_return_sequences=1  # Generate one response
              )
+
+             # Decode the generated part of the response
+             # response_text = self.tokenizer.decode(output_sequences[0], skip_special_tokens=True)
+             # Decode only the newly generated tokens, excluding the prompt
+             response_text = self.tokenizer.decode(output_sequences[0][input_ids.shape[-1]:], skip_special_tokens=True)
+
+             # Basic post-processing (optional)
+             response_text = response_text.strip()
+             # Remove potential repetition of the question if the model includes it
+             if user_input.lower() in response_text.lower()[:len(user_input) + 10]:
+                 response_text = response_text.split(user_input, 1)[-1].strip("? ")
+
+             logger.info(f"Generated response (before sources): {response_text}")
+
+             # 6. Combine response and sources
+             full_response = response_text + sources_text
+             return full_response
+
          except Exception as e:
+             logger.exception(f"Error generating response: {e}")  # Use logger.exception to include stack trace
+             return "I apologize, but I encountered an error while processing your question. Please check the logs or try again later."

+ # --- Gradio Interface ---
+ def create_gradio_interface(chatbot: SchoolChatbot):
+     """Creates and returns the Gradio web interface."""
+     def respond(user_input: str) -> str:
+         if not user_input:
+             return "Please enter a question."
+         # Add basic input sanitization if needed
          return chatbot.generate_response(user_input)
+
      interface = gr.Interface(
          fn=respond,
          inputs=gr.Textbox(
              label="Ask a Question",
+             placeholder="Type your question about the school information...",
+             lines=3,  # Increased lines slightly
          ),
          outputs=gr.Textbox(
              label="Answer",
              placeholder="Response will appear here...",
+             lines=10,  # Increased lines for longer answers + sources
          ),
+         title="School Information Chatbot (Dataset Powered)",
+         description="Ask about information contained in the school dataset. The chatbot uses AI to find relevant details and generate answers.",
+         examples=[  # Update examples based on dataset content
+             ["What are the main subjects covered in the documents?"],
+             ["Are there any mentions of specific events or dates?"],
+             ["Summarize the key points about [topic from dataset]."]
          ],
          theme=gr.themes.Soft(),
+         allow_flagging="never",  # Changed from flagging_mode
+         # Optional: Add feedback capabilities
+         # feedback=["thumbs", "textbox"],
      )
      return interface

+ # --- Main Execution ---
  if __name__ == "__main__":
+     # Install necessary libraries if running for the first time:
+     # pip install gradio transformers sentence-transformers torch datasets scikit-learn nltk numpy
+     print("Starting application...")
      try:
+         # 1. Initialize the chatbot (loads models and data)
+         school_chatbot = SchoolChatbot()
+
+         # 2. Create the Gradio interface
+         app_interface = create_gradio_interface(school_chatbot)
+
+         # 3. Launch the interface
+         print("Launching Gradio Interface...")
+         app_interface.launch(
+             server_name="0.0.0.0",  # Accessible on the local network
              server_port=7860,
+             share=False,  # Set to True to get a public link (use with caution)
+             debug=False  # Set to True for more detailed Gradio logs (can be verbose)
          )
+         print("Interface launched. Access it at http://localhost:7860 (or the relevant IP)")
+
+     except ImportError as ie:
+         logger.error(f"ImportError: {ie}. Make sure all required libraries are installed.")
+         print(f"ImportError: {ie}. Please install the missing library (e.g., pip install {ie.name}).")
      except Exception as e:
+         logger.critical(f"Failed to start the application: {e}", exc_info=True)  # Log critical error with stack trace
+         print(f"Critical error during startup: {e}")