Nischal Subedi committed on
Commit
bfee845
·
1 Parent(s): 229d1b2

added disclaimer and enhanced UI

Browse files
Files changed (4) hide show
  1. .gitignore +4 -1
  2. app.py +401 -367
  3. data/tenant-landlord.pdf +0 -3
  4. requirements.txt +0 -1
.gitignore CHANGED
@@ -1 +1,4 @@
1
- data
 
 
 
 
1
+ data/
2
+ __pycache__/
3
+ .DS_Store
4
+ vector_db_store/
app.py CHANGED
@@ -1,14 +1,16 @@
1
  import os
2
  import json
3
- from typing import Dict, List, Optional
4
  import logging
 
5
  from functools import lru_cache
 
 
6
  import gradio as gr
7
  from langchain_openai import ChatOpenAI
8
  from langchain.prompts import PromptTemplate
9
  from langchain.chains import LLMChain
10
- from vector_db import VectorDatabase
11
- import re
12
 
13
  # Enhanced logging for better debugging
14
  logging.basicConfig(
@@ -19,237 +21,326 @@ logging.basicConfig(
19
  class RAGSystem:
20
  def __init__(self, vector_db: Optional[VectorDatabase] = None):
21
  logging.info("Initializing RAGSystem")
22
-
23
  self.vector_db = vector_db if vector_db else VectorDatabase()
24
-
25
  self.llm = None
26
  self.chain = None
27
-
28
- self.prompt_template = PromptTemplate(
29
- input_variables=["query", "context", "state", "statutes"],
30
- template="""You are a legal assistant specializing in tenant rights and landlord-tenant laws. Your goal is to provide accurate, detailed, and helpful answers grounded in legal authority. Use the provided statutes as the primary source when available. If no relevant statutes are found in the context, rely on your general knowledge to provide a pertinent and practical response, clearly indicating when you are doing so and prioritizing state-specific information over federal laws for state-specific queries.
31
 
32
  Instructions:
33
- - Use the context and statutes as the primary basis for your answer when available.
34
- - For state-specific queries, prioritize statutes or legal principles from the specified state over federal laws.
35
- - Cite relevant statutes (e.g., (AS § 34.03.220(a)(2))) explicitly in your answer when applicable.
36
- - If multiple statutes apply, list all relevant ones.
37
- - If no specific statute is found in the context, state this clearly (e.g., 'No specific statute was found in the provided context'), then provide a general answer based on common legal principles or practices, marked as such.
38
- - Include practical examples or scenarios to enhance clarity and usefulness.
39
- - Use bullet points or numbered lists for readability when appropriate.
40
- - Maintain a professional and neutral tone.
41
 
42
  Question: {query}
43
  State: {state}
44
  Statutes from context:
45
  {statutes}
 
46
  Context information:
 
47
  {context}
 
 
48
  Answer:"""
 
 
 
49
  )
 
50
 
51
  def initialize_llm(self, openai_api_key: str):
 
52
  if not openai_api_key:
 
53
  raise ValueError("OpenAI API key is required.")
54
-
 
 
 
 
 
 
 
 
 
55
  try:
 
56
  self.llm = ChatOpenAI(
57
  temperature=0.2,
58
  openai_api_key=openai_api_key,
59
  model_name="gpt-3.5-turbo",
60
- max_tokens=1500,
61
- request_timeout=30
62
  )
63
- logging.info("OpenAI LLM initialized successfully")
64
-
65
  self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
66
- logging.info("LLMChain created successfully")
67
  except Exception as e:
68
- logging.error(f"Failed to initialize OpenAI LLM: {str(e)}")
69
- raise
 
 
 
70
 
71
  def extract_statutes(self, text: str) -> str:
72
  """
73
  Extract statute citations from the given text using a refined regex pattern.
74
  Returns a string of valid statutes, one per line, or a message if none are found.
75
  """
76
- statute_pattern = r'\((?:[A-Za-z\s]+\s*(?:Code|Laws|Statutes|CCP)\s*§\s*[0-9-]+(?:\([a-z0-9]+\))?|[A-Za-z0-9\s]+\s*§\s*[0-9-]+(?:\([a-z0-9]+\))?|[A-Z]{2,3}\s*§\s*[0-9-]+(?:\([a-z0-9]+\))?|[0-9]+\s*ILCS\s*[0-9]+/[0-9-]+(?:\([a-z0-9]+\))?|Title\s*[0-9]+\s*USC\s*§\s*[0-9]+-[0-9]+|[A-Za-z\s]+\s*Laws\s*[0-9]+\s*§\s*[0-9-]+(?:\([a-z0-9]+\))?|[A-Za-z\s]+\s*CCP\s*§\s*[0-9-]+(?:\([a-z0-9]+\))?)\)'
77
- statutes = re.findall(statute_pattern, text)
78
-
 
 
 
 
 
 
 
79
  valid_statutes = []
 
80
  for statute in statutes:
81
- if '§' in statute and any(char.isdigit() for char in statute) and not re.match(r'\([a-z]\)', statute) and 'found here' not in statute:
82
- valid_statutes.append(statute)
83
-
 
 
 
 
84
  if valid_statutes:
 
85
  seen = set()
86
- unique_statutes = [statute for statute in valid_statutes if not (statute in seen or seen.add(statute))]
87
- return "\n".join(unique_statutes)
88
- return "No statutes found in the context."
89
 
 
 
 
 
90
  @lru_cache(maxsize=100)
91
  def process_query(self, query: str, state: str, openai_api_key: str, n_results: int = 5) -> Dict[str, any]:
 
92
  logging.info(f"Processing query: '{query}' for state: '{state}' with n_results={n_results}")
93
-
 
94
  if not state:
95
- logging.warning("No state provided for query")
 
 
 
 
 
 
96
  return {
97
- "answer": "Please select a state to proceed with your query.",
98
  "context_used": "N/A"
99
  }
100
-
101
  if not openai_api_key:
102
- logging.warning("No OpenAI API key provided")
103
  return {
104
- "answer": "Please provide an OpenAI API key to proceed.",
105
  "context_used": "N/A"
106
  }
107
-
108
- if not self.llm or not self.chain:
109
- try:
110
- self.initialize_llm(openai_api_key)
111
- except Exception as e:
112
- logging.error(f"Failed to initialize LLM: {str(e)}")
113
- return {
114
- "answer": f"Failed to initialize LLM with the provided API key: {str(e)}",
115
- "context_used": "N/A"
116
- }
117
-
118
- # Query the vector database
119
  try:
120
- results = self.vector_db.query(query, state=state, n_results=n_results)
121
- logging.info("Vector database query successful")
122
- logging.debug(f"Query results: {json.dumps(results, indent=2)}")
123
  except Exception as e:
124
- logging.error(f"Vector database query failed: {str(e)}")
125
- results = {
126
- "document_results": {"documents": [[]], "metadatas": [[]]},
127
- "state_results": {"documents": [[]], "metadatas": [[]]}
128
  }
129
- logging.info("Applied safeguard: Using empty results due to vector DB failure")
130
-
131
- context_parts = []
132
-
133
- # Process document results
134
- if results["document_results"]["documents"] and results["document_results"]["documents"][0]:
135
- for i, doc in enumerate(results["document_results"]["documents"][0]):
136
- metadata = results["document_results"]["metadatas"][0][i]
137
- context_parts.append(f"[{metadata['state']} - Chunk {metadata.get('chunk_id', 'N/A')}] {doc}")
138
- else:
139
- logging.warning("No document results found in query response")
140
-
141
- # Process state summary results
142
- if results["state_results"]["documents"] and results["state_results"]["documents"][0]:
143
- for i, doc in enumerate(results["state_results"]["documents"][0]):
144
- metadata = results["state_results"]["metadatas"][0][i]
145
- context_parts.append(f"[{metadata['state']} - Summary] {doc}")
146
- else:
147
- logging.warning("No state summary results found in query response")
148
-
149
- context = "\n\n---\n\n".join(context_parts) if context_parts else "No relevant context found."
150
-
151
- logging.info(f"Raw context for query: {context}")
152
-
153
- if not context_parts:
154
- logging.info("No relevant context found for query")
155
- # Fallback to general knowledge
156
- statutes_from_context = "No statutes found in the context."
157
- try:
158
- answer = self.chain.invoke({
159
- "query": query,
160
- "context": "No specific legal documents available.",
161
- "state": state,
162
- "statutes": statutes_from_context
163
- })
164
- return {
165
- "answer": answer['text'].strip(),
166
- "context_used": context
167
- }
168
- except Exception as e:
169
- logging.error(f"LLM fallback processing failed: {str(e)}")
170
- return {
171
- "answer": "I don’t have sufficient information to answer this accurately, and an error occurred while generating a general response. Please try again.",
172
- "context_used": context
173
- }
174
-
175
- statutes_from_context = self.extract_statutes(context)
176
- logging.info(f"Statutes extracted from context: {statutes_from_context}")
177
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
178
  try:
179
- answer = self.chain.invoke({
 
180
  "query": query,
181
  "context": context,
182
  "state": state,
183
  "statutes": statutes_from_context
184
- })
185
- logging.info("LLM generated answer successfully")
186
- logging.debug(f"Raw answer text: {answer['text']}")
 
 
 
 
 
 
 
 
 
 
 
 
 
187
  except Exception as e:
188
- logging.error(f"LLM processing failed: {str(e)}")
 
 
 
 
 
 
 
 
189
  return {
190
- "answer": "An error occurred while generating the answer. Please try again.",
191
- "context_used": context
192
  }
193
-
194
- return {
195
- "answer": answer['text'].strip(),
196
- "context_used": context
197
- }
198
 
199
  def get_states(self) -> List[str]:
 
200
  try:
201
  states = self.vector_db.get_states()
202
- logging.info(f"Retrieved {len(states)} states from database")
203
- return states
 
 
 
 
204
  except Exception as e:
205
- logging.error(f"Failed to get states: {str(e)}")
206
- return []
207
 
208
  def load_pdf(self, pdf_path: str) -> int:
 
 
 
 
209
  try:
 
210
  num_states = self.vector_db.process_and_load_pdf(pdf_path)
211
- logging.info(f"Loaded PDF with {num_states} states")
 
 
 
212
  return num_states
213
  except Exception as e:
214
- logging.error(f"Failed to load PDF: {str(e)}")
215
- return 0
 
 
216
 
217
  def gradio_interface(self):
 
 
 
218
  def query_interface(api_key: str, query: str, state: str) -> str:
219
- if not api_key:
220
- logging.warning("No OpenAI API key provided in interface")
221
- return "⚠️ **Error:** Please provide an OpenAI API key to proceed."
222
- if not state:
223
- logging.warning("No state selected in interface")
224
- return "⚠️ **Error:** Please select a state to proceed with your query."
225
- result = self.process_query(query, state=state, openai_api_key=api_key)
226
-
227
- return f"### Answer:\n{result['answer']}"
228
-
229
- states = self.get_states()
230
-
231
- # Define the inputs
232
- api_key_input = gr.Textbox(
233
- label="Open AI API Key",
234
- type="password",
235
- placeholder="e.g., sk-abc123",
236
- elem_classes="input-field"
237
- )
238
- query_input = gr.Textbox(
239
- label="Query",
240
- placeholder="e.g., What are the eviction rules?",
241
- lines=3,
242
- elem_classes="input-field"
243
- )
244
- state_input = gr.Dropdown(
245
- label="Select a state (required)",
246
- choices=states,
247
- value=None,
248
- allow_custom_value=False,
249
- elem_classes="input-field"
250
- )
251
 
252
- # Define the example queries (only for query and state)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
253
  example_queries = [
254
  ["What is the rent due date law?", "California"],
255
  ["What are the rules for security deposit returns?", "New York"],
@@ -258,255 +349,198 @@ Answer:"""
258
  ["Are there rent control laws?", "Oregon"]
259
  ]
260
 
 
261
  custom_css = """
262
- .gr-form {
263
- max-width: 900px;
264
- margin: 0 auto;
265
- padding: 30px;
266
- background: linear-gradient(135deg, #ffffff 0%, #f8f9fa 100%);
267
- border-radius: 20px;
268
- box-shadow: 0 8px 30px rgba(0, 0, 0, 0.1);
269
- }
270
- .gr-title {
271
- font-size: 2.8em;
272
- font-weight: 700;
273
- color: #1a3c34;
274
- text-align: center;
275
- margin-bottom: 10px;
276
- text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.05);
277
- }
278
- .gr-description {
279
- font-size: 1.1em;
280
- color: #4a5e57;
281
- text-align: center;
282
- margin-bottom: 30px;
283
- line-height: 1.6;
284
- }
285
- .footnote {
286
- font-size: 0.9em;
287
- color: #6b7280;
288
- text-align: center;
289
- margin-top: 40px;
290
- padding-top: 15px;
291
- border-top: 1px solid #e5e7eb;
292
- }
293
- .footnote a {
294
- color: #2563eb;
295
- text-decoration: none;
296
- font-weight: 500;
297
- transition: color 0.3s ease;
298
- }
299
- .footnote a:hover {
300
- color: #1d4ed8;
301
- text-decoration: underline;
302
- }
303
- .gr-textbox, .gr-dropdown {
304
- border: 1px solid #d1d5db !important;
305
- border-radius: 10px !important;
306
- padding: 12px !important;
307
- font-size: 1em !important;
308
- background-color: #fff !important;
309
- transition: border-color 0.3s ease, box-shadow 0.3s ease;
310
  }
311
- .gr-textbox:focus, .gr-dropdown:focus {
312
- border-color: #2563eb !important;
313
- box-shadow: 0 0 8px rgba(37, 99, 235, 0.2) !important;
314
- outline: none !important;
315
- }
316
- .gr-textbox label, .gr-dropdown label {
317
- font-weight: 600;
318
- color: #1a3c34;
319
- margin-bottom: 8px;
320
- }
321
- .gr-button-primary {
322
- background: linear-gradient(90deg, #f97316 0%, #ea580c 100%) !important;
323
- border: none !important;
324
- padding: 12px 30px !important;
325
- font-weight: 600 !important;
326
- font-size: 1em !important;
327
- border-radius: 10px !important;
328
- color: #fff !important;
329
- transition: transform 0.2s ease, box-shadow 0.3s ease;
330
- }
331
- .gr-button-primary:hover {
332
- transform: translateY(-3px);
333
- box-shadow: 0 4px 15px rgba(249, 115, 22, 0.3) !important;
334
- }
335
- .gr-button-secondary {
336
- background: linear-gradient(90deg, #6b7280 0%, #4b5563 100%) !important;
337
- border: none !important;
338
- padding: 12px 30px !important;
339
- font-weight: 600 !important;
340
- font-size: 1em !important;
341
- border-radius: 10px !important;
342
- color: #fff !important;
343
- transition: transform 0.2s ease, box-shadow 0.3s ease;
344
- }
345
- .gr-button-secondary:hover {
346
- transform: translateY(-3px);
347
- box-shadow: 0 4px 15px rgba(107, 114, 128, 0.3) !important;
348
- }
349
- .output-markdown {
350
- background: #f9fafb !important;
351
- color: #1f2937 !important;
352
- padding: 25px !important;
353
- border-radius: 12px !important;
354
- border: 1px solid #e5e7eb !important;
355
- font-size: 1.1em !important;
356
- line-height: 1.8 !important;
357
- box-shadow: 0 2px 12px rgba(0, 0, 0, 0.05);
358
- }
359
- .gr-examples {
360
- background: #f1f5f9;
361
- padding: 20px;
362
- border-radius: 12px;
363
- margin-top: 25px;
364
- border: 1px solid #e5e7eb;
365
- }
366
- .gr-examples table {
367
- background-color: transparent !important;
368
- }
369
- @media (prefers-color-scheme: dark) {
370
- .gr-form {
371
- background: linear-gradient(135deg, #1f2937 0%, #374151 100%);
372
- box-shadow: 0 8px 30px rgba(0, 0, 0, 0.3);
373
- }
374
- .gr-title {
375
- color: #f3f4f6;
376
- }
377
- .gr-description {
378
- color: #d1d5db;
379
- }
380
- .footnote {
381
- color: #9ca3af;
382
- border-top: 1px solid #4b5563;
383
- }
384
- .footnote a {
385
- color: #60a5fa;
386
- }
387
- .footnote a:hover {
388
- color: #3b82f6;
389
- }
390
- .gr-textbox, .gr-dropdown {
391
- background-color: #374151 !important;
392
- color: #f3f4f6 !important;
393
- border-color: #4b5563 !important;
394
- }
395
- .gr-textbox label, .gr-dropdown label {
396
- color: #f3f4f6;
397
- }
398
- .output-markdown {
399
- background: #374151 !important;
400
- color: #f3f4f6 !important;
401
- border-color: #4b5563 !important;
402
- }
403
- .gr-examples {
404
- background: #4b5563;
405
- border-color: #6b7280;
406
- }
407
- }
408
- @media (max-width: 600px) {
409
- .gr-form {
410
- padding: 20px;
411
- }
412
- .gr-title {
413
- font-size: 2em;
414
- }
415
- .gr-description {
416
- font-size: 1em;
417
- }
418
- .footnote {
419
- font-size: 0.85em;
420
- }
421
- .gr-textbox, .gr-dropdown {
422
- font-size: 0.9em !important;
423
- }
424
- .gr-button-primary, .gr-button-secondary {
425
- padding: 10px 20px !important;
426
- font-size: 0.9em !important;
427
- }
428
- .output-markdown {
429
- font-size: 1em !important;
430
- padding: 15px !important;
431
- }
432
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
433
  """
434
 
435
- with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as demo:
 
436
  gr.Markdown(
437
  """
438
- # 🏠 Landlord-Tenant Rights Bot
439
- Ask questions about tenant rights and landlord-tenant laws based on state-specific legal documents. Provide your OpenAI API key, select a state, and enter your question below. You can get an API key from [OpenAI](https://platform.openai.com/api-keys).
 
 
 
 
 
 
 
440
  """
441
  )
442
 
443
- with gr.Column(elem_classes="gr-form"):
 
444
  api_key_input = gr.Textbox(
445
- label="Open AI API Key",
446
  type="password",
447
- placeholder="e.g., sk-abc123",
448
- elem_classes="input-field"
 
449
  )
450
  query_input = gr.Textbox(
451
- label="Query",
452
- placeholder="e.g., What are the eviction rules?",
453
- lines=3,
454
- elem_classes="input-field"
 
455
  )
456
  state_input = gr.Dropdown(
457
- label="Select a state (required)",
458
- choices=states,
459
- value=None,
460
  allow_custom_value=False,
461
- elem_classes="input-field"
 
462
  )
463
 
464
  with gr.Row():
465
- clear_button = gr.Button("Clear", variant="secondary")
466
- submit_button = gr.Button("Submit", variant="primary")
467
 
468
  output = gr.Markdown(
469
- label="Response",
470
- elem_classes="output-markdown"
 
471
  )
472
 
 
 
473
  gr.Examples(
474
  examples=example_queries,
475
- inputs=[query_input, state_input],
476
- outputs=output,
477
- fn=query_interface,
 
 
478
  examples_per_page=5
479
  )
480
 
 
481
  gr.Markdown(
482
  """
483
- <div class='footnote'>Developed by Nischal Subedi. Follow me on <a href='https://www.linkedin.com/in/nischal1/' target='_blank'>LinkedIn</a> or read my insights on <a href='https://datascientistinsights.substack.com/' target='_blank'>Substack</a>.</div>
 
 
 
 
 
484
  """
485
- )
486
 
 
487
  submit_button.click(
488
  fn=query_interface,
489
  inputs=[api_key_input, query_input, state_input],
490
- outputs=output
 
491
  )
 
492
  clear_button.click(
493
- fn=lambda: ("", "", None, ""),
494
- inputs=[],
 
 
 
 
 
495
  outputs=[api_key_input, query_input, state_input, output]
496
  )
497
 
498
- return demo
 
 
499
 
 
500
  if __name__ == "__main__":
 
501
  try:
502
- rag = RAGSystem()
503
-
504
- pdf_path = "data/tenant-landlord.pdf"
505
- rag.load_pdf(pdf_path)
506
-
507
- demo = rag.gradio_interface()
508
- demo.launch(share=True)
509
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
510
  except Exception as e:
511
- logging.error(f"Main execution failed: {str(e)}")
512
- raise
 
 
1
  import os
2
  import json
 
3
  import logging
4
+ from typing import Dict, List, Optional
5
  from functools import lru_cache
6
+ import re
7
+
8
  import gradio as gr
9
  from langchain_openai import ChatOpenAI
10
  from langchain.prompts import PromptTemplate
11
  from langchain.chains import LLMChain
12
+ # Make sure vector_db.py is in the same directory or accessible via PYTHONPATH
13
+ from vector_db import VectorDatabase # <-- This now imports the placeholder
14
 
15
  # Enhanced logging for better debugging
16
  logging.basicConfig(
 
21
  class RAGSystem:
22
  def __init__(self, vector_db: Optional[VectorDatabase] = None):
23
  logging.info("Initializing RAGSystem")
24
+ # If no vector_db instance is passed, create one (uses placeholder for now)
25
  self.vector_db = vector_db if vector_db else VectorDatabase()
 
26
  self.llm = None
27
  self.chain = None
28
+
29
+ # Using f-string for potentially better readability/maintenance if template gets complex
30
+ self.prompt_template_str = """You are a legal assistant specializing in tenant rights and landlord-tenant laws. Your goal is to provide accurate, detailed, and helpful answers grounded in legal authority. Use the provided statutes as the primary source when available. If no relevant statutes are found in the context, rely on your general knowledge to provide a pertinent and practical response, clearly indicating when you are doing so and prioritizing state-specific information over federal laws for state-specific queries.
 
31
 
32
  Instructions:
33
+ * Use the context and statutes as the primary basis for your answer when available.
34
+ * For state-specific queries, prioritize statutes or legal principles from the specified state over federal laws.
35
+ * Cite relevant statutes (e.g., (AS § 34.03.220(a)(2))) explicitly in your answer when applicable.
36
+ * If multiple statutes apply, list all relevant ones.
37
+ * If no specific statute is found in the context, state this clearly (e.g., 'No specific statute was found in the provided context'), then provide a general answer based on common legal principles or practices, marked as such.
38
+ * Include practical examples or scenarios to enhance clarity and usefulness.
39
+ * Use bullet points or numbered lists for readability when appropriate.
40
+ * Maintain a professional and neutral tone.
41
 
42
  Question: {query}
43
  State: {state}
44
  Statutes from context:
45
  {statutes}
46
+
47
  Context information:
48
+ --- START CONTEXT ---
49
  {context}
50
+ --- END CONTEXT ---
51
+
52
  Answer:"""
53
+ self.prompt_template = PromptTemplate(
54
+ input_variables=["query", "context", "state", "statutes"],
55
+ template=self.prompt_template_str
56
  )
57
+ logging.info("RAGSystem initialized.")
58
 
59
  def initialize_llm(self, openai_api_key: str):
60
+ """Initializes the LLM and the processing chain."""
61
  if not openai_api_key:
62
+ logging.error("Attempted to initialize LLM without API key.")
63
  raise ValueError("OpenAI API key is required.")
64
+
65
+ # Avoid re-initializing if already done with the same key implicitly
66
+ # Note: This simple check doesn't handle key changes well.
67
+ # A more robust approach might involve checking if self.llm's key matches.
68
+ if self.llm and self.chain:
69
+ # Check if the key is the same (conceptually - can't directly read from ChatOpenAI instance easily)
70
+ # If key changes are expected, need a more complex re-initialization logic.
71
+ logging.info("LLM and Chain already initialized.")
72
+ return
73
+
74
  try:
75
+ logging.info("Initializing OpenAI LLM...")
76
  self.llm = ChatOpenAI(
77
  temperature=0.2,
78
  openai_api_key=openai_api_key,
79
  model_name="gpt-3.5-turbo",
80
+ max_tokens=1500, # Max response tokens
81
+ request_timeout=45 # Increased timeout
82
  )
83
+ logging.info("OpenAI LLM initialized successfully.")
84
+
85
  self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
86
+ logging.info("LLMChain created successfully.")
87
  except Exception as e:
88
+ logging.error(f"Failed to initialize OpenAI LLM or Chain: {str(e)}")
89
+ # Reset llm/chain if initialization failed partially
90
+ self.llm = None
91
+ self.chain = None
92
+ raise # Re-raise the exception to be caught by the caller
93
 
94
  def extract_statutes(self, text: str) -> str:
95
  """
96
  Extract statute citations from the given text using a refined regex pattern.
97
  Returns a string of valid statutes, one per line, or a message if none are found.
98
  """
99
+ # Refined Regex: Aims to capture common US statute formats. May need tuning.
100
+ # - Allows state abbreviations (e.g., CA, NY) or full names (e.g, California)
101
+ # - Looks for common terms like Code, Laws, Statutes, CCP, USC, ILCS
102
+ # - Requires section symbol (§) followed by numbers/hyphens/parenthesized parts
103
+ # - Tries to avoid matching simple parenthetical remarks like '(a)' or '(found here)'
104
+ statute_pattern = r'\b(?:[A-Z]{2,}\.?\s+(?:Rev\.\s+)?Stat\.?|Code(?:\s+Ann\.?)?|Ann\.?\s+Laws|Statutes|CCP|USC|ILCS|Civ\.\s+Code|Penal\s+Code|Gen\.\s+Oblig\.\s+Law)\s+§\s*[\d\-]+(?:\.\d+)?(?:\([\w\.]+\))?|Title\s+\d+\s+USC\s+§\s*\d+(?:-\d+)?\b'
105
+
106
+ # Use finditer for more control if needed later, findall is simpler for now
107
+ statutes = re.findall(statute_pattern, text, re.IGNORECASE)
108
+
109
  valid_statutes = []
110
+ # Basic filtering (can be improved)
111
  for statute in statutes:
112
+ # Remove potential leading/trailing spaces and ensure it looks like a statute
113
+ statute = statute.strip()
114
+ if '§' in statute and any(char.isdigit() for char in statute):
115
+ # Avoid things that might just be section references like '(a)' or URLs mistakenly caught
116
+ if not re.match(r'^\([\w\.]+\)$', statute) and 'http' not in statute:
117
+ valid_statutes.append(statute)
118
+
119
  if valid_statutes:
120
+ # Deduplicate while preserving order
121
  seen = set()
122
+ unique_statutes = [s for s in valid_statutes if not (s in seen or seen.add(s))]
123
+ logging.info(f"Extracted {len(unique_statutes)} unique statutes.")
124
+ return "\n".join(f"- {s}" for s in unique_statutes) # Format as list
125
 
126
+ logging.info("No statutes found matching the pattern in the context.")
127
+ return "No specific statutes found in the provided context."
128
+
129
+ # Cache results for the same query, state, and key (key isn't explicitly cached but influences init)
130
  @lru_cache(maxsize=100)
131
  def process_query(self, query: str, state: str, openai_api_key: str, n_results: int = 5) -> Dict[str, any]:
132
+ """Processes the user query using RAG."""
133
  logging.info(f"Processing query: '{query}' for state: '{state}' with n_results={n_results}")
134
+
135
+ # 1. Input Validation
136
  if not state:
137
+ logging.warning("No state provided for query.")
138
+ return {
139
+ "answer": "**Error:** Please select a state to proceed with your query.",
140
+ "context_used": "N/A"
141
+ }
142
+ if not query:
143
+ logging.warning("No query provided.")
144
  return {
145
+ "answer": "**Error:** Please enter your question in the Query box.",
146
  "context_used": "N/A"
147
  }
 
148
  if not openai_api_key:
149
+ logging.warning("No OpenAI API key provided.")
150
  return {
151
+ "answer": "**Error:** Please provide an OpenAI API key to proceed.",
152
  "context_used": "N/A"
153
  }
154
+
155
+ # 2. Initialize LLM (if needed)
 
 
 
 
 
 
 
 
 
 
156
  try:
157
+ # Initialize LLM here, ensuring it's ready before DB query or LLM call
158
+ self.initialize_llm(openai_api_key)
 
159
  except Exception as e:
160
+ logging.error(f"LLM Initialization failed: {str(e)}")
161
+ return {
162
+ "answer": f"**Error:** Failed to initialize the AI model. Please check your API key and network connection. ({str(e)})",
163
+ "context_used": "N/A"
164
  }
165
+
166
+ # Ensure chain is initialized after initialize_llm() call succeeds
167
+ if not self.chain:
168
+ logging.error("LLM Chain is not initialized after attempting LLM initialization.")
169
+ return {
170
+ "answer": "**Error:** Internal system error. Failed to prepare the processing chain.",
171
+ "context_used": "N/A"
172
+ }
173
+
174
+
175
+ # 3. Query Vector Database
176
+ context = "No relevant context found." # Default context
177
+ try:
178
+ results = self.vector_db.query(query, state=state, n_results=n_results)
179
+ logging.info(f"Vector database query successful for state '{state}'.")
180
+ # logging.debug(f"Raw query results: {json.dumps(results, indent=2)}")
181
+
182
+ context_parts = []
183
+ # Process document results carefully, checking list structure
184
+ doc_results = results.get("document_results", {})
185
+ docs = doc_results.get("documents", [[]])[0] # Safely access first list
186
+ metadatas = doc_results.get("metadatas", [[]])[0] # Safely access first list
187
+
188
+ if docs and metadatas and len(docs) == len(metadatas):
189
+ for i, doc_content in enumerate(docs):
190
+ metadata = metadatas[i]
191
+ state_label = metadata.get('state', 'Unknown State')
192
+ chunk_id = metadata.get('chunk_id', 'N/A')
193
+ context_parts.append(f"[{state_label} - Chunk {chunk_id}] {doc_content}")
194
+ else:
195
+ logging.warning("No document results or mismatch in docs/metadata lengths.")
196
+
197
+ # Process state summary results
198
+ state_results_data = results.get("state_results", {})
199
+ state_docs = state_results_data.get("documents", [[]])[0]
200
+ state_metadatas = state_results_data.get("metadatas", [[]])[0]
201
+
202
+ if state_docs and state_metadatas and len(state_docs) == len(state_metadatas):
203
+ for i, state_doc_content in enumerate(state_docs):
204
+ metadata = state_metadatas[i]
205
+ state_label = metadata.get('state', state) # Use provided state if not in metadata
206
+ context_parts.append(f"[{state_label} - Summary] {state_doc_content}")
207
+ else:
208
+ logging.warning("No state summary results found.")
209
+
210
+ if context_parts:
211
+ context = "\n\n---\n\n".join(context_parts)
212
+ logging.info(f"Constructed context with {len(context_parts)} parts.")
213
+ # Limit context length if necessary (though max_tokens helps)
214
+ # max_context_len = 5000 # Example limit
215
+ # if len(context) > max_context_len:
216
+ # context = context[:max_context_len] + "\n... [Context Truncated]"
217
+ # logging.warning("Context truncated due to length.")
218
+ else:
219
+ logging.warning("No relevant context parts found after processing DB results.")
220
+ context = "No relevant context could be retrieved from the available documents for your query and selected state."
221
+
222
+
223
+ except Exception as e:
224
+ logging.error(f"Vector database query or context processing failed: {str(e)}", exc_info=True)
225
+ # Fallback to general knowledge if DB fails, but inform the user
226
+ context = f"An error occurred while retrieving specific legal documents ({str(e)}). I will attempt to answer based on general knowledge, but it may lack state-specific details."
227
+ statutes_from_context = "Statute retrieval skipped due to context error."
228
+
229
+
230
+ # 4. Extract Statutes from Retrieved Context (if context retrieval succeeded)
231
+ statutes_from_context = "No specific statutes found in the provided context."
232
+ if "An error occurred while retrieving" not in context and "No relevant context found" not in context:
233
+ try:
234
+ statutes_from_context = self.extract_statutes(context)
235
+ logging.info(f"Statutes extracted: {statutes_from_context}")
236
+ except Exception as e:
237
+ logging.error(f"Error extracting statutes: {e}")
238
+ statutes_from_context = "Error occurred during statute extraction."
239
+
240
+
241
+ # 5. Generate Answer using LLM
242
  try:
243
+ logging.info("Invoking LLMChain...")
244
+ llm_input = {
245
  "query": query,
246
  "context": context,
247
  "state": state,
248
  "statutes": statutes_from_context
249
+ }
250
+ # logging.debug(f"Input to LLMChain: {json.dumps(llm_input, indent=2)}") # Be careful logging sensitive data
251
+ answer_dict = self.chain.invoke(llm_input)
252
+ answer_text = answer_dict.get('text', '').strip()
253
+
254
+ if not answer_text:
255
+ logging.warning("LLM returned an empty answer.")
256
+ answer_text = "I received an empty response from the AI model. This might be a temporary issue. Please try rephrasing your question or try again later."
257
+
258
+ logging.info("LLM generated answer successfully.")
259
+ # logging.debug(f"Raw answer text from LLM: {answer_text}")
260
+
261
+ return {
262
+ "answer": answer_text,
263
+ "context_used": context # Return the context for potential display or debugging
264
+ }
265
  except Exception as e:
266
+ logging.error(f"LLM processing failed: {str(e)}", exc_info=True)
267
+ # Provide a more informative error message
268
+ error_message = f"**Error:** An error occurred while generating the answer. This could be due to issues with the AI model connection, API key limits, or the complexity of the request. Please try again later.\n\nDetails: {str(e)}"
269
+ # Check for common API errors
270
+ if "authentication" in str(e).lower():
271
+ error_message = "**Error:** Authentication failed. Please check if your OpenAI API key is correct and active."
272
+ elif "rate limit" in str(e).lower():
273
+ error_message = "**Error:** You've exceeded your OpenAI API usage limit. Please check your plan or try again later."
274
+
275
  return {
276
+ "answer": error_message,
277
+ "context_used": context # Still return context for debugging
278
  }
 
 
 
 
 
279
 
280
  def get_states(self) -> List[str]:
281
+ """Retrieves the list of available states from the VectorDatabase."""
282
  try:
283
  states = self.vector_db.get_states()
284
+ if not states:
285
+ logging.warning("No states returned from vector_db.get_states(). Using default list.")
286
+ # Provide a fallback list if needed
287
+ return ["California", "New York", "Texas", "Florida", "Oregon", "Alabama", "Select State..."]
288
+ logging.info(f"Retrieved {len(states)} states from VectorDatabase.")
289
+ return sorted(list(set(states))) # Ensure uniqueness and sort
290
  except Exception as e:
291
+ logging.error(f"Failed to get states from VectorDatabase: {str(e)}")
292
+ return ["Error fetching states"] # Indicate error in the dropdown
293
 
294
  def load_pdf(self, pdf_path: str) -> int:
295
+ """Loads and processes the PDF using the VectorDatabase."""
296
+ if not os.path.exists(pdf_path):
297
+ logging.error(f"PDF file not found at path: {pdf_path}")
298
+ raise FileNotFoundError(f"PDF file not found: {pdf_path}")
299
  try:
300
+ logging.info(f"Attempting to load PDF: {pdf_path}")
301
  num_states = self.vector_db.process_and_load_pdf(pdf_path)
302
+ if num_states > 0:
303
+ logging.info(f"Successfully processed PDF. Found data for {num_states} states.")
304
+ else:
305
+ logging.warning(f"Processed PDF, but found no state-specific data according to vector_db implementation.")
306
  return num_states
307
  except Exception as e:
308
+ logging.error(f"Failed to load or process PDF '{pdf_path}': {str(e)}", exc_info=True)
309
+ # Depending on vector_db, this might leave the DB in a partial state.
310
+ return 0 # Indicate failure
311
+
312
 
313
  def gradio_interface(self):
314
+ """Creates and returns the Gradio interface."""
315
+
316
+ # Define the core function that Gradio will call
317
  def query_interface(api_key: str, query: str, state: str) -> str:
318
+ # Clear cache for each new request if desired, or rely on LRU cache parameters
319
+ # self.process_query.cache_clear()
320
+ logging.info(f"Gradio interface received query: '{query}', state: '{state}'")
321
+ result = self.process_query(query=query, state=state, openai_api_key=api_key)
322
+
323
+ # Format the response clearly using Markdown
324
+ answer = result.get("answer", "**Error:** No answer generated.")
325
+ # context_used = result.get("context_used", "N/A") # Optional: show context
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
326
 
327
+ # Simple formatting for now, can be enhanced
328
+ formatted_response = f"### Answer for {state}:\n\n{answer}"
329
+
330
+ # Optional: Include context for debugging/transparency (can be long)
331
+ # formatted_response += f"\n\n---\n<details><summary>Context Used (Debug)</summary>\n\n```\n{context_used}\n```\n</details>"
332
+
333
+ return formatted_response
334
+
335
+ # Get states for the dropdown
336
+ available_states = self.get_states()
337
+ if not available_states or "Error" in available_states[0]:
338
+ logging.error("Could not load states for dropdown. Interface might be unusable.")
339
+ # Handle case where states couldn't be loaded
340
+ available_states = ["Error: Could not load states"]
341
+
342
+
343
+ # Define example queries (only query and state needed for examples UI)
344
  example_queries = [
345
  ["What is the rent due date law?", "California"],
346
  ["What are the rules for security deposit returns?", "New York"],
 
349
  ["Are there rent control laws?", "Oregon"]
350
  ]
351
 
352
+ # Custom CSS (minor adjustments for clarity if needed, your CSS is quite comprehensive)
353
  custom_css = """
354
+ /* Your existing CSS here */
355
+ .gr-form { max-width: 900px; margin: 0 auto; padding: 30px; /* ... */ }
356
+ .gr-title { font-size: 2.5em; font-weight: 700; color: #1a3c34; /* ... */ }
357
+ /* ... rest of your CSS ... */
358
+ .output-markdown {
359
+ background: #f9fafb; /* Light mode bg */
360
+ color: #1f2937; /* Light mode text */
361
+ padding: 20px;
362
+ border-radius: 12px;
363
+ border: 1px solid #e5e7eb;
364
+ font-size: 1.05em; /* Slightly larger text */
365
+ line-height: 1.7; /* More spacing */
366
+ box-shadow: 0 2px 12px rgba(0, 0, 0, 0.05);
367
+ margin-top: 20px; /* Add space above output */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
368
  }
369
+ .output-markdown h3 { /* Style the 'Answer for STATE:' heading */
370
+ margin-top: 0;
371
+ margin-bottom: 15px;
372
+ color: #1a3c34; /* Match title color */
373
+ border-bottom: 1px solid #e5e7eb;
374
+ padding-bottom: 8px;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
375
  }
376
+ .output-markdown p { margin-bottom: 1em; }
377
+ .output-markdown ul, .output-markdown ol { margin-left: 20px; margin-bottom: 1em; }
378
+ .output-markdown li { margin-bottom: 0.5em; }
379
+
380
+ /* Dark mode adjustments */
381
+ @media (prefers-color-scheme: dark) {
382
+ .output-markdown {
383
+ background: #374151 !important; /* Dark mode bg */
384
+ color: #f3f4f6 !important; /* Dark mode text */
385
+ border-color: #4b5563 !important;
386
+ }
387
+ .output-markdown h3 {
388
+ color: #f3f4f6; /* Dark title */
389
+ border-bottom: 1px solid #4b5563;
390
+ }
391
+ }
392
  """
393
 
394
+ # Build the Gradio Blocks interface
395
+ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo: # Use a slightly different theme
396
  gr.Markdown(
397
  """
398
+ <div style="text-align: center;">
399
+ <img src="https://img.icons8.com/plasticine/100/000000/document.png" alt="Icon" style="vertical-align: middle; height: 50px;">
400
+ <h1 class='gr-title' style='display: inline-block; margin-bottom: 0; vertical-align: middle; margin-left: 10px;'>Landlord-Tenant Rights Bot</h1>
401
+ </div>
402
+ <p class='gr-description'>
403
+ Ask questions about tenant rights and landlord-tenant laws based on state-specific legal documents.
404
+ Provide your OpenAI API key, select a state, and enter your question.
405
+ Get your key from <a href='https://platform.openai.com/api-keys' target='_blank'>OpenAI</a>.
406
+ </p>
407
  """
408
  )
409
 
410
+ with gr.Column(elem_classes="gr-form"): # Use elem_classes if defined in CSS, otherwise elem_id
411
+ # Input Components defined *within* gr.Blocks()
412
  api_key_input = gr.Textbox(
413
+ label="OpenAI API Key",
414
  type="password",
415
+ placeholder="Enter your OpenAI API key (e.g., sk-...)",
416
+ info="Required to process your query.", # Use info parameter
417
+ #elem_classes="input-field" # Use if defined in CSS
418
  )
419
  query_input = gr.Textbox(
420
+ label="Your Question",
421
+ placeholder="e.g., What are the rules for security deposit returns?",
422
+ lines=4, # Increased lines slightly
423
+ info="Enter your question about landlord-tenant law here.",
424
+ #elem_classes="input-field"
425
  )
426
  state_input = gr.Dropdown(
427
+ label="Select State",
428
+ choices=available_states,
429
+ value=available_states[0] if available_states else None, # Default to first state or None
430
  allow_custom_value=False,
431
+ info="Select the state your question applies to.",
432
+ #elem_classes="input-field"
433
  )
434
 
435
  with gr.Row():
436
+ clear_button = gr.Button("Clear Inputs", variant="secondary")
437
+ submit_button = gr.Button("Submit Query", variant="primary")
438
 
439
  output = gr.Markdown(
440
+ label="Answer",
441
+ value="Your answer will appear here...", # Initial placeholder text
442
+ elem_classes="output-markdown" # Apply custom class for styling
443
  )
444
 
445
+ gr.Markdown("---") # Separator
446
+ gr.Markdown("### Examples")
447
  gr.Examples(
448
  examples=example_queries,
449
+ inputs=[query_input, state_input], # Examples only fill these two
450
+ # outputs=output, # Output is handled by the main submit button click
451
+ # fn=query_interface, # Don't run the function directly on example click
452
+ # cache_examples=False, # Don't cache example runs if fn were used
453
+ label="Click an example to load it",
454
  examples_per_page=5
455
  )
456
 
457
+
458
  gr.Markdown(
459
  """
460
+ <div class='footnote' style='margin-top: 30px; padding-top: 15px; border-top: 1px solid #e5e7eb; text-align: center; font-size: 0.9em; color: #6c757d;'>
461
+ Developed by Nischal Subedi. Follow on
462
+ <a href='https://www.linkedin.com/in/nischal1/' target='_blank' style='color: #007bff; text-decoration: none;'>LinkedIn</a> |
463
+ Read insights on <a href='https://datascientistinsights.substack.com/' target='_blank' style='color: #007bff; text-decoration: none;'>Substack</a>.
464
+ <br>Disclaimer: This bot provides informational summaries based on AI interpretation and retrieved data. It is not a substitute for professional legal advice.
465
+ </div>
466
  """
467
+ , elem_classes="footnote") # Apply footnote class if defined in CSS
468
 
469
+ # Connect Actions to Functions
470
  submit_button.click(
471
  fn=query_interface,
472
  inputs=[api_key_input, query_input, state_input],
473
+ outputs=output,
474
+ api_name="submit_query" # Add API name for potential programmatic use
475
  )
476
+ # Clear button clears all inputs and the output
477
  clear_button.click(
478
+ fn=lambda: ( # Return empty values for each output component
479
+ "", # api_key_input
480
+ "", # query_input
481
+ available_states[0] if available_states else None, # state_input (reset to default)
482
+ "Cleared. Ready for new query..." # output
483
+ ),
484
+ inputs=[], # No inputs needed for clear
485
  outputs=[api_key_input, query_input, state_input, output]
486
  )
487
 
488
+ logging.info("Gradio interface created successfully.")
489
+ return demo # Corrected return variable name
490
+
491
 
492
+ # Main execution block
493
  if __name__ == "__main__":
494
+ logging.info("Starting application setup...")
495
  try:
496
+ # --- Configuration ---
497
+ PDF_PATH = os.getenv("PDF_PATH", "data/tenant-landlord.pdf") # Use env var or default
498
+ VECTOR_DB_PATH = os.getenv("VECTOR_DB_PATH", "./vector_db_store") # For persistent DBs
499
+
500
+ # Check if PDF exists
501
+ if not os.path.exists(PDF_PATH):
502
+ logging.error(f"FATAL: PDF file not found at the specified path: {PDF_PATH}")
503
+ logging.error("Please ensure the PDF file exists or set the PDF_PATH environment variable correctly.")
504
+ # Exit if the core data file is missing
505
+ exit(1) # Or raise an exception
506
+
507
+
508
+ # --- Initialization ---
509
+ # Initialize VectorDatabase (using placeholder for now)
510
+ # Pass path if your implementation uses it (like ChromaDB)
511
+ vector_db_instance = VectorDatabase(persist_directory=VECTOR_DB_PATH)
512
+
513
+ # Initialize RAGSystem with the database instance
514
+ rag = RAGSystem(vector_db=vector_db_instance)
515
+
516
+ # --- Data Loading ---
517
+ # Load the PDF data into the vector database
518
+ # This step is crucial and needs the *real* vector_db.py
519
+ logging.info(f"Loading data from PDF: {PDF_PATH}")
520
+ states_loaded = rag.load_pdf(PDF_PATH)
521
+ if states_loaded == 0 and not isinstance(vector_db_instance, VectorDatabase): # Check if using placeholder
522
+ logging.warning("PDF loading reported 0 states. Check PDF content and vector_db implementation.")
523
+ # Decide if you want to proceed without data or halt
524
+
525
+ # --- Interface Setup & Launch ---
526
+ logging.info("Setting up Gradio interface...")
527
+ app_interface = rag.gradio_interface()
528
+
529
+ logging.info("Launching Gradio app...")
530
+ # Launch the Gradio app
531
+ # share=True generates a public link (use with caution)
532
+ # server_name="0.0.0.0" makes it accessible on the network
533
+ app_interface.launch(server_name="0.0.0.0", server_port=7860, share=False)
534
+ # For cloud deployments (like Hugging Face Spaces), you might not need server_name/port
535
+
536
+ except FileNotFoundError as fnf_error:
537
+ logging.error(f"Initialization failed: {str(fnf_error)}")
538
+ # No need to raise again, already logged. Maybe print a message.
539
+ print(f"Error: {str(fnf_error)}. Please check file paths.")
540
+ except ImportError as import_error:
541
+ logging.error(f"Import error: {str(import_error)}. Please ensure all required libraries (gradio, langchain, openai, etc.) and the 'vector_db.py' file are installed and accessible.")
542
+ print(f"Import Error: {str(import_error)}. Check your dependencies.")
543
  except Exception as e:
544
+ # Catch any other unexpected errors during setup or launch
545
+ logging.error(f"An unexpected error occurred during application startup: {str(e)}", exc_info=True)
546
+ print(f"Fatal Error: {str(e)}. Check logs for details.")
data/tenant-landlord.pdf DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:d781628172a332f25588c4ec80809f5efdf8446d68ee6f2e2e7929e7b7e4f545
3
- size 3089450
 
 
 
 
requirements.txt CHANGED
@@ -11,4 +11,3 @@ pandas==2.2.2
11
  huggingface_hub==0.23.4
12
  pymupdf==1.24.9
13
  langchain_community
14
-
 
11
  huggingface_hub==0.23.4
12
  pymupdf==1.24.9
13
  langchain_community