Johan713 committed on
Commit
60feff1
·
verified ·
1 Parent(s): 8f70fd1

Update app2.py

Browse files
Files changed (1) hide show
  1. app2.py +1255 -424
app2.py CHANGED
@@ -1,424 +1,1255 @@
1
- import gradio as gr
2
- from dataclasses import dataclass
3
- import os
4
- from uuid import uuid4
5
- import requests
6
- import wikipedia
7
- import googlesearch
8
- from sentence_transformers import SentenceTransformer
9
- import PyPDF2
10
- import docx
11
- import faiss
12
- import numpy as np
13
- import json
14
- import re
15
- from sklearn.feature_extraction.text import TfidfVectorizer
16
- from concurrent.futures import ThreadPoolExecutor
17
- import nltk
18
- import spacy
19
- from dotenv import load_dotenv
20
-
21
# Load environment variables (e.g. HF_TOKEN, DEPLOYED) from a local .env file.
load_dotenv()

# NLTK corpora required by downstream text processing.
nltk.download('wordnet')
nltk.download('punkt')

# Best-effort download of the spaCy English model; the spacy.load below
# will still fail loudly if the model is genuinely unavailable.
try:
    spacy.cli.download("en_core_web_sm")
except Exception as e:
    print(f"Error downloading spacy model: {e}")

# Deployment flag and Falcon inference endpoint configuration.
DEPLOYED = os.getenv("DEPLOYED", "false").lower() == "true"
MODEL_NAME = "tiiuae/falcon-180B-chat"
HEADERS = {"Authorization": f"Bearer {os.getenv('HF_TOKEN')}"}
ENDPOINT_URL = f"https://api-inference.huggingface.co/models/{MODEL_NAME}"
DEFAULT_INSTRUCTIONS = """LexAI is an advanced legal AI assistant powered by Falcon 180B, with capabilities including contract analysis, legal research, predictive litigation analysis, intelligent legal drafting, answering common legal questions, document summarization, legal entity recognition, sentiment analysis, and more. LexAI can perform document retrieval, Wikipedia searches, and internet searches to provide comprehensive assistance."""

# Shared sentence-embedding model (384-dim output) and spaCy NER pipeline.
sentence_model = SentenceTransformer('all-MiniLM-L6-v2')
ner_model = spacy.load("en_core_web_sm")

# NOTE(review): DocumentStore builds its own FAISS index; this module-level
# index appears unused — confirm before removing.
index = faiss.IndexFlatL2(384)

# NOTE(review): tfidf_vectorizer is initialized but not used in any visible
# function — confirm whether it is dead code.
tfidf_vectorizer = TfidfVectorizer(stop_words='english')
@dataclass
class Rating:
    """A single user rating event for one prompt/response exchange."""
    prompt: str        # the user prompt that was rated
    response: str      # the model response that was rated
    ratings: list[str]  # rating labels attached by the user
class Document:
    """An uploaded document: raw text, metadata, and a lazily computed embedding."""

    def __init__(self, content: str, metadata: dict):
        self.content = content
        self.metadata = metadata
        # Embedding stays None until compute_embedding() is called.
        self.embedding = None

    def compute_embedding(self):
        """Encode the full document text with the shared sentence model."""
        encoded_batch = sentence_model.encode([self.content])
        self.embedding = encoded_batch[0]
class DocumentStore:
    """In-memory vector store over uploaded documents, backed by FAISS."""

    def __init__(self):
        self.documents = []
        # 384 matches the all-MiniLM-L6-v2 embedding dimension.
        self.index = faiss.IndexFlatL2(384)

    def add_document(self, document: Document):
        """Embed *document* and register it for similarity search."""
        document.compute_embedding()
        self.documents.append(document)
        self.index.add(np.array([document.embedding]))

    def search(self, query: str, k: int = 5):
        """Return up to *k* stored documents most similar to *query*.

        BUG FIX: FAISS pads the result with index -1 when fewer than *k*
        vectors are stored, so the original blind indexing either raised
        IndexError or silently returned the wrong document when the store
        was empty or small. Sentinel / out-of-range indices are now
        filtered out, and an empty store short-circuits to [].
        """
        if not self.documents:
            return []
        query_vector = sentence_model.encode([query])[0]
        distances, indices = self.index.search(np.array([query_vector]), k)
        return [self.documents[i] for i in indices[0] if 0 <= i < len(self.documents)]

document_store = DocumentStore()
def extract_text_from_file(file):
    """Extract plain text from an uploaded PDF, DOCX, or text file.

    Dispatches on the filename extension; anything that is not .pdf or
    .docx is decoded as UTF-8 text.
    """
    if file.name.endswith('.pdf'):
        reader = PyPDF2.PdfReader(file)
        text = ""
        for page in reader.pages:
            # BUG FIX: extract_text() can return None for image-only
            # pages; guard so concatenation never raises TypeError.
            text += page.extract_text() or ""
    elif file.name.endswith('.docx'):
        doc = docx.Document(file)
        text = "\n".join([paragraph.text for paragraph in doc.paragraphs])
    else:
        text = file.read().decode('utf-8')
    return text
def query_falcon(prompt: str, max_tokens: int = 100, temperature: float = 0.7) -> str:
    """POST *prompt* to the hosted Falcon endpoint and return its text.

    On any non-200 response the status is logged and a fixed error string
    is returned, so callers never see an exception from this layer.
    """
    generation_params = {
        "max_new_tokens": max_tokens,
        "do_sample": True,
        "temperature": temperature,
        "top_p": 0.9,
        "stop": ["User:"],
    }
    payload = {"inputs": prompt, "parameters": generation_params}

    response = requests.post(ENDPOINT_URL, headers=HEADERS, json=payload)
    if response.status_code != 200:
        print(f"Error: {response.status_code} - {response.text}")
        return "Error occurred while querying Falcon 180B."
    return response.json()[0]['generated_text']
def summarize_text(text: str, max_length: int = 150) -> str:
    """Ask the model for a roughly *max_length*-word summary of *text*."""
    summary_prompt = (
        f"Summarize the following text in about {max_length} words:"
        f"\n\n{text}\n\nSummary:"
    )
    return query_falcon(summary_prompt, max_tokens=max_length)
def extract_legal_entities(text: str):
    """Return entity strings of legally relevant NER types found in *text*."""
    relevant_labels = {"PERSON", "ORG", "GPE", "LAW"}
    parsed = ner_model(text)
    return [entity.text for entity in parsed.ents if entity.label_ in relevant_labels]
def analyze_sentiment(text: str) -> str:
    """Classify *text* as Positive / Negative / Neutral via the LLM."""
    sentiment_prompt = (
        "Analyze the sentiment of the following text. "
        "Respond with either 'Positive', 'Negative', or 'Neutral':"
        f"\n\n{text}\n\nSentiment:"
    )
    return query_falcon(sentiment_prompt, max_tokens=10)
def extract_keywords(text: str, top_n: int = 5):
    """Ask the model for the top *top_n* keywords and parse its comma list."""
    keyword_prompt = f"Extract the top {top_n} keywords from the following text:\n\n{text}\n\nKeywords:"
    raw_reply = query_falcon(keyword_prompt, max_tokens=50)
    keywords = [piece.strip() for piece in raw_reply.split(',')]
    return keywords[:top_n]
def get_legal_definitions(term: str) -> str:
    """Fetch a model-generated legal definition of *term*."""
    definition_prompt = f"Provide a legal definition for the term '{term}':"
    reply = query_falcon(definition_prompt, max_tokens=100)
    return reply.strip()
def perform_case_law_search(query: str):
    """Ask the model to summarise case law relevant to *query*."""
    case_prompt = (
        "Perform a case law search for the following query and provide "
        f"a summary of relevant cases:\n\n{query}\n\nRelevant cases:"
    )
    return query_falcon(case_prompt, max_tokens=200)
def generate_legal_document(document_type: str, details: dict) -> str:
    """Draft a *document_type* from the key/value *details* via the LLM."""
    prompt_lines = [f"Generate a {document_type} with the following details:\n"]
    for key, value in details.items():
        prompt_lines.append(f"{key}: {value}\n")
    draft_prompt = "".join(prompt_lines) + f"\nGenerated {document_type}:"
    return query_falcon(draft_prompt, max_tokens=500).strip()
def perform_wikipedia_search(query):
    """Summarise the top Wikipedia hit for *query*.

    Returns a three-sentence summary plus the article URL, or a
    human-readable error string.

    FIX: the original bare ``except:`` swallowed every failure type
    (including KeyboardInterrupt) with no trace; narrowed to
    ``Exception`` and the cause is now logged.
    """
    try:
        search_results = wikipedia.search(query)
        if search_results:
            page = wikipedia.page(search_results[0])
            summary = wikipedia.summary(search_results[0], sentences=3)
            return f"Wikipedia: {summary}\n\nFull article: {page.url}"
        else:
            return "No Wikipedia results found."
    except Exception as e:
        print(f"Wikipedia search failed: {e}")
        return "Error occurred while searching Wikipedia."
def perform_internet_search(query):
    """Return the top three Google results for *query* as a text block.

    FIX: the original bare ``except:`` hid every failure (including
    KeyboardInterrupt); narrowed to ``Exception`` with logging so
    failures are diagnosable.
    """
    try:
        search_results = list(googlesearch.search(query, num_results=3))
        if search_results:
            return "Internet search results:\n" + "\n".join(search_results)
        else:
            return "No internet search results found."
    except Exception as e:
        print(f"Internet search failed: {e}")
        return "Error occurred while performing internet search."
def chat_accordion():
    """Build the chat parameter controls and hidden instruction widgets.

    Returns the Gradio components consumed by chat_tab():
    (temperature, top_p, instructions, user_name, bot_name, session_id,
    max_tokens).
    """
    with gr.Accordion("Parameters", open=False):
        temperature = gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.7,
            step=0.1,
            interactive=True,
            label="Temperature",
        )
        top_p = gr.Slider(
            minimum=0.1,
            maximum=0.99,
            value=0.9,
            step=0.01,
            interactive=True,
            label="p (nucleus sampling)",
        )

        max_tokens = gr.Slider(
            minimum=64,
            maximum=1024,
            value=64,
            step=1,
            interactive=True,
            label="Max Tokens",
        )

        # BUG FIX: the original passed ``value=uuid4`` (the function
        # object). Gradio calls callables to produce per-session default
        # values, but uuid4() returns a UUID instance rather than the str
        # a Textbox expects — wrap it so each session gets a fresh string.
        session_id = gr.Textbox(
            value=lambda: str(uuid4()),
            interactive=False,
            visible=False,
        )

    with gr.Accordion("Instructions", open=False, visible=False):
        instructions = gr.Textbox(
            placeholder="The Instructions",
            value=DEFAULT_INSTRUCTIONS,
            lines=16,
            interactive=True,
            label="Instructions",
            max_lines=16,
            show_label=False,
        )
        with gr.Row():
            with gr.Column():
                user_name = gr.Textbox(
                    lines=1,
                    label="username",
                    value="User",
                    interactive=True,
                    placeholder="Username: ",
                    show_label=False,
                    max_lines=1,
                )
            with gr.Column():
                bot_name = gr.Textbox(
                    lines=1,
                    value="LexAI",
                    interactive=True,
                    placeholder="Bot Name",
                    show_label=False,
                    max_lines=1,
                    visible=False,
                )

    return temperature, top_p, instructions, user_name, bot_name, session_id, max_tokens
def format_chat_prompt(message: str, chat_history, instructions: str, user_name: str, bot_name: str):
    """Flatten instructions + prior turns + the new message into one prompt.

    Falls back to DEFAULT_INSTRUCTIONS when *instructions* is empty; the
    prompt ends with "{bot_name}:" so the model completes the bot turn.
    """
    effective_instructions = (instructions or DEFAULT_INSTRUCTIONS).strip()
    segments = [effective_instructions]
    for user_message, bot_message in chat_history:
        segments.append(f"{user_name}: {user_message}\n{bot_name}: {bot_message}")
    segments.append(f"{user_name}: {message}\n{bot_name}:")
    return "\n".join(segments)
def run_chat(message: str, history, instructions: str, user_name: str, bot_name: str, temperature: float, top_p: float, session_id: str, max_tokens: int, uploaded_file: gr.File):
    """Main chat handler: optionally ingest an upload, gather context from
    the document store / Wikipedia / web / case-law search, query Falcon,
    and post-process the reply.

    NOTE(review): top_p and session_id are accepted but never used here —
    confirm whether top_p should be forwarded to query_falcon.
    """
    if uploaded_file is not None:
        # Index the new document (with derived metadata) before retrieval
        # so the search below can already find it.
        document_text = extract_text_from_file(uploaded_file)
        summary = summarize_text(document_text)
        legal_entities = extract_legal_entities(document_text)
        sentiment = analyze_sentiment(document_text)
        keywords = extract_keywords(document_text)

        doc = Document(content=document_text, metadata={
            "filename": uploaded_file.name,
            "summary": summary,
            "legal_entities": legal_entities,
            "sentiment": sentiment,
            "keywords": keywords
        })
        document_store.add_document(doc)

        message += f"\n[System: A document '{uploaded_file.name}' has been uploaded and processed.]"

    # Retrieval-augmented context: nearest stored documents to the message.
    relevant_docs = document_store.search(message)
    retrieved_context = "\n".join([doc.content for doc in relevant_docs])

    # Wikipedia and web searches are independent, so run them in parallel.
    with ThreadPoolExecutor(max_workers=2) as executor:
        wiki_future = executor.submit(perform_wikipedia_search, message)
        internet_future = executor.submit(perform_internet_search, message)

        wiki_result = wiki_future.result()
        internet_result = internet_future.result()

    case_law_results = perform_case_law_search(message)

    full_context = f"""Retrieved Documents:\n{retrieved_context}

Wikipedia Search:\n{wiki_result}

Internet Search:\n{internet_result}

Relevant Case Law:\n{case_law_results}
"""

    # Assemble the conversational prompt, then append the gathered context.
    prompt = format_chat_prompt(message, history, instructions, user_name, bot_name)
    prompt += f"\nAdditional Context:\n{full_context}\n\nBased on the above information, please provide a comprehensive response:"

    response = query_falcon(prompt, max_tokens=max_tokens, temperature=temperature)

    # Augment the raw model output with definitions/drafts when requested.
    response = post_process_output(response, message)

    return response
def post_process_output(output: str, original_query: str) -> str:
    """Append definitions, drafted documents, or sentiment to *output*
    depending on cues detected in the original user query."""
    query_lower = original_query.lower()

    # Definition requests: look up every non-article word in the query.
    if "define" in query_lower or "meaning of" in query_lower:
        terms = re.findall(r'\b(?!(?:the|a|an)\b)\w+', original_query)
        for term in terms:
            definition = get_legal_definitions(term)
            output += f"\n\nLegal definition of '{term}': {definition}"

    # Drafting requests: generate the named document type.
    if "draft" in query_lower or "create document" in query_lower:
        doc_type_match = re.search(r'draft a (\w+)', query_lower)
        if doc_type_match:
            doc_type = doc_type_match.group(1)
            details = extract_document_details(original_query)
            generated_document = generate_legal_document(doc_type, details)
            output += f"\n\nGenerated {doc_type.capitalize()}:\n\n{generated_document}"

    # Analysis requests: append sentiment of the produced answer.
    if "analyze" in query_lower and "document" in query_lower:
        sentiment = analyze_sentiment(output)
        output += f"\n\nOverall sentiment of the analysis: {sentiment}"

    return output
def extract_document_details(query: str) -> dict:
    """Pull 'parties', 'date', and 'terms' fields out of a free-text query.

    Expected inline format: ``parties: ..., date: ..., terms: ...`` with
    each value terminated by a comma, period, or end of string.

    BUG FIX: the original called ``.group(1)`` directly on the result of
    ``re.search``, so a query that merely mentioned a keyword without the
    ``key: value`` syntax crashed with AttributeError on None. Fields
    that do not match are now simply omitted from the result.
    """
    details = {}
    query_lower = query.lower()
    for field in ("parties", "date", "terms"):
        if field in query_lower:
            match = re.search(rf'{field}: (.*?)(,|\.|$)', query, re.IGNORECASE)
            if match:
                details[field] = match.group(1)
    return details
def chat_tab():
    """Assemble the main chat UI: parameter accordion, example prompts,
    file upload, and the ChatInterface wired to run_chat."""
    with gr.Column():
        with gr.Row():
            (
                temperature,
                top_p,
                instructions,
                user_name,
                bot_name,
                session_id,
                max_tokens
            ) = chat_accordion()

        with gr.Column():
            with gr.Blocks():
                # Canned prompts shown beneath the input box as examples.
                prompt_examples = [
                    ["Analyze this contract for potential risks and summarize key points."],
                    ["Find relevant case law for an intellectual property dispute in the software industry."],
                    ["What are the key clauses in a non-disclosure agreement? Draft a template."],
                    ["Predict the outcome of this employment discrimination case based on recent precedents."],
                    ["Draft a cease and desist letter for trademark infringement. Parties: TechCorp and InnovateNow, Date: 2023-07-22"],
                ]
                file_upload = gr.File(label="Upload Legal Document for Analysis")
                gr.ChatInterface(
                    fn=run_chat,
                    chatbot=gr.Chatbot(
                        height=620,
                        render=False,
                        show_label=False,
                        avatar_images=("images/user_icon.png", "images/lexai_icon.png"),
                    ),
                    textbox=gr.Textbox(
                        placeholder="Ask LexAI about legal matters, analyze documents, or request drafting assistance...",
                        render=False,
                        scale=7,
                    ),
                    examples=prompt_examples,
                    # Order must match run_chat's parameters after
                    # (message, history): instructions, user_name, bot_name,
                    # temperature, top_p, session_id, max_tokens, file.
                    additional_inputs=[
                        instructions,
                        user_name,
                        bot_name,
                        temperature,
                        top_p,
                        session_id,
                        max_tokens,
                        file_upload
                    ],
                    submit_btn="Send",
                    stop_btn="Stop",
                    retry_btn="🔄 Retry",
                    undo_btn="↩️ Delete",
                    clear_btn="🗑️ Clear",
                )
def introduction():
    """Render the landing banner: logo column plus feature-overview column."""
    with gr.Column(scale=2):
        gr.Image("images/lexai_logo.png", elem_id="banner-image", show_label=False)
    with gr.Column(scale=5):
        gr.Markdown(
            """# LexAI: Advanced Legal AI Assistant
**LexAI is a cutting-edge AI-driven legal assistant with a wide range of capabilities to support legal professionals and provide information to the public.**

This demo is powered by the state-of-the-art Falcon 180B language model and specialized legal knowledge.

🧪 LexAI offers the following key features:
1. Contract Analysis and Risk Assessment
2. Legal Research Assistant with Case Law Search
3. Predictive Litigation Analysis
4. Intelligent Legal Document Drafting
5. Legal Chatbot for Public Access
6. Document Retrieval, Analysis, and Summarization
7. Legal Entity Recognition
8. Sentiment Analysis for Legal Texts
9. Keyword Extraction from Legal Documents
10. Legal Definition Lookup
11. Wikipedia and Internet Search Integration

⚠️ **Disclaimer**: LexAI is an AI assistant and does not substitute for professional legal advice. Always consult with a qualified legal professional for specific legal matters.
"""
        )
def main():
    """Compose the full Gradio app (intro row + chat row) and return it."""
    page_css = """#chat_container {height: 820px; width: 1000px; margin-left: auto; margin-right: auto;}
#chatbot {height: 600px; overflow: auto;}
#create_container {height: 750px; margin-left: 0px; margin-right: 0px;}
#tokenizer_renderer span {white-space: pre-wrap}
"""
    with gr.Blocks(css=page_css) as demo:
        with gr.Row():
            introduction()
        with gr.Row():
            chat_tab()

    return demo
def start_demo():
    """Build the app and launch it (public share link only when not deployed)."""
    demo = main()
    launch_kwargs = {"show_api": False} if DEPLOYED else {"share": True}
    demo.queue().launch(**launch_kwargs)

if __name__ == "__main__":
    start_demo()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import pandas as pd
3
+ import plotly.express as px
4
+ import requests
5
+ from ai71 import AI71
6
+ import PyPDF2
7
+ import io
8
+ import random
9
+ import docx
10
+ from docx import Document
11
+ from docx.shared import Inches
12
+ from datetime import datetime
13
+ import re
14
+ import base64
15
+ from typing import List, Dict, Any
16
+ import matplotlib.pyplot as plt
17
+ from bs4 import BeautifulSoup
18
+ from io import StringIO
19
+ import wikipedia
20
+ from googleapiclient.discovery import build
21
+ from typing import List, Optional
22
+ from httpx_sse import SSEError
23
+
24
+ # Error handling for optional dependencies
25
+ try:
26
+ from streamlit_lottie import st_lottie
27
+ except ImportError:
28
+ st.error("Missing dependency: streamlit_lottie. Please install it using 'pip install streamlit-lottie'")
29
+ st.stop()
30
+
31
# Constants
# SECURITY FIX: the AI71 API key was committed in source. Prefer the
# AI71_API_KEY environment variable; the old literal is kept only as a
# fallback so existing deployments keep working — rotate that key and
# remove the fallback.
import os

AI71_API_KEY = os.getenv("AI71_API_KEY", "api71-api-92fc2ef9-9f3c-47e5-a019-18e257b04af2")

# Initialize AI71 client; abort the app with a visible error on failure.
try:
    ai71 = AI71(AI71_API_KEY)
except Exception as e:
    st.error(f"Failed to initialize AI71 client: {str(e)}")
    st.stop()

# Initialize chat history and other session state variables
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
if "uploaded_documents" not in st.session_state:
    st.session_state.uploaded_documents = []
if "case_precedents" not in st.session_state:
    st.session_state.case_precedents = []
def analyze_uploaded_document(file):
    """Extract the full text of an uploaded PDF, DOCX, or plain-text file.

    Dispatches on the uploaded file's MIME type; unknown types are
    decoded as UTF-8 text.
    """
    content = ""
    if file.type == "application/pdf":
        pdf_reader = PyPDF2.PdfReader(file)
        for page in pdf_reader.pages:
            # BUG FIX: extract_text() can return None for image-only
            # pages; guard so concatenation never raises TypeError.
            content += page.extract_text() or ""
    elif file.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
        doc = docx.Document(file)
        for para in doc.paragraphs:
            content += para.text + "\n"
    else:
        content = file.getvalue().decode("utf-8")
    return content
def get_document_based_response(prompt, document_content):
    """Answer *prompt* strictly from *document_content* via the AI71 API.

    Returns the model's reply, or an error string if the API call fails.
    """
    conversation = [
        {"role": "system", "content": "You are a helpful legal assistant. Answer questions based on the provided document content."},
        {"role": "user", "content": f"Document content: {document_content}\n\nQuestion: {prompt}"},
    ]
    try:
        completion = ai71.chat.completions.create(
            model="tiiuae/falcon-180b-chat",
            messages=conversation,
            stream=False,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"An error occurred while processing your request: {str(e)}"
def get_ai_response(prompt: str) -> str:
    """Gets the AI response based on the given prompt.

    Tries a streaming completion first; if streaming fails, retries once
    without streaming. On total failure an apologetic error string is
    returned instead of raising.
    """
    conversation = [
        {"role": "system", "content": "You are a helpful legal assistant with advanced capabilities."},
        {"role": "user", "content": prompt},
    ]

    try:
        # First, try streaming and accumulate the delta chunks.
        pieces = []
        stream = ai71.chat.completions.create(
            model="tiiuae/falcon-180b-chat",
            messages=conversation,
            stream=True,
        )
        for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta:
                pieces.append(delta)
        return "".join(pieces)
    except Exception as e:
        print(f"Streaming failed, falling back to non-streaming request. Error: {e}")

    try:
        # Fall back to a single non-streaming request.
        completion = ai71.chat.completions.create(
            model="tiiuae/falcon-180b-chat",
            messages=conversation,
            stream=False,
        )
        return completion.choices[0].message.content
    except Exception as e:
        print(f"An error occurred while getting AI response: {e}")
        return f"I apologize, but I encountered an error while processing your request. Error: {str(e)}"
def display_chat_history():
    """Render every stored chat entry: tuples are plain user/bot turns,
    dicts carry structured Wikipedia or web-search results."""
    for message in st.session_state.chat_history:
        if isinstance(message, tuple):
            if len(message) != 2:
                st.error(f"Unexpected message format: {message}")
                continue
            user_msg, bot_msg = message
            st.info(f"**You:** {user_msg}")
            st.success(f"**Bot:** {bot_msg}")
        elif isinstance(message, dict):
            entry_type = message.get('type')
            if entry_type == 'wikipedia':
                link_part = f"[Read more on Wikipedia]({message.get('url')})" if message.get('url') else ""
                st.success(f"**Bot:** Wikipedia Summary:\n{message.get('summary', 'No summary available.')}\n" + link_part)
            elif entry_type == 'web_search':
                web_results_msg = "Web Search Results:\n"
                for result in message.get('results', []):
                    web_results_msg += f"[{result.get('title', 'No title')}]({result.get('link', '#')})\n{result.get('snippet', 'No snippet available.')}\n\n"
                st.success(f"**Bot:** {web_results_msg}")
            else:
                st.error(f"Unknown message type: {message}")
        else:
            st.error(f"Unexpected message format: {message}")
def analyze_document(file) -> str:
    """Analyzes uploaded legal documents.

    Returns the extracted text truncated to the first 5000 characters.

    CONSISTENCY FIX: the extraction logic here was a verbatim copy of
    analyze_uploaded_document; it now delegates to that function so the
    two code paths cannot drift apart. Behavior is unchanged apart from
    inheriting its None-page guard.
    """
    content = analyze_uploaded_document(file)
    return content[:5000]  # Limit content to 5000 characters for analysis
def search_web(query: str, num_results: int = 3) -> List[Dict[str, str]]:
    """Google Custom Search for legally relevant links.

    Returns up to *num_results* dicts with title/link/snippet keys, or []
    on any error.

    SECURITY FIX: the API key and search-engine id were hard-coded in
    source. They are now read from the GOOGLE_API_KEY / GOOGLE_CSE_ID
    environment variables, with the old literals kept only as a fallback
    for existing deployments — rotate and remove them.
    """
    import os
    try:
        api_key = os.getenv("GOOGLE_API_KEY", "AIzaSyD-1OMuZ0CxGAek0PaXrzHOmcDWFvZQtm8")
        cse_id = os.getenv("GOOGLE_CSE_ID", "877170db56f5c4629")
        service = build("customsearch", "v1", developerKey=api_key)

        # Add legal-specific terms to the query
        legal_query = f"legal {query} law case precedent"

        # Request extra results so the relevance filter below can discard
        # non-legal hits and still fill the requested quota.
        res = service.cse().list(q=legal_query, cx=cse_id, num=num_results * 2).execute()

        results = []
        legal_keywords = ("law", "legal", "court", "case", "attorney", "lawyer")
        for item in res.get("items", []):
            title_lower = item["title"].lower()
            snippet_lower = item["snippet"].lower()
            if any(keyword in title_lower or keyword in snippet_lower
                   for keyword in legal_keywords):
                results.append({
                    "title": item["title"],
                    "link": item["link"],
                    "snippet": item["snippet"]
                })
                if len(results) == num_results:
                    break

        return results
    except Exception as e:
        print(f"Error performing web search: {e}")
        return []
def perform_web_search(query: str) -> List[Dict[str, Any]]:
    """
    Performs a web search to find recent legal cost estimates.

    Scrapes the Google results page and keeps the first three hits whose
    snippets contain extractable cost figures.

    FIX: the HTTP request had no timeout, so a stalled connection could
    hang the Streamlit worker indefinitely.
    """
    url = f"https://www.google.com/search?q={query}"
    headers = {'User-Agent': 'Mozilla/5.0'}
    response = requests.get(url, headers=headers, timeout=10)
    soup = BeautifulSoup(response.content, 'html.parser')

    results = []
    for g in soup.find_all('div', class_='g'):
        anchors = g.find_all('a')
        if not anchors:
            continue
        link = anchors[0]['href']
        title_tag = g.find('h3', class_='r')
        title = title_tag.text if title_tag else "No title"
        snippet_tag = g.find('div', class_='s')
        snippet = snippet_tag.text if snippet_tag else "No snippet"

        # Extract cost estimates from the snippet
        cost_estimates = extract_cost_estimates(snippet)

        if cost_estimates:
            results.append({
                "title": title,
                "link": link,
                "cost_estimates": cost_estimates
            })

    return results[:3]  # Return top 3 results with cost estimates
def search_wikipedia(query: str, sentences: int = 2) -> Dict[str, str]:
    """Return {summary, url, title} for the best Wikipedia match of *query*.

    Tries up to five search hits, skipping disambiguation and missing
    pages. Never raises — failures come back inside the summary field.
    """
    try:
        # Clamp very long queries before handing them to the API.
        truncated_query = str(query)[:300]

        candidates = wikipedia.search(truncated_query, results=5)
        if not candidates:
            return {"summary": "No Wikipedia article found.", "url": "", "title": ""}

        # Try each candidate until one yields a usable page + summary.
        for candidate in candidates:
            try:
                page = wikipedia.page(candidate)
                summary = wikipedia.summary(candidate, sentences=sentences)
            except (wikipedia.exceptions.DisambiguationError,
                    wikipedia.exceptions.PageError):
                continue
            return {"summary": summary, "url": page.url, "title": page.title}

        return {"summary": "No relevant Wikipedia article found.", "url": "", "title": ""}
    except Exception as e:
        print(f"Error searching Wikipedia: {e}")
        return {"summary": f"Error searching Wikipedia: {str(e)}", "url": "", "title": ""}
def comprehensive_document_analysis(content: str) -> Dict[str, Any]:
    """Performs a comprehensive analysis of the document, including web and Wikipedia searches."""
    try:
        summary_prompt = f"Analyze the following legal document and provide a summary, potential issues, and key clauses:\n\n{content}"
        document_analysis = get_ai_response(summary_prompt)

        # Derive search terms from the AI summary rather than the raw text.
        topic_prompt = f"Extract the main topics or keywords from the following document summary:\n\n{document_analysis}"
        topics = get_ai_response(topic_prompt)

        related_articles = search_web(topics) or []  # always a list
        wiki_summary = search_wikipedia(topics)

        return {
            "document_analysis": document_analysis,
            "related_articles": related_articles,
            "wikipedia_summary": wiki_summary,
        }
    except Exception as e:
        print(f"Error in comprehensive document analysis: {e}")
        return {
            "document_analysis": "Error occurred during analysis.",
            "related_articles": [],
            "wikipedia_summary": {"summary": "Error occurred", "url": "", "title": ""},
        }
def extract_important_info(text: str) -> str:
    """Extracts and highlights important information from the given text."""
    highlight_prompt = (
        "Extract and highlight the most important legal information "
        f"from the following text. Use markdown to emphasize key points:\n\n{text}"
    )
    return get_ai_response(highlight_prompt)
def fetch_detailed_content(url: str) -> str:
    """Download *url* and return up to ~1000 chars of its main body text.

    FIX: added a request timeout — a stalled server previously hung the
    caller indefinitely. The timeout error is caught by the existing
    handler and reported as a string, like every other failure here.
    """
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')

        # Extract main content (this may need to be adjusted based on the structure of the target websites)
        main_content = soup.find('main') or soup.find('article') or soup.find('div', class_='content')

        if main_content:
            # Extract text from paragraphs
            paragraphs = main_content.find_all('p')
            content = "\n\n".join([p.get_text() for p in paragraphs])

            # Limit content to a reasonable length (e.g., first 1000 characters)
            return content[:1000] + "..." if len(content) > 1000 else content
        else:
            return "Unable to extract detailed content from the webpage."
    except Exception as e:
        return f"Error fetching detailed content: {str(e)}"
def query_public_case_law(query: str) -> List[Dict[str, Any]]:
    """
    Query publicly available case law databases and perform a web search to find related cases.

    Scrapes a site-restricted Google search of law.justia.com and returns
    up to five {case_name, citation, summary, url} dicts; [] on error.

    FIX: added a request timeout so a stalled connection cannot hang the
    app (requests.Timeout subclasses RequestException, so the existing
    handler already covers it).
    """
    search_url = f"https://www.google.com/search?q={query}+case+law+site:law.justia.com"
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'}

    try:
        response = requests.get(search_url, headers=headers, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')

        search_results = soup.find_all('div', class_='g')
        cases = []

        for result in search_results[:5]:  # Limit to top 5 results
            title_elem = result.find('h3', class_='r')
            link_elem = result.find('a')
            snippet_elem = result.find('div', class_='s')

            if title_elem and link_elem and snippet_elem:
                title = title_elem.text
                link = link_elem['href']
                snippet = snippet_elem.text

                # Extract case name and citation from the title
                case_info = title.split(' - ')
                if len(case_info) >= 2:
                    case_name = case_info[0]
                    citation = case_info[1]
                else:
                    case_name = title
                    citation = "Citation not found"

                cases.append({
                    "case_name": case_name,
                    "citation": citation,
                    "summary": snippet,
                    "url": link
                })

        return cases
    except requests.RequestException as e:
        print(f"Error querying case law: {e}")
        return []
def find_case_precedents(case_details: str) -> Dict[str, Any]:
    """Finds relevant case precedents based on provided details.

    Combines an AI analysis of the case, scraped public case law, web
    search hits, and a Wikipedia lookup into one AI-compiled summary.
    Never raises: on failure the summary field carries the error text.
    """
    try:
        # Initial AI analysis of the case details
        analysis_prompt = f"Analyze the following case details and identify key legal concepts and relevant precedents:\n\n{case_details}"
        initial_analysis = get_ai_response(analysis_prompt)

        # Query public case law databases
        public_cases = query_public_case_law(case_details)

        # Perform web search (existing functionality)
        web_results = search_web(f"legal precedent {case_details}", num_results=3)

        # Perform Wikipedia search (existing functionality)
        wiki_result = search_wikipedia(f"legal case {case_details}")

        # Compile all information into a single prompt for the final pass.
        compilation_prompt = f"""Compile a comprehensive summary of case precedents based on the following information:

Initial Analysis: {initial_analysis}

Public Case Law Results:
{public_cases}

Web Search Results:
{web_results}

Wikipedia Information:
{wiki_result['summary']}

Provide a well-structured summary highlighting the most relevant precedents and legal principles."""

        final_summary = get_ai_response(compilation_prompt)

        return {
            "summary": final_summary,
            "public_cases": public_cases,
            "web_results": web_results,
            "wikipedia": wiki_result
        }
    except Exception as e:
        print(f"An error occurred in find_case_precedents: {e}")
        # Degrade gracefully: same shape as the success payload.
        return {
            "summary": f"An error occurred while finding case precedents: {str(e)}",
            "public_cases": [],
            "web_results": [],
            "wikipedia": {
                'title': 'Error',
                'summary': 'Unable to retrieve Wikipedia information',
                'url': ''
            }
        }
def estimate_legal_costs(case_type: str, complexity: str, country: str, state: str = None) -> Dict[str, Any]:
    """
    Estimates legal costs based on case type, complexity, and location.
    Performs web searches for more accurate estimates and lawyer recommendations.

    Args:
        case_type: e.g. "Civil Litigation", "Criminal Defense", "Family Law",
            "Corporate Law"; unknown values fall back to a 1.0 rate multiplier.
        complexity: "Simple", "Moderate" or "Complex" — used directly as a key
            into the rate/hour tables, so it must be one of these three.
        country: "USA", "UK" or "Canada"; unknown countries fall back to USA rates.
        state: optional region, used only to narrow the web search queries.

    Returns:
        Dict with keys "cost_breakdown", "high_cost_areas", "cost_saving_tips",
        "lawyer_recommendations", "finding_best_lawyer_tips",
        "web_search_results" (keys are consumed verbatim by the UI code).
    """
    # Base cost ranges per hour (in USD) for different countries
    base_costs = {
        "USA": {"Simple": (150, 300), "Moderate": (250, 500), "Complex": (400, 1000)},
        "UK": {"Simple": (100, 250), "Moderate": (200, 400), "Complex": (350, 800)},
        "Canada": {"Simple": (125, 275), "Moderate": (225, 450), "Complex": (375, 900)},
    }

    # Adjust costs based on case type
    case_type_multipliers = {
        "Civil Litigation": 1.2,
        "Criminal Defense": 1.5,
        "Family Law": 1.0,
        "Corporate Law": 1.3,
    }

    # Estimate number of hours based on complexity
    estimated_hours = {
        "Simple": (10, 30),
        "Moderate": (30, 100),
        "Complex": (100, 300)
    }

    # Get base cost range for the specified country and complexity
    # (falls back to USA rates for countries not in the table)
    country_costs = base_costs.get(country, base_costs["USA"])
    min_rate, max_rate = country_costs[complexity]

    # Adjust rates based on case type
    multiplier = case_type_multipliers.get(case_type, 1.0)
    min_rate *= multiplier
    max_rate *= multiplier

    # Calculate total cost range
    min_hours, max_hours = estimated_hours[complexity]
    min_total = min_rate * min_hours
    max_total = max_rate * max_hours

    # Perform web search for recent cost estimates
    search_query = f"{case_type} legal costs {country} {state if state else ''}"
    web_results = search_web(search_query)

    # Pull dollar/currency figures out of each result snippet; only results
    # that actually contain an amount are kept.
    web_estimates = []
    for result in web_results:
        estimates = extract_cost_estimates(result['snippet'])
        if estimates:
            web_estimates.append({
                'source': result['title'],
                'link': result['link'],
                'estimates': estimates
            })

    # Search for lawyers or law firms
    lawyer_search_query = f"top rated {case_type} lawyers {country} {state if state else ''}"
    lawyer_results = search_web(lawyer_search_query)

    # Generate cost breakdown
    cost_breakdown = {
        "Hourly rate range": f"${min_rate:.2f} - ${max_rate:.2f}",
        "Estimated hours": f"{min_hours} - {max_hours}",
        "Total cost range": f"${min_total:.2f} - ${max_total:.2f}",
        "Web search estimates": web_estimates
    }

    # Potential high-cost areas
    high_cost_areas = [
        "Expert witnesses (especially in complex cases)",
        "Extensive document review and e-discovery",
        "Multiple depositions",
        "Prolonged trial periods",
        "Appeals process"
    ]

    # Cost-saving tips
    cost_saving_tips = [
        "Consider alternative dispute resolution methods like mediation or arbitration",
        "Be organized and provide all relevant documents upfront to reduce billable hours",
        "Communicate efficiently with your lawyer, bundling questions when possible",
        "Ask for detailed invoices and review them carefully",
        "Discuss fee arrangements, such as flat fees or contingency fees, where applicable"
    ]

    lawyer_tips = [
        "Research and compare multiple lawyers or law firms",
        "Ask for references and read client reviews",
        "Discuss fee structures and payment plans upfront",
        "Consider lawyers with specific expertise in your case type",
        "Ensure clear communication and understanding of your case"
    ]

    return {
        "cost_breakdown": cost_breakdown,
        "high_cost_areas": high_cost_areas,
        "cost_saving_tips": cost_saving_tips,
        # Full, unsliced search results; the UI layer truncates to top 5.
        "lawyer_recommendations": lawyer_results,
        "finding_best_lawyer_tips": lawyer_tips,
        "web_search_results": web_results  # Add this new key
    }
496
+
497
def legal_cost_estimator_ui():
    """Render the Legal Cost Estimator screen (Streamlit side effects only).

    Collects case type / complexity / location, calls estimate_legal_costs()
    on demand, and renders every section of the returned dict. Returns None.
    NOTE(review): the main app body appears to inline a near-duplicate of this
    flow — confirm whether this helper is still called anywhere.
    """
    st.subheader("Legal Cost Estimator")

    case_type = st.selectbox("Select case type", ["Civil Litigation", "Criminal Defense", "Family Law", "Corporate Law"])
    complexity = st.selectbox("Select case complexity", ["Simple", "Moderate", "Complex"])
    country = st.selectbox("Select country", ["USA", "UK", "Canada"])

    # State granularity only exists for the USA; elsewhere it is omitted
    # from the search queries entirely.
    if country == "USA":
        state = st.selectbox("Select state", ["California", "New York", "Texas", "Florida"])
    else:
        state = None

    if st.button("Estimate Costs"):
        with st.spinner("Estimating costs and performing web search..."):
            cost_estimate = estimate_legal_costs(case_type, complexity, country, state)

        st.write("### Estimated Legal Costs")
        # "Web search estimates" is a list, not a scalar, so it gets its own
        # formatted section below instead of a one-line key/value display.
        for key, value in cost_estimate["cost_breakdown"].items():
            if key != "Web search estimates":
                st.write(f"**{key}:** {value}")

        st.write("### Web Search Estimates")
        if cost_estimate["cost_breakdown"]["Web search estimates"]:
            for result in cost_estimate["cost_breakdown"]["Web search estimates"]:
                st.write(f"**Source:** [{result['source']}]({result['link']})")
                st.write("**Estimated Costs:**")
                for estimate in result['estimates']:
                    st.write(f"- {estimate}")
                st.write("---")
        else:
            st.write("No specific cost estimates found from web search.")

        st.write("### Potential High-Cost Areas")
        for area in cost_estimate["high_cost_areas"]:
            st.write(f"- {area}")

        st.write("### Cost-Saving Tips")
        for tip in cost_estimate["cost_saving_tips"]:
            st.write(f"- {tip}")

        st.write("### Recommended Lawyers/Law Firms")
        for lawyer in cost_estimate["lawyer_recommendations"][:5]:  # Display top 5 recommendations
            st.write(f"**[{lawyer['title']}]({lawyer['link']})**")
            st.write(lawyer["snippet"])
            st.write("---")
542
+
543
def extract_cost_estimates(text: str) -> List[str]:
    """
    Extracts cost estimates from the given text.

    Recognizes three money formats: "$1,000.00", "1,000.00 USD" (amount then
    currency code), and "USD 1,000.00" (currency code then amount). Supported
    codes are USD, GBP, CAD and EUR.

    Args:
        text: Free text (e.g. a search-result snippet) to scan.

    Returns:
        Matched amount strings in order of first appearance, duplicates
        removed (previously the same figure could be reported repeatedly).
    """
    patterns = [
        r'\$\d{1,3}(?:,\d{3})*(?:\.\d{2})?',  # Matches currency amounts like $1,000.00
        r'\d{1,3}(?:,\d{3})*(?:\.\d{2})?\s*(?:USD|GBP|CAD|EUR)',  # Matches amounts with currency codes
        r'(?:USD|GBP|CAD|EUR)\s*\d{1,3}(?:,\d{3})*(?:\.\d{2})?'  # Matches currency codes before amounts
    ]

    estimates = []
    seen = set()  # dedupe while keeping first-seen order
    for pattern in patterns:
        for match in re.findall(pattern, text):
            if match not in seen:
                seen.add(match)
                estimates.append(match)

    return estimates
559
+
560
def generate_legal_form(form_type: str, user_details: Dict[str, str], nation: str, state: str = None) -> Dict[str, Any]:
    """
    Generates a legal form based on user input, nation, and state (if applicable).
    Creates downloadable .txt and .docx files.

    Args:
        form_type: One of "Power of Attorney", "Non-Disclosure Agreement",
            "Simple Will", "Lease Agreement", "Employment Contract".
        user_details: Field values for the chosen form; the required keys
            depend on form_type and a missing key raises KeyError.
        nation: "USA" or "UK" select a governing-law clause; any other value
            yields an empty clause.
        state: US state name substituted into the clause template (USA only).

    Returns:
        {"form_content": str, "txt_file": io.StringIO, "docx_file": io.BytesIO}
        on success, or {"error": "Unsupported form type"} otherwise.
    """
    current_date = datetime.now().strftime("%B %d, %Y")

    # Helper function to get jurisdiction-specific clauses
    def get_jurisdiction_clauses(form_type, nation, state):
        # This would ideally be a comprehensive database of clauses for different jurisdictions
        # For demonstration, we'll use a simplified version
        clauses = {
            "USA": {
                "Power of Attorney": "This Power of Attorney is governed by the laws of the State of {state}.",
                "Non-Disclosure Agreement": "This Agreement shall be governed by and construed in accordance with the laws of the State of {state}.",
                "Simple Will": "This Will shall be construed in accordance with the laws of the State of {state}.",
                "Lease Agreement": "This Lease Agreement is subject to the landlord-tenant laws of the State of {state}.",
                "Employment Contract": "This Employment Contract is governed by the labor laws of the State of {state}."
            },
            "UK": {
                "Power of Attorney": "This Power of Attorney is governed by the laws of England and Wales.",
                "Non-Disclosure Agreement": "This Agreement shall be governed by and construed in accordance with the laws of England and Wales.",
                "Simple Will": "This Will shall be construed in accordance with the laws of England and Wales.",
                "Lease Agreement": "This Lease Agreement is subject to the landlord and tenant laws of England and Wales.",
                "Employment Contract": "This Employment Contract is governed by the employment laws of England and Wales."
            },
            # Add more countries as needed
        }
        # Unknown nation/form combinations fall through to "" — .format(state=...)
        # on the empty string is a harmless no-op.
        return clauses.get(nation, {}).get(form_type, "").format(state=state)

    jurisdiction_clause = get_jurisdiction_clauses(form_type, nation, state)

    if form_type == "Power of Attorney":
        form_content = f"""
POWER OF ATTORNEY

This Power of Attorney is made on {current_date}.

I, {user_details['principal_name']}, hereby appoint {user_details['agent_name']} as my Attorney-in-Fact ("Agent").

My Agent shall have full power and authority to act on my behalf. This power and authority shall authorize my Agent to manage and conduct all of my affairs and to exercise all of my legal rights and powers, including all rights and powers that I may acquire in the future. My Agent's powers shall include, but not be limited to:

1. {', '.join(user_details['powers'])}

This Power of Attorney shall become effective immediately and shall continue until it is revoked by me.

{jurisdiction_clause}

Signed this {current_date}.

______________________
{user_details['principal_name']} (Principal)

______________________
{user_details['agent_name']} (Agent)

______________________
Witness

______________________
Witness
"""

    elif form_type == "Non-Disclosure Agreement":
        form_content = f"""
NON-DISCLOSURE AGREEMENT

This Non-Disclosure Agreement (the "Agreement") is entered into on {current_date} by and between:

{user_details['party_a']} ("Party A")
and
{user_details['party_b']} ("Party B")

1. Purpose: This Agreement is entered into for the purpose of {user_details['purpose']}.

2. Confidential Information: Both parties may disclose certain confidential and proprietary information to each other in connection with the Purpose.

3. Non-Disclosure: Both parties agree to keep all Confidential Information strictly confidential and not to disclose such information to any third parties for a period of {user_details['duration']} years from the date of this Agreement.

{jurisdiction_clause}

IN WITNESS WHEREOF, the parties hereto have executed this Non-Disclosure Agreement as of the date first above written.

______________________
{user_details['party_a']}

______________________
{user_details['party_b']}
"""

    elif form_type == "Simple Will":
        # One numbered bequest clause per non-empty line of the beneficiaries field.
        beneficiaries = user_details['beneficiaries'].split('\n')
        beneficiary_clauses = "\n".join([f"{i+1}. I give, devise, and bequeath to {b.strip()} [insert specific bequest or share of estate]." for i, b in enumerate(beneficiaries)])

        form_content = f"""
LAST WILL AND TESTAMENT

I, {user_details['testator_name']}, being of sound mind, do hereby make, publish, and declare this to be my Last Will and Testament, hereby revoking all previous wills and codicils made by me.

1. EXECUTOR: I appoint {user_details['executor_name']} to be the Executor of this, my Last Will and Testament.

2. BEQUESTS:
{beneficiary_clauses}

3. RESIDUARY ESTATE: I give, devise, and bequeath all the rest, residue, and remainder of my estate to [insert beneficiary or beneficiaries].

4. POWERS OF EXECUTOR: I grant to my Executor full power and authority to sell, lease, mortgage, or otherwise dispose of the whole or any part of my estate.

{jurisdiction_clause}

IN WITNESS WHEREOF, I have hereunto set my hand to this my Last Will and Testament on {current_date}.

______________________
{user_details['testator_name']} (Testator)

WITNESSES:
On the date last above written, {user_details['testator_name']}, known to us to be the Testator, signed this Will in our presence and declared it to be their Last Will and Testament. At the Testator's request, in the Testator's presence, and in the presence of each other, we have signed our names as witnesses:

______________________
Witness 1

______________________
Witness 2
"""

    elif form_type == "Lease Agreement":
        form_content = f"""
LEASE AGREEMENT

This Lease Agreement (the "Lease") is made on {current_date} by and between:

{user_details['landlord_name']} ("Landlord")
and
{user_details['tenant_name']} ("Tenant")

1. PREMISES: The Landlord hereby leases to the Tenant the property located at {user_details['property_address']}.

2. TERM: The term of this Lease shall be for {user_details['lease_term']} months, beginning on {user_details['start_date']} and ending on {user_details['end_date']}.

3. RENT: The Tenant shall pay rent in the amount of {user_details['rent_amount']} per month, due on the {user_details['rent_due_day']} day of each month.

4. SECURITY DEPOSIT: The Tenant shall pay a security deposit of {user_details['security_deposit']} upon signing this Lease.

{jurisdiction_clause}

IN WITNESS WHEREOF, the parties hereto have executed this Lease Agreement as of the date first above written.

______________________
{user_details['landlord_name']} (Landlord)

______________________
{user_details['tenant_name']} (Tenant)
"""

    elif form_type == "Employment Contract":
        form_content = f"""
EMPLOYMENT CONTRACT

This Employment Contract (the "Contract") is made on {current_date} by and between:

{user_details['employer_name']} ("Employer")
and
{user_details['employee_name']} ("Employee")

1. POSITION: The Employee is hired for the position of {user_details['job_title']}.

2. DUTIES: The Employee's duties shall include, but are not limited to: {user_details['job_duties']}.

3. COMPENSATION: The Employee shall be paid a {user_details['pay_frequency']} salary of {user_details['salary_amount']}.

4. TERM: This Contract shall commence on {user_details['start_date']} and continue until terminated by either party.

5. BENEFITS: The Employee shall be entitled to the following benefits: {user_details['benefits']}.

{jurisdiction_clause}

IN WITNESS WHEREOF, the parties hereto have executed this Employment Contract as of the date first above written.

______________________
{user_details['employer_name']} (Employer)

______________________
{user_details['employee_name']} (Employee)
"""

    else:
        return {"error": "Unsupported form type"}

    # Generate .txt file
    txt_file = io.StringIO()
    txt_file.write(form_content)
    txt_file.seek(0)

    # Generate .docx file
    # NOTE(review): `Document()` here expects python-docx's Document, but this
    # file also defines its own `Document` class near the top — confirm which
    # name is actually in scope at call time.
    docx_file = io.BytesIO()
    doc = Document()
    doc.add_paragraph(form_content)
    doc.save(docx_file)
    docx_file.seek(0)

    return {
        "form_content": form_content,
        "txt_file": txt_file,
        "docx_file": docx_file
    }
765
+
766
# Case categories offered by the "Case Trend Visualizer" feature.
CASE_TYPES = [
    "Civil Rights", "Contract", "Real Property", "Tort", "Labor", "Intellectual Property",
    "Bankruptcy", "Immigration", "Social Security", "Tax", "Constitutional", "Criminal",
    "Environmental", "Antitrust", "Securities", "Administrative", "Admiralty", "Family Law",
    "Probate", "Personal Injury"
]

# Public data source URL per case type, used by fetch_case_data(). Most entries
# point at the same US-courts PDF table; PDF sources are not parsed and fall
# back to placeholder data downstream.
DATA_SOURCES = {
    "Civil Rights": "https://www.uscourts.gov/sites/default/files/data_tables/jb_c2_0930.2022.pdf",
    "Contract": "https://www.uscourts.gov/sites/default/files/data_tables/jb_c2_0930.2022.pdf",
    "Real Property": "https://www.uscourts.gov/sites/default/files/data_tables/jb_c2_0930.2022.pdf",
    "Tort": "https://www.uscourts.gov/sites/default/files/data_tables/jb_c2_0930.2022.pdf",
    "Labor": "https://www.uscourts.gov/sites/default/files/data_tables/jb_c2_0930.2022.pdf",
    "Intellectual Property": "https://www.uscourts.gov/sites/default/files/data_tables/jb_c2_0930.2022.pdf",
    "Bankruptcy": "https://www.uscourts.gov/sites/default/files/data_tables/jb_f_0930.2022.pdf",
    "Immigration": "https://www.justice.gov/eoir/workload-and-adjudication-statistics",
    "Social Security": "https://www.ssa.gov/open/data/hearings-and-appeals-filed.html",
    "Tax": "https://www.ustaxcourt.gov/statistics.html",
    "Constitutional": "https://www.uscourts.gov/sites/default/files/data_tables/jb_c2_0930.2022.pdf",
    "Criminal": "https://www.uscourts.gov/sites/default/files/data_tables/jb_d1_0930.2022.pdf",
    "Environmental": "https://www.epa.gov/enforcement/enforcement-annual-results-numbers-glance-fiscal-year-2022",
    "Antitrust": "https://www.uscourts.gov/sites/default/files/data_tables/jb_c2_0930.2022.pdf",
    "Securities": "https://www.sec.gov/files/enforcement-annual-report-2022.pdf",
    "Administrative": "https://www.uscourts.gov/sites/default/files/data_tables/jb_c2_0930.2022.pdf",
    "Admiralty": "https://www.uscourts.gov/sites/default/files/data_tables/jb_c2_0930.2022.pdf",
    "Family Law": "https://www.uscourts.gov/sites/default/files/data_tables/jb_c2_0930.2022.pdf",
    "Probate": "https://www.uscourts.gov/sites/default/files/data_tables/jb_c2_0930.2022.pdf",
    "Personal Injury": "https://www.uscourts.gov/sites/default/files/data_tables/jb_c2_0930.2022.pdf"
}
795
+
796
def _placeholder_case_df() -> pd.DataFrame:
    """Fallback dataset: ten years (2013-2022) of random yearly case counts."""
    return pd.DataFrame({
        'Year': range(2013, 2023),
        'Number of Cases': [random.randint(1000, 5000) for _ in range(10)]
    })


def fetch_case_data(case_type: str) -> pd.DataFrame:
    """Fetches actual historical data for the given case type.

    Downloads the URL registered in DATA_SOURCES. PDF sources are not parsed
    yet, so they yield placeholder data; fetch or parse failures also fall
    back to placeholder data (with a Streamlit error shown) instead of
    crashing the app.

    Args:
        case_type: Key into DATA_SOURCES (raises KeyError if unknown).

    Returns:
        DataFrame with 'Year' and 'Number of Cases' columns.
    """
    url = DATA_SOURCES[case_type]
    try:
        # Bound the wait so a slow or unreachable source can't hang the UI;
        # a raised network error previously crashed the app instead of
        # reaching the placeholder fallback below.
        response = requests.get(url, timeout=15)
    except requests.RequestException:
        response = None
    if response is not None and response.status_code == 200:
        if url.endswith('.pdf'):
            # For PDF sources, we'll use a placeholder DataFrame
            # In a real-world scenario, you'd need to implement PDF parsing
            df = _placeholder_case_df()
        else:
            # For non-PDF sources, we'll assume CSV format; fall back if the
            # payload turns out not to be parseable as CSV.
            try:
                df = pd.read_csv(StringIO(response.text))
            except Exception:
                st.error(f"Failed to fetch data for {case_type}. Using placeholder data.")
                df = _placeholder_case_df()
    else:
        st.error(f"Failed to fetch data for {case_type}. Using placeholder data.")
        df = _placeholder_case_df()
    return df
818
+
819
def visualize_case_trends(case_type: str):
    """Build a yearly-trend line chart for the given case type.

    Fetches (or falls back to placeholder) historical counts via
    fetch_case_data(), then renders them as a Plotly line chart with
    markers and a unified hover.

    Returns:
        A (figure, dataframe) pair so callers can show both the chart
        and the raw numbers.
    """
    case_df = fetch_case_data(case_type)

    trend_fig = px.line(
        case_df,
        x='Year',
        y='Number of Cases',
        title=f"Trend of {case_type} Cases",
    )
    trend_fig.update_layout(
        xaxis_title="Year",
        yaxis_title="Number of Cases",
        hovermode="x unified",
    )
    trend_fig.update_traces(mode="lines+markers")

    return trend_fig, case_df
832
+
833
# --- Streamlit App ---
# Custom CSS to improve the overall look
# (injected once at script load; styles the report container, headings,
# buttons and text inputs for every feature page)
st.markdown("""
<style>
.reportview-container {
    background: #f0f2f6;
}
.main .block-container {
    padding-top: 2rem;
    padding-bottom: 2rem;
    padding-left: 5rem;
    padding-right: 5rem;
}
h1 {
    color: #1E3A8A;
}
h2 {
    color: #3B82F6;
}
.stButton>button {
    background-color: #3B82F6;
    color: white;
    border-radius: 5px;
}
.stTextInput>div>div>input {
    border-radius: 5px;
}
</style>
""", unsafe_allow_html=True)
862
+
863
def load_lottieurl(url: str):
    """Fetch a Lottie animation definition from *url*.

    Args:
        url: HTTP(S) URL of a Lottie JSON document.

    Returns:
        The parsed JSON (dict) on success, or None on any HTTP, network,
        or JSON-decoding error — callers must handle a missing animation.
    """
    try:
        # A bounded timeout keeps a slow CDN from hanging the whole app;
        # the previous call could block indefinitely.
        r = requests.get(url, timeout=10)
        r.raise_for_status()  # Raises a HTTPError if the status is 4xx, 5xx
        return r.json()
    except requests.HTTPError as http_err:
        print(f"HTTP error occurred while loading Lottie animation: {http_err}")
    except requests.RequestException as req_err:
        print(f"Error occurred while loading Lottie animation: {req_err}")
    except ValueError as json_err:
        print(f"Error decoding JSON for Lottie animation: {json_err}")
    return None
875
+
876
+ # Streamlit App
877
+ st.title("Lex AI - Advanced Legal Assistant")
878
+
879
+ # Sidebar with feature selection
880
+ with st.sidebar:
881
+ st.title(" AI")
882
+ st.subheader("Advanced Legal Assistant")
883
+
884
+ feature = st.selectbox(
885
+ "Select a feature",
886
+ ["Legal Chatbot", "Document Analysis", "Case Precedent Finder", "Legal Cost Estimator", "Legal Form Generator", "Case Trend Visualizer"]
887
+ )
888
+ if feature == "Legal Chatbot":
889
+ st.subheader("Legal Chatbot")
890
+
891
+ if 'chat_history' not in st.session_state:
892
+ st.session_state.chat_history = []
893
+
894
+ if 'uploaded_document' not in st.session_state:
895
+ st.session_state.uploaded_document = None
896
+
897
+ if 'chat_mode' not in st.session_state:
898
+ st.session_state.chat_mode = "normal"
899
+
900
+ # Document upload
901
+ uploaded_file = st.file_uploader("Upload a legal document (PDF, DOCX, or TXT)", type=["pdf", "docx", "txt"])
902
+
903
+ if uploaded_file:
904
+ st.session_state.uploaded_document = analyze_uploaded_document(uploaded_file)
905
+ st.success("Document uploaded successfully!")
906
+
907
+ # Chat mode toggle
908
+ if st.session_state.uploaded_document:
909
+ if st.button("Switch Chat Mode"):
910
+ st.session_state.chat_mode = "document" if st.session_state.chat_mode == "normal" else "normal"
911
+
912
+ st.write(f"Current mode: {'Document-based' if st.session_state.chat_mode == 'document' else 'Normal'} chat")
913
+
914
+ display_chat_history()
915
+
916
+ user_input = st.text_input("Your legal question:")
917
+
918
+ if user_input and st.button("Send"):
919
+ with st.spinner("Processing your question..."):
920
+ if st.session_state.chat_mode == "document" and st.session_state.uploaded_document:
921
+ ai_response = get_document_based_response(user_input, st.session_state.uploaded_document)
922
+ st.session_state.chat_history.append((user_input, ai_response))
923
+ else:
924
+ ai_response = get_ai_response(user_input)
925
+ st.session_state.chat_history.append((user_input, ai_response))
926
+
927
+ # Perform Wikipedia search
928
+ wiki_result = search_wikipedia(user_input)
929
+ st.session_state.chat_history.append({
930
+ 'type': 'wikipedia',
931
+ 'summary': wiki_result.get("summary", "No summary available."),
932
+ 'url': wiki_result.get("url", "")
933
+ })
934
+
935
+ # Perform web search
936
+ web_results = search_web(user_input)
937
+ st.session_state.chat_history.append({
938
+ 'type': 'web_search',
939
+ 'results': web_results
940
+ })
941
+
942
+ st.rerun()
943
+
944
+ elif feature == "Document Analysis":
945
+ st.subheader("Legal Document Analyzer")
946
+
947
+ uploaded_file = st.file_uploader("Upload a legal document (PDF, DOCX, or TXT)", type=["pdf", "docx", "txt"])
948
+
949
+ if uploaded_file and st.button("Analyze Document"):
950
+ with st.spinner("Analyzing document and gathering additional information..."):
951
+ try:
952
+ document_content = analyze_document(uploaded_file)
953
+ analysis_results = comprehensive_document_analysis(document_content)
954
+
955
+ st.write("Document Analysis:")
956
+ st.write(analysis_results.get("document_analysis", "No analysis available."))
957
+
958
+ st.write("Related Articles:")
959
+ for article in analysis_results.get("related_articles", []):
960
+ st.write(f"- [{article.get('title', 'No title')}]({article.get('link', '#')})")
961
+ st.write(f" {article.get('snippet', 'No snippet available.')}")
962
+
963
+ st.write("Wikipedia Summary:")
964
+ wiki_info = analysis_results.get("wikipedia_summary", {})
965
+ st.write(f"**{wiki_info.get('title', 'No title')}**")
966
+ st.write(wiki_info.get('summary', 'No summary available.'))
967
+ if wiki_info.get('url'):
968
+ st.write(f"[Read more on Wikipedia]({wiki_info['url']})")
969
+ except Exception as e:
970
+ st.error(f"An error occurred during document analysis: {str(e)}")
971
+
972
+ elif feature == "Case Precedent Finder":
973
+ st.subheader("Case Precedent Finder")
974
+
975
+ # Initialize session state for precedents if not exists
976
+ if 'precedents' not in st.session_state:
977
+ st.session_state.precedents = None
978
+
979
+ # Initialize session state for visibility toggles if not exists
980
+ if 'visibility_toggles' not in st.session_state:
981
+ st.session_state.visibility_toggles = {}
982
+
983
+ case_details = st.text_area("Enter case details:")
984
+ if st.button("Find Precedents"):
985
+ with st.spinner("Searching for relevant case precedents..."):
986
+ try:
987
+ st.session_state.precedents = find_case_precedents(case_details)
988
+ except Exception as e:
989
+ st.error(f"An error occurred while finding case precedents: {str(e)}")
990
+
991
+ # Display results if precedents are available
992
+ if st.session_state.precedents:
993
+ precedents = st.session_state.precedents
994
+
995
+ st.write("### Summary of Relevant Case Precedents")
996
+ st.markdown(precedents["summary"])
997
+
998
+ st.write("### Related Cases from Public Databases")
999
+ for i, case in enumerate(precedents["public_cases"], 1):
1000
+ st.write(f"**{i}. {case['case_name']} - {case['citation']}**")
1001
+ st.write(f"Summary: {case['summary']}")
1002
+ st.write(f"[Read full case]({case['url']})")
1003
+ st.write("---")
1004
+
1005
+ st.write("### Additional Web Results")
1006
+ for i, result in enumerate(precedents["web_results"], 1):
1007
+ st.write(f"**{i}. [{result['title']}]({result['link']})**")
1008
+
1009
+ # Create a unique key for each toggle
1010
+ toggle_key = f"toggle_{i}"
1011
+
1012
+ # Initialize the toggle state if it doesn't exist
1013
+ if toggle_key not in st.session_state.visibility_toggles:
1014
+ st.session_state.visibility_toggles[toggle_key] = False
1015
+
1016
+ # Create a button to toggle visibility
1017
+ if st.button(f"{'Hide' if st.session_state.visibility_toggles[toggle_key] else 'Show'} Full Details for Result {i}", key=f"button_{i}"):
1018
+ st.session_state.visibility_toggles[toggle_key] = not st.session_state.visibility_toggles[toggle_key]
1019
+
1020
+ # Show full details if toggle is True
1021
+ if st.session_state.visibility_toggles[toggle_key]:
1022
+ # Fetch and display more detailed content
1023
+ detailed_content = fetch_detailed_content(result['link'])
1024
+ st.markdown(detailed_content)
1025
+ else:
1026
+ # Show a brief summary when details are hidden
1027
+ brief_summary = result['snippet'].split('\n')[0][:200] + "..." if len(result['snippet']) > 200 else result['snippet'].split('\n')[0]
1028
+ st.write(f"Brief Summary: {brief_summary}")
1029
+
1030
+ st.write("---")
1031
+
1032
+ st.write("### Wikipedia Information")
1033
+ wiki_info = precedents["wikipedia"]
1034
+ st.write(f"**[{wiki_info['title']}]({wiki_info['url']})**")
1035
+ st.markdown(wiki_info['summary'])
1036
+
1037
+ elif feature == "Legal Cost Estimator":
1038
+ st.subheader("Legal Cost Estimator")
1039
+
1040
+ case_type = st.selectbox("Select case type", ["Civil Litigation", "Criminal Defense", "Family Law", "Corporate Law"], key="cost_estimator_case_type")
1041
+ complexity = st.selectbox("Select case complexity", ["Simple", "Moderate", "Complex"], key="cost_estimator_complexity")
1042
+ country = st.selectbox("Select country", ["USA", "UK", "Canada"], key="cost_estimator_country")
1043
+
1044
+ if country == "USA":
1045
+ state = st.selectbox("Select state", ["California", "New York", "Texas", "Florida"], key="cost_estimator_state")
1046
+ else:
1047
+ state = None
1048
+
1049
+ # Initialize cost_estimate
1050
+ cost_estimate = None
1051
+
1052
+ if st.button("Estimate Costs"):
1053
+ with st.spinner("Estimating costs and performing web search..."):
1054
+ cost_estimate = estimate_legal_costs(case_type, complexity, country, state)
1055
+
1056
+ # Check if cost_estimate is available before displaying results
1057
+ if cost_estimate:
1058
+ st.write("### Estimated Legal Costs")
1059
+ for key, value in cost_estimate["cost_breakdown"].items():
1060
+ st.write(f"**{key}:** {value}")
1061
+
1062
+ st.write("### Web Search Results")
1063
+ if cost_estimate["web_search_results"]:
1064
+ for result in cost_estimate["web_search_results"]:
1065
+ st.write(f"**[{result['title']}]({result['link']})**")
1066
+ st.write(result["snippet"])
1067
+ st.write("---")
1068
+ else:
1069
+ st.write("No specific cost estimates found from web search.")
1070
+
1071
+ st.write("### Potential High-Cost Areas")
1072
+ for area in cost_estimate["high_cost_areas"]:
1073
+ st.write(f"- {area}")
1074
+
1075
+ st.write("### Cost-Saving Tips")
1076
+ for tip in cost_estimate["cost_saving_tips"]:
1077
+ st.write(f"- {tip}")
1078
+
1079
+ st.write("### Tips for Finding the Best Legal Representation")
1080
+ for tip in cost_estimate["finding_best_lawyer_tips"]:
1081
+ st.write(f"- {tip}")
1082
+
1083
+ st.write("### Recommended Lawyers/Law Firms")
1084
+ for lawyer in cost_estimate["lawyer_recommendations"][:5]: # Display top 5 recommendations
1085
+ st.write(f"**[{lawyer['title']}]({lawyer['link']})**")
1086
+ st.write(lawyer["snippet"])
1087
+ st.write("---")
1088
+ else:
1089
+ st.write("Click 'Estimate Costs' to see the results.")
1090
+
1091
+ elif feature == "Legal Form Generator":
1092
+ st.subheader("Legal Form Generator")
1093
+
1094
+ form_type = st.selectbox("Select form type", ["Power of Attorney", "Non-Disclosure Agreement", "Simple Will", "Lease Agreement", "Employment Contract"], key="form_generator_type")
1095
+
1096
+ nation = st.selectbox("Select nation", ["USA", "UK"], key="form_generator_nation")
1097
+ if nation == "USA":
1098
+ state = st.selectbox("Select state", ["California", "New York", "Texas", "Florida"], key="form_generator_state")
1099
+ else:
1100
+ state = None
1101
+
1102
+ user_details = {}
1103
+ if form_type == "Power of Attorney":
1104
+ user_details["principal_name"] = st.text_input("Principal's Full Name:")
1105
+ user_details["agent_name"] = st.text_input("Agent's Full Name:")
1106
+ user_details["powers"] = st.multiselect("Select powers to grant", ["Financial Decisions", "Healthcare Decisions", "Real Estate Transactions"])
1107
+ elif form_type == "Non-Disclosure Agreement":
1108
+ user_details["party_a"] = st.text_input("First Party's Name:")
1109
+ user_details["party_b"] = st.text_input("Second Party's Name:")
1110
+ user_details["purpose"] = st.text_input("Purpose of Disclosure:")
1111
+ user_details["duration"] = st.number_input("Duration of Agreement (in years):", min_value=1, max_value=10)
1112
+ elif form_type == "Simple Will":
1113
+ user_details["testator_name"] = st.text_input("Testator's Full Name:")
1114
+ user_details["beneficiaries"] = st.text_area("List Beneficiaries (one per line):")
1115
+ user_details["executor_name"] = st.text_input("Executor's Full Name:")
1116
+ elif form_type == "Lease Agreement":
1117
+ user_details["landlord_name"] = st.text_input("Landlord's Full Name:")
1118
+ user_details["tenant_name"] = st.text_input("Tenant's Full Name:")
1119
+ user_details["property_address"] = st.text_input("Property Address:")
1120
+ user_details["lease_term"] = st.number_input("Lease Term (in months):", min_value=1, max_value=60)
1121
+ user_details["start_date"] = st.date_input("Lease Start Date:")
1122
+ user_details["end_date"] = st.date_input("Lease End Date:")
1123
+ user_details["rent_amount"] = st.number_input("Monthly Rent Amount:", min_value=0)
1124
+ user_details["rent_due_day"] = st.number_input("Rent Due Day of Month:", min_value=1, max_value=31)
1125
+ user_details["security_deposit"] = st.number_input("Security Deposit Amount:", min_value=0)
1126
+ elif form_type == "Employment Contract":
1127
+ user_details["employer_name"] = st.text_input("Employer's Full Name:")
1128
+ user_details["employee_name"] = st.text_input("Employee's Full Name:")
1129
+ user_details["job_title"] = st.text_input("Job Title:")
1130
+ user_details["job_duties"] = st.text_area("Job Duties:")
1131
+ user_details["pay_frequency"] = st.selectbox("Pay Frequency:", ["Weekly", "Bi-weekly", "Monthly"])
1132
+ user_details["salary_amount"] = st.number_input("Salary Amount:", min_value=0)
1133
+ user_details["start_date"] = st.date_input("Employment Start Date:")
1134
+ user_details["benefits"] = st.text_area("Employee Benefits:")
1135
+
1136
+ if st.button("Generate Form"):
1137
+ generated_form = generate_legal_form(form_type, user_details, nation, state)
1138
+
1139
+ if "error" in generated_form:
1140
+ st.error(generated_form["error"])
1141
+ else:
1142
+ st.write("### Generated Legal Form:")
1143
+ st.text(generated_form["form_content"])
1144
+
1145
+ # Provide download buttons for .txt and .docx files
1146
+ txt_download = generated_form["txt_file"].getvalue()
1147
+ docx_download = generated_form["docx_file"].getvalue()
1148
+
1149
+ st.download_button(
1150
+ label="Download as .txt",
1151
+ data=txt_download,
1152
+ file_name=f"{form_type.lower().replace(' ', '_')}_{nation}{'_' + state if state else ''}.txt",
1153
+ mime="text/plain"
1154
+ )
1155
+
1156
+ st.download_button(
1157
+ label="Download as .docx",
1158
+ data=docx_download,
1159
+ file_name=f"{form_type.lower().replace(' ', '_')}_{nation}{'_' + state if state else ''}.docx",
1160
+ mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document"
1161
+ )
1162
+
1163
+ st.warning("Please note: This generated form is a template based on general principles of the selected jurisdiction. It should be reviewed by a legal professional licensed in the relevant jurisdiction before use.")
1164
+
1165
+ elif feature == "Case Trend Visualizer":
1166
+ st.subheader("Case Trend Visualizer")
1167
+
1168
+ case_type = st.selectbox("Select case type to visualize", CASE_TYPES)
1169
+
1170
+ if st.button("Visualize Trend"):
1171
+ with st.spinner("Fetching and visualizing data..."):
1172
+ fig, df = visualize_case_trends(case_type)
1173
+ # Update session state
1174
+ st.session_state.df = df
1175
+ st.session_state.fig = fig
1176
+
1177
+ st.plotly_chart(fig, use_container_width=True)
1178
+
1179
+ # Display statistics
1180
+ st.subheader("Case Statistics")
1181
+ total_cases = df['Number of Cases'].sum()
1182
+ avg_cases = df['Number of Cases'].mean()
1183
+ max_year = df.loc[df['Number of Cases'].idxmax(), 'Year']
1184
+ min_year = df.loc[df['Number of Cases'].idxmin(), 'Year']
1185
+
1186
+ col1, col2, col3 = st.columns(3)
1187
+ col1.metric("Total Cases", f"{total_cases:,}")
1188
+ col2.metric("Average Cases per Year", f"{avg_cases:,.0f}")
1189
+ col3.metric("Years", f"{min_year} - {max_year}")
1190
+
1191
+ # Raw Data
1192
+ st.subheader("Raw Data")
1193
+ st.dataframe(df)
1194
+
1195
+ # Download options
1196
+ csv = df.to_csv(index=False)
1197
+ st.download_button(
1198
+ label="Download data as CSV",
1199
+ data=csv,
1200
+ file_name=f"{case_type.lower().replace(' ', '_')}_trend_data.csv",
1201
+ mime="text/csv",
1202
+ )
1203
+
1204
+ # Additional resources
1205
+ st.subheader("Additional Resources")
1206
+ st.markdown(f"[Data Source]({DATA_SOURCES[case_type]})")
1207
+ st.markdown("[US Courts Statistics](https://www.uscourts.gov/statistics-reports)")
1208
+ st.markdown("[Federal Judicial Caseload Statistics](https://www.uscourts.gov/statistics-reports/analysis-reports/federal-judicial-caseload-statistics)")
1209
+ st.markdown(f"[Legal Information Institute](https://www.law.cornell.edu/wex/{case_type.lower().replace(' ', '_')})")
1210
+
1211
+ # Explanatory text
1212
+ st.subheader("Understanding the Trend")
1213
+ explanation = f"""
1214
+ The graph above shows the trend of {case_type} cases over time. Here are some key points to consider:
1215
+
1216
+ 1. Overall Trend: Observe whether the number of cases is generally increasing, decreasing, or remaining stable over the years.
1217
+ 2. Peak Years: The year {max_year} saw the highest number of cases ({df['Number of Cases'].max():,}). This could be due to various factors such as changes in legislation, economic conditions, or social trends.
1218
+ 3. Low Points: The year {min_year} had the lowest number of cases ({df['Number of Cases'].min():,}). Consider what might have contributed to this decrease.
1219
+ 4. Recent Trends: Pay attention to the most recent years to understand current patterns in {case_type} cases.
1220
+ 5. Contextual Factors: Remember that these numbers can be influenced by various factors, including changes in law, court procedures, societal changes, and more.
1221
+
1222
+ For a deeper understanding of these trends and their implications, consider consulting with legal professionals or reviewing academic research in this area.
1223
+ """
1224
+ st.markdown(explanation)
1225
+
1226
+ # Interactive elements
1227
+ st.subheader("Interactive Analysis")
1228
+ analysis_type = st.radio("Select analysis type:", ["Year-over-Year Change", "Moving Average"])
1229
+
1230
+ if analysis_type == "Year-over-Year Change":
1231
+ df['YoY Change'] = df['Number of Cases'].pct_change() * 100
1232
+ yoy_fig = px.bar(df, x='Year', y='YoY Change', title="Year-over-Year Change in Case Numbers")
1233
+ st.plotly_chart(yoy_fig, use_container_width=True)
1234
+
1235
+ elif analysis_type == "Moving Average":
1236
+ window = st.slider("Select moving average window:", 2, 5, 3)
1237
+ df['Moving Average'] = df['Number of Cases'].rolling(window=window).mean()
1238
+ ma_fig = px.line(df, x='Year', y=['Number of Cases', 'Moving Average'], title=f"{window}-Year Moving Average")
1239
+ st.plotly_chart(ma_fig, use_container_width=True)
1240
+
1241
# Footer with copyright and legal disclaimer, rendered on every feature page.
st.markdown("---")
st.markdown(
    """
    <div style="text-align: center;">
        <p>© 2023 Lex AI. All rights reserved.</p>
        <p><small>Disclaimer: This tool provides general legal information and assistance. It is not a substitute for professional legal advice. Please consult with a qualified attorney for specific legal matters.</small></p>
    </div>
    """,
    unsafe_allow_html=True
)
if __name__ == "__main__":
    # Streamlit executes the whole script top-to-bottom on each rerun; this
    # guard only adds the sidebar hint when the file is run directly.
    st.sidebar.info("Select a feature from the dropdown above to get started.")