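"""Indian Legal Assistant Gradio app.

Retrieves relevant context from a ChromaDB collection and queries Mistral AI
(via its OpenAI-compatible API) to return structured legal information with
citations, a summary, and a confidence level.
"""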
import gradio as gr
import chromadb
import os
from openai import OpenAI
import json
from typing import List, Dict
import re
class LegalAssistant:
def __init__(self):
# Initialize ChromaDB
self.chroma_client = chromadb.Client()
self.collection = self.chroma_client.get_or_create_collection("legal_documents")
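        # Note: chromadb.Client() is an in-memory (ephemeral) client, so the
        # collection starts empty unless documents are ingested into it elsewhere.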
# Initialize Mistral AI client
self.mistral_client = OpenAI(
            # Read the API key from the environment; never hard-code secrets here.
            api_key=os.environ.get("MISTRAL_API_KEY"),
base_url="https://api.mistral.ai/v1"
)
# Define system prompt with strict rules
self.system_prompt = """You are a specialized legal assistant trained on Indian law. You MUST follow these strict rules:
RESPONSE FORMAT RULES:
1. ALWAYS structure your response in this exact JSON format:
{
"answer": "Your detailed answer here",
"reference_sections": ["Section X of Act Y", ...],
"summary": "2-3 line summary",
"confidence": "HIGH/MEDIUM/LOW"
}
CONTENT RULES:
1. NEVER make assumptions or provide information not supported by Indian law
2. ALWAYS cite specific sections, acts, and legal precedents
3. If information is insufficient, explicitly state "Insufficient information" in answer
4. NEVER provide legal advice, only legal information
5. For any constitutional matters, ALWAYS cite relevant Articles
ACCURACY RULES:
1. If confidence is less than 80%, mark as LOW confidence
2. If multiple interpretations exist, list ALL with citations
3. If law has been amended, specify the latest amendment date
4. For case law, cite the full case reference
PROHIBITED:
1. NO personal opinions
2. NO hypothetical scenarios
3. NO interpretation of ongoing cases
4. NO advice on specific legal situations
ERROR HANDLING:
1. If query is unclear: Request clarification
2. If outside Indian law scope: State "Outside scope of Indian law"
3. If conflicting laws exist: List all applicable laws"""
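        # The prompt requires a strict JSON reply; get_response() parses that JSON
        # and falls back to an error payload when parsing fails.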
def validate_query(self, query: str) -> tuple[bool, str]:
"""Validate the input query"""
if not query or len(query.strip()) < 10:
return False, "Query too short. Please provide more details."
if len(query) > 500:
return False, "Query too long. Please be more concise."
        # Allow trailing whitespace after the terminal punctuation mark
        if not re.search(r'[?.]\s*$', query):
            return False, "Query must end with a question mark or period."
return True, ""
def _search_documents(self, query: str) -> tuple[str, List[str]]:
"""Search ChromaDB for relevant documents"""
try:
results = self.collection.query(
query_texts=[query],
n_results=3
)
            # Chroma returns one result list per query text; guard against empty hits
            if results and results.get('documents') and results['documents'][0]:
                documents = results['documents'][0]
                metadata = (results.get('metadatas') or [[]])[0] or []
                sources = [m.get('source', 'Unknown') if m else 'Unknown' for m in metadata]
return "\n\n".join(documents), sources
return "", []
except Exception as e:
print(f"Search error: {str(e)}")
return "", []
def get_response(self, query: str) -> Dict:
"""Get response from Mistral AI with context from ChromaDB"""
# Validate query
is_valid, error_message = self.validate_query(query)
if not is_valid:
return {
"answer": error_message,
"references": [],
"summary": "Invalid query",
"confidence": "LOW"
}
try:
# Get relevant context from ChromaDB
context, sources = self._search_documents(query)
            # Build the user message, prepending retrieved context when available
            if context:
                content = (
                    f"Context: {context}\n"
                    f"Sources: {', '.join(sources)}\n"
                    f"Question: {query}"
                )
            else:
                content = query
# Get response from Mistral AI
response = self.mistral_client.chat.completions.create(
model="mistral-medium",
messages=[
{
"role": "system",
"content": self.system_prompt
},
{
"role": "user",
"content": content
}
],
temperature=0.1,
max_tokens=1000
)
            # Parse the model's JSON response, tolerating markdown code fences
            if response.choices and len(response.choices) > 0:
                raw = response.choices[0].message.content.strip()
                raw = re.sub(r'^```(?:json)?\s*|\s*```$', '', raw)
                try:
                    result = json.loads(raw)
return {
"answer": result.get("answer", "No answer provided"),
"references": result.get("reference_sections", []),
"summary": result.get("summary", ""),
"confidence": result.get("confidence", "LOW")
}
except json.JSONDecodeError:
return {
"answer": "Error: Response format invalid",
"references": [],
"summary": "Response parsing failed",
"confidence": "LOW"
}
return {
"answer": "No response received",
"references": [],
"summary": "Response generation failed",
"confidence": "LOW"
}
except Exception as e:
return {
"answer": f"Error: {str(e)}",
"references": [],
"summary": "System error occurred",
"confidence": "LOW"
}
# Initialize the assistant
assistant = LegalAssistant()
# Create Gradio interface
def process_query(query: str) -> tuple:
response = assistant.get_response(query)
return (
response["answer"],
", ".join(response["references"]) if response["references"] else "No specific references",
response["summary"] if response["summary"] else "No summary available",
response["confidence"]
)
# Create the Gradio interface with a professional theme
with gr.Blocks(theme=gr.themes.Soft()) as demo:
gr.Markdown("""
# Indian Legal Assistant
## Guidelines for Queries:
1. Be specific and clear in your questions
    2. End questions with a question mark or period
3. Provide relevant context if available
4. Keep queries between 10-500 characters
""")
with gr.Row():
query_input = gr.Textbox(
label="Enter your legal query",
placeholder="e.g., What is the legal age for marriage in India as per current laws?"
)
with gr.Row():
submit_btn = gr.Button("Submit", variant="primary")
with gr.Row():
confidence_output = gr.Textbox(label="Confidence Level")
with gr.Row():
answer_output = gr.Textbox(label="Answer", lines=5)
with gr.Row():
with gr.Column():
references_output = gr.Textbox(label="Legal References", lines=3)
with gr.Column():
summary_output = gr.Textbox(label="Summary", lines=2)
gr.Markdown("""
### Important Notes:
- This assistant provides legal information, not legal advice
- Always verify information with a qualified legal professional
- Information is based on Indian law only
""")
submit_btn.click(
fn=process_query,
inputs=[query_input],
outputs=[answer_output, references_output, summary_output, confidence_output]
)
# Launch the app
demo.launch()