# Nyaya-Mitra — AI legal assistant for India's new criminal laws (Gradio app).
# NOTE(review): the original upload carried scraped page-header text here
# ("Spaces: Runtime error"), which is not Python; replaced with this comment.
import hashlib
import json
import os
import time
from functools import lru_cache
from pathlib import Path
from typing import Dict, List, Optional

import gradio as gr
import markdown
from openai import OpenAI

from add_embeddings import LegalDocumentProcessor
# --- Module-level initialization (runs at import time) ---

# OpenAI-compatible client pointed at Mistral's endpoint. The key comes from
# the MISTRAL_API_KEY environment variable (None if unset — API calls will
# then fail and be handled by get_mistral_response's error path).
client = OpenAI(
    api_key=os.environ.get("MISTRAL_API_KEY"),
    base_url="https://api.mistral.ai/v1",
)

print("Starting Nyaya-Mitra initialization...")

# Build/load the legal-document store. Any failure leaves doc_processor as
# None; chat_interface() checks for that and degrades gracefully instead of
# crashing the app at startup.
try:
    print("Initializing document processor...")
    doc_processor = LegalDocumentProcessor()
    print("Processing documents...")
    doc_processor.process_and_store_documents()
    print("Document processor initialized successfully")
except Exception as e:
    print(f"Error initializing document processor: {str(e)}")
    doc_processor = None

# In-memory, unbounded cache: md5("query|context") -> model response text.
response_cache = {}
def get_cache_key(query: str, context: str) -> str:
    """Return a deterministic cache key for a (query, context) pair.

    MD5 is used purely as a fast fingerprint for cache lookup, not for
    security. The pair is joined with '|' before hashing, so distinct
    pairs that concatenate identically could theoretically collide —
    acceptable for a response cache.
    """
    combined = f"{query}|{context}"
    return hashlib.md5(combined.encode()).hexdigest()
def get_cached_response(cache_key: str) -> Optional[str]:
    """Return the cached response for *cache_key*, or None on a cache miss.

    Annotation fixed from ``str`` to ``Optional[str]``: ``dict.get``
    returns None when the key is absent, and callers rely on that.
    """
    return response_cache.get(cache_key)
def get_mistral_response(query: str, context: str, max_retries: int = 3) -> str:
    """Ask Mistral for an answer grounded in *context*, with caching and retries.

    Responses are memoized in the module-level ``response_cache`` keyed by an
    MD5 fingerprint of (query, context). Rate-limit errors are retried up to
    ``max_retries`` times with linear backoff (2s, 4s, ...). Any other API
    error — or a rate limit on the final attempt — returns an apologetic
    message rather than raising, so the UI never sees an exception.
    """
    cache_key = get_cache_key(query, context)
    cached_response = get_cached_response(cache_key)
    if cached_response:
        return cached_response

    system_prompt = """You are Nyaya-Mitra (न्याय-मित्र), a helpful legal assistant for the Indian justice system.
Provide concise, accurate responses based on the context provided. Focus on the most relevant information.
Guidelines:
1. Be precise and cite specific sections
2. Explain concepts simply
3. Suggest next steps if applicable
4. Note that you cannot provide legal advice
5. Keep responses focused and relevant
Format responses clearly using markdown."""

    for attempt in range(max_retries):
        try:
            response = client.chat.completions.create(
                model="mistral-medium",
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": f"Context: {context}\n\nQuestion: {query}"}
                ]
            )
            result = response.choices[0].message.content
            response_cache[cache_key] = result  # memoize before returning
            return result
        except Exception as e:
            # Retry only on rate limiting, and only while attempts remain.
            if "rate limit" in str(e).lower():
                if attempt < max_retries - 1:
                    wait_time = (attempt + 1) * 2  # 2s, 4s, ... linear backoff
                    time.sleep(wait_time)
                    continue
            # Non-rate-limit error, or rate limit on the last attempt.
            return f"""I apologize, but I'm currently experiencing high demand. Please try:
1. Waiting a few moments before asking another question
2. Making your question more specific
3. Breaking down your question into smaller parts
Error details: {str(e)}"""
    # Unreachable when max_retries >= 1 (every loop path returns or continues);
    # kept as a safety net in case the function is called with max_retries <= 0.
    return "I apologize, but I'm currently unable to process your request. Please try again in a few moments."
def format_sources(metadatas: List[Dict]) -> str:
    """Format retrieval metadata as a de-duplicated markdown bullet list.

    Each entry must carry 'law_code' and 'source' keys. Duplicates (same
    law_code + source) are listed once, preserving first-seen order.
    Returns an empty string for an empty input.
    """
    seen_sources = set()
    sources = []
    for metadata in metadatas:
        source_key = f"{metadata['law_code']} ({metadata['source']})"
        if source_key not in seen_sources:
            sources.append(f"- {source_key}")
            seen_sources.add(source_key)
    return "\n".join(sources)
def chat_interface(message: str, history: List[List[str]]) -> str:
    """Gradio chat handler: retrieve context, query Mistral, render markdown.

    *history* is supplied by gr.ChatInterface but is not used here — each
    turn is answered independently. All failures are converted to friendly
    guidance text; this function never raises to the UI.
    """
    try:
        if doc_processor is None:
            # Startup initialization failed (see module init); degrade gracefully.
            return """I apologize, but I'm having trouble accessing the legal documents.
        Please try refreshing the page or waiting a moment."""

        # Retrieve relevant passages from the document store.
        results = doc_processor.search_documents(message)
        context = "\n".join(results["documents"])

        # Ask the model with the retrieved context.
        response = get_mistral_response(message, context)

        # Append a de-duplicated source list, then render markdown to HTML.
        sources = format_sources(results["metadatas"])
        full_response = f"{response}\n\n**Sources:**\n{sources}"
        formatted_response = markdown.markdown(full_response)
        return formatted_response
    except Exception as e:
        return f"""I apologize, but I encountered an error. Please try:
1. Making your question more specific
2. Waiting a moment and trying again
3. Refreshing the page
Error: {str(e)}"""
# Build the Gradio chat UI; example prompts seed the interface.
iface = gr.ChatInterface(
    fn=chat_interface,
    title="Nyaya-Mitra (न्याय-मित्र)",
    description="""# Welcome to Nyaya-Mitra!
Your AI Legal Assistant for India's New Criminal Laws:
- 📚 Bharatiya Nyaya Sanhita (BNS)
- 📋 Bharatiya Nagarik Suraksha Sanhita (BNSS)
- ⚖️ Bharatiya Sakshya Adhiniyam (BSA)
*Note: This is an AI assistant for information only. For legal advice, please consult qualified legal professionals.*""",
    theme="soft",
    examples=[
        "What are the main provisions for cybercrime in BNS?",
        "How do I file a complaint under BNSS?",
        "What is the process for electronic evidence under BSA?",
        "What are the key changes in mob lynching laws?",
        "How does BNS define organized crime?"
    ]
)

# Launch only when run as a script, not when imported.
if __name__ == "__main__":
    iface.launch()