import gradio as gr
from transformers import pipeline
import numpy as np
import requests

from js import js
from theme import theme

# Initialize our speech pipeline
transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en", device="cpu")

pretext = ""
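
# send_to_chat() forwards the user's question to the hosted question-answering backend
# (the /query endpoint of the sadiksmart0-the-law Space) and returns the "answer" field
# of the JSON response. `history` is accepted so the function can be used directly by
# gr.ChatInterface, which passes the conversation history as a second argument.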
def send_to_chat(question: str, history=None):
    payload = {"question": question}
    # timeout keeps the UI from hanging indefinitely if the backend is slow
    response = requests.post("https://sadiksmart0-the-law.hf.space/query", json=payload, timeout=60)
    if response.status_code != 200:
        print(f"Error {response.status_code}: {response.text}")
        return f"Error {response.status_code}: unable to fetch a response from the server."
    return response.json().get("answer", "No answer returned.")
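
# Example (illustrative): send_to_chat("What is the process for obtaining a court order?")
# returns the backend's answer as a plain string, or an error message if the request fails.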
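
# transcribe() receives the recording from gr.Audio as a (sample_rate, samples) tuple,
# downmixes stereo to mono, peak-normalizes the float32 signal, and runs it through the
# Whisper pipeline. The transcript is also stored in the module-level `pretext`.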
def transcribe(audio):
    global pretext
    if audio is None:
        return "Please record again. Loud and clear audio is required."
    sr, y = audio
    # Convert to mono if stereo
    if y.ndim > 1:
        y = y.mean(axis=1)
    y = y.astype(np.float32)
    peak = np.max(np.abs(y))
    if peak > 0:  # guard against division by zero on silent recordings
        y /= peak
    pretext = transcriber({"sampling_rate": sr, "raw": y})["text"]
    return pretext
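
# UI layout: a microphone recorder feeds a hidden transcription textbox, which autofills
# the chat input; the chat itself is handled by the gr.ChatInterface below.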
with gr.Blocks(title="Know The Law", theme=theme, js=js) as demo:
    with gr.Row():
        with gr.Column():
            gr.Markdown("# Know The Law")
            audio_input = gr.Audio(
                label="Input Audio",
                sources=["microphone"],
                type="numpy",
                container=True,
                interactive=True,
                waveform_options=gr.WaveformOptions(waveform_color="#B83A4B"),
            )
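
            # Hidden textbox that receives the transcription and bridges it to the chat input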
            output_text = gr.Textbox(
                interactive=True,
                submit_btn=True,  # enables the submit event
                label="Transcription Output",
                visible=False,  # hidden from the UI
            )
            # Transcribe whenever a new recording is captured
            audio_input.change(transcribe, inputs=[audio_input], outputs=[output_text])
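
            # Chat section: a gr.ChatInterface backed by send_to_chat(), using the
            # "messages" format, clickable example questions, and saved chat history.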
gr.Markdown("# What does the Law say?") | |
chat = gr.ChatInterface( | |
send_to_chat, | |
chatbot=gr.Chatbot(height=300, type="messages"), | |
textbox=gr.Textbox( | |
placeholder="Ask me a question related to Nigerian law", | |
container=True, | |
scale=7, | |
submit_btn=True, | |
type="text" | |
), | |
type="messages", | |
examples=[ | |
"How can I file a complaint against police misconduct?", | |
"What is the process for obtaining a court order?", | |
"What are the legal requirements for starting a business in Nigeria?" | |
], | |
title="Law Assistant", | |
run_examples_on_click=True, | |
save_history=True, | |
cache_examples=True | |
) | |
            chat_input = chat.textbox  # the chat interface's own text input
            # Autofill the chat input box with the transcribed text
            output_text.change(lambda x: x, inputs=[output_text], outputs=[chat_input])

            # Submit event: append the transcribed question and the backend's answer to
            # the chat history (the Chatbot expects "messages" dicts, not a raw string)
            def submit_transcription(question, history):
                return (history or []) + [{"role": "user", "content": question},
                                          {"role": "assistant", "content": send_to_chat(question)}]

            output_text.submit(submit_transcription, inputs=[output_text, chat.chatbot], outputs=chat.chatbot)

if __name__ == "__main__":
    demo.launch(share=True, server_port=8001)
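
# Note: share=True is mainly useful when running locally (it creates a temporary public
# link) and is not needed when the app is hosted on a Hugging Face Space. Locally, the
# app is served on http://127.0.0.1:8001 as set by server_port above.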