# Hawkeye_AI / app.py
import os
os.system('pip install torch') # or 'pip install tensorflow'
os.system('pip install transformers')
os.system('pip install datasets')
os.system('pip install gradio')
os.system('pip install minijinja')
os.system('pip install PyMuPDF')
import gradio as gr
from huggingface_hub import InferenceClient
from transformers import pipeline
from datasets import load_dataset
import fitz # PyMuPDF
client = InferenceClient()
dataset = load_dataset("ibunescu/qa_legal_dataset_train")
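
# NOTE: `respond` is referenced by `chat_between_bots` below but is not defined
# anywhere in this file. The following is a minimal sketch of what it likely
# looks like, assuming it streams (partial_response, updated_history) pairs via
# the same client.chat_completion pattern used elsewhere in this file; adjust
# to match the original implementation.
def respond(message, history, system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token is not None:
            response += token
        yield response, history + [(message, response)]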
# Crude keyword heuristic: score the prosecutor by how often the outcome text
# mentions them, with a bonus if the outcome says the prosecutor "won".
def score_argument_from_outcome(outcome, argument):
    prosecutor_score = 0
    if "Prosecutor" in outcome:
        prosecutor_score = outcome.count("Prosecutor") * 2
    if "won" in outcome and "Prosecutor" in outcome:
        prosecutor_score += 10
    return prosecutor_score
def chat_between_bots(system_message1, system_message2, max_tokens, temperature, top_p, history1, history2, shared_history, message):
    # Take the final (response, history) pair yielded by each bot's stream.
    response1, history1 = list(respond(message, history1, system_message1, max_tokens, temperature, top_p))[-1]
    response2, history2 = list(respond(message, history2, system_message2, max_tokens, temperature, top_p))[-1]
    # The submit handler expects seven outputs (including the two score-color
    # boxes); return empty placeholders for the colors so the counts match.
    return response1, response2, history1, history2, shared_history, "", ""
def extract_text_from_pdf(pdf_file):
    text = ""
    doc = fitz.open(pdf_file)
    for page in doc:
        text += page.get_text()
    return text
def ask_about_pdf(pdf_text, question):
    prompt = f"PDF Content: {pdf_text}\n\nQuestion: {question}\n\nAnswer:"
    response = ""
    for message in client.chat_completion(
        [{"role": "system", "content": "You are a legal expert answering questions based on the PDF content provided."},
         {"role": "user", "content": prompt}],
        max_tokens=512,
        stream=True,
        temperature=0.6,
        top_p=0.95,
    ):
        token = message.choices[0].delta.content
        if token is not None:
            response += token
    return response
def update_pdf_gallery_and_extract_text(pdf_files):
    if len(pdf_files) > 0:
        pdf_text = extract_text_from_pdf(pdf_files[0].name)
    else:
        pdf_text = ""
    return pdf_files, pdf_text
def add_message(history, message):
    # Store the user turn as a [user, assistant] pair so `bot` can fill in the reply.
    history.append([message, None])
    return history, gr.Textbox(value=None, interactive=False)
def bot(history):
    system_message = "You are a helpful assistant."
    messages = [{"role": "system", "content": system_message}]
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    response = ""
    for message in client.chat_completion(
        messages,
        max_tokens=150,
        stream=True,
        temperature=0.6,
        top_p=0.95,
    ):
        token = message.choices[0].delta.content
        if token is not None:
            response += token
        history[-1][1] = response
        yield history
def print_like_dislike(x: gr.LikeData):
    print(x.index, x.value, x.liked)
def reset_conversation():
    # Clear both bot histories, the shared history, and the three textboxes
    # listed in clear_btn's outputs (six values in total).
    return [], [], [], "", "", ""
def save_conversation(history1, history2, shared_history):
    return history1, history2, shared_history
custom_css = """
.scroll-box {
    max-height: 400px;
    overflow-y: auto;
}
"""
with gr.Blocks(css=custom_css) as demo:
    history1 = gr.State([])
    history2 = gr.State([])
    shared_history = gr.State([])
    pdf_files = gr.State([])
    pdf_text = gr.State("")
with gr.Tab("Argument Evaluation"):
message = gr.Textbox(label="Case to Argue")
system_message1 = "System message for bot 1"
system_message2 = "System message for bot 2"
max_tokens = 150
temperature = 0.6
top_p = 0.95
prosecutor_response = gr.Textbox(label="Prosecutor Response", interactive=False)
defense_response = gr.Textbox(label="Defense Response", interactive=False)
prosecutor_score_color = gr.Textbox(label="Prosecutor Score Color", interactive=False)
defense_score_color = gr.Textbox(label="Defense Score Color", interactive=False)
shared_argument = gr.Textbox(label="Case Outcome", interactive=True)
submit_btn = gr.Button("Argue")
clear_btn = gr.Button("Clear and Reset")
save_btn = gr.Button("Save Conversation")
        submit_btn.click(chat_between_bots, inputs=[system_message1, system_message2, max_tokens, temperature, top_p, history1, history2, shared_history, message], outputs=[prosecutor_response, defense_response, history1, history2, shared_argument, prosecutor_score_color, defense_score_color])
        clear_btn.click(reset_conversation, outputs=[history1, history2, shared_history, prosecutor_response, defense_response, shared_argument])
        save_btn.click(save_conversation, inputs=[history1, history2, shared_history], outputs=[history1, history2, shared_history])
with gr.Tab("PDF Management"):
pdf_upload = gr.File(label="Upload Case Files (PDF)", file_types=[".pdf"])
pdf_gallery = gr.Gallery(label="PDF Gallery")
pdf_view = gr.Textbox(label="PDF Content", interactive=False, elem_classes=["scroll-box"])
pdf_question = gr.Textbox(label="Ask a Question about the PDF")
pdf_answer = gr.Textbox(label="Answer", interactive=False, elem_classes=["scroll-box"])
pdf_upload_btn = gr.Button("Update PDF Gallery")
pdf_ask_btn = gr.Button("Ask")
pdf_upload_btn.click(update_pdf_gallery_and_extract_text, inputs=[pdf_upload], outputs=[pdf_gallery, pdf_text])
pdf_text.change(fn=lambda x: x, inputs=pdf_text, outputs=pdf_view)
pdf_ask_btn.click(ask_about_pdf, inputs=[pdf_text, pdf_question], outputs=pdf_answer)
with gr.Tab("Chatbot"):
chatbot = gr.Chatbot()
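        # The original file defines add_message, bot, and print_like_dislike but
        # never wires them to this tab. A minimal sketch of that wiring, assuming
        # the usual Gradio streaming-chat pattern; the `chat_input` component name
        # is my own addition.
        chat_input = gr.Textbox(label="Message", placeholder="Type a message and press Enter")
        chat_msg = chat_input.submit(add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input])
        chat_msg.then(bot, inputs=chatbot, outputs=chatbot).then(
            lambda: gr.Textbox(interactive=True), None, [chat_input]
        )
        chatbot.like(print_like_dislike, None, None)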
demo.launch()