import os

# Install dependencies at startup.
os.system('pip install transformers')
os.system('pip install datasets')
os.system('pip install gradio')
os.system('pip install minijinja')

import gradio as gr
from huggingface_hub import InferenceClient
from transformers import pipeline
from datasets import load_dataset
import time

dataset = load_dataset("ibunescu/qa_legal_dataset_train")

# Use a pipeline as a high-level helper
pipe = pipeline("fill-mask", model="nlpaueb/legal-bert-base-uncased")

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    messages = [{"role": "system", "content": system_message}]

    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    messages.append({"role": "user", "content": message})

    response = ""
    # Stream tokens from the endpoint and accumulate the running reply.
    # Note: the loop variable is named `chunk` so it does not shadow the
    # `message` argument used when yielding the updated history.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token is not None:
            response += token
            yield response, history + [(message, response)]


def generate_case_outcome(prosecutor_response, defense_response):
    prompt = (
        f"Prosecutor's Argument: {prosecutor_response}\n"
        f"Defense Attorney's Argument: {defense_response}\n\n"
        "Evaluate both arguments and determine who won the case. "
        "Provide reasons for your decision."
    )
    evaluation = ""
    for chunk in client.chat_completion(
        [
            {"role": "system", "content": "You are a legal expert evaluating the arguments presented by the prosecution and the defense."},
            {"role": "user", "content": prompt},
        ],
        max_tokens=512,
        stream=True,
        temperature=0.6,
        top_p=0.95,
    ):
        token = chunk.choices[0].delta.content
        if token is not None:
            evaluation += token
    return evaluation


def score_argument_from_outcome(outcome, argument):
    # Simplified scoring based on keywords in the outcome
    if "Prosecutor" in outcome:
        prosecutor_score = outcome.count("Prosecutor") * 2
        if "won" in outcome and "Prosecutor" in outcome:
            prosecutor_score += 10
    else:
        prosecutor_score = 0

    if "Defense" in outcome:
        defense_score = outcome.count("Defense") * 2
        if "won" in outcome and "Defense" in outcome:
            defense_score += 10
    else:
        defense_score = 0

    return prosecutor_score if "Prosecutor" in argument else defense_score


def color_code(score):
    if score > 50:
        return "green"
    elif score > 30:
        return "yellow"
    else:
        return "red"


# Custom CSS for white background and black text for input and output boxes
custom_css = """
body {
    background-color: #ffffff;
    color: #000000;
    font-family: Arial, sans-serif;
}
.gradio-container {
    max-width: 1000px;
    margin: 0 auto;
    padding: 20px;
    background-color: #ffffff;
    border: 1px solid #e0e0e0;
    border-radius: 8px;
    box-shadow: 0 2px 5px rgba(0, 0, 0, 0.1);
}
.gr-button {
    background-color: #ffffff !important;
    border-color: #ffffff !important;
    color: #000000 !important;
}
.gr-button:hover {
    background-color: #ffffff !important;
    border-color: #004085 !important;
}
.gr-input, .gr-textbox, .gr-slider, .gr-markdown, .gr-chatbox {
    border-radius: 4px;
    border: 1px solid #ced4da;
    background-color: #ffffff !important;
    color: #000000 !important;
}
.gr-input:focus, .gr-textbox:focus, .gr-slider:focus {
    border-color: #ffffff;
    outline: 0;
    box-shadow: 0 0 0 0.2rem rgba(255, 255, 255, 1.0);
}
#flagging-button {
    display: none;
}
footer {
    display: none;
}
.chatbox .chat-container .chat-message {
    background-color: #ffffff !important;
    color: #000000 !important;
}
.chatbox .chat-container .chat-message-input {
    background-color: #ffffff !important;
    color: #000000 !important;
}
.gr-markdown {
    background-color: #ffffff !important;
    color: #000000 !important;
}
.gr-markdown h1, .gr-markdown h2, .gr-markdown h3, .gr-markdown h4, .gr-markdown h5, .gr-markdown h6, .gr-markdown p, .gr-markdown ul, .gr-markdown ol, .gr-markdown li {
    color: #000000 !important;
}
.score-box {
    width: 60px;
    height: 60px;
    display: flex;
    align-items: center;
    justify-content: center;
    font-size: 12px;
    font-weight: bold;
    color: black;
    margin: 5px;
}
.scroll-box {
    max-height: 200px;
    overflow-y: scroll;
    border: 1px solid #ced4da;
    padding: 10px;
    border-radius: 4px;
}
"""


# Function to facilitate the conversation between the two chatbots
def chat_between_bots(system_message1, system_message2, max_tokens, temperature, top_p, history1, history2, shared_history, message):
    # Take the final (fully streamed) response and history from each bot.
    response1, history1 = list(respond(message, history1, system_message1, max_tokens, temperature, top_p))[-1]
    response2, history2 = list(respond(message, history2, system_message2, max_tokens, temperature, top_p))[-1]

    shared_history.append(f"Prosecutor: {response1}")
    shared_history.append(f"Defense Attorney: {response2}")

    max_length = max(len(response1), len(response2))
    response1 = response1[:max_length]
    response2 = response2[:max_length]

    outcome = generate_case_outcome(response1, response2)
    score1 = score_argument_from_outcome(outcome, "Prosecutor")
    score2 = score_argument_from_outcome(outcome, "Defense")

    prosecutor_color = color_code(score1)
    defense_color = color_code(score2)
    # HTML score badges rendered in the gr.HTML components: styled by the
    # .score-box CSS class and colored according to each side's score.
    prosecutor_score_color = f"<div class='score-box' style='background-color:{prosecutor_color};'>Score: {score1}</div>"
    defense_score_color = f"<div class='score-box' style='background-color:{defense_color};'>Score: {score2}</div>"

    return response1, response2, history1, history2, shared_history, outcome, prosecutor_score_color, defense_score_color


def update_pdf_gallery(pdf_files):
    # Return the uploaded files twice: once for the gallery display and once
    # for the stored pdf_files state, matching the click handler's outputs.
    return pdf_files, pdf_files


def add_message(history, message):
    for x in message["files"]:
        history.append(((x,), None))
    if message["text"] is not None:
        history.append((message["text"], None))
    return history, gr.MultimodalTextbox(value=None, interactive=False)


def bot(history):
    response = "**That's cool!**"
    history[-1][1] = ""
    # Stream the canned reply character by character.
    for character in response:
        history[-1][1] += character
        time.sleep(0.05)
        yield history


def print_like_dislike(x: gr.LikeData):
    print(x.index, x.value, x.liked)


with gr.Blocks(css=custom_css) as demo:
    history1 = gr.State([])
    history2 = gr.State([])
    shared_history = gr.State([])
    pdf_files = gr.State([])

    with gr.Tab("Argument Evaluation"):
        message = gr.Textbox(label="Case to Argue")
        system_message1 = gr.State("You are an expert Prosecutor. Give your best arguments for the case on behalf of the prosecution.")
        system_message2 = gr.State("You are an expert Defense Attorney. Give your best arguments for the case on behalf of the Defense.")
        max_tokens = gr.State(512)
        temperature = gr.State(0.6)
        top_p = gr.State(0.95)

        with gr.Row():
            with gr.Column(scale=4):
                prosecutor_response = gr.Textbox(label="Prosecutor's Response", interactive=True, elem_classes=["scroll-box"])
            with gr.Column(scale=1):
                prosecutor_score_color = gr.HTML()
            with gr.Column(scale=4):
                defense_response = gr.Textbox(label="Defense Attorney's Response", interactive=True, elem_classes=["scroll-box"])
            with gr.Column(scale=1):
                defense_score_color = gr.HTML()

        shared_argument = gr.Textbox(label="Case Outcome", interactive=True)
        submit_btn = gr.Button("Argue")

        submit_btn.click(
            chat_between_bots,
            inputs=[system_message1, system_message2, max_tokens, temperature, top_p, history1, history2, shared_history, message],
            outputs=[prosecutor_response, defense_response, history1, history2, shared_history, shared_argument, prosecutor_score_color, defense_score_color],
        )

    with gr.Tab("PDF Management"):
        pdf_upload = gr.File(label="Upload Case Files (PDF)", file_types=[".pdf"])
        pdf_gallery = gr.Gallery(label="PDF Gallery")
        pdf_upload_btn = gr.Button("Update PDF Gallery")

        pdf_upload_btn.click(update_pdf_gallery, inputs=[pdf_upload], outputs=[pdf_gallery, pdf_files])

    with gr.Tab("Chatbot"):
        chatbot = gr.Chatbot([], elem_id="chatbot", bubble_full_width=False)
        chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter message or upload file...", show_label=False)

        chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
        bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
        bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

        chatbot.like(print_like_dislike, None, None)

demo.queue()
demo.launch()