import os

import gradio as gr
import requests
from reportlab.lib.pagesizes import letter
from reportlab.lib.utils import simpleSplit
from reportlab.pdfgen import canvas

# Hugging Face Inference API details
HF_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
HF_API_KEY = os.getenv("HF_API_KEY")  # Read from environment variable
HEADERS = {"Authorization": f"Bearer {HF_API_KEY}"}

# Function to call the Hugging Face Inference API
def query_hf_api(prompt):
    payload = {"inputs": prompt}
    response = requests.post(HF_API_URL, headers=HEADERS, json=payload)
    if response.status_code == 200:
        return response.json()[0]["generated_text"]
    else:
        return f"Error: {response.status_code} - {response.text}"

# Debate function
def conduct_debate(topic):
    # AI Assistant 1's response
    prompt_1 = f"The topic of debate is: {topic}. Present your argument in favor."
    response_1 = query_hf_api(prompt_1)

    # AI Assistant 2's response
    prompt_2 = f"The topic of debate is: {topic}. Present your argument against."
    response_2 = query_hf_api(prompt_2)

    # Judge's conclusion
    prompt_judge = (
        f"The debate topic was: {topic}. One argument in favor: {response_1}. "
        f"One argument against: {response_2}. As a judge, analyze the arguments "
        f"and provide a fair conclusion."
    )
    conclusion = query_hf_api(prompt_judge)

    return response_1, response_2, conclusion
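
# Hedged aside (an assumption, not part of the original Space): Mistral-7B-Instruct
# is trained on the [INST] ... [/INST] chat format, so wrapping prompts before
# sending them through the raw "inputs" field tends to give cleaner completions.
def format_instruct_prompt(prompt):
    return f"[INST] {prompt} [/INST]"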

# Function to generate a properly formatted PDF
def generate_pdf(topic, response_1, response_2, conclusion):
    pdf_filename = "debate_result.pdf"
    c = canvas.Canvas(pdf_filename, pagesize=letter)
    c.setFont("Helvetica", 12)

    page_width, page_height = letter
    x_margin, y_margin = 50, 750  # Starting position for text
    line_height = 16  # Space between lines

    def add_text(text, x, y):
        wrapped_text = simpleSplit(text, "Helvetica", 12, page_width - 2 * x_margin)
        for line in wrapped_text:
            if y < 50:  # If we reach the bottom margin, create a new page
                c.showPage()
                c.setFont("Helvetica", 12)
                y = y_margin  # Reset y position
            c.drawString(x, y, line)
            y -= line_height
        return y

    y_position = y_margin
    y_position = add_text(f"Debate Topic: {topic}", x_margin, y_position - 20)
    y_position = add_text("Argument in Favor:", x_margin, y_position - 30)
    y_position = add_text(response_1, x_margin, y_position - 10)
    y_position = add_text("Argument Against:", x_margin, y_position - 30)
    y_position = add_text(response_2, x_margin, y_position - 10)
    y_position = add_text("Judge's Conclusion:", x_margin, y_position - 30)
    y_position = add_text(conclusion, x_margin, y_position - 10)

    c.save()
    return pdf_filename

# Gradio UI
def debate_interface(topic):
    response_1, response_2, conclusion = conduct_debate(topic)
    pdf_file = generate_pdf(topic, response_1, response_2, conclusion)
    return response_1, response_2, conclusion, pdf_file

iface = gr.Interface(
    fn=debate_interface,
    inputs=gr.Textbox(label="Enter Debate Topic"),
    outputs=[
        gr.Textbox(label="Argument in Favor"),
        gr.Textbox(label="Argument Against"),
        gr.Textbox(label="Judge's Conclusion"),
        gr.File(label="Download Debate PDF"),
    ],
    title="AI Debate Platform",
    description="Enter a debate topic and let two AI assistants argue, with a third AI model acting as a judge.",
)

iface.launch(share=True)
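
# To run this script outside the Space (a hedged sketch, assuming it is saved as
# app.py and that gradio, requests and reportlab are installed):
#
#   export HF_API_KEY=...   # a Hugging Face access token
#   python app.py
#
# When hosted on Hugging Face Spaces, HF_API_KEY is typically supplied as a
# Space secret, which is exposed to the app as an environment variable.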