# AI Debate Platform — Gradio app (Hugging Face Space).
# Two AI assistants argue a topic; a third acts as judge; results export to PDF.
# Standard library
import json
import os
from textwrap import wrap

# Third party
import gradio as gr
import requests
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas

# Hugging Face Inference API details.
HF_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
HF_API_KEY = os.getenv("HF_API_KEY")  # read from environment variable; None if unset
HEADERS = {"Authorization": f"Bearer {HF_API_KEY}"}
def query_hf_api(prompt):
    """Send *prompt* to the Hugging Face Inference API and return the generated text.

    Parameters
    ----------
    prompt : str
        The instruction text sent as the model's ``inputs``.

    Returns
    -------
    str
        The model's ``generated_text`` on success, or a best-effort
        ``"Error: ..."`` string on an HTTP error or network failure
        (callers interpolate this result into further prompts, so we
        never raise here).
    """
    payload = {"inputs": prompt}
    try:
        # Without a timeout a stalled API call would hang the UI forever.
        response = requests.post(
            HF_API_URL, headers=HEADERS, json=payload, timeout=120
        )
    except requests.RequestException as exc:
        # Keep the function's error-string contract instead of crashing the app.
        return f"Error: request failed - {exc}"
    if response.status_code == 200:
        return response.json()[0]["generated_text"]
    return f"Error: {response.status_code} - {response.text}"
def conduct_debate(topic):
    """Run a three-stage debate on *topic*.

    Queries the model once for the pro side, once for the con side, and a
    third time as a judge who weighs both arguments. Returns the tuple
    ``(argument_for, argument_against, conclusion)``.
    """
    argument_for = query_hf_api(
        f"The topic of debate is: {topic}. Present your argument in favor."
    )
    argument_against = query_hf_api(
        f"The topic of debate is: {topic}. Present your argument against."
    )
    # The judge sees both prior answers verbatim.
    conclusion = query_hf_api(
        f"The debate topic was: {topic}. One argument in favor: {argument_for}. "
        f"One argument against: {argument_against}. As a judge, analyze the "
        f"arguments and provide a fair conclusion."
    )
    return argument_for, argument_against, conclusion
# Function to generate a properly formatted PDF
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
from reportlab.lib.utils import simpleSplit
def generate_pdf(topic, response_1, response_2, conclusion, pdf_filename="debate_result.pdf"):
    """Write the debate transcript to a formatted PDF and return its filename.

    Parameters
    ----------
    topic : str
        The debate topic, printed as the document heading.
    response_1, response_2 : str
        The pro and con arguments.
    conclusion : str
        The judge's verdict.
    pdf_filename : str, optional
        Output path for the PDF (default ``"debate_result.pdf"``, matching
        the original hard-coded name).

    Returns
    -------
    str
        ``pdf_filename``, for handing straight to ``gr.File``.
    """
    c = canvas.Canvas(pdf_filename, pagesize=letter)
    c.setFont("Helvetica", 12)
    page_width = letter[0]
    x_margin, y_margin = 50, 750  # left margin / top-of-page text start
    line_height = 16  # vertical space between lines

    def add_text(text, x, y):
        # Wrap to the printable width and draw line by line; returns the
        # y position after the last line so the caller can keep stacking.
        wrapped_text = simpleSplit(text, "Helvetica", 12, page_width - 2 * x_margin)
        for line in wrapped_text:
            if y < 50:  # reached the bottom margin -> start a new page
                c.showPage()
                c.setFont("Helvetica", 12)
                y = y_margin
            c.drawString(x, y, line)
            y -= line_height
        return y

    y_position = add_text(f"Debate Topic: {topic}", x_margin, y_margin - 20)
    # Each section is a heading followed by its body text.
    for heading, body in (
        ("Argument in Favor:", response_1),
        ("Argument Against:", response_2),
        ("Judge's Conclusion:", conclusion),
    ):
        y_position = add_text(heading, x_margin, y_position - 30)
        y_position = add_text(body, x_margin, y_position - 10)
    c.save()
    return pdf_filename
# Gradio UI
def debate_interface(topic):
    """Gradio callback: run the debate and bundle the results with a PDF download."""
    favor, against, verdict = conduct_debate(topic)
    pdf_path = generate_pdf(topic, favor, against, verdict)
    return favor, against, verdict, pdf_path
# Assemble the Gradio UI: one topic textbox in, three transcripts plus a
# downloadable PDF out.
topic_input = gr.Textbox(label="Enter Debate Topic")
result_outputs = [
    gr.Textbox(label="Argument in Favor"),
    gr.Textbox(label="Argument Against"),
    gr.Textbox(label="Judge's Conclusion"),
    gr.File(label="Download Debate PDF"),
]
iface = gr.Interface(
    fn=debate_interface,
    inputs=topic_input,
    outputs=result_outputs,
    title="AI Debate Platform",
    description="Enter a debate topic and let two AI assistants argue, with a third AI model acting as a judge.",
)
iface.launch(share=True)