File size: 3,279 Bytes
a2d40f6
 
 
 
 
9d38e7c
a2d40f6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9d38e7c
a2d40f6
 
 
9d38e7c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a2d40f6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ff3fd00
f6d7cd2
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
# Standard library imports first, then third-party (PEP 8 grouping);
# `import os` was previously buried mid-file.
import json
import os
from textwrap import wrap

import gradio as gr
import requests
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas

# Hugging Face Inference API details
HF_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"

# Read from environment variable so the key is never hard-coded in source.
# NOTE(review): HF_API_KEY may be None if the variable is unset; the API call
# will then fail with an auth error rather than at startup.
HF_API_KEY = os.getenv("HF_API_KEY")

HEADERS = {"Authorization": f"Bearer {HF_API_KEY}"}

# Function to call Hugging Face API
def query_hf_api(prompt):
    """Send *prompt* to the Hugging Face Inference API and return generated text.

    On any failure (network error, non-200 status, malformed payload) this
    returns a human-readable "Error: ..." string rather than raising, so the
    Gradio UI can display the problem directly.
    """
    payload = {"inputs": prompt}
    try:
        # Explicit timeout so a stalled API call cannot hang the UI forever.
        response = requests.post(HF_API_URL, headers=HEADERS, json=payload, timeout=120)
    except requests.RequestException as exc:
        return f"Error: request failed - {exc}"
    if response.status_code == 200:
        try:
            return response.json()[0]["generated_text"]
        except (ValueError, KeyError, IndexError, TypeError) as exc:
            # The API occasionally returns error JSON or an unexpected shape.
            return f"Error: unexpected API response - {exc}"
    return f"Error: {response.status_code} - {response.text}"

# Debate function
def conduct_debate(topic):
    """Run a three-step debate on *topic*.

    Queries the model for an argument in favor, an argument against, and a
    judge's conclusion that weighs both. Returns the three texts in that order.
    """
    argument_for = query_hf_api(
        f"The topic of debate is: {topic}. Present your argument in favor."
    )
    argument_against = query_hf_api(
        f"The topic of debate is: {topic}. Present your argument against."
    )
    # The judge prompt embeds both prior answers so the verdict is grounded
    # in what was actually argued.
    verdict = query_hf_api(
        f"The debate topic was: {topic}. One argument in favor: {argument_for}."
        f" One argument against: {argument_against}. As a judge, analyze the"
        " arguments and provide a fair conclusion."
    )
    return argument_for, argument_against, verdict

# Function to generate a properly formatted PDF
def generate_pdf(topic, response_1, response_2, conclusion):
    """Render the debate (topic, both arguments, judge's conclusion) to a PDF.

    Writes "debate_result.pdf" in the working directory and returns that
    filename so the caller can hand it to the Gradio File output. Long text is
    word-wrapped, and a new page is started when a page fills up (the previous
    version kept drawing below the page edge).
    """
    pdf_filename = "debate_result.pdf"
    c = canvas.Canvas(pdf_filename, pagesize=letter)
    c.setFont("Helvetica", 12)
    top_margin = 750
    bottom_margin = 50
    y_position = top_margin

    def ensure_room(y):
        # Start a fresh page (restoring the font, which showPage resets)
        # before drawing below the bottom margin.
        if y < bottom_margin:
            c.showPage()
            c.setFont("Helvetica", 12)
            return top_margin
        return y

    def draw_wrapped_text(text, x, y, wrap_width=80):
        # Wrap model output at ~80 chars so lines fit the page width;
        # the old unused `max_width` point parameter was removed.
        for line in wrap(text, width=wrap_width):
            y = ensure_room(y)
            c.drawString(x, y, line)
            y -= 20
        return y

    c.drawString(100, y_position, f"Debate Topic: {topic}")
    y_position -= 30

    c.drawString(100, y_position, "Argument in Favor:")
    y_position -= 20
    y_position = draw_wrapped_text(response_1, 100, y_position)

    y_position -= 30  # Space between sections
    y_position = ensure_room(y_position)
    c.drawString(100, y_position, "Argument Against:")
    y_position -= 20
    y_position = draw_wrapped_text(response_2, 100, y_position)

    y_position -= 30  # Space between sections
    y_position = ensure_room(y_position)
    c.drawString(100, y_position, "Judge's Conclusion:")
    y_position -= 20
    y_position = draw_wrapped_text(conclusion, 100, y_position)

    c.save()
    return pdf_filename

# Gradio UI
def debate_interface(topic):
    """Gradio callback: run the debate on *topic* and bundle the results.

    Returns the pro argument, con argument, judge's conclusion, and the path
    of the generated PDF — matching the four UI output components in order.
    """
    favor, against, verdict = conduct_debate(topic)
    pdf_path = generate_pdf(topic, favor, against, verdict)
    return favor, against, verdict, pdf_path

# Wire the debate pipeline into a web UI: one topic textbox in; the two
# arguments, the verdict, and a downloadable PDF out.
_output_components = [
    gr.Textbox(label="Argument in Favor"),
    gr.Textbox(label="Argument Against"),
    gr.Textbox(label="Judge's Conclusion"),
    gr.File(label="Download Debate PDF"),
]

iface = gr.Interface(
    fn=debate_interface,
    inputs=gr.Textbox(label="Enter Debate Topic"),
    outputs=_output_components,
    title="AI Debate Platform",
    description="Enter a debate topic and let two AI assistants argue, with a third AI model acting as a judge.",
)

# share=True publishes a temporary public URL in addition to the local server.
iface.launch(share=True)