Create app.py
app.py
ADDED
import os

import gradio as gr
import requests
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas

# Hugging Face Inference API details
HF_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
HF_API_KEY = os.getenv("HF_API_KEY")  # Read from environment variable (e.g. a Space secret)

HEADERS = {"Authorization": f"Bearer {HF_API_KEY}"}
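If HF_API_KEY is missing, the Authorization header is sent with an invalid token and every request is likely to fail with an authorization error that only surfaces later in the textboxes. A small startup guard makes the misconfiguration obvious; this is a sketch, not part of the committed file, and the error wording is illustrative:

# Optional guard (sketch): fail fast when the token is not configured.
if not HF_API_KEY:
    raise RuntimeError("HF_API_KEY is not set; add it as a Space secret or export it before launching.")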
# Function to call Hugging Face API
def query_hf_api(prompt):
    payload = {"inputs": prompt}
    response = requests.post(HF_API_URL, headers=HEADERS, json=payload)
    if response.status_code == 200:
        return response.json()[0]["generated_text"]
    else:
        return f"Error: {response.status_code} - {response.text}"
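The hosted Inference API frequently answers 503 while the model is cold-loading, and for text generation the returned generated_text echoes the prompt before the completion. A hedged variant of the call is sketched below; the parameters and options fields follow the public text-generation API, while the retry count, timeout, and token limit are arbitrary choices rather than anything from the original file:

import time

def query_hf_api_with_retry(prompt, max_retries=3):
    # Sketch: wait for a cold model, cap the output length, and drop the echoed prompt.
    payload = {
        "inputs": prompt,
        "parameters": {"max_new_tokens": 512, "return_full_text": False},
        "options": {"wait_for_model": True},
    }
    response = None
    for attempt in range(max_retries):
        response = requests.post(HF_API_URL, headers=HEADERS, json=payload, timeout=120)
        if response.status_code == 200:
            return response.json()[0]["generated_text"]
        if response.status_code == 503:
            time.sleep(5 * (attempt + 1))  # model still loading; back off and retry
            continue
        break  # any other error is not retried
    return f"Error: {response.status_code} - {response.text}"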
# Debate function
def conduct_debate(topic):
    # AI Assistant 1's response
    prompt_1 = f"The topic of debate is: {topic}. Present your argument in favor."
    response_1 = query_hf_api(prompt_1)

    # AI Assistant 2's response
    prompt_2 = f"The topic of debate is: {topic}. Present your argument against."
    response_2 = query_hf_api(prompt_2)

    # Judge's conclusion
    prompt_judge = f"The debate topic was: {topic}. One argument in favor: {response_1}. One argument against: {response_2}. As a judge, analyze the arguments and provide a fair conclusion."
    conclusion = query_hf_api(prompt_judge)

    return response_1, response_2, conclusion
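Mistral-7B-Instruct is trained on the [INST] ... [/INST] chat template, so wrapping the three prompts in that template generally produces better-behaved answers than sending them as plain text. A minimal helper, sketched under that assumption (the helper name is ours, not the author's):

def format_prompt(instruction):
    # Sketch: wrap a plain instruction in the Mistral instruction template.
    return f"<s>[INST] {instruction} [/INST]"

conduct_debate could then pass format_prompt(prompt_1), and likewise for the other two prompts, to query_hf_api instead of the raw strings.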
# Function to generate a PDF
def generate_pdf(topic, response_1, response_2, conclusion):
    pdf_filename = "debate_result.pdf"

    c = canvas.Canvas(pdf_filename, pagesize=letter)
    c.drawString(100, 750, f"Debate Topic: {topic}")
    c.drawString(100, 730, "Argument in Favor:")
    c.drawString(100, 710, response_1[:1000])  # Limiting text for PDF readability
    c.drawString(100, 690, "Argument Against:")
    c.drawString(100, 670, response_2[:1000])
    c.drawString(100, 650, "Judge's Conclusion:")
    c.drawString(100, 630, conclusion[:1000])
    c.save()

    return pdf_filename
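canvas.drawString draws a single line and never wraps, so a 1000-character argument runs off the right edge of the page. One way to keep the output readable, sketched with the standard-library textwrap module (column width, coordinates, and line spacing are arbitrary choices):

import textwrap

def draw_wrapped(c, text, x, y, width=90, leading=14):
    # Sketch: break the text into short lines and move the y cursor down for each one.
    for line in textwrap.wrap(text, width=width):
        c.drawString(x, y, line)
        y -= leading
    return y  # the caller continues drawing below the wrapped block

generate_pdf would then replace each long drawString call with something like y = draw_wrapped(c, response_1, 100, y), and start a new page with c.showPage() once y gets close to the bottom margin.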
# Gradio UI
def debate_interface(topic):
    response_1, response_2, conclusion = conduct_debate(topic)
    pdf_file = generate_pdf(topic, response_1, response_2, conclusion)

    return response_1, response_2, conclusion, pdf_file

iface = gr.Interface(
    fn=debate_interface,
    inputs=gr.Textbox(label="Enter Debate Topic"),
    outputs=[
        gr.Textbox(label="Argument in Favor"),
        gr.Textbox(label="Argument Against"),
        gr.Textbox(label="Judge's Conclusion"),
        gr.File(label="Download Debate PDF")
    ],
    title="AI Debate Platform",
    description="Enter a debate topic and let two AI assistants argue, with a third AI model acting as a judge."
)

iface.launch()
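For the Space to build and run, a requirements.txt next to app.py needs at least gradio, requests, and reportlab, and HF_API_KEY has to be present in the environment; on Hugging Face Spaces a repository secret with that name is exposed to the app as an environment variable, and locally it can simply be exported before running python app.py.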