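"""Debate Master: a Gradio app powered by Meta-Llama-3-8B-Instruct via the
Hugging Face Inference API. It offers a single-player debate (Master vs You)
and an automated debate between two model personas (Master vs Master)."""
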
from huggingface_hub import InferenceClient
import gradio as gr
import base64
import datetime
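
# Serverless Hugging Face Inference client for the debate model.
# Note: meta-llama models are gated, so an HF access token (e.g. an HF_TOKEN
# environment variable or a token passed to InferenceClient) may be required.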
client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")

# Global variables for debate settings
topic = None
position = None
turn = None

# Function for single participant responses (Master vs You)
def debate_respond(message, history: list[tuple[str, str]],
                   max_tokens=128, temperature=0.4, top_p=0.95):
    # The debate topic and stance are set by the START button (see start() below).
    if position is None or topic is None:
        yield "Please fill the Debate Topic -> choose the Debate Master stance -> click START."
        return

    # System message defining assistant behavior in a debate
    system_message = {
        "role": "system",
        "content": f"You are a debate participant tasked with defending the position '{position}' on the topic '{topic}'. Your goal is to articulate your arguments with clarity, logic, and professionalism while addressing counterpoints made by the opposing side. "
                   f"Ensure that your responses are thoughtful, evidence-based, and persuasive. Keep them concise—aim for responses of 4 to 5 lines in a single paragraph (about 128 tokens). "
                   f"Analyze user arguments critically and provide respectful but firm counterarguments. Avoid dismissive language and focus on strengthening your case through logic, data, and examples relevant to the topic. "
                   f"Stay consistent with your assigned position ('{position}'), even if the opposing arguments are strong. Keep the tone respectful and formal throughout."
    }
    messages = [system_message]

    # Adding conversation history
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    # Adding the current user input
    messages.append({"role": "user", "content": message})

    # Generating the response, streamed chunk by chunk
    response = ""
    for message_chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        response += message_chunk.choices[0].delta.content or ""
        yield response
    print(f"{datetime.datetime.now()}::{messages[-1]['content']}->{response}\n")

# Function to start the single-player debate
def start(txt, dd):
    global topic, position
    topic, position = txt, dd
    return f"Debate Master is ready to start the debate on '{topic}' as a '{position}' debater. You can now enter your response."

# Function for multi-participant (Master vs Master) responses
def generate_response(position, topic, message, history):
    # System message defining assistant behavior
    system_message = {
        "role": "system",
        "content": f"You are a debate participant tasked with defending the position '{position}' on the topic '{topic}'. Your goal is to articulate your arguments with clarity, logic, and professionalism while addressing counterpoints made by the opposing side. Ensure that your responses are thoughtful, evidence-based, and persuasive. Keep them concise—aim for responses that are 4 to 5 lines in a single paragraph. "
                   f"Analyze opposing points critically and provide respectful but firm counterarguments. Avoid dismissive language and focus on strengthening your case through logical reasoning, data, and examples relevant to the topic. "
                   f"Stay consistent with your assigned position ('{position}'), even if the opposing arguments are strong. Your role is not to concede but to present a compelling case for your stance."
    }
    messages = [system_message]

    # Adding conversation history
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})

    # Adding the current user input
    messages.append({"role": "user", "content": message})

    # Generate the response (accumulate the full streamed completion before returning)
    response = ""
    for message_chunk in client.chat_completion(
        messages,
        max_tokens=128,
        stream=True,
        temperature=0.4,
        top_p=0.95,
    ):
        response += message_chunk.choices[0].delta.content or ""
    return response

# Function to start the multi-participant debate
def start_debate(topic, position_1, position_2):
    global turn
    if not topic or not position_1 or not position_2:
        return "Please provide the debate topic and positions for both participants.", []
    # Ensure positions are opposite
    if position_1 == position_2:
        return "The positions of both participants must be opposite. Please adjust them.", []
    # Initialize the debate: the participant arguing "For" opens
    turn = "Master-1" if position_1 == "For" else "Master-2"
    position = position_1 if turn == "Master-1" else position_2
    response = generate_response(position, topic, "", [])
    return f"The debate has started! {turn} begins.", [("", response)]

# Function to continue the multi-participant debate
def next_turn(topic, position_1, position_2, history):
    global turn
    if not history:
        return "Start the debate first!", history
    # Determine who responds next
    if turn == "Master-1":
        turn = "Master-2"
        position = position_2
    else:
        turn = "Master-1"
        position = position_1
    # Generate the response, using the last assistant reply as the opponent's message
    user_msg = history[-1][1]
    response = generate_response(position, topic, user_msg, history)
    return f"It's now {turn}'s turn.", history + [(user_msg, response)]

# Encode image function for logos (optional, kept for design)
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

# Encode the images
github_logo_encoded = encode_image("Images/github-logo.png")
linkedin_logo_encoded = encode_image("Images/linkedin-logo.png")
website_logo_encoded = encode_image("Images/ai-logo.png")
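
# Footer HTML; the three {} placeholders receive the base64-encoded logos via str.format below.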
footer = """
<div style="background-color: #1d2938; color: white; padding: 10px; width: 100%; bottom: 0; left: 0; display: flex; justify-content: space-between; align-items: center; padding: .2rem 35px; box-sizing: border-box; font-size: 16px;">
    <div style="text-align: left;">
        <p style="margin: 0;">© 2024</p>
    </div>
    <div style="text-align: center; flex-grow: 1;">
        <p style="margin: 0;">This website is made with ❤ by SARATH CHANDRA</p>
    </div>
    <div class="social-links" style="display: flex; gap: 20px; justify-content: flex-end; align-items: center;">
        <a href="https://github.com/21bq1a4210" target="_blank" style="text-align: center;">
            <img src="data:image/png;base64,{}" alt="GitHub" width="40" height="40" style="display: block; margin: 0 auto;">
            <span style="font-size: 14px;">GitHub</span>
        </a>
        <a href="https://www.linkedin.com/in/sarath-chandra-bandreddi-07393b1aa/" target="_blank" style="text-align: center;">
            <img src="data:image/png;base64,{}" alt="LinkedIn" width="40" height="40" style="display: block; margin: 0 auto;">
            <span style="font-size: 14px;">LinkedIn</span>
        </a>
        <a href="https://21bq1a4210.github.io/MyPortfolio-/" target="_blank" style="text-align: center;">
            <img src="data:image/png;base64,{}" alt="Portfolio" width="40" height="40" style="display: block; margin-right: 40px;">
            <span style="font-size: 14px;">Portfolio</span>
        </a>
    </div>
</div>
"""

# Gradio interface
with gr.Blocks(theme=gr.themes.Soft(font=[gr.themes.GoogleFont("Roboto Mono")]),
               css='footer {visibility: hidden}') as demo:
    gr.Markdown("# Welcome to The Debate Master 🗣️🤖")
    with gr.Tabs():
        with gr.TabItem("Master Vs You"):
            with gr.Row():
                with gr.Column(scale=1):
                    # Distinct names so these components do not shadow the global topic/position settings
                    topic_box = gr.Textbox(label="STEP-1: Debate Topic", placeholder="Enter the topic of the debate")
                    position_radio = gr.Radio(["For", "Against"], label="STEP-2: Debate Master stance", scale=1)
                    btn = gr.Button("STEP-3: Start", variant='primary')
                    clr = gr.ClearButton()
                    output = gr.Textbox(label='Status')
                with gr.Column(scale=4):
                    debate_interface = gr.ChatInterface(debate_respond,
                                                        chatbot=gr.Chatbot(height=475))
        with gr.TabItem("Master Vs Master"):
            with gr.Row():
                with gr.Column(scale=1):
                    topic_input = gr.Textbox(label="STEP-1: Debate Topic", placeholder="Enter the topic of the debate")
                    position_1_input = gr.Radio(["For", "Against"], label="STEP-2: Master-1 Stance")
                    position_2_input = gr.Radio(["For", "Against"], label="STEP-3: Master-2 Stance")
                    start_button = gr.Button("STEP-4: Start", variant='primary')
                    next_button = gr.Button("Next Turn")
                    status_output = gr.Textbox(label="Status", interactive=False)
                with gr.Column(scale=4):
                    chatbot = gr.Chatbot(label="Debate Arena", height=500)
    gr.HTML(footer.format(github_logo_encoded, linkedin_logo_encoded, website_logo_encoded))
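
    # Wire the buttons to their callbacks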
    btn.click(fn=start, inputs=[topic_box, position_radio], outputs=output)
    start_button.click(
        fn=start_debate,
        inputs=[topic_input, position_1_input, position_2_input],
        outputs=[status_output, chatbot],
    )
    next_button.click(
        fn=next_turn,
        inputs=[topic_input, position_1_input, position_2_input, chatbot],
        outputs=[status_output, chatbot],
    )
    clr.click(lambda: None, outputs=[output])
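
# share=True requests a temporary public Gradio link in addition to the local server;
# when hosted on Hugging Face Spaces this is typically unnecessary.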
if __name__ == "__main__":
    demo.launch(share=True)