# Debate_Master — HuggingFace Space (Gradio demo app).
# Third-party imports, then stdlib.
from huggingface_hub import InferenceClient
import gradio as gr

import base64
import datetime

# Shared inference client for the chat model; reused across all requests.
client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
# Debate response function
def debate_respond(message, history: list[tuple[str, str]],
                   max_tokens=512, temperature=0.4, top_p=0.95):
    """Stream an LLM debate reply for the current user message.

    Args:
        message: The user's latest utterance.
        history: Prior (user, assistant) turn pairs from the Gradio chatbot.
        max_tokens: Cap on generated tokens.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling cutoff.

    Yields:
        The partial response text, growing as streamed chunks arrive.
    """
    # NOTE(review): `topic.value` / `position.value` read the module-level
    # Gradio components' *initial* values, not the user's live input at request
    # time. To use live values, wire `additional_inputs=[topic, position]` on
    # the ChatInterface and accept them as parameters — confirm intended behavior.
    system_message = {
        "role": "system",
        "content": f"You are a debate participant tasked with defending the position '{position.value}' on the topic '{topic.value}'. Your goal is to articulate your arguments with clarity, logic, and professionalism while addressing counterpoints made by the opposing side. Ensure that your responses are thoughtful, evidence-based, and persuasive."
        f"During the debate, if the user presents arguments challenging your stance, analyze their points critically and provide respectful but firm counterarguments. Avoid dismissive language and focus on strengthening your case through logical reasoning, data, and examples relevant to the topic."
        f"Stay consistent with your assigned position ('{position.value}'), even if the opposing arguments are strong. Your role is not to concede but to present a compelling case for your stance. Keep the tone respectful and formal throughout the discussion, fostering a constructive and engaging debate environment."
    }
    messages = [system_message]

    # Replay prior turns so the model sees the full conversation context.
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    # The current user input goes last.
    messages.append({"role": "user", "content": message})

    # Stream the completion, accumulating text. A loop variable other than
    # `message` avoids shadowing the function parameter (bug in original).
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        # Final stream chunks can carry a None delta; guard against
        # `str + None` raising TypeError (bug in original).
        response += chunk.choices[0].delta.content or ""
        yield response
    print(f"{datetime.datetime.now()}::{messages[-1]['content']}->{response}\n")
# Encode image function for logos (optional, kept for design)
def encode_image(image_path):
    """Read the file at *image_path* and return its contents as base64 text.

    Args:
        image_path: Path to the file to encode (opened in binary mode).

    Returns:
        The base64-encoded file contents, decoded to a UTF-8 str
        (suitable for embedding, e.g. in a data: URI).
    """
    with open(image_path, "rb") as image_file:
        raw = image_file.read()
    return base64.b64encode(raw).decode("utf-8")
# Gradio interface: topic/position controls plus a streaming debate chat tab.
# NOTE: the original had `global topic, position` here — a no-op at module
# scope (module-level assignment is already global), so it is removed.
with gr.Blocks(theme=gr.themes.Ocean(font=[gr.themes.GoogleFont("Roboto Mono")]),
               css='footer {visibility: hidden}') as demo:
    gr.Markdown("# Welcome to The Debate_Master 🗣️🤖")
    with gr.Tabs():
        with gr.TabItem("Debate Interface"):
            with gr.Row():
                # Module-level components read by debate_respond via `.value`.
                topic = gr.Textbox(label="Debate Topic", placeholder="Enter the topic of the debate")
                position = gr.Radio(["For", "Against"], label="Position", info="LLM's debate stance")
            chatbot = gr.Chatbot(height=500)
            debate_interface = gr.ChatInterface(
                debate_respond,
                chatbot=chatbot,
                examples=[
                    "Why do you support this stance?",
                    "Can you refute the opposing view on this topic?",
                    "What evidence supports your position?"
                ],
            )
    gr.HTML("<footer><p>LLM Debate Participant © 2024</p></footer>")

if __name__ == "__main__":
    # share=True publishes a temporary public URL (Spaces also serves locally).
    demo.launch(share=True)