from huggingface_hub import InferenceClient
import gradio as gr
import base64
import datetime
client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
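# Note: Meta-Llama-3-8B-Instruct is a gated model, so the client needs a
# Hugging Face token. huggingface_hub picks one up from the HF_TOKEN
# environment variable (or a cached `huggingface-cli login`); it can also be
# passed explicitly, e.g.:
#
#   client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token="hf_...")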
# Debate response function: streams the model's reply, defending the assigned
# position on the chosen topic. Topic and position arrive as plain strings via
# ChatInterface's additional_inputs (the original read `topic.value` /
# `position.value` from the components, which only yields their initial,
# empty values, never what the user typed).
def debate_respond(message, history: list[tuple[str, str]],
                   topic, position,
                   max_tokens=512, temperature=0.4, top_p=0.95):
    # System message defining assistant behavior in the debate
    system_message = {
        "role": "system",
        "content": f"You are a debate participant tasked with defending the position '{position}' on the topic '{topic}'. Your goal is to articulate your arguments with clarity, logic, and professionalism while addressing counterpoints made by the opposing side. Ensure that your responses are thoughtful, evidence-based, and persuasive. "
                   "During the debate, if the user presents arguments challenging your stance, analyze their points critically and provide respectful but firm counterarguments. Avoid dismissive language and focus on strengthening your case through logical reasoning, data, and examples relevant to the topic. "
                   f"Stay consistent with your assigned position ('{position}'), even if the opposing arguments are strong. Your role is not to concede but to present a compelling case for your stance. Keep the tone respectful and formal throughout the discussion, fostering a constructive and engaging debate environment."
    }
    messages = [system_message]

    # Adding conversation history
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    # Adding the current user input
    messages.append({"role": "user", "content": message})

    # Generating the response, streamed chunk by chunk
    # (loop variable renamed from `message` to avoid shadowing the user input)
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        # delta.content can be None on the final chunk of a stream
        response += chunk.choices[0].delta.content or ""
        yield response
    print(f"{datetime.datetime.now()}::{messages[-1]['content']}->{response}\n")
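# A minimal sketch of exercising debate_respond directly, e.g. as a local
# smoke test; the topic/position strings are illustrative placeholders, and a
# valid token must be configured for the API call to succeed:
#
#   final = ""
#   for partial in debate_respond("Please give your opening statement.", [],
#                                 topic="School uniforms", position="For"):
#       final = partial
#   print(final)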
# Encode image function for logos (optional, kept for design)
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')
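# Example of how the base64 string could be embedded in the UI (the asset
# path is hypothetical; nothing in the layout below depends on it):
#
#   logo_b64 = encode_image("assets/logo.png")
#   gr.HTML(f'<img src="data:image/png;base64,{logo_b64}" height="40">')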
# Gradio interface (the original module-level `global topic, position` was a
# no-op and has been dropped; the components are passed in explicitly instead)
with gr.Blocks(theme=gr.themes.Ocean(font=[gr.themes.GoogleFont("Roboto Mono")]),
               css='footer {visibility: hidden}') as demo:
    gr.Markdown("# Welcome to The Debate_Master 🗣️🤖")
    with gr.Tabs():
        with gr.TabItem("Debate Interface"):
            with gr.Row():
                topic = gr.Textbox(label="Debate Topic", placeholder="Enter the topic of the debate")
                position = gr.Radio(["For", "Against"], label="Position", info="LLM's debate stance")
            chatbot = gr.Chatbot(height=500)
            # Wire the topic and position fields into the chat function so the
            # user's current selections actually reach the system prompt
            debate_interface = gr.ChatInterface(
                debate_respond,
                chatbot=chatbot,
                additional_inputs=[topic, position],
                # with additional_inputs, each example is [message, topic,
                # position]; the topic/position values here are illustrative
                examples=[
                    ["Why do you support this stance?", "School uniforms", "For"],
                    ["Can you refute the opposing view on this topic?", "School uniforms", "Against"],
                    ["What evidence supports your position?", "School uniforms", "For"],
                ],
            )
    # A <footer> tag here would be hidden by the CSS above, so use a <div>
    gr.HTML("<div style='text-align: center'><p>LLM Debate Participant © 2024</p></div>")
if __name__ == "__main__":
    demo.launch(share=True)
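# Note: on Gradio 3.x, streaming generator functions require enabling the
# queue first, e.g. demo.queue().launch(share=True); on Gradio 4+ the queue
# is enabled by default.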