# debate-gpt / app.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
# Load the model and tokenizer.
# Note: Meta-Llama-3.1-8B is a gated checkpoint on the Hugging Face Hub, so the
# environment must be authenticated with a token that has been granted access.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B")
model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3.1-8B")
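# Optional (a sketch, not part of the original app): the full-precision load
# above needs roughly 32 GB of RAM (8B params x 4 bytes). A common alternative,
# assuming torch and the `accelerate` package are installed, is:
#
#     import torch
#     model = AutoModelForCausalLM.from_pretrained(
#         "meta-llama/Meta-Llama-3.1-8B",
#         torch_dtype=torch.bfloat16,  # halves memory vs. float32
#         device_map="auto",           # spread layers across available devices
#     )
#
# (Inputs would then need to be moved to model.device before calling generate.)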
def generate_text(prompt, max_new_tokens=150):
    inputs = tokenizer(prompt, return_tensors="pt")
    # max_new_tokens budgets only the generated tokens, so the prompt length
    # no longer eats into the completion (max_length counted both).
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens,
                             num_return_sequences=1, do_sample=True,
                             pad_token_id=tokenizer.eos_token_id)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
def generate_argument(topic, stance):
    prompt = (f"Generate a compelling argument for the following debate topic.\n"
              f"Topic: {topic}\nStance: {stance}\nArgument:")
    response = generate_text(prompt, max_new_tokens=200)
    # The decoded output echoes the prompt, so keep only the text after the last "Argument:" marker.
    return response.split("Argument:")[-1].strip()
def generate_counterargument(topic, original_argument):
    prompt = (f"Generate a strong counterargument for the following debate topic and argument.\n"
              f"Topic: {topic}\nOriginal Argument: {original_argument}\nCounterargument:")
    response = generate_text(prompt, max_new_tokens=200)
    return response.split("Counterargument:")[-1].strip()
def debate_assistant(topic, stance):
    argument = generate_argument(topic, stance)
    counterargument = generate_counterargument(topic, argument)
    return f"Argument: {argument}\n\nCounterargument: {counterargument}"
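# Example of the combined output shape (topic and generated text are illustrative):
#   debate_assistant("School uniforms should be mandatory", "For")
#   -> "Argument: ...\n\nCounterargument: ..."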
# Create the Gradio interface
iface = gr.Interface(
    fn=debate_assistant,
    inputs=[
        gr.Textbox(label="Debate Topic"),
        gr.Radio(["For", "Against"], label="Stance"),
    ],
    outputs=gr.Textbox(label="Generated Debate Arguments"),
    title="AI-powered Debate Assistant (Meta-Llama 3.1)",
    description="Enter a debate topic and choose a stance to generate arguments and counterarguments using Meta-Llama 3.1.",
)
# Launch the interface
iface.launch()
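# Run locally with `python app.py`; on Hugging Face Spaces the app starts
# automatically. Passing share=True to launch() would additionally create a
# temporary public URL for the demo.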