Update app.py
app.py
CHANGED
@@ -1,44 +1,47 @@
-import …
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
import gradio as gr

-# Load …
-…

-# …
personas = {
-    "🟢 Optimist": "…
-    "🔴 Pessimist": "…
-    "🟡 Neutral": "…
}

-# …
-def …
-…
-    prompt = f"You are a debater. {style}. The topic is: '{topic}'. What is your opinion?"
-    out = pipe(prompt, max_new_tokens=120, temperature=0.7)[0]["generated_text"]
-    responses[label] = out.strip()
-    except Exception as e:
-        responses[label] = f"❌ Error generating response: {str(e)}"
-
-    result = "\n\n".join([f"### {label}\n{resp}" for label, resp in responses.items()])
-    return result

-# …
-…
    outputs=gr.Markdown(),
    title="🎙️ Multi-Agent Debate Simulator",
-    description="…
-)
-
-# Launch the app
-demo.launch()
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import gradio as gr

+# Load stronger open LLM (Mistral-7B-Instruct)
+model_name = "mistralai/Mistral-7B-Instruct-v0.1"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

+# Define agent styles
personas = {
+    "🟢 Optimist": "An individual who sees the positive side and hopeful outcomes of any topic.",
+    "🔴 Pessimist": "A critic who focuses on the flaws, risks, and downsides of the topic.",
+    "🟡 Neutral": "An unbiased observer who presents a balanced and fair viewpoint."
}

+# Prompt template
+def build_prompt(topic, persona_desc):
+    return (
+        f"You are a skilled debater. Take on this persona: {persona_desc}\n"
+        f"Debate Topic: \"{topic}\"\n"
+        f"Respond in detail and back your opinion with reasoning.\n"
+        f"Your argument:"
+    )

+# Generate debate
+def debate(topic):
+    results = {}
+    for label, desc in personas.items():
+        prompt = build_prompt(topic, desc)
+        output = pipe(prompt, max_new_tokens=200, temperature=0.9, top_p=0.95)[0]["generated_text"]
+        argument = output.split("Your argument:")[-1].strip()
+        results[label] = argument
+    return results
+
+# Gradio UI
+def run_debate(topic):
+    response = debate(topic)
+    return "\n\n".join([f"**{k}**:\n{v}" for k, v in response.items()])
+
+gr.Interface(
+    fn=run_debate,
+    inputs=gr.Textbox(label="Enter a Debate Topic"),
    outputs=gr.Markdown(),
    title="🎙️ Multi-Agent Debate Simulator",
+    description="Debates from multiple perspectives using Mistral-7B on Hugging Face 🤗."
+).launch()
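A practical caveat on the new model load: Mistral-7B-Instruct in full float32 precision needs roughly 28 GB of memory, far more than a basic CPU Space provides, so the plain from_pretrained call above can fail or swap heavily. A minimal sketch of a lighter load, assuming a CUDA GPU is available and accelerate is installed (torch_dtype and device_map are standard Transformers arguments, not part of this commit):

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

model_name = "mistralai/Mistral-7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Half precision roughly halves the memory footprint; device_map="auto"
# places layers on the available GPU(s) via accelerate.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)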
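For reference, this is the exact prompt each persona is conditioned on, as produced by build_prompt in the new file (the sample topic is only illustrative):

print(build_prompt("Is remote work good for society?", personas["🟢 Optimist"]))
# You are a skilled debater. Take on this persona: An individual who sees the positive side and hopeful outcomes of any topic.
# Debate Topic: "Is remote work good for society?"
# Respond in detail and back your opinion with reasoning.
# Your argument: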
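The text-generation pipeline returns the prompt followed by the continuation, which is why debate() splits on "Your argument:". A variant of the generation call, reusing pipe and build_prompt from the file above, avoids the string splitting (return_full_text is a standard pipeline argument; do_sample=True is needed for temperature and top_p to actually take effect):

def generate_argument(topic, persona_desc):
    prompt = build_prompt(topic, persona_desc)
    out = pipe(
        prompt,
        max_new_tokens=200,
        do_sample=True,          # sampling must be on for temperature/top_p to matter
        temperature=0.9,
        top_p=0.95,
        return_full_text=False,  # return only the generated continuation, not the echoed prompt
    )[0]["generated_text"]
    return out.strip()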
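Mistral-7B-Instruct is fine-tuned on the [INST] ... [/INST] chat format, so a raw completion prompt like the one above can work but tends to drift. A sketch of the same persona prompt rendered through the tokenizer's chat template, assuming a Transformers version recent enough to ship apply_chat_template (the helper name build_chat_prompt is illustrative, not from this commit):

def build_chat_prompt(topic, persona_desc):
    messages = [{
        "role": "user",
        "content": (
            f"You are a skilled debater. Take on this persona: {persona_desc}\n"
            f"Debate Topic: \"{topic}\"\n"
            "Respond in detail and back your opinion with reasoning."
        ),
    }]
    # Wraps the message in the [INST] ... [/INST] markers Mistral-Instruct expects.
    return tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)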