Go-Raw committed on
Commit
daa1921
Β·
verified Β·
1 Parent(s): 485970c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -35
app.py CHANGED
@@ -1,44 +1,47 @@
1
- import os
2
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
3
  import gradio as gr
4
 
5
- # Load FLAN-T5 model
6
- tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
7
- model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
8
- pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
 
9
 
10
- # Agent styles
11
  personas = {
12
- "🟒 Optimist": "Take a positive and hopeful viewpoint",
13
- "πŸ”΄ Pessimist": "Take a critical or negative viewpoint",
14
- "🟑 Neutral": "Respond with a balanced and unbiased view"
15
  }
16
 
17
- # Generate responses
18
- def generate_debate(topic):
19
- if not topic.strip():
20
- return "⚠️ Please enter a valid debate topic."
21
-
22
- responses = {}
23
- for label, style in personas.items():
24
- try:
25
- prompt = f"You are a debater. {style}. The topic is: '{topic}'. What is your opinion?"
26
- out = pipe(prompt, max_new_tokens=120, temperature=0.7)[0]["generated_text"]
27
- responses[label] = out.strip()
28
- except Exception as e:
29
- responses[label] = f"❌ Error generating response: {str(e)}"
30
-
31
- result = "\n\n".join([f"### {label}\n{resp}" for label, resp in responses.items()])
32
- return result
33
 
34
- # Gradio Interface
35
- demo = gr.Interface(
36
- fn=generate_debate,
37
- inputs=gr.Textbox(lines=2, placeholder="e.g. Should AI replace teachers?", label="Enter a Debate Topic"),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  outputs=gr.Markdown(),
39
  title="πŸŽ™οΈ Multi-Agent Debate Simulator",
40
- description="This app simulates a debate from different viewpoints using FLAN-T5 (Google) on Hugging Face πŸ€— Transformers. Just enter a topic and see how Optimist, Pessimist, and Neutral personas respond!"
41
- )
42
-
43
- # Launch the app
44
- demo.launch()
 
1
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
2
  import gradio as gr
3
 
4
# Model setup: Mistral-7B-Instruct served through a text-generation pipeline.
# NOTE(review): instantiating a 7B model at import time downloads the weights
# and needs substantial RAM/disk — confirm the Space hardware can hold it.
model_name = "mistralai/Mistral-7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
pipe = pipeline("text-generation", tokenizer=tokenizer, model=model)
9
 
10
# Debater personas: display label (with a colored-dot emoji) mapped to the
# behavioral description that gets spliced into each prompt.
personas = {
    "🟒 Optimist": (
        "An individual who sees the positive side and hopeful outcomes of any topic."
    ),
    "πŸ”΄ Pessimist": (
        "A critic who focuses on the flaws, risks, and downsides of the topic."
    ),
    "🟑 Neutral": (
        "An unbiased observer who presents a balanced and fair viewpoint."
    ),
}
16
 
17
def build_prompt(topic, persona_desc):
    """Assemble the instruction prompt sent to the model for one persona.

    The prompt ends with the literal marker "Your argument:" so the caller
    can split the generated text on it to strip the echoed prompt.
    """
    parts = [
        f"You are a skilled debater. Take on this persona: {persona_desc}",
        f'Debate Topic: "{topic}"',
        "Respond in detail and back your opinion with reasoning.",
        "Your argument:",
    ]
    return "\n".join(parts)
 
 
 
 
 
 
 
 
25
 
26
# Generate one argument per persona for the given topic.
def debate(topic):
    """Return a dict mapping persona label -> generated argument.

    Restores two safeguards the previous revision had and this one dropped:
    an empty-topic guard (no pointless model call on blank input) and a
    per-persona try/except so one failed generation does not crash the
    whole debate.
    """
    if not topic or not topic.strip():
        return {"⚠️ Warning": "Please enter a valid debate topic."}

    results = {}
    for label, desc in personas.items():
        prompt = build_prompt(topic, desc)
        try:
            output = pipe(prompt, max_new_tokens=200, temperature=0.9, top_p=0.95)[0]["generated_text"]
            # The pipeline echoes the prompt; keep only what follows the marker.
            results[label] = output.split("Your argument:")[-1].strip()
        except Exception as e:
            # Surface the failure inline for this persona instead of raising.
            results[label] = f"❌ Error generating response: {e}"
    return results
35
+
36
# Gradio callback: flatten the per-persona results into one Markdown string.
def run_debate(topic):
    """Run the debate and render each persona's argument as a Markdown section."""
    sections = []
    for persona, argument in debate(topic).items():
        sections.append(f"**{persona}**:\n{argument}")
    return "\n\n".join(sections)
40
+
41
# Build the Gradio UI and start serving as soon as the module is executed.
demo = gr.Interface(
    fn=run_debate,
    inputs=gr.Textbox(label="Enter a Debate Topic"),
    outputs=gr.Markdown(),
    title="πŸŽ™οΈ Multi-Agent Debate Simulator",
    description="Debates from multiple perspectives using Mistral-7B on Hugging Face πŸ€—.",
)
demo.launch()