Go-Raw committed on
Commit 5aeb336 · verified · 1 Parent(s): c2ebf8a

Update app.py

Files changed (1)
  1. app.py +17 -16
app.py CHANGED
@@ -1,35 +1,36 @@
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 import gradio as gr
 
-# Use lightweight, instruction-tuned model
-model_name = "google/flan-t5-base"
+model_name = "EleutherAI/gpt-neo-1.3B"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
-pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
+model = AutoModelForCausalLM.from_pretrained(model_name)
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
-# Personas
 personas = {
-    "🟒 Optimist": "Provide a positive and hopeful opinion, mentioning 2 benefits.",
-    "πŸ”΄ Pessimist": "Provide a critical opinion, mentioning 2 drawbacks.",
-    "🟑 Neutral": "Provide a balanced perspective, listing pros and cons."
+    "🟒 Optimist": "Give an optimistic argument with 2 benefits and one example.",
+    "πŸ”΄ Pessimist": "Give a critical argument with 2 drawbacks and one example.",
+    "🟑 Neutral": "List pros and cons and give a neutral summary."
 }
 
-# Debate function
 def generate_debate(topic):
     results = []
     for label, instruction in personas.items():
-        prompt = f"You are a debater. Topic: '{topic}'. {instruction}"
-        response = pipe(prompt, max_new_tokens=120, temperature=0.7)[0]['generated_text'].strip()
-        results.append(f"### {label}\n{response}")
+        prompt = (
+            f"Debate Topic: {topic}\n"
+            f"Persona: {label}\n"
+            f"{instruction}\n"
+            f"Write 3–4 sentences."
+        )
+        output = pipe(prompt, max_new_tokens=180, temperature=0.7)[0]['generated_text']
+        results.append(f"### {label}\n{output.strip()}")
     return "\n\n".join(results)
 
-# Gradio Interface
 demo = gr.Interface(
     fn=generate_debate,
     inputs=gr.Textbox(label="Debate Topic"),
     outputs=gr.Markdown(),
-    title="πŸŽ™οΈ Multi-Agent Debate Simulator",
-    description="Debate from 3 perspectives (Optimist, Pessimist, Neutral) using FLAN-T5-Base."
+    title="πŸŽ™οΈ Debate Simulator with GPT-Neo",
+    description="Debate with Optimist, Pessimist & Neutral perspectives using GPT-Neo-1.3B."
 )
 
 demo.launch()
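
For reference, a minimal standalone sketch of the updated generation path, not part of the commit: the example topic, do_sample=True, and return_full_text=False are assumptions added so that temperature actually affects sampling and the prompt is not echoed back into the output.

# Standalone smoke test mirroring the new GPT-Neo pipeline from the diff.
# do_sample and return_full_text are assumptions, not in the committed code.
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

model_name = "EleutherAI/gpt-neo-1.3B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

prompt = (
    "Debate Topic: remote work\n"  # hypothetical example topic
    "Persona: 🟒 Optimist\n"
    "Give an optimistic argument with 2 benefits and one example.\n"
    "Write 3-4 sentences."
)

out = pipe(
    prompt,
    max_new_tokens=180,
    do_sample=True,          # needed for temperature to take effect
    temperature=0.7,
    return_full_text=False,  # keep only the continuation, not the echoed prompt
)[0]["generated_text"]
print(out.strip())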