Go-Raw committed on
Commit
cba3f60
Β·
verified Β·
1 Parent(s): 613701b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -11
app.py CHANGED
@@ -1,33 +1,35 @@
1
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
2
  import gradio as gr
3
 
4
- model_name = "facebook/mbart-large-50-many-to-many-mmt"
 
5
  tokenizer = AutoTokenizer.from_pretrained(model_name)
6
  model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
7
  pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
8
 
9
- # Role-based prompt
10
  personas = {
11
- "🟒 Optimist": "You are an optimist. Defend the topic with positivity. Give 2 reasons.",
12
- "πŸ”΄ Pessimist": "You are a pessimist. Criticize the topic. Mention 2 problems.",
13
- "🟑 Neutral": "You are a neutral thinker. Present pros and cons fairly."
14
  }
15
 
 
16
  def generate_debate(topic):
17
  responses = {}
18
  for label, instruction in personas.items():
19
- prompt = f"{instruction}\nDebate Topic: {topic}"
20
- result = pipe(prompt, max_new_tokens=120, do_sample=True, temperature=0.7)[0]["generated_text"]
21
  responses[label] = result.strip()
22
- return "\n\n".join([f"**{k}**: {v}" for k, v in responses.items()])
23
 
24
  # Gradio UI
25
  demo = gr.Interface(
26
  fn=generate_debate,
27
- inputs=gr.Textbox(label="Enter a Debate Topic"),
28
  outputs=gr.Markdown(),
29
- title="πŸ€– Debate Club: Multi-Agent Edition",
30
- description="A mini debate simulation using different perspectives powered by MBART. Runs on free-tier CPU."
31
  )
32
 
33
  demo.launch()
 
1
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
2
  import gradio as gr
3
 
4
# Load the instruction-tuned BigScience T0pp checkpoint once at startup.
# NOTE(review): T0pp is an 11B-parameter model — presumably far too large
# for a free-tier CPU Space; confirm the deployment target has the memory.
MODEL_ID = "bigscience/T0pp"

_tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
_model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)

# One shared generation pipeline, reused for every persona's turn.
pipe = pipeline("text2text-generation", model=_model, tokenizer=_tokenizer)
9
 
10
# Debater personas: maps a display label to the instruction handed to the
# model for that speaker's turn.
personas = dict([
    ("🟒 Optimist", "Give a hopeful, positive opinion with 2 reasons."),
    ("πŸ”΄ Pessimist", "Criticize the topic and point out 2 problems."),
    ("🟑 Neutral", "Give a fair and balanced view with pros and cons."),
])
16
 
17
# Debate function
def generate_debate(topic):
    """Generate one short debate turn per persona for *topic*.

    Args:
        topic: The debate subject entered by the user.

    Returns:
        A Markdown string with one bolded section per persona,
        separated by blank lines.
    """
    responses = {}
    for label, instruction in personas.items():
        prompt = f"You are a debater. {instruction}\nDebate Topic: {topic}"
        # Fix: temperature is only honored when sampling is enabled; the
        # previous revision passed temperature=0.7 without do_sample=True,
        # so the pipeline decoded greedily (and warned). Restore sampling
        # as in the earlier version of this file.
        result = pipe(
            prompt, max_new_tokens=150, do_sample=True, temperature=0.7
        )[0]["generated_text"]
        responses[label] = result.strip()
    return "\n\n".join(f"**{k}**:\n{v}" for k, v in responses.items())
25
 
26
# Gradio front-end: a single textbox in, a Markdown debate transcript out.
demo = gr.Interface(
    fn=generate_debate,
    inputs=gr.Textbox(label="Debate Topic"),
    outputs=gr.Markdown(),
    title="🎀 Multi-Agent Debate Simulator (T0pp)",
    description=(
        "Simulates a debate with multiple perspectives using the "
        "instruction-tuned BigScience T0pp model on Hugging Face πŸ€—."
    ),
)

demo.launch()