Go-Raw committed on
Commit
c2ebf8a
·
verified ·
1 Parent(s): b339a4f

Update app.py

Files changed (1)
  1. app.py +14 -14
app.py CHANGED
@@ -1,35 +1,35 @@
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
 import gradio as gr
 
-# Load T0pp (instruction-tuned)
-model_name = "tencent/Hunyuan-A13B-Instruct"
+# Use lightweight, instruction-tuned model
+model_name = "google/flan-t5-base"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
 
-# Debater personas
+# Personas
 personas = {
-    "🟢 Optimist": "Give a hopeful, positive opinion with 2 reasons.",
-    "🔴 Pessimist": "Criticize the topic and point out 2 problems.",
-    "🟡 Neutral": "Give a fair and balanced view with pros and cons."
+    "🟢 Optimist": "Provide a positive and hopeful opinion, mentioning 2 benefits.",
+    "🔴 Pessimist": "Provide a critical opinion, mentioning 2 drawbacks.",
+    "🟡 Neutral": "Provide a balanced perspective, listing pros and cons."
 }
 
 # Debate function
 def generate_debate(topic):
-    responses = {}
+    results = []
     for label, instruction in personas.items():
-        prompt = f"You are a debater. {instruction}\nDebate Topic: {topic}"
-        result = pipe(prompt, max_new_tokens=150, temperature=0.7)[0]['generated_text']
-        responses[label] = result.strip()
-    return "\n\n".join([f"**{k}**:\n{v}" for k, v in responses.items()])
+        prompt = f"You are a debater. Topic: '{topic}'. {instruction}"
+        response = pipe(prompt, max_new_tokens=120, temperature=0.7)[0]['generated_text'].strip()
+        results.append(f"### {label}\n{response}")
+    return "\n\n".join(results)
 
-# Gradio UI
+# Gradio Interface
 demo = gr.Interface(
     fn=generate_debate,
     inputs=gr.Textbox(label="Debate Topic"),
     outputs=gr.Markdown(),
-    title="🎤 Multi-Agent Debate Simulator (T0pp)",
-    description="Simulates a debate with multiple perspectives using the instruction-tuned BigScience T0pp model on Hugging Face 🤗."
+    title="🎙️ Multi-Agent Debate Simulator",
+    description="Debate from 3 perspectives (Optimist, Pessimist, Neutral) using FLAN-T5-Base."
 )
 
 demo.launch()
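
For a quick local check of the change, the snippet below is a minimal sketch that exercises the same text2text-generation pipeline outside Gradio. It is not part of the commit; the example topic and the do_sample=True flag (needed for the temperature setting to have any effect) are assumptions.

# Illustrative sketch, not part of the commit: smoke-test the FLAN-T5 pipeline used by app.py
from transformers import pipeline

pipe = pipeline("text2text-generation", model="google/flan-t5-base")
prompt = "You are a debater. Topic: 'remote work'. Provide a balanced perspective, listing pros and cons."
# do_sample=True enables sampling so temperature actually influences the output
out = pipe(prompt, max_new_tokens=120, temperature=0.7, do_sample=True)
print(out[0]["generated_text"].strip())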