Go-Raw committed on
Commit ccc3eb5 · verified · 1 Parent(s): 8ae89ff

Update app.py

Files changed (1)
  1. app.py +21 -23
app.py CHANGED
@@ -1,47 +1,45 @@
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
  import gradio as gr

- # Load stronger open LLM (Mistral-7B-Instruct)
- model_name = "mistralai/Mistral-7B-Instruct-v0.1"
+ # Load Falcon-7B-Instruct (open and free)
+ model_name = "tiiuae/falcon-7b-instruct"
  tokenizer = AutoTokenizer.from_pretrained(model_name)
  model = AutoModelForCausalLM.from_pretrained(model_name)
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

- # Define agent styles
+ # Define personas
  personas = {
-     "🟢 Optimist": "An individual who sees the positive side and hopeful outcomes of any topic.",
-     "🔴 Pessimist": "A critic who focuses on the flaws, risks, and downsides of the topic.",
-     "🟡 Neutral": "An unbiased observer who presents a balanced and fair viewpoint."
+     "🟢 Optimist": "Someone who sees the good and hopeful side of any issue.",
+     "🔴 Pessimist": "Someone who focuses on the risks, negatives, or problems in any issue.",
+     "🟡 Neutral": "Someone who presents a balanced, unbiased view based on logic."
  }

  # Prompt template
- def build_prompt(topic, persona_desc):
-     return (
-         f"You are a skilled debater. Take on this persona: {persona_desc}\n"
-         f"Debate Topic: \"{topic}\"\n"
-         f"Respond in detail and back your opinion with reasoning.\n"
-         f"Your argument:"
-     )
+ def build_prompt(topic, style):
+     return f"""You are a debater. Take this persona: {style}.
+ Debate Topic: "{topic}"
+ Provide a thoughtful and opinionated response with reasoning.
+ Answer:"""

- # Generate debate
+ # Generate responses
  def debate(topic):
      results = {}
-     for label, desc in personas.items():
-         prompt = build_prompt(topic, desc)
-         output = pipe(prompt, max_new_tokens=200, temperature=0.9, top_p=0.95)[0]["generated_text"]
-         argument = output.split("Your argument:")[-1].strip()
-         results[label] = argument
+     for label, persona in personas.items():
+         prompt = build_prompt(topic, persona)
+         output = pipe(prompt, max_new_tokens=200, temperature=0.9)[0]["generated_text"]
+         answer = output.split("Answer:")[-1].strip()
+         results[label] = answer
      return results

- # Gradio UI
+ # Gradio interface
  def run_debate(topic):
-     response = debate(topic)
-     return "\n\n".join([f"**{k}**:\n{v}" for k, v in response.items()])
+     responses = debate(topic)
+     return "\n\n".join([f"**{k}**:\n{v}" for k, v in responses.items()])

  gr.Interface(
      fn=run_debate,
      inputs=gr.Textbox(label="Enter a Debate Topic"),
      outputs=gr.Markdown(),
      title="🎙️ Multi-Agent Debate Simulator",
-     description="Debates from multiple perspectives using Mistral-7B on Hugging Face 🤗."
+     description="Simulates multi-perspective debates using Falcon-7B on Hugging Face 🤗."
  ).launch()
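
Note: as committed, app.py loads the full Falcon-7B-Instruct weights in float32, which takes roughly 28 GB of memory. A minimal lower-memory sketch, assuming a GPU runtime with `torch` and `accelerate` installed (`torch_dtype` and `device_map` are standard `from_pretrained` arguments; nothing below is part of this commit):

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

model_name = "tiiuae/falcon-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Assumption: a GPU runtime. bfloat16 roughly halves the memory
# footprint (~14 GB instead of ~28 GB), and device_map="auto" lets
# accelerate place the weights on the available device(s).
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
```

Separately, passing `return_full_text=False` in the `pipe(...)` call would return only the newly generated text, which would make the `output.split("Answer:")` parsing step unnecessary.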