Go-Raw committed · Commit 86d3714 (verified) · 1 parent: ccc3eb5

Update app.py

Files changed (1):
  1. app.py +32 -34
app.py CHANGED
@@ -1,45 +1,43 @@
+import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 import gradio as gr
 
-# Load Falcon-7B-Instruct (open and free)
-model_name = "tiiuae/falcon-7b-instruct"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
+# Load Mistral-7B with 8-bit quantization (saves memory!)
+model_id = "mistralai/Mistral-7B-Instruct-v0.1"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    device_map="auto",         # Automatically assign layers to available GPU/CPU
+    load_in_8bit=True,         # Use 8-bit quantization
+    torch_dtype=torch.float16  # Reduce precision to save memory
+)
+
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
-# Define personas
+# Viewpoints
 personas = {
-    "🟒 Optimist": "Someone who sees the good and hopeful side of any issue.",
-    "πŸ”΄ Pessimist": "Someone who focuses on the risks, negatives, or problems in any issue.",
-    "🟑 Neutral": "Someone who presents a balanced, unbiased view based on logic."
+    "🟒 Optimist": "Respond with hope and positivity",
+    "πŸ”΄ Pessimist": "Respond critically and negatively",
+    "🟑 Neutral": "Respond with a balanced, unbiased tone"
 }
 
-# Prompt template
-def build_prompt(topic, style):
-    return f"""You are a debater. Take this persona: {style}.
-Debate Topic: "{topic}"
-Provide a thoughtful and opinionated response with reasoning.
-Answer:"""
-
-# Generate responses
-def debate(topic):
-    results = {}
-    for label, persona in personas.items():
-        prompt = build_prompt(topic, persona)
-        output = pipe(prompt, max_new_tokens=200, temperature=0.9)[0]["generated_text"]
-        answer = output.split("Answer:")[-1].strip()
-        results[label] = answer
-    return results
+# Generate debate
+def generate_debate(topic):
+    responses = {}
+    for label, style in personas.items():
+        prompt = f"[INST] You are a debater. {style}. Topic: '{topic}'. Give a short opinion. [/INST]"
+        result = pipe(prompt, max_new_tokens=150, temperature=0.7, do_sample=True)[0]["generated_text"]
+        responses[label] = result.strip()
+    return "\n\n".join([f"**{k}**: {v}" for k, v in responses.items()])
 
-# Gradio interface
-def run_debate(topic):
-    responses = debate(topic)
-    return "\n\n".join([f"**{k}**:\n{v}" for k, v in responses.items()])
-
-gr.Interface(
-    fn=run_debate,
+# Gradio app
+demo = gr.Interface(
+    fn=generate_debate,
     inputs=gr.Textbox(label="Enter a Debate Topic"),
     outputs=gr.Markdown(),
-    title="πŸŽ™οΈ Multi-Agent Debate Simulator",
-    description="Simulates multi-perspective debates using Falcon-7B on Hugging Face πŸ€—."
-).launch()
+    title="🧠 Multi-Agent Debate Simulator (Mistral 7B)",
+    description="Debate with different perspectives using the Mistral-7B-Instruct model (quantized)."
+)
+
+demo.launch()
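
A note on the new loading block: load_in_8bit=True only works when bitsandbytes is installed (and device_map="auto" needs accelerate), and newer transformers releases deprecate passing it straight to from_pretrained in favor of a quantization config object. A minimal sketch of that form, assuming a transformers version that ships BitsAndBytesConfig:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Sketch only: the same 8-bit load expressed through BitsAndBytesConfig
# (assumes bitsandbytes and accelerate are installed).
model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.1",
    device_map="auto",                       # spread layers across GPU/CPU
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    torch_dtype=torch.float16,               # dtype for the non-quantized modules
)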
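
The hand-written [INST] ... [/INST] wrapper in generate_debate matches Mistral-Instruct's prompt format, but the tokenizer can produce it for you. A hedged sketch using apply_chat_template (available in recent transformers versions; style and topic are the loop variables from the commit):

# Sketch: build the prompt from the model's own chat template instead of
# hand-formatting the [INST] tags.
messages = [{
    "role": "user",
    "content": f"You are a debater. {style}. Topic: '{topic}'. Give a short opinion.",
}]
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,              # return a string for the pipeline
    add_generation_prompt=True,  # close the user turn so the model answers
)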
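
Also worth knowing: the text-generation pipeline returns the prompt plus the completion by default, so result.strip() will echo the [INST] prompt into the Markdown output (the old code trimmed this by splitting on "Answer:"). The pipeline's return_full_text flag avoids that; a sketch with the commit's own parameters:

# Sketch: return only the completion, not the echoed prompt.
result = pipe(
    prompt,
    max_new_tokens=150,
    temperature=0.7,
    do_sample=True,
    return_full_text=False,  # drop the prompt from generated_text
)[0]["generated_text"]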