Go-Raw committed on
Commit 09585da · verified · 1 Parent(s): a0260ad

Update app.py

Files changed (1)
  1. app.py +15 -25
app.py CHANGED
@@ -1,43 +1,33 @@
-import torch
-from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
 import gradio as gr
 
-# Load Mistral-7B with 8-bit quantization (saves memory!)
-model_id = "mistralai/Mistral-7B-Instruct-v0.3"
-tokenizer = AutoTokenizer.from_pretrained(model_id)
-
-model = AutoModelForCausalLM.from_pretrained(
-    model_id,
-    device_map="auto",         # Automatically assign layers to available GPU/CPU
-    load_in_8bit=True,         # Use 8-bit quantization
-    torch_dtype=torch.float16  # Reduce precision to save memory
-)
-
-pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
-
-# Viewpoints
+model_name = "facebook/mbart-large-50-many-to-many-mmt"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
+
+# Role-based prompt
 personas = {
-    "🟢 Optimist": "Respond with hope and positivity",
-    "🔴 Pessimist": "Respond critically and negatively",
-    "🟡 Neutral": "Respond with a balanced, unbiased tone"
+    "🟢 Optimist": "You are an optimist. Defend the topic with positivity. Give 2 reasons.",
+    "🔴 Pessimist": "You are a pessimist. Criticize the topic. Mention 2 problems.",
+    "🟡 Neutral": "You are a neutral thinker. Present pros and cons fairly."
 }
 
-# Generate debate
 def generate_debate(topic):
     responses = {}
-    for label, style in personas.items():
-        prompt = f"[INST] You are a debater. {style}. Topic: '{topic}'. Give a short opinion. [/INST]"
-        result = pipe(prompt, max_new_tokens=150, temperature=0.7, do_sample=True)[0]["generated_text"]
+    for label, instruction in personas.items():
+        prompt = f"{instruction}\nDebate Topic: {topic}"
+        result = pipe(prompt, max_new_tokens=120, do_sample=True, temperature=0.7)[0]["generated_text"]
         responses[label] = result.strip()
     return "\n\n".join([f"**{k}**: {v}" for k, v in responses.items()])
 
-# Gradio app
+# Gradio UI
 demo = gr.Interface(
     fn=generate_debate,
     inputs=gr.Textbox(label="Enter a Debate Topic"),
     outputs=gr.Markdown(),
-    title="🧠 Multi-Agent Debate Simulator (Mistral 7B)",
-    description="Debate with different perspectives using the Mistral-7B-Instruct model (quantized)."
+    title="🤖 Debate Club: Multi-Agent Edition",
+    description="A mini debate simulation using different perspectives powered by MBART. Runs on free-tier CPU."
 )
 
 demo.launch()
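
Note on the new model: facebook/mbart-large-50-many-to-many-mmt is a many-to-many machine-translation model, not an instruction-tuned LM, so the plain text2text-generation call above is likely to translate or echo the prompt rather than argue a position. For reference, a minimal sketch of the usage documented on the model card, where the source language is set on the tokenizer and the output language is forced via forced_bos_token_id; the English-to-English round trip and the sample input here are illustrative only.

from transformers import MBartForConditionalGeneration, MBart50TokenizerFast

model_name = "facebook/mbart-large-50-many-to-many-mmt"
tokenizer = MBart50TokenizerFast.from_pretrained(model_name)
model = MBartForConditionalGeneration.from_pretrained(model_name)

tokenizer.src_lang = "en_XX"  # language of the input text
encoded = tokenizer("Remote work is good for society.", return_tensors="pt")
generated = model.generate(
    **encoded,
    forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"],  # language to generate in
    max_new_tokens=120,
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])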
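
Note on the removed path: passing load_in_8bit=True straight to from_pretrained is deprecated in recent transformers releases in favor of a BitsAndBytesConfig, and 8-bit loading requires the bitsandbytes package with a CUDA GPU either way, which a free-tier CPU Space does not provide; that is presumably why this commit drops it. A minimal sketch of the current-API equivalent, under those assumptions:

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "mistralai/Mistral-7B-Instruct-v0.3"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",                                          # spread layers across available devices
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),  # 8-bit weights via bitsandbytes
)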