OneStarDao committed on
Commit
914ff43
·
verified ·
1 Parent(s): 2401890

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -51
app.py CHANGED
@@ -1,66 +1,72 @@
1
- import inspect, numpy as np, gradio as gr
2
- import wfgy_sdk as w
 
 
 
 
 
3
  from wfgy_sdk.evaluator import compare_logits
4
- from wfgy_sdk.visual import plot_histogram
5
 
6
- ENGINE = w.get_engine()
7
- RUN_HAS_ARG = "bbmc_scale" in inspect.signature(ENGINE.run).parameters
8
- BOOST_DEFAULT = 1.2
 
 
 
 
 
 
9
 
10
- def infer(prompt, enabled, boost):
11
- # ---------------- semantic vectors ----------------
12
- G = np.random.randn(256); G /= np.linalg.norm(G)
13
- I = G + np.random.normal(scale=boost, size=256)
14
 
15
- # fake logits (demo) β€”β€” vocab = 50257
16
- raw_logits = np.random.randn(50257)
 
17
 
18
- if enabled:
19
- if RUN_HAS_ARG:
20
- mod_logits = ENGINE.run(I, G, raw_logits, bbmc_scale=boost)
21
- else:
22
- mod_logits = ENGINE.run(I, G, raw_logits)
 
 
23
  else:
24
- mod_logits = raw_logits.copy()
25
 
26
- raw_txt = prompt + " " + str(np.argmax(raw_logits))
27
- mod_txt = prompt + " " + str(np.argmax(mod_logits))
28
- metrics = compare_logits(raw_logits, mod_logits)
29
 
30
- img = plot_histogram(raw_logits, mod_logits)
31
- var_d = f"{(1-metrics['std_ratio'])*100:.0f} %"
32
- kl = f"{metrics['kl_divergence']:.02f}"
33
- top1 = "βœ”" if metrics["top1_shift"] else "✘"
34
- meter = f"variance β–Ό {var_d} | KL {kl} | top-1 {top1}"
 
 
35
 
36
- return raw_txt, mod_txt, meter, img
37
 
38
  with gr.Blocks(title="WFGY 1-click Variance Gate") as demo:
39
- gr.Markdown("### 🧠 WFGY 1-click Variance Gate\n"
40
- "Turn GPT-2 into a calmer thinker. Move the slider β†’ watch variance dive.")
41
- prompt = gr.Textbox(label="Prompt", lines=2,
42
- value="Turn GPT-2 into a calmer thinker. Move the slider β†’ watch variance dive.")
43
- enabled = gr.Checkbox(label="Enable WFGY", value=True)
44
- boost = gr.Slider(0, 3, value=BOOST_DEFAULT,
45
- label="Demo Boost (higher β†’ bigger effect)")
46
- run_btn = gr.Button("Run", variant="primary")
47
 
48
  with gr.Row():
49
- raw_out = gr.Textbox(label="Raw GPT-2")
50
- mod_out = gr.Textbox(label="After WFGY")
51
- meter = gr.Markdown()
52
- hist = gr.Plot(label="Logit distribution")
53
-
54
- run_btn.click(fn=infer,
55
- inputs=[prompt, enabled, boost],
56
- outputs=[raw_out, mod_out, meter, hist])
57
-
58
- gr.Markdown(
59
- "**PDF mode** β€” feed `I_am_not_lizardman/WFGY_1.0.pdf` to any chat-LLM, prepend "
60
- "`Use WFGY:` and watch replies get sharper. Prompt revolution!\n\n"
61
- "⭐ [Star us on GitHub](https://github.com/onestardao/WFGY) β€” 10 000 stars before "
62
- "**2025-08-01** unlocks WFGY 2.0 (adaptive-gamma + multimodal)."
63
  )
64
 
65
- if __name__ == "__main__":
66
- demo.launch()
 
"""
HF Space · WFGY 1-click Variance Gate

Loads a tiny GPT-2 checkpoint, its tokenizer, and the WFGY engine once at
import time so every Gradio request reuses the same instances.
"""

# One import per line (PEP 8), grouped third-party then local.
import gradio as gr
import numpy as np
import torch  # noqa: F401 — presumably required as the transformers backend; verify

from transformers import AutoModelForCausalLM, AutoTokenizer

from wfgy_sdk import get_engine
from wfgy_sdk.evaluator import compare_logits

# Tiny 2-layer GPT-2 keeps the Space fast to boot; swap for "gpt2" if you
# want real text quality at the cost of download/load time.
MODEL = "sshleifer/tiny-gpt2"
tok = AutoTokenizer.from_pretrained(MODEL)
mdl = AutoModelForCausalLM.from_pretrained(MODEL)
ENGINE = get_engine()
def run(prompt, enable, boost):
    """Generate one next-token continuation with and without WFGY.

    Parameters
    ----------
    prompt : str
        User text; a blank prompt short-circuits with placeholder outputs.
    enable : bool
        When False, the "modulated" logits are simply the raw logits.
    boost : float
        Demo gain forwarded to the engine (higher → bigger effect).

    Returns
    -------
    tuple[str, str, str]
        (raw continuation, WFGY continuation, metric headline) — always
        three values, matching the three wired Gradio output components.
    """
    if not prompt.strip():
        # BUG FIX: the click handler declares three outputs, so this early
        # path must also yield three values; the previous single
        # gr.update(...) return made Gradio fail on blank input.
        return "", "", "-"

    # Raw next-token logits from the tiny GPT-2.
    inputs = tok(prompt, return_tensors="pt")
    rawL = mdl(**inputs).logits[0, -1].detach().cpu().float().numpy()

    # Demo-only fake semantic vectors (a real pipeline would derive these
    # from the text itself).
    I = np.random.randn(256).astype(np.float32)
    G = np.random.randn(256).astype(np.float32)

    if enable:
        modL = ENGINE.run(
            logits=rawL,
            input_vec=I,
            ground_vec=G,
            boost=boost,
        )
    else:
        modL = rawL

    raw_txt = prompt + tok.decode(int(rawL.argmax()))
    mod_txt = prompt + tok.decode(int(modL.argmax()))
    m = compare_logits(rawL, modL)

    # NOTE(review): assumes compare_logits returns the keys
    # 'var_drop' / 'kl' / 'top1' — confirm against wfgy_sdk.evaluator.
    headline = (
        f"variance ▼ {int(m['var_drop'] * 100)} % | "
        f"KL {m['kl']:.2f} | "
        f"top-1 {'✔' if m['top1'] else '✘'}"
    )

    return raw_txt, mod_txt, headline
# ------------------------------------------------------------------ UI --
with gr.Blocks(title="WFGY 1-click Variance Gate") as demo:
    gr.Markdown(
        "## 🧠 WFGY 1-click Variance Gate\n"
        "Turn GPT-2 into a calmer thinker. Move the slider → watch variance dive."
    )

    prompt = gr.Textbox(label="Prompt")
    enable = gr.Checkbox(value=True, label="Enable WFGY")
    boost = gr.Slider(0.5, 3.0, value=1.0,
                      label="Demo Boost (higher → bigger effect)")

    run_btn = gr.Button("Run", variant="primary")

    with gr.Row():
        out_raw = gr.Textbox(label="Raw GPT-2")
        out_mod = gr.Textbox(label="After WFGY")

    headline = gr.Markdown("")

    # Three outputs here — run() must always return exactly three values.
    run_btn.click(
        run,
        inputs=[prompt, enable, boost],
        outputs=[out_raw, out_mod, headline],
    )

if __name__ == "__main__":
    # Guard restored from the previous revision: lets the module be imported
    # (e.g. by tests) without starting a server; HF Spaces runs it as main.
    # NOTE(review): `concurrency_count` was removed in Gradio 4.x — if this
    # Space pins gradio>=4, use demo.queue(default_concurrency_limit=2).
    demo.queue(concurrency_count=2).launch()