OneStarDao committed on
Commit
c92d178
·
verified ·
1 Parent(s): ac6bd9d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -68
app.py CHANGED
@@ -1,112 +1,97 @@
1
- import io, numpy as np, gradio as gr
2
  import matplotlib.pyplot as plt
3
  from PIL import Image
4
- from transformers import (
5
- AutoModelForCausalLM, AutoTokenizer, set_seed
6
- )
7
 
8
  import wfgy_sdk as w
9
  from wfgy_sdk.evaluator import compare_logits
10
  from wfgy_sdk.visual import plot_histogram
11
 
12
# ─────────────── tiny GPT-2 (fits free HF Space) ────────────────
# Module-level setup: load the smallest public GPT-2 checkpoint once at
# import time so every request reuses the same model instance.
MODEL = "sshleifer/tiny-gpt2"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForCausalLM.from_pretrained(MODEL)
set_seed(42)  # deterministic sampling/noise for reproducible demo output

# WFGY engine singleton used by every inference pass
ENGINE = w.get_engine()
 
19
 
20
# ───────────────────────────── core helper ──────────────────────────────
def one_pass(prompt: str, boost: float):
    """Run one raw-vs-WFGY comparison for *prompt*.

    Returns (raw_txt, mod_txt, metrics, raw_l, mod_l): the prompt plus the
    argmax next token before/after modulation, the comparison metrics dict,
    and both logit vectors.
    """
    # next-token logits of the final position from tiny GPT-2
    ids = tokenizer(prompt, return_tensors="pt").input_ids
    raw_logits = model(ids).logits[0, -1].detach().cpu().numpy()

    # demo vectors – boost multiplies semantic distance
    # (G is a random unit vector; I drifts from G by Gaussian noise)
    G = np.random.randn(256); G /= np.linalg.norm(G)
    I = G + np.random.normal(scale=boost, size=256)

    mod_logits = ENGINE.run(I, G, raw_logits, bbmc_scale=boost)

    metrics = compare_logits(raw_logits, mod_logits)
    return (
        prompt + tokenizer.decode(int(raw_logits.argmax())),
        prompt + tokenizer.decode(int(mod_logits.argmax())),
        metrics, raw_logits, mod_logits
    )
38
-
39
def wfgy_pipeline(prompt: str, enable: bool, boost: float):
    """Gradio callback: returns (raw_text, modulated_text, stats_html, image).

    With *enable* off the pass runs at boost 0.0 as a baseline; all failures
    are caught and surfaced as red HTML so the UI never shows a traceback.
    """
    if not prompt.strip():
        return "", "", "<i>Please enter a prompt.</i>", None

    try:
        raw_txt, mod_txt, met, rl, ml = one_pass(prompt, boost if enable else 0.0)

        # safety: if enable & variance drop < 5 %, force BBCR collapse once
        # (re-run once with a 1.8x stronger boost to make the effect visible)
        if enable and (1 - met["std_ratio"]) < .05:
            _, mod_txt, met, rl, ml = one_pass(prompt, boost * 1.8)

        stats = (
            f"<b>variance ▼ {(1-met['std_ratio'])*100:.0f}%</b> &nbsp;|&nbsp; "
            f"<b>KL {met['kl_divergence']:.2f}</b> &nbsp;|&nbsp; "
            f"top-1 {'✔' if met['top1_shift'] else '✘'}"
        )

        # histogram → PIL (render to an in-memory PNG for gr.Image)
        fig = plot_histogram(rl, ml) or plt.gcf()
        buf = io.BytesIO()
        fig.savefig(buf, format="png", bbox_inches="tight")
        plt.close(fig)
        img = Image.open(buf)

        return raw_txt, mod_txt, stats, img

    except Exception as exc:
        # surface any failure in the metrics pane instead of crashing the UI
        return "", "", f"<b style='color:red'>Error:</b> {exc}", None
 
67
 
68
# ────────────────────────────── UI layout ───────────────────────────────
with gr.Blocks(title="WFGY Variance Gate", theme=gr.themes.Soft()) as demo:
    # header / metric legend
    gr.Markdown(
        """
### 🧠 WFGY 1-click Variance Gate
**Turn any model—even GPT-2—into a calmer thinker.**
Move the slider and watch variance dive.

| Metric | Meaning |
| --- | --- |
| **variance ▼** | logits get less noisy |
| **KL** | distribution really changed |
| **top-1** | most-likely token swapped ✔ / ✘ |
"""
    )

    # input widgets
    prompt = gr.Textbox(label="Prompt", lines=2, placeholder="Ask anything…")
    enable = gr.Checkbox(label="Enable WFGY", value=True)
    boost = gr.Slider(0, 3, value=1.2, step=.1,
                      label="Demo Boost (higher → bigger effect)")
    runbtn = gr.Button("Run")

    # output widgets
    raw_box = gr.Textbox(label="Raw GPT-2")
    mod_box = gr.Textbox(label="After WFGY")
    metrics = gr.HTML()
    hist = gr.Image(label="Logit distribution", width=460)

    # wire the button to the pipeline callback
    runbtn.click(wfgy_pipeline,
                 inputs=[prompt, enable, boost],
                 outputs=[raw_box, mod_box, metrics, hist])

    # footer / promo text
    gr.Markdown(
        """
**PDF mode ** – feed <code>I_am_not_lizardman/WFGY_1.0.pdf</code> to any chat-LLM,
prepend <code>Use WFGY:</code> and enjoy sharper answers.

⭐ <a href="https://github.com/onestardao/WFGY" target="_blank">
GitHub repo – star to unlock WFGY 2.0 (10 k ⭐ before 2025-08-01)
</a>

📂 Hidden folder <b>I_am_not_lizardman/</b> holds 8 + 1 “Challenge-Einstein” papers — tweet a screenshot if you find them!
"""
    )

demo.launch()
 
1
+ import io, inspect, numpy as np, gradio as gr
2
  import matplotlib.pyplot as plt
3
  from PIL import Image
4
+ from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
 
 
5
 
6
  import wfgy_sdk as w
7
  from wfgy_sdk.evaluator import compare_logits
8
  from wfgy_sdk.visual import plot_histogram
9
 
10
# ────────── tiny GPT-2 (even the smallest model shows the gap) ──────────
# Module-level setup: load the checkpoint once so every request reuses it.
MODEL = "sshleifer/tiny-gpt2"
tok = AutoTokenizer.from_pretrained(MODEL)
mdl = AutoModelForCausalLM.from_pretrained(MODEL)
set_seed(42)  # deterministic sampling/noise for reproducible demo output

ENGINE = w.get_engine()
BOOST = 1.2  # default demo amplification factor (slider initial value)

# Check whether this SDK build's run() accepts the bbmc_scale keyword,
# so we can fall back gracefully on older SDK versions.
_RUN_HAS_SCALE = "bbmc_scale" in inspect.signature(ENGINE.run).parameters
 
 
 
21
 
22
# ────────────────── core inference ──────────────────
def run_wfgy(prompt: str, enable: bool, boost: float):
    """Gradio callback: one raw-vs-WFGY comparison pass.

    Parameters
    ----------
    prompt : str    user text fed to tiny GPT-2
    enable : bool   apply the WFGY boost (False runs a zero-noise baseline)
    boost : float   demo amplification of the semantic distance

    Returns (raw_text, modulated_text, stats_html, histogram_image);
    on empty input or any internal failure the image slot is None and the
    stats slot carries an HTML message, so the UI never shows a traceback.
    """
    if not prompt.strip():
        return "", "", "<i>Please enter a prompt.</i>", None

    try:
        # next-token logits of the final position
        ids = tok(prompt, return_tensors="pt").input_ids
        rawL = mdl(ids).logits[0, -1].detach().cpu().numpy()

        # synthetic semantic vectors: G is a random unit vector,
        # I drifts from G by Gaussian noise scaled by the boost
        G = np.random.randn(256)
        G /= np.linalg.norm(G)
        I = G + np.random.normal(scale=boost if enable else 0.0, size=256)

        # pass bbmc_scale only when this SDK version supports it;
        # older SDK builds raise TypeError on the keyword → plain call
        try:
            if enable and _RUN_HAS_SCALE:
                modL = ENGINE.run(I, G, rawL, bbmc_scale=boost)
            else:
                modL = ENGINE.run(I, G, rawL)
        except TypeError:  # legacy API
            modL = ENGINE.run(I, G, rawL)

        m = compare_logits(rawL, modL)

        stats = (f"<b>variance ▼ {(1-m['std_ratio'])*100:.0f}%</b> &nbsp;|&nbsp; "
                 f"<b>KL {m['kl_divergence']:.2f}</b> &nbsp;|&nbsp; "
                 f"top-1 {'✔' if m['top1_shift'] else '✘'}")

        # render the histogram into an in-memory PNG for gr.Image
        fig = plot_histogram(rawL, modL) or plt.gcf()
        buf = io.BytesIO()
        fig.savefig(buf, format="png", bbox_inches="tight")
        plt.close(fig)
        buf.seek(0)  # rewind explicitly before PIL reads the buffer
        img = Image.open(buf)

        raw_txt = prompt + tok.decode(int(rawL.argmax()))
        mod_txt = prompt + tok.decode(int(modL.argmax()))
        return raw_txt, mod_txt, stats, img

    except Exception as exc:
        # restore the previous version's safety net: surface failures
        # (model, metrics, plotting) in the UI instead of raising
        return "", "", f"<b style='color:red'>Error:</b> {exc}", None
59
 
60
# ──────────────────── Gradio interface ────────────────────
with gr.Blocks(theme=gr.themes.Soft(),
               title="WFGY Variance Gate") as demo:

    # header
    gr.Markdown(
        """
### 🧠 WFGY 1-click Variance Gate
Turn GPT-2 into a calmer thinker. Move the slider → watch variance dive.
""")

    # input widgets
    prompt = gr.Textbox(label="Prompt", lines=2, placeholder="Ask anything…")
    enable = gr.Checkbox(label="Enable WFGY", value=True)
    boost = gr.Slider(0, 3, value=BOOST, step=.1,
                      label="Demo Boost (higher → bigger effect)")
    runbtn = gr.Button("Run", variant="primary")

    # output widgets
    raw_box = gr.Textbox(label="Raw GPT-2")
    mod_box = gr.Textbox(label="After WFGY")
    metrics = gr.HTML()
    hist = gr.Image(label="Logit distribution", width=460)

    # wire the button to the inference callback
    runbtn.click(run_wfgy,
                 inputs=[prompt, enable, boost],
                 outputs=[raw_box, mod_box, metrics, hist])

    # footer / promo text
    gr.Markdown(
        """
**PDF mode** – feed <code>I_am_not_lizardman/WFGY_1.0.pdf</code> to any chat-LLM,
prepend <code>Use&nbsp;WFGY:</code> and watch replies get sharper. *Prompt revolution!*

⭐ <a href="https://github.com/onestardao/WFGY" target="_blank">
10 000 GitHub&nbsp;stars before&nbsp;2025-08-01 unlock WFGY 2.0
</a> – adaptive-gamma &amp; multimodal edition.

📂 Folder <b>I_am_not_lizardman/</b> hides eight “Challenge-Einstein” papers – find them!
""")

demo.launch()