Update app.py
app.py
CHANGED
@@ -1,12 +1,15 @@
-import io, numpy as np, gradio as gr
-from wfgy_sdk.evaluator import compare_logits
-from wfgy_sdk.visual import plot_histogram
-
-from PIL import Image
+import io, numpy as np, gradio as gr
 import matplotlib.pyplot as plt
-from
+from PIL import Image
+from transformers import (
+    AutoModelForCausalLM, AutoTokenizer, set_seed
+)
+
+import wfgy_sdk as w
+from wfgy_sdk.evaluator import compare_logits
+from wfgy_sdk.visual import plot_histogram

-#
+# ─────────────── tiny GPT-2 (fits free HF Space) ────────────────
 MODEL = "sshleifer/tiny-gpt2"
 tokenizer = AutoTokenizer.from_pretrained(MODEL)
 model = AutoModelForCausalLM.from_pretrained(MODEL)
@@ -14,104 +17,96 @@ set_seed(42)

 ENGINE = w.get_engine()

-#
-def
-    """
-    boost ∈ [0,1] controls semantic deviation.
-    Larger boost → larger variance drop & KL.
-    """
-    if not prompt.strip():
-        return "", "", "<i>Please enter a prompt.</i>", None
-
-    # ─ raw logits from GPT-2 (single token) ─
+# ───────────────────────── core helper ──────────────────────────
+def one_pass(prompt: str, boost: float):
+    """Return (raw_txt, mod_txt, metrics, raw_l, mod_l)."""
     ids = tokenizer(prompt, return_tensors="pt").input_ids
     raw_logits = model(ids).logits[0, -1].detach().cpu().numpy()

-    #
+    # demo vectors – boost multiplies semantic distance
     G = np.random.randn(256); G /= np.linalg.norm(G)
     I = G + np.random.normal(scale=boost, size=256)

-
-    mod_logits = (
-        ENGINE.run(input_vec=I, ground_vec=G, logits=raw_logits)
-        if enable_wfgy else raw_logits.copy()
-    )
+    mod_logits = ENGINE.run(I, G, raw_logits, bbmc_scale=boost)

-
-
-
-
-
-        f"top-1 {'✔' if m['top1_shift'] else '✘'}"
+    metrics = compare_logits(raw_logits, mod_logits)
+    return (
+        prompt + tokenizer.decode(int(raw_logits.argmax())),
+        prompt + tokenizer.decode(int(mod_logits.argmax())),
+        metrics, raw_logits, mod_logits
     )

-
-
-
-
-
+def wfgy_pipeline(prompt: str, enable: bool, boost: float):
+    if not prompt.strip():
+        return "", "", "<i>Please enter a prompt.</i>", None
+
+    try:
+        raw_txt, mod_txt, met, rl, ml = one_pass(prompt, boost if enable else 0.0)
+
+        # safety: if enable & variance drop < 5 %, force BBCR collapse once
+        if enable and (1 - met["std_ratio"]) < .05:
+            _, mod_txt, met, rl, ml = one_pass(prompt, boost * 1.8)

-
-
-
+        stats = (
+            f"<b>variance ▼ {(1-met['std_ratio'])*100:.0f}%</b> | "
+            f"<b>KL {met['kl_divergence']:.2f}</b> | "
+            f"top-1 {'✔' if met['top1_shift'] else '✘'}"
+        )

-
+        # histogram → PIL
+        fig = plot_histogram(rl, ml) or plt.gcf()
+        buf = io.BytesIO()
+        fig.savefig(buf, format="png", bbox_inches="tight")
+        plt.close(fig)
+        img = Image.open(buf)

-
-css = "#prompt-row{margin-bottom:1rem}.gr-box{font-size:.85rem}"
+        return raw_txt, mod_txt, stats, img

-
+    except Exception as exc:
+        return "", "", f"<b style='color:red'>Error:</b> {exc}", None
+
+# ────────────────────────── UI layout ───────────────────────────
+with gr.Blocks(title="WFGY Variance Gate", theme=gr.themes.Soft()) as demo:
     gr.Markdown(
         """
         ### 🧠 WFGY 1-click Variance Gate
-
-
+        **Turn any model – even GPT-2 – into a calmer thinker.**
+        Move the slider and watch variance dive.

         | Metric | Meaning |
-
-        | **variance ▼** | logits
-        | **KL** | distribution
+        | --- | --- |
+        | **variance ▼** | logits get less noisy |
+        | **KL** | distribution really changed |
         | **top-1** | most-likely token swapped ✔ / ✘ |
-
-        Benchmarks (WFGY 1.0 vs base) show up to **+47 %** accuracy on MMLU.
         """
     )

-
-
-
-
-
-    boost = gr.Slider(
-        minimum=0.0, maximum=1.0, step=0.05, value=0.30,
-        label="Demo Boost (higher → bigger effect)"
-    )
-
-    with gr.Row():
-        raw_box = gr.Textbox(label="Raw GPT-2")
-        mod_box = gr.Textbox(label="After WFGY")
+    prompt = gr.Textbox(label="Prompt", lines=2, placeholder="Ask anything…")
+    enable = gr.Checkbox(label="Enable WFGY", value=True)
+    boost = gr.Slider(0, 3, value=1.2, step=.1,
+                      label="Demo Boost (higher → bigger effect)")
+    runbtn = gr.Button("Run")

+    raw_box = gr.Textbox(label="Raw GPT-2")
+    mod_box = gr.Textbox(label="After WFGY")
     metrics = gr.HTML()
-    hist = gr.Image(label="Logit distribution", width=
+    hist = gr.Image(label="Logit distribution", width=460)

-    runbtn.click(
-
-
-        outputs=[raw_box, mod_box, metrics, hist]
-    )
+    runbtn.click(wfgy_pipeline,
+                 inputs=[prompt, enable, boost],
+                 outputs=[raw_box, mod_box, metrics, hist])

     gr.Markdown(
         """
-        **PDF mode** – feed <code>I_am_not_lizardman/WFGY_1.0.pdf</code> to any chat-LLM,
-        prepend <code>Use WFGY:</code> and
+        **PDF mode** – feed <code>I_am_not_lizardman/WFGY_1.0.pdf</code> to any chat-LLM,
+        prepend <code>Use WFGY:</code> and enjoy sharper answers.

         ⭐ <a href="https://github.com/onestardao/WFGY" target="_blank">
-        GitHub repo – star to unlock WFGY 2.0 (10
-        </a>
+        GitHub repo – star to unlock WFGY 2.0 (10 k ⭐ before 2025-08-01)
+        </a>

         🕵️ Hidden folder <b>I_am_not_lizardman/</b> holds 8 + 1 “Challenge-Einstein” papers – tweet a screenshot if you find them!
-        """
-        elem_id="footer"
+        """
     )

 demo.launch()
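Quick smoke test (not part of app.py): a minimal sketch that exercises the same wfgy_sdk entry points the app uses above (get_engine, ENGINE.run with bbmc_scale, compare_logits). The random logit vector and the 50257-entry vocabulary size are stand-in assumptions for illustration, not values taken from the Space.

# Minimal sketch, assuming the wfgy_sdk API shown in the diff above.
# The random "raw_logits" stands in for a real GPT-2 logit vector.
import numpy as np
import wfgy_sdk as w
from wfgy_sdk.evaluator import compare_logits

ENGINE = w.get_engine()
rng = np.random.default_rng(42)

raw_logits = rng.normal(size=50257)                # stand-in for one GPT-2 logit vector
G = rng.normal(size=256); G /= np.linalg.norm(G)   # "ground" semantic vector, as in app.py
I = G + rng.normal(scale=1.2, size=256)            # "input" vector at demo boost 1.2

mod_logits = ENGINE.run(I, G, raw_logits, bbmc_scale=1.2)
m = compare_logits(raw_logits, mod_logits)
print(f"variance ▼ {(1 - m['std_ratio']) * 100:.0f}% | "
      f"KL {m['kl_divergence']:.2f} | top-1 shift {m['top1_shift']}")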