Update app.py
app.py
CHANGED
@@ -6,58 +6,58 @@ from PIL import Image
 import matplotlib.pyplot as plt
 from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed
 
-#
-MODEL = "sshleifer/tiny-gpt2"
+# ─────────────────────── tiny GPT-2 (124 MB) ───────────────────────
+MODEL = "sshleifer/tiny-gpt2"
 tokenizer = AutoTokenizer.from_pretrained(MODEL)
 model = AutoModelForCausalLM.from_pretrained(MODEL)
 set_seed(42)
 
 ENGINE = w.get_engine()
 
-# ─────────────────────────── core fn
-def wfgy_pipeline(prompt
-    """
+# ─────────────────────────── core demo fn ──────────────────────────
+def wfgy_pipeline(prompt, enable_wfgy, boost):
+    """
+    boost ∈ [0,1] controls semantic deviation.
+    Larger boost → larger variance drop & KL.
+    """
     if not prompt.strip():
         return "", "", "<i>Please enter a prompt.</i>", None
 
-    raw_logits = model(ids).logits[0, -1].detach().cpu().numpy()
+    # ─ raw logits from GPT-2 (single token) ─
+    ids = tokenizer(prompt, return_tensors="pt").input_ids
+    raw_logits = model(ids).logits[0, -1].detach().cpu().numpy()
 
+    # ─ synthetic semantic vectors (demo only) ─
+    G = np.random.randn(256); G /= np.linalg.norm(G)
+    I = G + np.random.normal(scale=boost, size=256)
 
-    )
+    # ─ apply WFGY ─
+    mod_logits = (
+        ENGINE.run(input_vec=I, ground_vec=G, logits=raw_logits)
+        if enable_wfgy else raw_logits.copy()
+    )
 
-    # ── metrics
-    m = compare_logits(raw_logits, mod_logits)
-    top1 = "✔" if m["top1_shift"] else "✘"
-    stats = (
-        f"<b>variance ▼ {(1-m['std_ratio'])*100:.0f}%</b> | "
-        f"<b>KL {m['kl_divergence']:.2f}</b> | top-1 {top1}"
-    )
+    # ─ metrics ─
+    m = compare_logits(raw_logits, mod_logits)
+    stats = (
+        f"<b>variance ▼ {(1-m['std_ratio'])*100:.0f}%</b> | "
+        f"<b>KL {m['kl_divergence']:.2f}</b> | "
+        f"top-1 {'✔' if m['top1_shift'] else '✘'}"
+    )
 
+    # ─ histogram → PIL ─
+    fig = plot_histogram(raw_logits, mod_logits) or plt.gcf()
+    buf = io.BytesIO()
+    fig.savefig(buf, format="png", bbox_inches="tight"); plt.close(fig)
+    hist_img = Image.open(buf)
 
-    err = f"<b style='color:red'>Error:</b> {exc}"
-    return "", "", err, None
+    # ─ one-token continuations ─
+    raw_txt = prompt + tokenizer.decode(int(raw_logits.argmax()))
+    mod_txt = prompt + tokenizer.decode(int(mod_logits.argmax()))
+
+    return raw_txt, mod_txt, stats, hist_img
 
-#
+# ──────────────────────────── UI layout ────────────────────────────
 css = "#prompt-row{margin-bottom:1rem}.gr-box{font-size:.85rem}"
 
 with gr.Blocks(title="WFGY Variance Gate", css=css, theme=gr.themes.Soft()) as demo:
@@ -73,13 +73,7 @@ Turn GPT-2 into a calmer thinker in seconds. **Bigger LLMs → stronger gains.**
 | **KL** | distribution reshaped |
 | **top-1** | most-likely token swapped ✔ / ✘ |
 
-| Task | Base % | WFGY % | Δ |
-|------|------:|------:|--:|
-| MMLU | 61.0 | **89.8** | +47 % |
-| TruthfulQA | 62.4 | **90.4** | +45 % |
-| GSM8K | 78.0 | **98.7** | +27 % |
+Benchmarks (WFGY 1.0 vs base) show up to **+47 %** accuracy on MMLU.
 """
     )
 
@@ -88,23 +82,32 @@ Turn GPT-2 into a calmer thinker in seconds. **Bigger LLMs → stronger gains.**
     enable = gr.Checkbox(label="Enable WFGY", value=True)
     runbtn = gr.Button("Run")
 
+    boost = gr.Slider(
+        minimum=0.0, maximum=1.0, step=0.05, value=0.30,
+        label="Demo Boost (higher → bigger effect)"
+    )
+
     with gr.Row():
         raw_box = gr.Textbox(label="Raw GPT-2")
         mod_box = gr.Textbox(label="After WFGY")
 
     metrics = gr.HTML()
-    hist = gr.Image(label="Logit distribution", width=440
+    hist = gr.Image(label="Logit distribution", width=440)
 
-    runbtn.click(
+    runbtn.click(
+        wfgy_pipeline,
+        inputs=[prompt, enable, boost],
+        outputs=[raw_box, mod_box, metrics, hist]
+    )
 
     gr.Markdown(
         """
-**PDF mode
+**PDF mode** → feed <code>I_am_not_lizardman/WFGY_1.0.pdf</code> to any chat-LLM,
 prepend <code>Use WFGY:</code> and watch replies get sharper. Prompt revolution!
 
-⭐
-(
+⭐ <a href="https://github.com/onestardao/WFGY" target="_blank">
+GitHub repo – star to unlock WFGY 2.0 (10 000 ⭐ before 2025-08-01)
+</a>
 
 Hidden folder <b>I_am_not_lizardman/</b> holds 8 + 1 “Challenge-Einstein” papers – tweet a screenshot if you find them!
 """,
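
A note on the helpers referenced above: the pipeline calls `compare_logits(raw_logits, mod_logits)`, but that helper is defined outside this diff (presumably imported alongside `w.get_engine()`). Below is a minimal sketch of what a helper returning the three keys the UI reads (`std_ratio`, `kl_divergence`, `top1_shift`) could look like; it is an assumption for illustration, not the Space's actual implementation.

```python
# Hypothetical stand-in for the compare_logits helper used in app.py.
# Only the returned keys (std_ratio, kl_divergence, top1_shift) are known
# from this diff; the real implementation ships with the WFGY package.
import numpy as np

def compare_logits(raw_logits: np.ndarray, mod_logits: np.ndarray) -> dict:
    def softmax(x: np.ndarray) -> np.ndarray:
        e = np.exp(x - x.max())        # subtract max for numerical stability
        return e / e.sum()

    p, q = softmax(raw_logits), softmax(mod_logits)
    eps = 1e-12                        # avoid log(0) and division by zero
    return {
        # < 1 means the modulated logits have a smaller spread ("variance ▼")
        "std_ratio": float(mod_logits.std() / (raw_logits.std() + eps)),
        # KL(raw || modulated) over the softmax distributions
        "kl_divergence": float(np.sum(p * np.log((p + eps) / (q + eps)))),
        # True if the most-likely token changed after modulation
        "top1_shift": bool(raw_logits.argmax() != mod_logits.argmax()),
    }
```

This matches how the stats string uses the values: `1 - std_ratio` is reported as the variance drop, and `top1_shift` toggles the ✔ / ✘ mark.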
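
For a quick local check of the wiring above, the pipeline can also be called outside Gradio. The snippet below is a hypothetical smoke test (the module name `app`, the example prompt, and the output file name are assumptions) that mirrors the `inputs`/`outputs` bound to `runbtn.click`.

```python
# Hypothetical smoke test: call the pipeline directly, mirroring the
# inputs/outputs wired to runbtn.click in the diff above. Assumes this
# file sits next to app.py; importing app loads tiny-gpt2 once.
from app import wfgy_pipeline

raw_txt, mod_txt, stats_html, hist_img = wfgy_pipeline(
    "The moon is made of", enable_wfgy=True, boost=0.30
)
print(raw_txt)        # prompt + one raw GPT-2 token
print(mod_txt)        # prompt + one WFGY-modulated token
print(stats_html)     # e.g. "<b>variance ▼ 12%</b> | <b>KL 0.03</b> | top-1 ✘"
hist_img.save("logit_hist.png")   # PIL image of the logit histogram
```

The tail of app.py is not shown in this diff; on Spaces the file typically ends with a `demo.launch()` call.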