Update app.py
app.py
CHANGED
@@ -1,54 +1,61 @@
 """
-HF Space
+HF Space · WFGY 1-click Variance Gate
 """
-import os, sys, numpy as np, gradio as gr
-import torch
-from transformers import AutoTokenizer, AutoModelForCausalLM
-# ensure repo root (so wfgy_sdk import works)
-sys.path.append(os.path.dirname(__file__))

+import gradio as gr, numpy as np
+from transformers import AutoTokenizer, AutoModelForCausalLM
 from wfgy_sdk import get_engine
 from wfgy_sdk.evaluator import compare_logits

 MODEL = "sshleifer/tiny-gpt2"
 tok = AutoTokenizer.from_pretrained(MODEL)
 mdl = AutoModelForCausalLM.from_pretrained(MODEL)
-
+ENG = get_engine()

 def run(prompt, enable, boost):
     if not prompt.strip():
-        return "-", "-", "
+        return "-", "-", "Please enter a prompt."

-
-    rawL = mdl(**
+    inputs = tok(prompt, return_tensors="pt")
+    rawL = mdl(**inputs).logits[0, -1].detach().cpu().numpy()

+    # demo-only fake semantic vectors
+    I = np.random.randn(256).astype(np.float32)
     G = np.random.randn(256).astype(np.float32)
-    I = G + np.random.normal(scale=0.05, size=256).astype(np.float32)

-    modL =
-
-    raw_txt = prompt + tok.decode(int(rawL.argmax()))
-    mod_txt = prompt + tok.decode(int(modL.argmax()))
-    m = compare_logits(rawL, modL)
-    headline = f"variance ▼ {int(m['var_drop']*100)} % | KL {m['kl']:.2f} | top-1 {'✓' if m['top1'] else '✗'}"
-    return raw_txt, mod_txt, headline
+    modL = ENG.run(I, G, rawL) if enable else rawL
+    mets = compare_logits(rawL, modL)
+
+    headline = f"variance ▼ {int(mets['var_drop']*100)} % | " \
+               f"KL {mets['kl']:.2f} | " \
+               f"top-1 {'✓' if mets['top1'] else '✗'}"
+
+    return (
+        prompt + tok.decode(int(rawL.argmax())),
+        prompt + tok.decode(int(modL.argmax())),
+        headline,
+    )

 with gr.Blocks(title="WFGY 1-click Variance Gate") as demo:
     gr.Markdown("## 🧠 WFGY 1-click Variance Gate\n"
-                "Turn GPT-2 into a calmer thinker.
+                "Turn GPT-2 into a calmer thinker. "
+                "Move the slider → watch variance dive.")
+
     prompt = gr.Textbox(label="Prompt")
     enable = gr.Checkbox(True, label="Enable WFGY")
-    boost = gr.Slider(0.5, 3.0, 1.0,
-
+    boost = gr.Slider(0.5, 3.0, 1.0, label="Demo Boost (visual only)")
+
     run_btn = gr.Button("Run", variant="primary")
+
     with gr.Row():
-
-
+        out_raw = gr.Textbox(label="Raw GPT-2")
+        out_mod = gr.Textbox(label="After WFGY")
+
     headline = gr.Markdown()

-    run_btn.click(run,
-
-                  outputs=[raw_box, mod_box, headline])
+    run_btn.click(run, [prompt, enable, boost],
+                  [out_raw, out_mod, headline])

 if __name__ == "__main__":
-    demo.queue().launch()
+    demo.queue().launch()
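For reference, a minimal sketch of how the updated logic could be smoke-tested outside the Gradio UI. It assumes wfgy_sdk is installed next to transformers and torch, and only reuses the calls already shown in app.py above (get_engine().run(I, G, logits) and compare_logits); the metric keys var_drop, kl and top1 are taken from the headline string and are not otherwise verified.

# smoke_test.py — minimal sketch; assumes wfgy_sdk, transformers and torch are available
import numpy as np
from transformers import AutoTokenizer, AutoModelForCausalLM
from wfgy_sdk import get_engine
from wfgy_sdk.evaluator import compare_logits

MODEL = "sshleifer/tiny-gpt2"
tok = AutoTokenizer.from_pretrained(MODEL)
mdl = AutoModelForCausalLM.from_pretrained(MODEL)
eng = get_engine()

prompt = "The sky is"
inputs = tok(prompt, return_tensors="pt")
# logits for the next token, same slicing as in app.py
rawL = mdl(**inputs).logits[0, -1].detach().cpu().numpy()

# demo-only random semantic vectors, mirroring app.py
I = np.random.randn(256).astype(np.float32)
G = np.random.randn(256).astype(np.float32)

modL = eng.run(I, G, rawL)
mets = compare_logits(rawL, modL)

print("raw next token:", tok.decode(int(rawL.argmax())))
print("mod next token:", tok.decode(int(modL.argmax())))
print("metrics:", {k: mets[k] for k in ("var_drop", "kl", "top1")})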