OneStarDao committed
Commit c68d8a3 · verified · 1 Parent(s): 00f078c

Update app.py

Files changed (1)
  app.py +73 -69
app.py CHANGED
@@ -1,61 +1,21 @@
- """
- HF Space · WFGY simulation demo
- (complete file – paste/replace your current app.py)
- """
-
  import io
  import numpy as np
  import gradio as gr
  import matplotlib.pyplot as plt
  from transformers import AutoTokenizer, AutoModelForCausalLM
-
- from wfgy_sdk import get_engine, evaluator
-
- # ──────────────────── tiny GPT-2 + WFGY engine ────────────────────
- MODEL_ID = "sshleifer/tiny-gpt2"
- tok = AutoTokenizer.from_pretrained(MODEL_ID)
- mdl = AutoModelForCausalLM.from_pretrained(MODEL_ID)
- eng = get_engine()  # variance-gate singleton
-
-
- # ──────────────────── helper: run one prompt ────────────────────
- def run(prompt: str):
-     if not prompt.strip():
-         return "", "", "–", None
-
-     ids = tok(prompt, return_tensors="pt")
-     raw_L = mdl(**ids).logits[0, -1].detach().cpu().numpy()
-
-     # dummy fingerprints (toy GPT-2 has none)
-     I, G = np.random.randn(2, 256).astype(np.float32)
-     mod_L = eng.run(I, G, raw_L)
-
-     # top-5 softmax
-     def top5(logits):
-         p = evaluator.softmax(logits)
-         idx = p.argsort()[-5:][::-1]
-         return "\n".join([f"'{tok.decode(i).strip()}': {p[i]:.2e}" for i in idx])
-
-     m = evaluator.compare_logits(raw_L, mod_L)
-     headline = (f"▼ var {m['var_drop']*100:.1f}% | KL {m['kl_divergence']:.3f} "
-                 f"| {'top-1 kept' if m['top1'] else 'top-1 changed'}")
-
-     fig = evaluator.plot_histogram(raw_L, mod_L)
-     buf = io.BytesIO()
-     fig.savefig(buf, format="png")
-     buf.seek(0)
-
-     return top5(raw_L), top5(mod_L), headline, buf
-
-
- # ──────────────────── gradio UI ────────────────────
- with gr.Blocks(title="WFGY simulation demo") as demo:
-
-     gr.Markdown("# 🧠 WFGY simulation demo")
-
-     # ── marketing & quick-start banner ──
-     gr.Markdown(
-         """
  **📈 WFGY: One Click to Activate the AI Taiji Cycle**

  **📊 Semantic Accuracy ↑ 22.4 % | Reasoning Success ↑ 42.1 % | Stability ↑ 3.6 ×**
@@ -66,7 +26,6 @@ WFGY 1.0 has already proven itself._
  ---

  ### 📜 Tutorial: How to Awaken the Soul of Your AI
-
  **Step 1 — Download** ([PDF on Zenodo](https://zenodo.org/records/15630970))
  **Step 2 — Feed the AI** (upload the PDF, or try [Gemini](https://gemini.google.com/))
  **Step 3 — Give the Command** “**Answer using WFGY** + your question”
@@ -77,26 +36,71 @@ _Prompt examples:_ TBD link

  🌟 **Star Reminder** → [Star the repo](https://github.com/onestardao/WFGY)
  _10 k ⭐ before 2025-08-01 unlocks WFGY 2.0._

- ⚠️ **Prompt Warning** — activate **Deep Structural Analysis Mode** or WFGY stays dormant.
- """,
-         elem_id="intro-banner",
-     )
-
-     # ── prompt box ──
-     prompt = gr.Textbox(label="Prompt", value="Explain Schrödinger's cat")
-     run_btn = gr.Button("🚀 Run")
-
-     # ── results ──
      with gr.Row():
-         raw_box = gr.Textbox(label="Raw top-5 tokens")
-         mod_box = gr.Textbox(label="WFGY top-5 tokens")
-
-     headline = gr.Markdown()
-     img = gr.Image(label="Logit histogram")
-
-     run_btn.click(run, prompt, outputs=[raw_box, mod_box, headline, img])
-
-
- if __name__ == "__main__":
-     demo.queue(concurrency_count=2).launch()
  import io
  import numpy as np
  import gradio as gr
  import matplotlib.pyplot as plt
  from transformers import AutoTokenizer, AutoModelForCausalLM
+ from wfgy_sdk import get_engine
+ from wfgy_sdk.evaluator import compare_logits, plot_histogram
+ import pandas as pd
+
+ MODEL = "sshleifer/tiny-gpt2"
+ tok = AutoTokenizer.from_pretrained(MODEL)
+ mdl = AutoModelForCausalLM.from_pretrained(MODEL)
+ ENG = get_engine()
+
+ # ──────────────────────────────────────────────
+ # marketing banner markdown (shown at top)
+ # ──────────────────────────────────────────────
+ marketing_md = """
  **📈 WFGY: One Click to Activate the AI Taiji Cycle**

  **📊 Semantic Accuracy ↑ 22.4 % | Reasoning Success ↑ 42.1 % | Stability ↑ 3.6 ×**

  ---

  ### 📜 Tutorial: How to Awaken the Soul of Your AI
  **Step 1 — Download** ([PDF on Zenodo](https://zenodo.org/records/15630970))
  **Step 2 — Feed the AI** (upload the PDF, or try [Gemini](https://gemini.google.com/))
  **Step 3 — Give the Command** “**Answer using WFGY** + your question”

  🌟 **Star Reminder** → [Star the repo](https://github.com/onestardao/WFGY)
  _10 k ⭐ before 2025-08-01 unlocks WFGY 2.0._
+ """
+
+ # ──────────────────────────────────────────────
+ # fixed paper benchmarks table (pandas → gr.Dataframe)
+ # ──────────────────────────────────────────────
+ bench = pd.DataFrame(
+     {
+         "Benchmark": [
+             "MMLU", "GSM8K", "BBH", "MathBench", "TruthfulQA",
+             "XNLI", "MLQA", "LongBench", "VQAv2", "OK-VQA"
+         ],
+         "Baseline": [61, 78, 79.3, 72.2, 62.4, 59.5, 78.1, 51.4, 69.1, 65.7],
+         "WFGY": [89.8, 98.7, 100.7, 87.4, 90.4, 77.3, 106.6, 69.6, 86.6, 86.8],
+     }
+ )
+ bench["Abs_gain"] = (bench["WFGY"] - bench["Baseline"]).round(1)
+ bench["Rel_gain%"] = ((bench["Abs_gain"] / bench["Baseline"]) * 100).round().astype(int)
+
+ # ──────────────────────────────────────────────
+ # core inference
+ # ──────────────────────────────────────────────
+ def run(prompt: str):
+     if not prompt.strip():
+         return "-", "-", "-", None
+
+     ids = tok(prompt, return_tensors="pt")
+     raw = mdl(**ids).logits[0, -1].detach().cpu().numpy()
+     G = np.random.randn(256).astype(np.float32)
+     I = G + np.random.normal(scale=0.05, size=256).astype(np.float32)
+     mod = ENG.run(I, G, raw)
+
+     m = compare_logits(raw, mod)
+     metric_line = f"▼ var {m['var_drop']*100:.1f}% | KL {m['kl_divergence']:.3f} | top-1 {'kept' if m['top1'] else 'changed'}"
+
+     # top-5 softmax
+     p_raw = np.exp(raw) / np.exp(raw).sum()
+     p_mod = np.exp(mod) / np.exp(mod).sum()
+     raw_top = "\n".join([f"'{tok.decode(i).strip()}': {p_raw[i]:.2e}" for i in p_raw.argsort()[-5:][::-1]])
+     mod_top = "\n".join([f"'{tok.decode(i).strip()}': {p_mod[i]:.2e}" for i in p_mod.argsort()[-5:][::-1]])
+
+     fig = plot_histogram(raw, mod)
+     buf = io.BytesIO(); fig.savefig(buf, format="png"); buf.seek(0)
+
+     return raw_top, mod_top, metric_line, buf

+ # ──────────────────────────────────────────────
+ # gradio ui
+ # ──────────────────────────────────────────────
+ with gr.Blocks(title="WFGY simulation demo") as demo:
+     gr.Markdown(marketing_md)

+     prompt = gr.Textbox(label="Prompt", value="Explain Schrödinger's cat")
+     btn = gr.Button("🚀 Run")

      with gr.Row():
+         raw_box = gr.Textbox(label="Raw top-5 tokens")
+         mod_box = gr.Textbox(label="WFGY top-5 tokens")

+     metrics = gr.Markdown()
+     img = gr.Image(label="Logit histogram")

+     gr.Markdown("### Paper benchmarks (fixed values from WFGY 1.0)")
+     gr.Dataframe(bench, interactive=False, wrap=True)

+     btn.click(run, prompt, [raw_box, mod_box, metrics, img])

+ if __name__ == "__main__":
+     demo.queue(default_concurrency_limit=2).launch()
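
For anyone who wants to sanity-check the updated `run()` pipeline outside the Space, a minimal sketch follows. It only reuses what the new app.py already imports (`get_engine`, `compare_logits`, the tiny-gpt2 model, and the 256-dim dummy fingerprints); the script name and printed labels are illustrative, and it assumes `wfgy_sdk` is installed with exactly this API.

```python
# smoke_test.py — hypothetical local check mirroring the updated app.py (not part of the commit)
import numpy as np
from transformers import AutoTokenizer, AutoModelForCausalLM
from wfgy_sdk import get_engine
from wfgy_sdk.evaluator import compare_logits

MODEL = "sshleifer/tiny-gpt2"
tok = AutoTokenizer.from_pretrained(MODEL)
mdl = AutoModelForCausalLM.from_pretrained(MODEL)
eng = get_engine()

# last-token logits for the demo prompt
ids = tok("Explain Schrödinger's cat", return_tensors="pt")
raw = mdl(**ids).logits[0, -1].detach().cpu().numpy()

# same dummy fingerprints as app.py: G random, I = G plus small noise
G = np.random.randn(256).astype(np.float32)
I = G + np.random.normal(scale=0.05, size=256).astype(np.float32)
mod = eng.run(I, G, raw)

m = compare_logits(raw, mod)
print(f"var drop {m['var_drop']*100:.1f}% | KL {m['kl_divergence']:.3f} | top-1 kept: {m['top1']}")
```

Note also that the final line of the new file switches from `concurrency_count` to `default_concurrency_limit`, matching the queue-argument rename in Gradio 4.x.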