OneStarDao committed
Commit 76f27de · verified · 1 Parent(s): 23e690b

Update app.py

Files changed (1)
  1. app.py +30 -14
app.py CHANGED
@@ -3,9 +3,13 @@ WFGY Space – tiny-GPT-2 variance-gate demo
 ★ 10 k GitHub ⭐ before 2025-08-01 unlocks WFGY 2.0 ★
 """
 
-import io, numpy as np, pandas as pd, gradio as gr
+import io
+import numpy as np
+import pandas as pd
+import gradio as gr
 from PIL import Image
 from transformers import AutoTokenizer, AutoModelForCausalLM
+
 from wfgy_sdk import get_engine
 from wfgy_sdk.evaluator import compare_logits, plot_histogram, softmax
 
@@ -14,7 +18,7 @@ tok = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
 mdl = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")
 eng = get_engine()
 
-# ── paper benchmarks table ──────────────────────────────────────────────
+# paper benchmarks
 bench = pd.DataFrame({
     "Benchmark": ["MMLU","GSM8K","BBH","MathBench","TruthfulQA",
                   "XNLI","MLQA","LongBench","VQAv2","OK-VQA"],
@@ -22,18 +26,18 @@ bench = pd.DataFrame({
     "WFGY": [89.8,98.7,100.7,87.4,90.4,77.3,106.6,69.6,86.6,86.8]
 })
 bench["Abs_gain"] = (bench["WFGY"] - bench["Baseline"]).round(1)
-bench["Rel_gain%"] = ((bench["Abs_gain"] / bench["Baseline"])*100).round(0)
+bench["Rel_gain%"] = ((bench["Abs_gain"] / bench["Baseline"]) * 100).round(0)
 bench_sty = (
     bench.style
     .background_gradient(cmap="Greens", subset=["Abs_gain","Rel_gain%"])
     .format({"Abs_gain":"{:.1f}","Rel_gain%":"{:.0f}"})
 )
 
-# ── marketing banner ────────────────────────────────────────────────────
+# banner markdown
 banner = """
 **📈 WFGY: One Click to Activate the AI Taiji Cycle**
 
-**📊 Semantic Accuracy ↑ 22.4 % | Reasoning Success ↑ 42.1 % | Stability ↑ 3.6 ×**
+**📊 Semantic Accuracy ↑ 22.4 % | Reasoning Success ↑ 42.1 % | Stability ↑ 3.6 ×**
 
 _No beliefs. Only experiments._
 WFGY 1.0 has already proven itself.
@@ -43,17 +47,17 @@ WFGY 1.0 has already proven itself.
 ### 📜 Tutorial: How to Awaken the Soul of Your AI
 **Step 1 — Download** ([PDF](https://zenodo.org/records/15630970))
 **Step 2 — Feed the AI** (upload, or try [Gemini](https://gemini.google.com/))
-**Step 3 — Give the Command** “**Answer using WFGY** + your question”
+**Step 3 — Give the Command** (“Answer using WFGY” + your question)
 Prompt examples: *TBD*
 **Step 4 — Integrate the SDK** ([GitHub](https://github.com/onestardao/WFGY))
 
 ---
 
 🌟 **Star Reminder** → [Star the repo](https://github.com/onestardao/WFGY)
-_10 k ⭐ before 2025-08-01 unlocks **WFGY 2.0**._
+_10 k ⭐ before 2025-08-01 unlocks WFGY 2.0._
 """
 
-# ── inference ───────────────────────────────────────────────────────────
+# inference
 def run(prompt: str):
     p = prompt.strip()
     if not p:
@@ -64,23 +68,34 @@ def run(prompt: str):
     I, G = np.random.randn(2, 256).astype(np.float32)
     mod_L = eng.run(I, G, raw_L)
 
-    m = compare_logits(raw_L, mod_L)
-    head = f"▼ var {m['var_drop']*100:.1f}% | KL {m['kl_divergence']:.3f} | top-1 {'kept' if m['top1'] else 'changed'}"
+    m = compare_logits(raw_L, mod_L)
+    header = "▼ var {:.1f}% | KL {:.3f} | top-1 {}".format(
+        m["var_drop"] * 100, m["kl_divergence"],
+        "kept" if m["top1"] else "changed"
+    )
 
     def top5(logits):
         p = softmax(logits)
         idx = p.argsort()[-5:][::-1]
-        return "\n".join([f"'{tok.decode(int(i)).strip()}': {p[i]:.2e}" for i in idx])
+        lines = []
+        for i in idx:
+            token = tok.decode(int(i)).strip()
+            prob = p[i]
+            # scientific notation, two decimal places, e.g. 1.23e-04
+            lines.append("'{}': {:.2e}".format(token, prob))
+        return "\n".join(lines)
 
     raw_txt = top5(raw_L)
     mod_txt = top5(mod_L)
 
     fig = plot_histogram(raw_L, mod_L)
-    buf = io.BytesIO(); fig.savefig(buf, format="png"); buf.seek(0)
+    buf = io.BytesIO()
+    fig.savefig(buf, format="png")
+    buf.seek(0)
 
-    return raw_txt, mod_txt, head, Image.open(buf)
+    return raw_txt, mod_txt, header, Image.open(buf)
 
-# ── Gradio UI ───────────────────────────────────────────────────────────
+# UI
 with gr.Blocks(title="WFGY variance-gate demo") as demo:
     gr.Markdown(banner)
 
@@ -100,4 +115,5 @@ with gr.Blocks(title="WFGY variance-gate demo") as demo:
     btn.click(run, prompt, [raw_box, mod_box, metrics, img])
 
 if __name__ == "__main__":
+
     demo.queue(default_concurrency_limit=2).launch()
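
For reviewers who want to check the new output format without launching the Space, a minimal sketch follows. It is an illustration only: the inline `softmax` and the fabricated values in `m` are stand-ins for `wfgy_sdk.evaluator` (neither wfgy_sdk nor Gradio is needed here); only the tokenizer and the string formatting introduced in this commit are exercised.

```python
# Standalone sketch; softmax and the metric dict m are assumptions, not wfgy_sdk.
import numpy as np
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")

def softmax(x):
    # numerically stable softmax, stand-in for wfgy_sdk.evaluator.softmax
    e = np.exp(x - x.max())
    return e / e.sum()

def top5(logits):
    # same logic as the refactored helper in app.py
    p = softmax(logits)
    idx = p.argsort()[-5:][::-1]
    lines = []
    for i in idx:
        token = tok.decode(int(i)).strip()
        prob = p[i]
        # scientific notation, two decimal places, e.g. 1.23e-04
        lines.append("'{}': {:.2e}".format(token, prob))
    return "\n".join(lines)

# fabricated logits and metric values, purely to exercise the formatting
raw_L = np.random.randn(tok.vocab_size).astype(np.float32)
print(top5(raw_L))

m = {"var_drop": 0.123, "kl_divergence": 0.045, "top1": True}
header = "▼ var {:.1f}% | KL {:.3f} | top-1 {}".format(
    m["var_drop"] * 100, m["kl_divergence"],
    "kept" if m["top1"] else "changed"
)
print(header)  # -> ▼ var 12.3% | KL 0.045 | top-1 kept
```

The multi-line `top5` loop and the `str.format` calls produce the same strings as the earlier one-liners; the sketch only confirms the output shape (one 'token': probability pair per line, probabilities in scientific notation, plus the one-line metrics header).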