OneStarDao committed on
Commit
6aba93c
·
verified ·
1 Parent(s): ef37700

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -11
app.py CHANGED
@@ -1,4 +1,4 @@
1
- # HF Space · WFGY demo (all-English, no comments in other languages)
2
 
3
  import io
4
  import numpy as np
@@ -8,27 +8,27 @@ from wfgy_sdk import get_engine
8
  from wfgy_sdk.evaluator import compare_logits, plot_histogram
9
 
10
  MODEL_ID = "sshleifer/tiny-gpt2"
11
-
12
  tok = AutoTokenizer.from_pretrained(MODEL_ID)
13
  mdl = AutoModelForCausalLM.from_pretrained(MODEL_ID)
14
  eng = get_engine()
15
 
 
16
  def run(prompt: str):
17
  prompt = prompt.strip()
18
  if not prompt:
19
- return "", "", "No prompt nothing to show", None
20
 
21
  ids = tok(prompt, return_tensors="pt").input_ids
22
  logits_raw = mdl(ids).logits[0, -1].detach().cpu().numpy()
23
 
24
- # toy fingerprints just for the demo
25
  G = np.random.randn(256).astype(np.float32)
26
  I = G + np.random.normal(scale=0.05, size=256).astype(np.float32)
27
 
28
  logits_mod = eng.run(I, G, logits_raw)
29
  m = compare_logits(logits_raw, logits_mod)
30
 
31
- headline = f"▼ var {m['var_drop']*100:.1f} % | KL {m['kl']:.3f}"
32
 
33
  fig = plot_histogram(logits_raw, logits_mod)
34
  buf = io.BytesIO()
@@ -40,10 +40,10 @@ def run(prompt: str):
40
  return raw_txt, mod_txt, headline, buf
41
 
42
 
43
- with gr.Blocks(title="WFGY Variance Gate") as demo:
44
  gr.Markdown(
45
- "# 🧠 WFGY simulation demo\n"
46
- "Type any prompt watch variance shrink in real time."
47
  )
48
 
49
  prompt = gr.Textbox(label="Prompt", value="Explain Schrödinger's cat")
@@ -60,9 +60,10 @@ with gr.Blocks(title="WFGY Variance Gate") as demo:
60
 
61
  gr.Markdown(
62
  "---\n"
63
- "### ⭐ Help unlock **WFGY 2.0**\n"
64
- "10 000 stars on GitHub by **2025-08-01** → next-gen release."
65
  )
66
 
67
  if __name__ == "__main__":
68
- demo.queue(concurrency_count=2).launch()
 
 
1
+ # HF Space · WFGY variance gate demo (Gradio 4.31+)
2
 
3
  import io
4
  import numpy as np
 
8
  from wfgy_sdk.evaluator import compare_logits, plot_histogram
9
 
10
  MODEL_ID = "sshleifer/tiny-gpt2"
 
11
  tok = AutoTokenizer.from_pretrained(MODEL_ID)
12
  mdl = AutoModelForCausalLM.from_pretrained(MODEL_ID)
13
  eng = get_engine()
14
 
15
+
16
  def run(prompt: str):
17
  prompt = prompt.strip()
18
  if not prompt:
19
+ return "", "", "no prompt nothing to show", None
20
 
21
  ids = tok(prompt, return_tensors="pt").input_ids
22
  logits_raw = mdl(ids).logits[0, -1].detach().cpu().numpy()
23
 
24
+ # toy fingerprints
25
  G = np.random.randn(256).astype(np.float32)
26
  I = G + np.random.normal(scale=0.05, size=256).astype(np.float32)
27
 
28
  logits_mod = eng.run(I, G, logits_raw)
29
  m = compare_logits(logits_raw, logits_mod)
30
 
31
+ headline = f"▼ var {m['var_drop']*100:4.1f} %|KL {m['kl']:.3f}"
32
 
33
  fig = plot_histogram(logits_raw, logits_mod)
34
  buf = io.BytesIO()
 
40
  return raw_txt, mod_txt, headline, buf
41
 
42
 
43
+ with gr.Blocks(title="WFGY variance gate") as demo:
44
  gr.Markdown(
45
+ "# 🧠 WFGY simulation demo \n"
46
+ "Type any prompt and watch the logit variance collapse in real time."
47
  )
48
 
49
  prompt = gr.Textbox(label="Prompt", value="Explain Schrödinger's cat")
 
60
 
61
  gr.Markdown(
62
  "---\n"
63
+ "### ⭐ Help unlock **WFGY 2.0** \n"
64
+ "10 000 GitHub stars by **2025-08-01** → next-gen release."
65
  )
66
 
67
  if __name__ == "__main__":
68
+ # Gradio ≥4.31: queue() has no arg; use default queue size (=2)
69
+ demo.queue().launch()