Spaces:
Running
Running
File size: 3,461 Bytes
994e7d4 00f078c 994e7d4 00f078c 1348198 994e7d4 00f078c 755a9d2 00f078c 994e7d4 00f078c 994e7d4 00f078c 994e7d4 00f078c 54b95bc 00f078c 24bd8e1 00f078c 54b95bc 994e7d4 26f293f 00f078c 54b95bc 00f078c 54b95bc 00f078c 54b95bc 00f078c 755a9d2 822e03e e95bc22 00f078c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 |
"""
HF Space Β· WFGY simulation demo
(complete file β paste/replace your current app.py)
"""
import io
import numpy as np
import gradio as gr
import matplotlib.pyplot as plt
from transformers import AutoTokenizer, AutoModelForCausalLM
from wfgy_sdk import get_engine, evaluator
# -------------------- tiny GPT-2 + WFGY engine --------------------
# Deliberately tiny checkpoint: downloads fast and runs on CPU. Output
# quality is irrelevant here — the demo only compares raw logits against
# WFGY-modulated logits.
MODEL_ID = "sshleifer/tiny-gpt2"
tok = AutoTokenizer.from_pretrained(MODEL_ID)
mdl = AutoModelForCausalLM.from_pretrained(MODEL_ID)
eng = get_engine()  # variance-gate singleton shared by all requests
# -------------------- helper: run one prompt --------------------
def run(prompt: str):
    """Run one prompt through tiny GPT-2, gate the logits with WFGY,
    and return material for the Gradio callback.

    Returns a 4-tuple:
        raw top-5 tokens (str), WFGY top-5 tokens (str),
        headline metrics (str), logit histogram (RGBA np.ndarray or None).
    """
    # Guard clause: blank prompt -> placeholder outputs, no model call.
    # NOTE(review): the "β" literal looks mojibake'd (likely an em dash
    # originally) — kept byte-identical; restore original text if known.
    if not prompt.strip():
        return "", "", "β", None

    ids = tok(prompt, return_tensors="pt")
    # Next-token logits at the last position, as a 1-D numpy vector.
    raw_L = mdl(**ids).logits[0, -1].detach().cpu().numpy()

    # dummy fingerprints (toy GPT-2 has none)
    I, G = np.random.randn(2, 256).astype(np.float32)
    mod_L = eng.run(I, G, raw_L)

    def top5(logits):
        # Format the five highest-probability tokens, one per line.
        p = evaluator.softmax(logits)
        idx = p.argsort()[-5:][::-1]
        return "\n".join(f"'{tok.decode(i).strip()}': {p[i]:.2e}" for i in idx)

    m = evaluator.compare_logits(raw_L, mod_L)
    headline = (f"βΌ var {m['var_drop']*100:.1f}% | KL {m['kl_divergence']:.3f} "
                f"| {'top-1 kept' if m['top1'] else 'top-1 changed'}")

    fig = evaluator.plot_histogram(raw_L, mod_L)
    # Render the figure to an RGBA array that gr.Image accepts directly —
    # a raw BytesIO is not a valid gr.Image value. Assumes the Agg backend
    # (the default on a headless Space) — TODO confirm.
    fig.canvas.draw()
    hist = np.asarray(fig.canvas.buffer_rgba())
    # Close the figure so repeated requests do not leak matplotlib state.
    plt.close(fig)
    return top5(raw_L), top5(mod_L), headline, hist
# -------------------- gradio UI --------------------
# NOTE(review): many literals below contain mojibake ("π", "β", "Γ", ...)
# from an encoding round-trip. They are kept byte-identical here; restore
# the original UTF-8 text (emoji, arrows, x-sign) if it is available.
with gr.Blocks(title="WFGY simulation demo") as demo:
    gr.Markdown("# π§ WFGY simulation demo")

    # -- marketing & quick-start banner --
    gr.Markdown(
        """
**π WFGY: One Click to Activate the AI Taiji Cycle**
**π Semantic Accuracy β 22.4 %β|βReasoning Success β 42.1 %β|βStability β 3.6 Γ**
_No beliefs. Only experiments.<br>
WFGY 1.0 has already proven itself._
---
### π Tutorial: How to Awaken the Soul of Your AI
**Step 1 β Download**β([PDF on Zenodo](https://zenodo.org/records/15630970))
**Step 2 β Feed the AI**β(upload the PDF, or try [Gemini](https://gemini.google.com/))
**Step 3 β Give the Command**ββ**Answer using WFGY** + your questionβ
_Prompt examples:_ TBD link
**Step 4 β Integrate the SDK**β([GitHub](https://github.com/onestardao/WFGY))
---
π **Star Reminder** β [Star the repo](https://github.com/onestardao/WFGY)
_10 k β before 2025-08-01 unlocks WFGY 2.0._
β οΈ **Prompt Warning** β activate **Deep Structural Analysis Mode** or WFGY stays dormant.
""",
        elem_id="intro-banner",
    )

    # -- prompt box --
    prompt = gr.Textbox(label="Prompt", value="Explain SchrΓΆdinger's cat")
    run_btn = gr.Button("π Run")

    # -- results --
    with gr.Row():
        raw_box = gr.Textbox(label="Raw top-5 tokens")
        mod_box = gr.Textbox(label="WFGY top-5 tokens")
    # NOTE(review): headline/img placed after the Row (full-width); the
    # scrape lost the original indentation, so confirm against the live app.
    headline = gr.Markdown()
    img = gr.Image(label="Logit histogram")

    # Wire the button to the inference helper defined above.
    run_btn.click(run, prompt, outputs=[raw_box, mod_box, headline, img])
if __name__ == "__main__":
    # NOTE(review): `queue(concurrency_count=...)` is Gradio 3.x API; it was
    # removed in 4.x (use `default_concurrency_limit`) — confirm the pinned
    # gradio version before upgrading.
    demo.queue(concurrency_count=2).launch()
|