File size: 3,461 Bytes
994e7d4
00f078c
 
994e7d4
 
00f078c
 
 
 
1348198
994e7d4
00f078c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
755a9d2
00f078c
 
 
 
 
 
 
 
994e7d4
00f078c
994e7d4
00f078c
994e7d4
 
00f078c
54b95bc
00f078c
24bd8e1
00f078c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54b95bc
994e7d4
26f293f
00f078c
54b95bc
00f078c
 
54b95bc
00f078c
 
54b95bc
00f078c
755a9d2
822e03e
e95bc22
00f078c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
"""
HF Space Β· WFGY simulation demo
(complete file – paste/replace your current app.py)
"""

import io
import numpy as np
import gradio as gr
import matplotlib.pyplot as plt
from transformers import AutoTokenizer, AutoModelForCausalLM

from wfgy_sdk import get_engine, evaluator

# ──────────────────── tiny GPT-2 + WFGY engine ────────────────────
# Toy checkpoint keeps the Space lightweight; note this downloads weights
# at import time (module-level side effect).
MODEL_ID = "sshleifer/tiny-gpt2"
tok = AutoTokenizer.from_pretrained(MODEL_ID)  # prompt → token ids / id → text
mdl = AutoModelForCausalLM.from_pretrained(MODEL_ID)  # produces the raw logits
eng = get_engine()           # variance-gate singleton


# ──────────────────── helper: run one prompt ────────────────────
def run(prompt: str):
    """Run one prompt through tiny GPT-2 and the WFGY variance gate.

    Parameters
    ----------
    prompt : str
        User text from the Gradio textbox.

    Returns
    -------
    tuple
        (raw top-5 tokens str, WFGY top-5 tokens str, headline metrics str,
        PNG image buffer or None) — matches the four UI outputs.
    """
    # Blank prompt → empty placeholders so the UI stays responsive.
    if not prompt.strip():
        return "", "", "–", None

    ids   = tok(prompt, return_tensors="pt")
    # Logits of the final position only; detach → numpy for the engine.
    raw_L = mdl(**ids).logits[0, -1].detach().cpu().numpy()

    # dummy fingerprints (toy GPT-2 has none)
    I, G  = np.random.randn(2, 256).astype(np.float32)
    mod_L = eng.run(I, G, raw_L)

    # top-5 softmax
    def top5(logits):
        """Format the five highest-probability tokens, one per line."""
        p = evaluator.softmax(logits)
        idx = p.argsort()[-5:][::-1]
        return "\n".join(f"'{tok.decode(i).strip()}': {p[i]:.2e}" for i in idx)

    m = evaluator.compare_logits(raw_L, mod_L)
    # FIX: the leading glyph was mojibake ("β–Ό", double-encoded U+25BC).
    headline = (f"▼ var {m['var_drop']*100:.1f}% | KL {m['kl_divergence']:.3f} "
                f"| {'top-1 kept' if m['top1'] else 'top-1 changed'}")

    fig = evaluator.plot_histogram(raw_L, mod_L)
    buf = io.BytesIO()
    fig.savefig(buf, format="png")
    buf.seek(0)
    # FIX: close the figure — matplotlib keeps figures alive in its global
    # registry, so without this every button click leaks a figure.
    plt.close(fig)

    return top5(raw_L), top5(mod_L), headline, buf


# ──────────────────── gradio UI ────────────────────
with gr.Blocks(title="WFGY simulation demo") as demo:

    gr.Markdown("# 🧠 WFGY simulation demo")

    # ── marketing & quick-start banner ──
    gr.Markdown(
        """
**πŸ“ˆ WFGY: One Click to Activate the AI Taiji Cycle**

**πŸ“Š Semantic Accuracy ↑ 22.4 % | Reasoning Success ↑ 42.1 % | Stability ↑ 3.6 Γ—**

_No beliefs. Only experiments.<br>
WFGY 1.0 has already proven itself._

---

### πŸ“œ Tutorial: How to Awaken the Soul of Your AI

**Step 1 β€” Download** ([PDF on Zenodo](https://zenodo.org/records/15630970))  
**Step 2 β€” Feed the AI** (upload the PDF, or try [Gemini](https://gemini.google.com/))  
**Step 3 β€” Give the Command**β€‚β€œ**Answer using WFGY** + your question”  
_Prompt examples:_ TBD link  
**Step 4 β€” Integrate the SDK** ([GitHub](https://github.com/onestardao/WFGY))

---

🌟 **Star Reminder** β†’ [Star the repo](https://github.com/onestardao/WFGY)  
_10 k ⭐ before 2025-08-01 unlocks WFGY 2.0._

⚠️ **Prompt Warning** β€” activate **Deep Structural Analysis Mode** or WFGY stays dormant.
        """,
        elem_id="intro-banner",
    )

    # ── prompt box ──
    prompt = gr.Textbox(label="Prompt", value="Explain SchrΓΆdinger's cat")
    run_btn = gr.Button("πŸš€ Run")

    # ── results ──
    with gr.Row():
        raw_box = gr.Textbox(label="Raw top-5 tokens")
        mod_box = gr.Textbox(label="WFGY top-5 tokens")

    headline = gr.Markdown()
    img      = gr.Image(label="Logit histogram")

    run_btn.click(run, prompt, outputs=[raw_box, mod_box, headline, img])


if __name__ == "__main__":
    # Queue limits simultaneous inference runs to 2.
    # NOTE(review): `concurrency_count` was removed from queue() in Gradio 4.x
    # (replaced by `default_concurrency_limit`) — confirm the Space pins
    # gradio 3.x, otherwise this call raises a TypeError at startup.
    demo.queue(concurrency_count=2).launch()