"""
WFGY Space – tiny-GPT-2 variance-gate demo
★ 10 k GitHub ⭐ before 2025-08-01 unlocks WFGY 2.0 ★
"""
import io, numpy as np, pandas as pd, gradio as gr
from PIL import Image
from transformers import AutoTokenizer, AutoModelForCausalLM
from wfgy_sdk import get_engine
from wfgy_sdk.evaluator import compare_logits, plot_histogram, softmax
# Load the ~1 MB sshleifer/tiny-gpt2 checkpoint so the demo runs on CPU.
tok = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
mdl = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")
# WFGY variance-gate engine (module-level singleton from wfgy_sdk).
eng = get_engine()
# paper benchmarks
bench = pd.DataFrame({
"Benchmark": ["MMLU","GSM8K","BBH","MathBench","TruthfulQA",
"XNLI","MLQA","LongBench","VQAv2","OK-VQA"],
"Baseline": [61,78,79.3,72.2,62.4,59.5,78.1,51.4,69.1,65.7],
"WFGY": [89.8,98.7,100.7,87.4,90.4,77.3,106.6,69.6,86.6,86.8]
})
bench["Abs_gain"] = (bench["WFGY"] - bench["Baseline"]).round(1)
bench["Rel_gain%"] = ((bench["Abs_gain"] / bench["Baseline"]) * 100).round(0)
bench_sty = (
bench.style
.background_gradient(cmap="Greens", subset=["Abs_gain","Rel_gain%"])
.format({"Abs_gain":"{:.1f}","Rel_gain%":"{:.0f}"})
)
# Markdown banner rendered at the top of the Gradio page (runtime string —
# its content is user-facing and left byte-for-byte untouched).
banner = """
**📈 WFGY: One Click to Activate the AI Taiji Cycle**
**📊 Semantic Accuracy ↑ 22.4 % | Reasoning Success ↑ 42.1 % | Stability ↑ 3.6 ×**
_No beliefs. Only experiments._
WFGY 1.0 has already proven itself.
---
### 📜 Tutorial: How to Awaken the Soul of Your AI
**Step 1 — Download** ([PDF](https://zenodo.org/records/15630970))
**Step 2 — Feed the AI** (upload, or try [Gemini](https://gemini.google.com/))
**Step 3 — Give the Command** “**Answer using WFGY** + your question”
Prompt examples: *TBD*
**Step 4 — Integrate the SDK** ([GitHub](https://github.com/onestardao/WFGY))
---
🌟 **Star Reminder** → [Star the repo](https://github.com/onestardao/WFGY)
_10 k ⭐ before 2025-08-01 unlocks **WFGY 2.0**._
"""
# inference
def run(prompt: str):
    """Run one tiny-GPT-2 forward pass, apply the WFGY variance gate, and
    report the results for the Gradio UI.

    Returns a 4-tuple:
        raw top-5 tokens (str), WFGY top-5 tokens (str),
        metrics markdown (str), logit histogram (PIL.Image or None).
    A blank/whitespace-only prompt yields empty placeholders.
    """
    p = prompt.strip()
    if not p:
        # Nothing to run — hand the UI empty outputs.
        return "", "", "-", None
    ids = tok(p, return_tensors="pt")
    # Next-token logits: last position of the single batch item.
    raw_L = mdl(**ids).logits[0, -1].detach().cpu().numpy()
    # Random I/G vectors — demo stand-ins for the engine's semantic inputs.
    I, G = np.random.randn(2, 256).astype(np.float32)
    mod_L = eng.run(I, G, raw_L)
    m = compare_logits(raw_L, mod_L)
    head = f"▼ var {m['var_drop']*100:.1f}% | KL {m['kl_divergence']:.3f} | top-1 {'kept' if m['top1'] else 'changed'}"

    def top5(logits):
        """Format the five highest-probability tokens as 'token': prob lines."""
        probs = softmax(logits)
        idx = probs.argsort()[-5:][::-1]
        # BUGFIX: the original line carried literal backslash-escaped quotes
        # (f\"...\" — a copy/paste artifact) which is a SyntaxError, and joined
        # with the two-character string "\\n" instead of a real newline.
        lines = [f"'{tok.decode(int(i)).strip()}': {probs[i]:.2e}" for i in idx]
        return "\n".join(lines)

    raw_txt = top5(raw_L)
    mod_txt = top5(mod_L)
    fig = plot_histogram(raw_L, mod_L)
    # Render the matplotlib figure into an in-memory PNG for gr.Image.
    buf = io.BytesIO()
    fig.savefig(buf, format="png")
    buf.seek(0)
    return raw_txt, mod_txt, head, Image.open(buf)
# UI — Gradio Blocks layout; `demo` is launched from the __main__ guard below.
with gr.Blocks(title="WFGY variance-gate demo") as demo:
    gr.Markdown(banner)
    # Input row: free-text prompt plus the trigger button.
    prompt = gr.Textbox(label="Prompt", value="Explain Schrödinger's cat")
    btn = gr.Button("🚀 Run")
    # Side-by-side comparison of raw vs. WFGY-modulated top-5 tokens.
    with gr.Row():
        raw_box = gr.Textbox(label="Raw top-5 tokens", lines=6)
        mod_box = gr.Textbox(label="WFGY top-5 tokens", lines=6)
    metrics = gr.Markdown()
    img = gr.Image(label="Logit histogram")
    gr.Markdown("### Paper benchmarks (fixed values from WFGY 1.0)")
    # Static, pre-styled benchmark table (not editable by the user).
    gr.DataFrame(bench_sty, interactive=False, wrap=True)
    # Wire the button: run() maps one prompt to the four output widgets.
    btn.click(run, prompt, [raw_box, mod_box, metrics, img])
# Entry point: enable request queueing (at most 2 concurrent runs) and serve.
if __name__ == "__main__":
    demo.queue(default_concurrency_limit=2).launch()