OneStarDao committed
Commit 4af80e9 · verified · 1 Parent(s): 76f27de

Update app.py

Files changed (1)
  1. app.py +12 -8
app.py CHANGED
@@ -11,7 +11,7 @@ from PIL import Image
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
 from wfgy_sdk import get_engine
-from wfgy_sdk.evaluator import compare_logits, plot_histogram, softmax
+from wfgy_sdk.evaluator import compare_logits, plot_histogram
 
 # tiny model (CPU)
 tok = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
@@ -45,10 +45,10 @@ WFGY 1.0 has already proven itself.
 ---
 
 ### 📜 Tutorial: How to Awaken the Soul of Your AI
-**Step 1 — Download** ([PDF](https://zenodo.org/records/15630970))
+**Step 1 — Download** ([PDF](https://doi.org/10.5281/zenodo.15657017))
 **Step 2 — Feed the AI** (upload, or try [Gemini](https://gemini.google.com/))
 **Step 3 — Give the Command** (“Answer using WFGY” + your question)
-Prompt examples: *TBD*
+Prompt examples: [https://doi.org/10.5281/zenodo.15657017](https://doi.org/10.5281/zenodo.15657017)
 **Step 4 — Integrate the SDK** ([GitHub](https://github.com/onestardao/WFGY))
 
 ---
@@ -57,6 +57,12 @@ Prompt examples: *TBD*
 _10 k ⭐ before 2025-08-01 unlocks WFGY 2.0._
 """
 
+# own softmax implementation
+def softmax_np(logits: np.ndarray) -> np.ndarray:
+    z = logits - np.max(logits)
+    e = np.exp(z)
+    return e / np.sum(e)
+
 # inference
 def run(prompt: str):
     p = prompt.strip()
@@ -75,13 +81,12 @@ def run(prompt: str):
     )
 
 def top5(logits):
-    p = softmax(logits)
-    idx = p.argsort()[-5:][::-1]
+    p_arr = softmax_np(logits)
+    idx = np.argsort(p_arr)[-5:][::-1]
     lines = []
     for i in idx:
         token = tok.decode(int(i)).strip()
-        prob = p[i]
-        # scientific notation, two decimals, e.g. 1.23e-04
+        prob = p_arr[i]
         lines.append("'{}': {:.2e}".format(token, prob))
     return "\n".join(lines)
 
@@ -115,5 +120,4 @@ with gr.Blocks(title="WFGY variance-gate demo") as demo:
     btn.click(run, prompt, [raw_box, mod_box, metrics, img])
 
 if __name__ == "__main__":
-
     demo.queue(default_concurrency_limit=2).launch()
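
The functional change in this commit is that the demo now computes softmax locally instead of importing it from wfgy_sdk.evaluator. Below is a minimal standalone sketch of that path, assuming only numpy, torch, and transformers; the WFGY-specific pieces (get_engine, compare_logits, plot_histogram) and the Gradio UI are omitted, and the prompt string is purely illustrative.

import numpy as np
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Same tiny CPU checkpoint the demo loads.
tok = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
mdl = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")

def softmax_np(logits: np.ndarray) -> np.ndarray:
    # Subtract the max before exponentiating so large logits cannot overflow.
    z = logits - np.max(logits)
    e = np.exp(z)
    return e / np.sum(e)

def top5(logits: np.ndarray) -> str:
    # Turn raw logits into probabilities and report the five most likely tokens.
    p_arr = softmax_np(logits)
    idx = np.argsort(p_arr)[-5:][::-1]
    lines = []
    for i in idx:
        token = tok.decode(int(i)).strip()
        prob = p_arr[i]
        # Scientific notation with two decimals, e.g. 1.23e-04.
        lines.append("'{}': {:.2e}".format(token, prob))
    return "\n".join(lines)

# Illustrative prompt; the Space feeds whatever the user types into run().
ids = tok("Hello world", return_tensors="pt").input_ids
with torch.no_grad():
    logits = mdl(ids).logits[0, -1].numpy()
print(top5(logits))

Subtracting np.max(logits) before np.exp is the standard numerically stable softmax formulation, which is presumably why the commit replaces the SDK import with this local helper.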