m-ric (HF staff) committed
Commit a7a4e14
1 Parent(s): b20c42a

Update app.py

Files changed (1)
  1. app.py +14 -63
app.py CHANGED
@@ -1,22 +1,15 @@
-import gradio as gr
-
-
 STYLE = """
-@import url('https://fonts.googleapis.com/css2?family=Poppins:ital,wght@0,100;0,200;0,300;0,400;0,500;0,600;0,700;0,800;0,900;1,100;1,200;1,300;1,400;1,500;1,600;1,700;1,800;1,900&display=swap');
-* {
-    padding: 0px;
-    margin: 0px;
-    box-sizing: border-box;
-    font-size: 16px;
-}
 body {
     height: 100vh;
     width: 100vw;
     display: grid;
     align-items: center;
-    font-family: 'Poppins', sans-serif;
 }
 .tree {
+    padding: 0px;
+    margin: 0px;
+    box-sizing: border-box;
+    font-size: 16px;
     width: 100%;
     height: auto;
     text-align: center;
@@ -27,8 +20,7 @@ body {
     transition: .5s;
 }
 .tree li {
-    display: flex;
-    flex-direction:row;
+    display: inline-table;
     text-align: center;
     list-style-type: none;
     position: relative;
@@ -87,13 +79,6 @@ body {
     border-radius: 5px;
     transition: .5s;
 }
-.tree li a img {
-    width: 50px;
-    height: 50px;
-    margin-bottom: 10px !important;
-    border-radius: 100px;
-    margin: auto;
-}
 .tree li a span {
     border: 1px solid #ccc;
     border-radius: 5px;
@@ -122,56 +107,19 @@ tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
 model = AutoModelForCausalLM.from_pretrained("gpt2")
 tokenizer.pad_token_id = tokenizer.eos_token_id
 
-def display_top_k_tokens(scores, sequences, beam_indices):
-    display = "<div style='display: flex; flex-direction:row;'>"
-    for i, sequence in enumerate(sequences):
-        markdown_table = f"""<p>Sequence {i}: {tokenizer.batch_decode(sequence)}<p><br>
-        <table>
-        <tr>
-            <th><b>Token</b></th>
-            <th><b>Probability</b></th>
-        </tr>"""
-        for step, step_scores in enumerate(scores):
-            markdown_table += f"""
-            <tr>
-                <td><b>Step {step}</b></td>
-                <td>=====</td>
-            </tr>"""
-            current_beam = beam_indices[i, step]
-            chosen_token = sequences[i, step]
-            for token_idx in np.argsort(step_scores[current_beam, :])[-5:]:
-                if token_idx == chosen_token:
-                    markdown_table += f"""
-                    <tr style="background-color:red">
-                        <td>{tokenizer.decode([token_idx])}</td>
-                        <td>{step_scores[current_beam, token_idx]}</td>
-                    </tr>"""
-                else:
-                    markdown_table += f"""
-                    <tr>
-                        <td>{tokenizer.decode([token_idx])}</td>
-                        <td>{step_scores[current_beam, token_idx]}</td>
-                    </tr>"""
-        markdown_table += "</table>"
-        display += markdown_table
-    display += "</div>"
-    print(display)
-    return display
-
-
 def generate_html(token, node):
     """Recursively generate HTML for the tree."""
 
-    html_content = f" <ul> <a href='#'> <p> <b>{token}</b> </p> "
+    html_content = f" <li> <a href='#'> <span> <b>{token}</b> </span> "
     html_content += node["table"] if node["table"] is not None else ""
     html_content += "</a>"
     if len(node["children"].keys()) > 0:
-        html_content += "<li> "
+        html_content += "<ul> "
         for token, subnode in node["children"].items():
             html_content += generate_html(token, subnode)
-        html_content += "</li>"
+        html_content += "</ul>"
 
-    html_content += "</ul>"
+    html_content += "</li>"
 
     return html_content
 
@@ -202,7 +150,8 @@ def display_tree(scores, sequences, beam_indices):
     display = """<body>
     <div class="container">
     <div class="row">
-    <div class="tree">"""
+    <div class="tree">
+    <ul>"""
     sequences = sequences.cpu().numpy()
     print(tokenizer.batch_decode(sequences))
     original_tree = {"table": None, "children": {}}
@@ -230,6 +179,7 @@ def display_tree(scores, sequences, beam_indices):
     display += generate_html("Today is", original_tree)
 
     display += """
+    </ul>
     </div>
     </div>
     </div>
@@ -260,7 +210,8 @@ def get_tables(input_text, number_steps, number_beams):
         outputs.beam_indices[:, : -len(inputs)],
     )
     return tables
-
+
+import gradio as gr
 
 with gr.Blocks(
     theme=gr.themes.Soft(
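For context on the markup change in `generate_html`: after this commit each token node renders as an `<li>` whose children are wrapped in a nested `<ul>` (the old version emitted the `<ul>`/`<li>` nesting the other way round), and the token label moved from `<p>` to `<span>`, matching the `.tree li a span` CSS rule. A minimal, self-contained sketch of the reworked function applied to a made-up toy tree (in the app the tree is built from beam-search sequences inside `display_tree`):

```python
# Sketch of the reworked generate_html from this commit.
# The toy tree below is a made-up example input, not app data.

def generate_html(token, node):
    """Recursively generate HTML for the tree."""

    html_content = f" <li> <a href='#'> <span> <b>{token}</b> </span> "
    html_content += node["table"] if node["table"] is not None else ""
    html_content += "</a>"
    if len(node["children"].keys()) > 0:
        html_content += "<ul> "
        for token, subnode in node["children"].items():
            html_content += generate_html(token, subnode)
        html_content += "</ul>"

    html_content += "</li>"

    return html_content


# Hypothetical two-step tree: the prompt forks into two continuations.
toy_tree = {
    "table": None,
    "children": {
        " a": {"table": None, "children": {" great": {"table": None, "children": {}}}},
        " the": {"table": None, "children": {" first": {"table": None, "children": {}}}},
    },
}

# Each node becomes an <li> and its children sit in a nested <ul>,
# which is the structure the .tree CSS selectors style as a tree.
print(generate_html("Today is", toy_tree))
```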
 
 
 
 
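The `get_tables` hunk reads `outputs.beam_indices`, so the `scores`, `sequences`, and `beam_indices` fed to `display_tree` presumably come from `transformers`' `model.generate` run as beam search with `return_dict_in_generate=True` and `output_scores=True`. A hedged sketch of that call (the beam count, prompt, and token budget are made-up values, not necessarily the app's settings):

```python
# Sketch of how beam-search outputs like those used above can be produced.
import torch
from transformers import AutoModelForCausalLM, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer.pad_token_id = tokenizer.eos_token_id

inputs = tokenizer(["Today is"], return_tensors="pt")

with torch.no_grad():
    outputs = model.generate(
        **inputs,
        num_beams=3,                   # beam search, so beam_indices is populated
        num_return_sequences=3,
        max_new_tokens=4,
        return_dict_in_generate=True,  # return a structured output object
        output_scores=True,            # keep per-step scores
    )

# sequences:    (num_return_sequences, prompt_len + new_tokens) token ids
# scores:       one (num_beams * batch, vocab_size) tensor per generation step
# beam_indices: which beam each returned token was taken from at each step
print(outputs.sequences.shape, len(outputs.scores), outputs.beam_indices.shape)
```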