ejschwartz committed on
Commit
0d90edb
·
1 Parent(s): 2e5fb84

Try examples again

Browse files
Files changed (1) hide show
  1. app.py +9 -5
app.py CHANGED
@@ -29,7 +29,6 @@ and I reused some of their own code to do this.
29
  ## Todo
30
 
31
  * Add field decoding (probably needs Docker)
32
- * Add examples
33
 
34
  """
35
 
@@ -56,7 +55,10 @@ v5 = a3;
56
  return sub_411142(a1, a2, &v4);
57
  }"""
58
 
59
- examples = open("examples.txt", "r").readlines()
 
 
 
60
 
61
  @spaces.GPU
62
  def infer(code):
@@ -83,7 +85,9 @@ def infer(code):
83
 
84
  print(f"Prompt:\n{var_prompt}")
85
 
86
- input_ids = tokenizer.encode(var_prompt, return_tensors="pt").cuda()[:, : 8192 - 1024]
 
 
87
  var_output = vardecoder_model.generate(
88
  input_ids=input_ids,
89
  max_new_tokens=1024,
@@ -127,10 +131,10 @@ demo = gr.Interface(
127
  ],
128
  outputs=[
129
  gr.Text(label="Var Decoder Output"),
130
- #gr.Text(label="Field Decoder Output"),
131
  gr.Text(label="Generated Variable List"),
132
  ],
133
  description=description,
134
- examples=examples
135
  )
136
  demo.launch()
 
29
  ## Todo
30
 
31
  * Add field decoding (probably needs Docker)
 
32
 
33
  """
34
 
 
55
  return sub_411142(a1, a2, &v4);
56
  }"""
57
 
58
+ examples = [
59
+ ex.encode().decode("unicode_escape") for ex in open("examples.txt", "r").readlines()
60
+ ]
61
+
62
 
63
  @spaces.GPU
64
  def infer(code):
 
85
 
86
  print(f"Prompt:\n{var_prompt}")
87
 
88
+ input_ids = tokenizer.encode(var_prompt, return_tensors="pt").cuda()[
89
+ :, : 8192 - 1024
90
+ ]
91
  var_output = vardecoder_model.generate(
92
  input_ids=input_ids,
93
  max_new_tokens=1024,
 
131
  ],
132
  outputs=[
133
  gr.Text(label="Var Decoder Output"),
134
+ # gr.Text(label="Field Decoder Output"),
135
  gr.Text(label="Generated Variable List"),
136
  ],
137
  description=description,
138
+ examples=examples,
139
  )
140
  demo.launch()