ejschwartz committed on
Commit 894ff3a · 1 Parent(s): e49ede1
Files changed (1)
  1. app.py +7 -7
app.py CHANGED
@@ -37,10 +37,10 @@ example = """{
 
 
 @spaces.GPU
-def infer(var_name, input):
-    line = json.loads(input)
-    first_token = line["output"].split(":")[0]
-    prompt = line["input"] + var_name + ":"
+def infer(var_name, code):
+    #line = json.loads(input)
+    #first_token = line["output"].split(":")[0]
+    prompt = code + var_name + ":"
 
     input_ids = tokenizer.encode(prompt, return_tensors="pt").cuda()[:, : 8192 - 1024]
     output = vardecoder_model.generate(
@@ -59,15 +59,15 @@ def infer(var_name, input):
         clean_up_tokenization_spaces=True,
     )
 
-    output = first_token + ":" + output
+    output = var_name + ":" + output
     return output
 
 
 demo = gr.Interface(
     fn=infer,
     inputs=[
-        gr.Text(label="First Token", placeholder="a1"),
-        gr.Textbox(lines=10, placeholder=example),
+        gr.Text(label="First Token", value="a1"),
+        gr.Textbox(lines=10, value=json.loads(example)['input']),
     ],
     outputs=gr.Text(label="Var Decoder Output"),
 )
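
Net effect of the commit: infer now takes the decompiled code directly instead of a JSON dataset record (the json.loads parsing is commented out), the prediction is prefixed with the queried variable name rather than the first token of the reference output, and the Gradio widgets pre-fill working defaults via value= instead of showing them only as placeholders. Below is a minimal, self-contained sketch of how the revised pieces fit together; everything outside the diff hunks (imports, the checkpoint name, the example JSON string, and the generation/decoding arguments) is an assumption, not taken from the repository.

import json

import gradio as gr
import spaces  # Hugging Face Spaces ZeroGPU decorator
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder checkpoint: the real model/tokenizer setup is outside the diff hunks.
MODEL_ID = "your-org/vardecoder"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
vardecoder_model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID, torch_dtype=torch.bfloat16
).cuda()

# Hypothetical example record with the "input"/"output" keys the old code read;
# the actual example string in app.py is truncated in this diff.
example = """{"input": "int __fastcall sub_4011A0(int a1, int a2) { return a1 + a2; }", "output": "a1: count"}"""


@spaces.GPU
def infer(var_name, code):
    # Post-commit behavior: build the prompt directly from the pasted code,
    # with no JSON parsing of a dataset line.
    prompt = code + var_name + ":"

    input_ids = tokenizer.encode(prompt, return_tensors="pt").cuda()[:, : 8192 - 1024]
    output = vardecoder_model.generate(
        input_ids,
        max_new_tokens=1024,  # assumed; generation kwargs are outside the shown hunks
        pad_token_id=tokenizer.eos_token_id,
    )
    output = tokenizer.decode(
        output[0][input_ids.size(1):],
        skip_special_tokens=True,
        clean_up_tokenization_spaces=True,
    )

    # Prefix the prediction with the queried variable name (new behavior).
    output = var_name + ":" + output
    return output


demo = gr.Interface(
    fn=infer,
    inputs=[
        gr.Text(label="First Token", value="a1"),
        gr.Textbox(lines=10, value=json.loads(example)["input"]),
    ],
    outputs=gr.Text(label="Var Decoder Output"),
)
demo.launch()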