ivelin committed on
Commit ba6e8f4 · 1 Parent(s): 66655db

fix: use a well trained checkpoint on sample screenshots used in playspace

Files changed (1): app.py +1 -4
app.py CHANGED

@@ -6,7 +6,7 @@ import torch
  import html
  from transformers import DonutProcessor, VisionEncoderDecoderModel

- pretrained_repo_name = "ivelin/donut-refexp-draft@116838e7868170d4ecc682aca4afc317b08e6009"
+ pretrained_repo_name = "ivelin/donut-refexp-draft@commit/116838e7868170d4ecc682aca4afc317b08e6009"

  processor = DonutProcessor.from_pretrained(pretrained_repo_name)
  model = VisionEncoderDecoderModel.from_pretrained(pretrained_repo_name)

@@ -132,9 +132,6 @@ examples = [["example_1.jpg", "select the setting icon from top right corner"],
  ["example_3.jpg", "select the first column second image"],
  ["example_3.jpg", "select the bottom right image"],
  ["example_3.jpg", "select the second row second image"],
-
-
-
  ]

  demo = gr.Interface(fn=process_refexp,
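
Both the old and new lines pin the checkpoint by embedding a commit hash in the repo name string. As a minimal sketch of an alternative (not the approach taken in this commit), transformers' from_pretrained methods also accept an explicit revision argument, so the same repo and commit hash (copied from the diff above) could be pinned like this:

# Sketch: pin the checkpoint via the explicit `revision` argument instead of
# embedding the commit hash in the repo id string.
# Assumption: this is an equivalent way to load the same snapshot, shown for
# illustration only; it is not what this commit does.
from transformers import DonutProcessor, VisionEncoderDecoderModel

pretrained_repo_name = "ivelin/donut-refexp-draft"
checkpoint_revision = "116838e7868170d4ecc682aca4afc317b08e6009"  # commit hash from the diff

processor = DonutProcessor.from_pretrained(pretrained_repo_name, revision=checkpoint_revision)
model = VisionEncoderDecoderModel.from_pretrained(pretrained_repo_name, revision=checkpoint_revision)

Keeping the revision in a separate variable leaves the plain repo id reusable elsewhere in the app while still loading a fixed, reproducible snapshot.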