Update app.py
app.py CHANGED
@@ -88,7 +88,7 @@ if uploaded_file is not None:
     prediction_groups = pipeline.recognize([read_image])
     predictions = prediction_groups[0] # extract text list
     predictions = get_distance(predictions)
-
+
     # Set thresh higher for text further apart
     predictions = list(distinguish_rows(predictions, thresh=10))

@@ -106,7 +106,7 @@ if uploaded_file is not None:
     #st.write(sentance)

     input_text =sentance
-    print(
+    print(input_text)
     """inputs = tokenizer(text,padding = True, truncation = True, return_tensors='pt').to('cpu')
     outputs = model_2(**inputs)
     predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
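For context, a minimal sketch of the OCR-then-classify flow that app.py appears to implement, with the corrected print in place. The keras-ocr and transformers calls mirror the identifiers in the diff; the image path and model name below are placeholders, and app.py's own get_distance()/distinguish_rows() row-ordering helpers (not shown in this commit) are omitted.

# Sketch only, not the Space's actual code. Assumes keras-ocr, torch, and
# transformers are installed; "uploaded.png" and the SST-2 model name are
# placeholders standing in for the Space's uploaded file and model_2.
import keras_ocr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# OCR the uploaded image and pull out the recognized (word, box) pairs.
pipeline = keras_ocr.pipeline.Pipeline()
read_image = keras_ocr.tools.read("uploaded.png")
prediction_groups = pipeline.recognize([read_image])
predictions = prediction_groups[0]

# Join the recognized words into one string. app.py instead builds `sentance`
# from the rows returned by distinguish_rows(get_distance(...), thresh=10).
input_text = " ".join(word for word, box in predictions)
print(input_text)  # the print(...) call this commit completes

# Classify the recovered text with a Hugging Face sequence-classification model.
model_name = "distilbert-base-uncased-finetuned-sst-2-english"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(model_name)
model_2 = AutoModelForSequenceClassification.from_pretrained(model_name)

inputs = tokenizer(input_text, padding=True, truncation=True, return_tensors="pt").to("cpu")
outputs = model_2(**inputs)
probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
print(probs)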