Spaces — Running on L4

Update app.py — Browse files

app.py (CHANGED)
Before:
@@ -14,9 +14,9 @@ from examples import examples as input_examples
 14    from nuextract_logging import log_event
 15
 16
 17  - MAX_INPUT_SIZE =
 18  - MAX_NEW_TOKENS =
 19  - MAX_WINDOW_SIZE =
 20
 21    markdown_description = """
 22    <!DOCTYPE html>
(NOTE: the values on the removed lines 17–19 were truncated in this page capture.)
Before:
@@ -139,13 +139,13 @@ model = AutoModelForCausalLM.from_pretrained(model_name,
 139   tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=auth_token)
 140   model.eval()
 141
 142 - def gradio_interface_function(template, text, is_example):
 143       if len(tokenizer.tokenize(text)) > MAX_INPUT_SIZE:
 144           yield "", "Input text too long for space. Download model to use unrestricted.", ""
 145           return # End the function since there was an error
 146
 147       # Initialize the sliding window prediction process
 148 -     prediction_generator = sliding_window_prediction(template, text, model, tokenizer, window_size=
 149
 150       # Iterate over the generator to return values at each step
 151       for progress, full_pred, html_content in prediction_generator:
(NOTE: the tail of removed line 148 was truncated in this page capture.)
Before:
@@ -163,6 +163,7 @@ iface = gr.Interface(
 163       inputs=[
 164           gr.Textbox(lines=2, placeholder="Enter Template here...", label="Template"),
 165           gr.Textbox(lines=2, placeholder="Enter input Text here...", label="Input Text"),
 166           gr.Checkbox(label="Is Example?", visible=False),
 167       ],
 168       outputs=[
After (same hunk, -14,9 +14,9):
 14    from nuextract_logging import log_event
 15
 16
 17  + MAX_INPUT_SIZE = 100_000
 18  + MAX_NEW_TOKENS = 8_000
 19  + MAX_WINDOW_SIZE = 1_000
 20
 21    markdown_description = """
 22    <!DOCTYPE html>
After (same hunk, -139,13 +139,13):
 139   tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=auth_token)
 140   model.eval()
 141
 142 + def gradio_interface_function(template, text, size, is_example):
 143       if len(tokenizer.tokenize(text)) > MAX_INPUT_SIZE:
 144           yield "", "Input text too long for space. Download model to use unrestricted.", ""
 145           return # End the function since there was an error
 146
 147       # Initialize the sliding window prediction process
 148 +     prediction_generator = sliding_window_prediction(template, text, model, tokenizer, window_size=size)
 149
 150       # Iterate over the generator to return values at each step
 151       for progress, full_pred, html_content in prediction_generator:
After (same hunk, -163,6 +163,7):
 163       inputs=[
 164           gr.Textbox(lines=2, placeholder="Enter Template here...", label="Template"),
 165           gr.Textbox(lines=2, placeholder="Enter input Text here...", label="Input Text"),
 166 +         gr.Textbox(lines=2, placeholder="Enter windows size here...", label="Size"),
 167           gr.Checkbox(label="Is Example?", visible=False),
 168       ],
 169       outputs=[