carpelan committed
Commit 1e41501 · 1 Parent(s): 8d18f89

testing with a sidebar

.gradio_cache/examples/belfort_snippet.jpg ADDED

Git LFS Details

  • SHA256: be2460862121c96a67357c9ea5e868a1259759da2e07b5faaa2bda035aba0b30
  • Pointer size: 130 Bytes
  • Size of remote file: 61.2 kB
.gradio_cache/examples/iam.png ADDED

Git LFS Details

  • SHA256: 14d659a47113b4cd7be2f44dd9acfecf7254433b58d83dbfc16c6b27fdcd3180
  • Pointer size: 132 Bytes
  • Size of remote file: 3.1 MB
.gradio_cache/examples/manusscript_kb.png ADDED

Git LFS Details

  • SHA256: 7c60db5e32b1ca61c1b7050a55885ed226b0bfab6dde04c4432bc05c0ca0b15f
  • Pointer size: 132 Bytes
  • Size of remote file: 1.39 MB
.gradio_cache/examples/norhand_fmgh040_4.jpg ADDED

Git LFS Details

  • SHA256: f97efcc81d00ed10f20d161b31ff34b574593811ddb6e13c1ae1942e120a24df
  • Pointer size: 131 Bytes
  • Size of remote file: 458 kB
app/assets/templates/{nested.yaml → nested_swe_ra.yaml} RENAMED
@@ -14,4 +14,6 @@ steps:
     model: TrOCR
     model_settings:
       model: Riksarkivet/trocr-base-handwritten-hist-swe-2
+    generation_settings:
+      batch_size: 4
 - step: ReadingOrderMarginalia
app/assets/templates/simple_eng_modern.yaml ADDED
@@ -0,0 +1,12 @@
+steps:
+- step: Segmentation
+  settings:
+    model: yolo
+    model_settings:
+      model: Riksarkivet/yolov9-lines-within-regions-1
+- step: TextRecognition
+  settings:
+    model: TrOCR
+    model_settings:
+      model: microsoft/trocr-base-handwritten
+- step: OrderLines
app/assets/templates/simple_medival.yaml ADDED
@@ -0,0 +1,12 @@
+steps:
+- step: Segmentation
+  settings:
+    model: yolo
+    model_settings:
+      model: Riksarkivet/yolov9-lines-within-regions-1
+- step: TextRecognition
+  settings:
+    model: TrOCR
+    model_settings:
+      model: medieval-data/trocr-medieval-base
+- step: OrderLines
app/assets/templates/simple_nordhand.yaml ADDED
@@ -0,0 +1,12 @@
+steps:
+- step: Segmentation
+  settings:
+    model: yolo
+    model_settings:
+      model: Riksarkivet/yolov9-lines-within-regions-1
+- step: TextRecognition
+  settings:
+    model: TrOCR
+    model_settings:
+      model: Sprakbanken/TrOCR-norhand-v3
+- step: OrderLines
app/assets/templates/simple_pylaia_french.yaml ADDED
@@ -0,0 +1,12 @@
+steps:
+- step: Segmentation
+  settings:
+    model: yolo
+    model_settings:
+      model: Riksarkivet/yolov9-lines-within-regions-1
+- step: TextRecognition
+  settings:
+    model: Pylaia
+    model_settings:
+      model: Teklia/pylaia-belfort
+- step: OrderLines
app/assets/templates/{simple.yaml → simple_swe_ra.yaml} RENAMED
File without changes
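
All of the simple_*.yaml templates above share the same three-step shape (Segmentation → TextRecognition → OrderLines) and differ only in the text-recognition model. A minimal sketch of how such a template can be turned into pipeline steps, assuming PyYAML is installed and the repository root is the working directory; init_step is the same helper that app/tabs/submit.py imports from htrflow.pipeline.steps:

import yaml

from htrflow.pipeline.steps import init_step  # same helper used in app/tabs/submit.py

# Parse one of the templates added in this commit.
with open("app/assets/templates/simple_eng_modern.yaml") as f:
    config = yaml.safe_load(f)

print([step["step"] for step in config["steps"]])
# ['Segmentation', 'TextRecognition', 'OrderLines']

# Instantiate the steps the same way PipelineWithProgress.from_config does below.
steps = [init_step(step["step"], step.get("settings", {})) for step in config["steps"]]
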
app/content/main_sub_title_hum.md ADDED
@@ -0,0 +1,7 @@
+<a href="https://www.huminfra.se/">
+  <center>
+    <img src="https://www.umu.se/globalassets/qbank/huminfra-till-webben-48568w600h600.jpg"
+         width="60%"
+         style="margin-top: 0.3rem; border-radius: 0.5rem;" />
+  </center>
+</a>
app/content/{main_sub_title.md → main_sub_title_ra.md} RENAMED
@@ -1,3 +1,3 @@
 <a href="https://huggingface.co/Riksarkivet">
-    <img src="https://media.githubusercontent.com/media/AI-Riksarkivet/htrflow_app/refs/heads/main/app/assets/images/fav-removebg-preview.png" width="17%" align="right" margin-right="100" />
+    <img src="https://media.githubusercontent.com/media/AI-Riksarkivet/htrflow_app/refs/heads/main/app/assets/images/fav-removebg-preview.png" width="17%" align="right" />
 </a>
app/content/main_title.md CHANGED
@@ -1 +1 @@
-<h1><center> HTRflow 🔍 App </center></h1>
+<h1><center> HTRflow 🔍</center></h1>
app/content/sidebar.md ADDED
@@ -0,0 +1,15 @@
+## HTRflow Demo
+
+This web application is developed by the **National Archives of Sweden** as part of [Huminfra](https://www.huminfra.se/).
+
+With this demo, users can explore step by step how **AI transforms historical manuscripts into digital text**. Behind the scenes, it runs our open-source **Python package** [HTRflow](https://ai-riksarkivet.github.io/htrflow/latest). Both the app and HTRflow’s code are fully open source.
+
+### How It Works
+The demo follows a simple three-step workflow:
+
+1. **Upload** – Upload your own image and run HTRflow on it.
+2. **Results** – View the transcribed text generated by HTRflow.
+3. **Export** – Select your preferred format and download the results.
+
+📌 **Note:** This is a **demo application** and is not intended for production use.
+
app/gradio_config.py CHANGED
@@ -4,12 +4,13 @@ theme = gr.themes.Default(
     primary_hue="blue",
     secondary_hue="blue",
     neutral_hue="slate",
-    # font=[
-    #     gr.themes.GoogleFont("Open Sans"),
-    #     "ui-sans-serif",
-    #     "system-ui",
-    #     "sans-serif",
-    # ],
+    font=[
+        gr.themes.GoogleFont("Open Sans"),
+        "ui-sans-serif",
+        "system-ui",
+        "sans-serif",
+    ],
+    # text_size="md",
 )

 css = """
@@ -25,14 +26,6 @@ css = """
     top: 20px;
 }

-.transcription-column {
-    height: 100vh;
-}
-
-/* this is needed in order to make the transcription sticky */
-.app {
-    overflow: visible;
-}

 /* style of textline svg elements */
 .textline {
@@ -73,7 +66,6 @@ hr.region-divider {
 .pipeline-info {
     padding: 0 0 0 2px;
     font-weight: var(--block-info-text-weight);
-    font-size: var(--block-info-text-size);
     color: var(--block-info-text-color);
 }

app/main.py CHANGED
@@ -57,23 +57,28 @@ matomo = """
 <!-- End Matomo Code -->
 """

-with gr.Blocks(title="HTRflow", theme=theme, css=css, head=matomo) as demo:
-    with gr.Row():
-        with gr.Column(scale=1):
-            pass
-        with gr.Column(scale=2):
-            gr.Markdown(load_markdown(None, "main_title"))
-        with gr.Column(scale=1):
-            gr.Markdown(load_markdown(None, "main_sub_title"))
+with gr.Blocks(
+    title="HTRflow",
+    theme=theme,
+    css=css,
+    head=matomo,
+) as demo:
+
+    gr.Markdown(load_markdown(None, "main_title"))
+
+    with gr.Sidebar(label="Menu"):
+        gr.Markdown(load_markdown(None, "main_sub_title_hum"))
+        gr.Markdown(load_markdown(None, "sidebar"))

     with gr.Tabs(elem_classes="top-navbar") as navbar:
-        with gr.Tab(label="Upload") as tab_submit:
+        with gr.Tab(label="1. Upload") as tab_submit:
             submit.render()
-        with gr.Tab(label="Result", interactive=False, id="result") as tab_visualizer:
+        with gr.Tab(
+            label="2. Result", interactive=False, id="result"
+        ) as tab_visualizer:
             visualizer.render()

-        with gr.Tab(label="Export", interactive=False) as tab_export:
+        with gr.Tab(label="3. Export", interactive=False) as tab_export:
             export.render()

     @demo.load()
@@ -85,7 +90,9 @@ with gr.Blocks(title="HTRflow", theme=theme, css=css, head=matomo) as demo:
         state_value = input_value
         return state_value if state_value is not None else gr.skip()

-    collection_submit_state.change(activate_tab, collection_submit_state, tab_visualizer)
+    collection_submit_state.change(
+        activate_tab, collection_submit_state, tab_visualizer
+    )
     collection_submit_state.change(activate_tab, collection_submit_state, tab_export)
     collection_submit_state.change(lambda: gr.Tabs(selected="result"), outputs=navbar)

@@ -104,4 +111,6 @@ with gr.Blocks(title="HTRflow", theme=theme, css=css, head=matomo) as demo:
 demo.queue()

 if __name__ == "__main__":
-    demo.launch(server_name="0.0.0.0", server_port=7860, enable_monitoring=True, show_api=False)
+    demo.launch(
+        server_name="0.0.0.0", server_port=7860, enable_monitoring=True, show_api=False
+    )
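
The headline change here is the sidebar layout in app/main.py. A stripped-down, self-contained sketch of that layout, using only the Gradio calls that appear in the diff; the markdown loading and the rendered submit/visualizer/export tabs of the real app are replaced with placeholder strings:

import gradio as gr

# Title on top, a sidebar with the demo description, and three numbered tabs.
with gr.Blocks(title="HTRflow") as demo:
    gr.Markdown("<h1><center> HTRflow 🔍</center></h1>")

    with gr.Sidebar(label="Menu"):
        gr.Markdown("## HTRflow Demo")

    with gr.Tabs(elem_classes="top-navbar") as navbar:
        with gr.Tab(label="1. Upload"):
            gr.Markdown("Upload tab content")
        with gr.Tab(label="2. Result", interactive=False, id="result"):
            gr.Markdown("Result tab content")
        with gr.Tab(label="3. Export", interactive=False):
            gr.Markdown("Export tab content")

if __name__ == "__main__":
    demo.launch()
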
app/pipelines.py ADDED
@@ -0,0 +1,41 @@
+PIPELINES = {
+    "Swedish - Spreads": {
+        "file": "app/assets/templates/nested_swe_ra.yaml",
+        "description": "This pipeline works well on handwritten historic documents written in Swedish with multiple text regions. The model is developed by <a href='https://huggingface.co/Riksarkivet'>the National Archives of Sweden</a>.",
+        "examples": [
+            "R0003364_00005.jpg",
+            "30002027_00008.jpg",
+            "A0070302_00201.jpg",
+        ],
+    },
+    "Swedish - Single page and snippets": {
+        "file": "app/assets/templates/simple_swe_ra.yaml",
+        "description": "This pipeline works well on handwritten historic letters and other documents written in Swedish with only one text region. The model is developed by <a href='https://huggingface.co/Riksarkivet'>the National Archives of Sweden</a>.",
+        "examples": [
+            "451511_1512_01.jpg",
+            "A0062408_00006.jpg",
+            "C0000546_00085_crop.png",
+            "A0073477_00025.jpg",
+        ],
+    },
+    "Norwegian - Single page and snippets": {
+        "file": "app/assets/templates/simple_nordhand.yaml",
+        "description": "This pipeline works well on handwritten historic letters and other documents written in Norwegian with only one text region. The model is developed by the <a href='https://huggingface.co/Sprakbanken/TrOCR-norhand-v3'>Language Bank</a> at The National Library of Norway.",
+        "examples": ["norhand_fmgh040_4.jpg"],
+    },
+    "French - Single page and snippets": {
+        "file": "app/assets/templates/simple_pylaia_french.yaml",
+        "description": "This pipeline works well on handwritten historic letters and other documents written in French. The model is developed by <a href='https://huggingface.co/Teklia/pylaia-belfort'>Teklia</a>.",
+        "examples": ["belfort_snippet.jpg"],
+    },
+    "Medival - Single page and snippets": {
+        "file": "app/assets/templates/simple_medival.yaml",
+        "description": "This pipeline works well for medieval scripts written as single-page running text. It uses a base model from <a href='https://huggingface.co/medieval-data'>Medieval Data</a>, but other models can be selected from here: <a href='https://huggingface.co/collections/medieval-data/trocr-medieval-htr-66871faba03abfbb1b66ab69'>Medieval Models</a>.",
+        "examples": ["manusscript_kb.png"],
+    },
+    "English - Single page and snippets": {
+        "file": "app/assets/templates/simple_eng_modern.yaml",
+        "description": "This pipeline works well for English single-page running text. It uses a base model from <a href='https://huggingface.co/microsoft/trocr-base-handwritten'>Microsoft</a>.",
+        "examples": ["iam.png"],
+    },
+}
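
A small sketch of how a PIPELINES entry resolves to its template file and example images; the keys file, description and examples are the ones defined above, while the helper function itself is illustrative and not part of this commit:

from app.pipelines import PIPELINES

def pipeline_info(name: str) -> tuple[str, str, list[str]]:
    """Return template path, description, and example images for a pipeline choice."""
    entry = PIPELINES[name]
    return entry["file"], entry["description"], entry["examples"]

template, description, examples = pipeline_info("French - Single page and snippets")
# template -> "app/assets/templates/simple_pylaia_french.yaml"
# examples -> ["belfort_snippet.jpg"]
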
app/tabs/export.py CHANGED
@@ -70,21 +70,20 @@ with gr.Blocks() as export:
         gr.Markdown("Choose file format for export.")
         with gr.Row():
             with gr.Column(scale=1):
-                with gr.Group():
-                    export_file_format = gr.Dropdown(
-                        value=DEFAULT_C,
-                        label="File format",
-                        info="Select export format(s)",
-                        choices=CHOICES,
-                        multiselect=True,
-                        interactive=True,
-                    )
-                download_files = gr.Files(label="Download files", interactive=False)
-            with gr.Column(scale=1):
-                pass
+                export_file_format = gr.Dropdown(
+                    value=DEFAULT_C,
+                    label="File format",
+                    info="Select export format(s)",
+                    choices=CHOICES,
+                    multiselect=True,
+                    interactive=True,
+                )
+                export_button = gr.Button(
+                    "Export", scale=0, min_width=200, variant="primary"
+                )

-        with gr.Row():
-            export_button = gr.Button("Export", scale=0, min_width=200, variant="primary")
+            with gr.Column(scale=1):
+                download_files = gr.Files(label="Download files", interactive=False)

     export_button.click(
         fn=export_files,
app/tabs/submit.py CHANGED
@@ -9,34 +9,14 @@ from htrflow.pipeline.pipeline import Pipeline
 from htrflow.pipeline.steps import init_step
 from htrflow.volume.volume import Collection

+from app.main import load_markdown
+from app.pipelines import PIPELINES
+
 logger = logging.getLogger(__name__)

 # Max number of images a user can upload at once
 MAX_IMAGES = int(os.environ.get("MAX_IMAGES", 5))

-# Example pipelines
-PIPELINES = {
-    "Running text (Swedish)": {
-        "file": "app/assets/templates/nested.yaml",
-        "description": "This pipeline works well on documents with multiple text regions.",
-        "examples": [
-            "R0003364_00005.jpg",
-            "30002027_00008.jpg",
-            "A0070302_00201.jpg",
-        ],
-    },
-    "Letters and snippets (Swedish)": {
-        "file": "app/assets/templates/simple.yaml",
-        "description": "This pipeline works well on letters and other documents with only one text region.",
-        "examples": [
-            "451511_1512_01.jpg",
-            "A0062408_00006.jpg",
-            "C0000546_00085_crop.png",
-            "A0073477_00025.jpg",
-        ],
-    },
-}
-
 # Setup the cache directory to point to the directory where the example images
 # are located. The images must lay in the cache directory because otherwise they
 # have to be reuploaded when drag-and-dropped to the input image widget.
@@ -52,7 +32,12 @@ class PipelineWithProgress(Pipeline):
     @classmethod
     def from_config(cls, config: dict[str, str]):
         """Init pipeline from config, ensuring the correct subclass is instantiated."""
-        return cls([init_step(step["step"], step.get("settings", {})) for step in config["steps"]])
+        return cls(
+            [
+                init_step(step["step"], step.get("settings", {}))
+                for step in config["steps"]
+            ]
+        )

     def run(self, collection, start=0, progress=None):
         """
@@ -110,7 +95,9 @@ def run_htrflow(custom_template_yaml, batch_image_gallery, progress=gr.Progress(

     pipe = PipelineWithProgress.from_config(config)

-    gr.Info(f"HTRflow: processing {len(images)} {'image' if len(images) == 1 else 'images'}.")
+    gr.Info(
+        f"HTRflow: processing {len(images)} {'image' if len(images) == 1 else 'images'}."
+    )
     progress(0.1, desc="HTRflow: Processing")

     collection.label = "demo_output"
@@ -178,22 +165,29 @@ def get_image_from_image_id(image_id):


 with gr.Blocks() as submit:
-    gr.Markdown("# Upload")
-    gr.Markdown("Select or upload the image you want to transcribe. You can upload up to five images at a time.")
+
+    with gr.Row():
+        with gr.Column():
+            gr.Markdown("# Upload")
+            gr.Markdown(
+                "Select or upload the image you want to transcribe. You can upload up to five images at a time. \n "
+                "Alternatively, you can choose from the example images in the gallery or use image IDs."
+            )

     collection_submit_state = gr.State()
     with gr.Group():
-        with gr.Row(equal_height=True):
+        with gr.Row(
+            equal_height=True,
+        ):
             with gr.Column(scale=5):
                 batch_image_gallery = gr.Gallery(
                     file_types=["image"],
                     label="Image to transcribe",
                     interactive=True,
                     object_fit="scale-down",
-                    scale=3,
                 )

-            with gr.Column(scale=2):
+            with gr.Column(scale=3):
                 examples = gr.Gallery(
                     all_example_images(),
                     label="Examples",
@@ -201,6 +195,7 @@ with gr.Blocks() as submit:
                     allow_preview=False,
                     object_fit="scale-down",
                     min_width=250,
+                    columns=3,
                 )
                 image_iiif_url = gr.Textbox(
                     label="Upload by image ID",
@@ -215,7 +210,9 @@ with gr.Blocks() as submit:
         with gr.Column(variant="panel", elem_classes="pipeline-panel"):
             gr.Markdown("## Settings")
             gr.Markdown(
-                "Select a pipeline that suits your image. You can edit the pipeline if you need to customize it further."
+                "Select a pipeline that best matches your image. The pipeline determines the processing workflow optimized for different handwritten text recognition tasks. "
+                "If you select an example image, a suitable pipeline will be preselected automatically. However, you can edit the pipeline if you need to customize it further. "
+                "Choosing the right pipeline significantly improves transcription quality."
             )

             with gr.Row():
@@ -286,10 +283,13 @@ with gr.Blocks() as submit:
         image_ids = image_ids.split(",")

         return [
-            f"https://lbiiif.riksarkivet.se/arkis!{image_id.strip()}/full/max/0/default.jpg" for image_id in image_ids
+            f"https://lbiiif.riksarkivet.se/arkis!{image_id.strip()}/full/max/0/default.jpg"
+            for image_id in image_ids
         ]

-    image_iiif_url.submit(fn=return_iiif_url, inputs=image_iiif_url, outputs=batch_image_gallery)
+    image_iiif_url.submit(
+        fn=return_iiif_url, inputs=image_iiif_url, outputs=batch_image_gallery
+    )

     run_button.click(
         fn=run_htrflow,
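
Putting the submit-tab pieces together, a rough sketch of the path run_htrflow follows, based on the calls visible in this diff (parsed YAML config → PipelineWithProgress.from_config → run on a Collection). The Collection(images) constructor signature is an assumption; only the import and the label assignment appear in the diff:

import yaml

from htrflow.volume.volume import Collection

from app.tabs.submit import PipelineWithProgress

def transcribe(images: list[str], template_path: str) -> Collection:
    # Parse the selected template and build the pipeline, as from_config does above.
    with open(template_path) as f:
        config = yaml.safe_load(f)
    pipe = PipelineWithProgress.from_config(config)

    collection = Collection(images)  # assumed constructor, not shown in this diff
    collection.label = "demo_output"

    pipe.run(collection)
    return collection
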
app/tabs/visualizer.py CHANGED
@@ -59,32 +59,51 @@ with gr.Blocks() as visualizer:
     with gr.Row():
         # Annotated image panel
         with gr.Column(scale=2):
-            gr.Markdown("## Annotated image")
-            image = gr.HTML(padding=False, elem_classes="svg-image", container=True)
+            image = gr.HTML(
+                label="Annotated image",
+                padding=False,
+                elem_classes="svg-image",
+                container=True,
+                max_height="70vh",
+                min_height="70vh",
+                show_label=True,
+            )

             image_caption = gr.Markdown(elem_classes="button-group-viz")
             with gr.Row(elem_classes="button-group-viz"):
-                left = gr.Button("← Previous", visible=False, interactive=False, scale=0)
+                left = gr.Button(
+                    "← Previous", visible=False, interactive=False, scale=0
+                )
                 right = gr.Button("Next →", visible=False, scale=0)

         # Transcription panel
         with gr.Column(scale=1, elem_classes="transcription-column"):
-            gr.Markdown("## Transcription")
-            transcription = gr.HTML(elem_classes="transcription", container=True, max_height="60vh")
+            transcription = gr.HTML(
+                label="Transcription",
+                show_label=True,
+                elem_classes="transcription",
+                container=True,
+                max_height="70vh",
+                min_height="70vh",
+            )

         collection = gr.State()
         current_page_index = gr.State(0)

     # Wiring of navigation buttons
     left.click(left_button_click, current_page_index, current_page_index)
-    right.click(right_button_click, [collection, current_page_index], current_page_index)
+    right.click(
+        right_button_click, [collection, current_page_index], current_page_index
+    )

     # Updates on collection change:
     # - update the view
     # - reset the page index (always start on page 0)
     # - toggle visibility of navigation buttons (don't show them for single pages)
     # - update the image caption
-    collection.change(render_image, inputs=[collection, current_page_index], outputs=image)
+    collection.change(
+        render_image, inputs=[collection, current_page_index], outputs=image
+    )
     collection.change(
         render_transcription,
         inputs=[collection, current_page_index],
@@ -103,14 +122,18 @@ with gr.Blocks() as visualizer:
     # - update the view
     # - activate/deactivate buttons
    # - update the image caption
-    current_page_index.change(render_image, inputs=[collection, current_page_index], outputs=image)
+    current_page_index.change(
+        render_image, inputs=[collection, current_page_index], outputs=image
+    )
     current_page_index.change(
         render_transcription,
         inputs=[collection, current_page_index],
         outputs=transcription,
     )
     current_page_index.change(activate_left_button, current_page_index, left)
-    current_page_index.change(activate_right_button, [collection, current_page_index], right)
+    current_page_index.change(
+        activate_right_button, [collection, current_page_index], right
+    )
     current_page_index.change(
         update_image_caption,
         inputs=[collection, current_page_index],
pyproject.toml CHANGED
@@ -17,13 +17,12 @@ classifiers = [
 requires-python = ">=3.10,<3.13"

 dependencies = [
-    "htrflow==0.2.0",
-    "gradio>=5.15.0",
-    "datasets>=3.2.0",
-    "pandas>=2.2.3",
+    "htrflow==0.2.5",
+    "gradio>=5.16.0",
     "tqdm>=4.67.1",
-    "pillow>=11.1.0",
     "gradio-modal>=0.0.4",
+    "dill>=0.3.9",
+    "pylaia>=1.1.2",
 ]

 [project.urls]
@@ -32,24 +31,11 @@ Repository = "https://github.com/AI-Riksarkivet/htrflow-app"

 [tool.uv]
 dev-dependencies = [
-    "pytest >=8.0.1",
     "python-dotenv >=1.0.1",
     "ruff >=0.6.2",
     "uv>=0.4.12",
 ]

-# [project.optional-dependencies]
-# openmmlab = [
-#     "openmim==0.3.9",
-#     "mmengine==0.7.4",
-#     "mmcv==2.0.1",
-#     "mmdet==3.0.0",
-#     "mmocr==1.0.0"
-# ]
-# teklia = [
-#     "pylaia==1.1.2"
-# ]
-

 [build-system]
 requires = ["hatchling"]
@@ -71,5 +57,5 @@ line-length = 119
 target-version = "py310"

 [tool.ruff.lint]
-ignore = ["C901", "E741", "W605"]
+ignore = ["C901", "E741", "W605", "E501"]
 select = ["C", "E", "F", "I", "W"]
uv.lock CHANGED
The diff for this file is too large to render. See raw diff