bug in config
- .dockerignore +1 -3
- helper/gradio_config.py +5 -5
- requirements.txt +13 -13
- test_api.ipynb +1 -70
.dockerignore
CHANGED
@@ -6,8 +6,6 @@ __pycache__
 .Python
 env
 .env
-Makefile
 page_txt.txt
 page_xml.xml
-
-helper/text/videos/
+src/tests/
helper/gradio_config.py
CHANGED
@@ -92,11 +92,6 @@ class GradioConfig:
         return f"""
     function monitorButtonHover() {{
 
-        gradioURL = window.location.href
-        if (!gradioURL.endsWith('?__theme=dark')) {{
-            window.location.replace(gradioURL + '?__theme=dark');
-        }}
-
         const buttons = document.querySelectorAll('{button_ids}');
         buttons.forEach(function(button) {{
             button.addEventListener('mouseenter', function() {{
@@ -110,6 +105,11 @@ class GradioConfig:
     }}
     """
 
+    # gradioURL = window.location.href
+    # if (!gradioURL.endsWith('?__theme=dark')) {{
+    #     window.location.replace(gradioURL + '?__theme=dark');
+    # }}
+
 
     buttons_with_tooltip = {
         "run_pipeline_button": "Runs HTR on the image. Takes approx 1-2 mins per image (depending on hardware).",
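The dark-theme redirect that this commit comments out of monitorButtonHover does not have to live inside the hover script at all. Below is a minimal, hypothetical sketch of attaching the same JavaScript to a Blocks.load event instead; it assumes Gradio 3.x, where event listeners accept a `_js` argument, and the names `demo` and `FORCE_DARK_JS` are illustrative, not part of this repo.

# Hypothetical sketch, not part of this commit: run the dark-theme redirect
# once on page load instead of embedding it in the hover-monitoring script.
# Assumes Gradio 3.x, where event listeners accept a `_js` keyword.
import gradio as gr

FORCE_DARK_JS = """
() => {
    const gradioURL = window.location.href;
    if (!gradioURL.endsWith('?__theme=dark')) {
        window.location.replace(gradioURL + '?__theme=dark');
    }
}
"""

with gr.Blocks() as demo:
    gr.Markdown("HTR demo placeholder")  # illustrative content only
    # fn is None: the event exists only to execute the JavaScript redirect.
    demo.load(None, None, None, _js=FORCE_DARK_JS)

if __name__ == "__main__":
    demo.launch()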
requirements.txt
CHANGED
@@ -1,19 +1,19 @@
-torch
-torchvision
-openmim
-gradio
-pandas
-numpy
-opencv-python-headless
-
-transformers
-
-datasets
-requests
+torch==2.0.1
+torchvision==0.15.2
+openmim==0.3.9
+gradio==3.35.2
+pandas==2.0.3
+numpy==1.25.0
+opencv-python-headless==4.7.0.72
+Jinja2==3.1.2
+transformers==4.30.2
+huggingface-hub==0.15.1
+datasets==2.13.1
+requests==2.31.0
 # scipy
 # sklearn
 
-# make install_openmmlab
+# make install_openmmlab (they are excuted in dockerfile)
 # !pip install -U openmim
 # !mim install mmengine
 # !mim install mmcv
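With every dependency now pinned to an exact version, a quick drift check against the installed environment can catch a broken rebuild early. This is a minimal sketch, not part of the repo; it assumes a requirements.txt that only uses `==` pins and `#` comments as above, and that the pip package names match their importlib.metadata distribution names.

# Sketch: report packages whose installed version differs from the pin.
from importlib.metadata import PackageNotFoundError, version

def check_pins(path="requirements.txt"):
    for line in open(path):
        line = line.split("#")[0].strip()  # drop comments and whitespace
        if "==" not in line:
            continue  # skip blanks and unpinned lines
        name, pinned = line.split("==")
        try:
            installed = version(name)
        except PackageNotFoundError:
            print(f"{name}: not installed (pinned {pinned})")
            continue
        if installed != pinned:
            print(f"{name}: installed {installed}, pinned {pinned}")

if __name__ == "__main__":
    check_pins()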
test_api.ipynb
CHANGED
@@ -376,82 +376,13 @@
     "\n",
     "client = Client(\"http://127.0.0.1:7860/\")\n",
     "job = client.submit(\n",
-    "    \"./
+    "    \"./test.jpg\", # str (filepath or URL to image) in 'Image to run HTR-pipeline on' Image component\n",
     "    \"test_api\", # str in 'parameter_22' Textbox component\n",
     "    api_name=\"/predict\",\n",
     ")\n",
     "\n",
     "print(job.result())\n"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Running on local URL:  http://127.0.0.1:7861\n",
-      "\n",
-      "To create a public link, set `share=True` in `launch()`.\n"
-     ]
-    },
-    {
-     "data": {
-      "text/html": [
-       "<div><iframe src=\"http://127.0.0.1:7861/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
-      ],
-      "text/plain": [
-       "<IPython.core.display.HTML object>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "data": {
-      "text/plain": []
-     },
-     "execution_count": 9,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "import gradio as gr\n",
-    "\n",
-    "def create_object(arg):\n",
-    "    return gr.Textbox(value=arg)\n",
-    "\n",
-    "my_objects = {}\n",
-    "\n",
-    "test_list =[\"first\", \"second\"] \n",
-    "for i in test_list:\n",
-    "    object_name = f\"object_{i}\"\n",
-    "    new_object = create_object(i)\n",
-    "    my_objects[object_name] = new_object\n",
-    "\n",
-    "# Accessing objects by their assigned names\n",
-    "first_object = my_objects[\"object_first\"]\n",
-    "second_object = my_objects[\"object_second\"]\n",
-    "\n",
-    "with gr.Blocks() as test:\n",
-    "    with gr.Row():\n",
-    "        first_object.render()\n",
-    "    with gr.Row():\n",
-    "        second_object.render()\n",
-    "\n",
-    "test.launch()\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
 ],
 "metadata": {