Aspiring Astro committed on
Commit 8d2450a · 1 Parent(s): 926c115

switch to using blocks

Files changed (2)
  1. app.ipynb +82 -210
  2. app.py +33 -13
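The change in both files is the same: the single `gr.Interface(...)` call is replaced by an explicit `gr.Blocks` layout with its own event wiring. A minimal sketch of that pattern, assuming the gradio 3.x API used in this repo; `predict_fn` and the component labels below are placeholders, not code from this commit:

```python
import gradio as gr

def predict_fn(img):
    # placeholder classifier; the real app loads a fastai learner
    return {"cat": 1.0}

# Before: one Interface call wires input, output, and examples together.
# intf = gr.Interface(fn=predict_fn, inputs=gr.Image(), outputs=gr.Label())

# After: Blocks makes the layout and the events explicit.
with gr.Blocks() as demo:
    with gr.Column():
        image = gr.Image(label="Pick an image")
        btn = gr.Button("Classify")
        result = gr.Label(label="Result")
    btn.click(fn=predict_fn, inputs=image, outputs=result)

if __name__ == "__main__":
    demo.launch()
```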
app.ipynb CHANGED
@@ -20,6 +20,9 @@
20
  "#| export\n",
21
  "from fastai.vision.all import *\n",
22
  "import gradio as gr\n",
23
  "title = \"FastAI - Big Cats Classifier\"\n",
24
  "description = \"Classify big cats using all Resnet models available pre-trained in FastAI\""
25
  ]
@@ -41,7 +44,7 @@
41
  "}\n",
42
  "models = list(learners.keys())\n",
43
  "\n",
44
- " "
45
  ]
46
  },
47
  {
@@ -53,11 +56,16 @@
53
  "source": [
54
  "#| export\n",
55
  " \n",
56
- "def classify_image(img, model_file=\"resnet-101\"):\n",
57
- " learn = load_learner(learners[model_file])\n",
58
  " pred,idx,probs = learn.predict(img)\n",
59
- " print(pred, idx, probs)\n",
60
- " return dict(zip(learn.dls.vocab, map(float, probs)))\n"
61
  ]
62
  },
63
  {
@@ -107,9 +115,7 @@
107
  "name": "stdout",
108
  "output_type": "stream",
109
  "text": [
110
- "cheetah TensorBase(1) TensorBase([2.9325e-08, 9.9999e-01, 1.2872e-09, 1.3284e-05, 3.6218e-08,\n",
111
- " 6.6378e-07, 1.2428e-08, 7.0062e-09])\n",
112
- "{'african leopard': 2.932508635922204e-08, 'cheetah': 0.9999860525131226, 'clouded leopard': 1.2872064525382143e-09, 'cougar': 1.3283532098284923e-05, 'jaguar': 3.6217517873637917e-08, 'lion': 6.637808382947696e-07, 'snow leopard': 1.242834812842375e-08, 'tiger': 7.0062102786039304e-09}\n"
113
  ]
114
  },
115
  {
@@ -153,9 +159,7 @@
153
  "name": "stdout",
154
  "output_type": "stream",
155
  "text": [
156
- "jaguar TensorBase(4) TensorBase([2.2414e-06, 4.8124e-07, 1.5911e-08, 1.5741e-08, 1.0000e+00,\n",
157
- " 8.4150e-10, 2.4537e-08, 4.5623e-07])\n",
158
- "{'african leopard': 2.241393531221547e-06, 'cheetah': 4.812366114492761e-07, 'clouded leopard': 1.5911437500903958e-08, 'cougar': 1.5740527103957902e-08, 'jaguar': 0.9999967813491821, 'lion': 8.415030339214979e-10, 'snow leopard': 2.453731973162121e-08, 'tiger': 4.562308788536029e-07}\n"
159
  ]
160
  },
161
  {
@@ -199,9 +203,7 @@
199
  "name": "stdout",
200
  "output_type": "stream",
201
  "text": [
202
- "tiger TensorBase(7) TensorBase([2.0140e-08, 3.2289e-10, 3.0278e-07, 1.7037e-07, 2.8471e-08,\n",
203
- " 3.1560e-08, 5.5170e-08, 1.0000e+00])\n",
204
- "{'african leopard': 2.0139752976433556e-08, 'cheetah': 3.228871059413052e-10, 'clouded leopard': 3.0278118856585934e-07, 'cougar': 1.7037031341260445e-07, 'jaguar': 2.8470973134631095e-08, 'lion': 3.15602726175257e-08, 'snow leopard': 5.5169955714973185e-08, 'tiger': 0.9999994039535522}\n"
205
  ]
206
  },
207
  {
@@ -245,9 +247,7 @@
245
  "name": "stdout",
246
  "output_type": "stream",
247
  "text": [
248
- "cougar TensorBase(3) TensorBase([7.7202e-04, 9.6453e-05, 3.6239e-04, 9.9550e-01, 5.8073e-04,\n",
249
- " 1.0296e-03, 1.6978e-04, 1.4883e-03])\n",
250
- "{'african leopard': 0.0007720203138887882, 'cheetah': 9.645262616686523e-05, 'clouded leopard': 0.00036238841130398214, 'cougar': 0.9955006241798401, 'jaguar': 0.0005807342822663486, 'lion': 0.0010295877000316978, 'snow leopard': 0.000169777573319152, 'tiger': 0.0014882636023685336}\n"
251
  ]
252
  },
253
  {
@@ -291,9 +291,7 @@
291
  "name": "stdout",
292
  "output_type": "stream",
293
  "text": [
294
- "lion TensorBase(5) TensorBase([6.3666e-10, 2.1585e-07, 6.5407e-09, 1.1020e-08, 1.3697e-08,\n",
295
- " 9.9998e-01, 5.2166e-09, 1.6965e-05])\n",
296
- "{'african leopard': 6.366598359619502e-10, 'cheetah': 2.1584540377261874e-07, 'clouded leopard': 6.540694652557022e-09, 'cougar': 1.1020346413204152e-08, 'jaguar': 1.3696873857327319e-08, 'lion': 0.9999828338623047, 'snow leopard': 5.2166360120509125e-09, 'tiger': 1.696465005807113e-05}\n"
297
  ]
298
  },
299
  {
@@ -337,9 +335,7 @@
337
  "name": "stdout",
338
  "output_type": "stream",
339
  "text": [
340
- "african leopard TensorBase(0) TensorBase([9.7809e-01, 1.9370e-03, 5.1859e-04, 1.8196e-05, 1.5251e-02,\n",
341
- " 1.8402e-04, 3.8208e-03, 1.8130e-04])\n",
342
- "{'african leopard': 0.9780895113945007, 'cheetah': 0.0019370485097169876, 'clouded leopard': 0.0005185850313864648, 'cougar': 1.819587851059623e-05, 'jaguar': 0.015250639989972115, 'lion': 0.00018402353452984244, 'snow leopard': 0.0038208006881177425, 'tiger': 0.00018130325770471245}\n"
343
  ]
344
  },
345
  {
@@ -383,9 +379,7 @@
383
  "name": "stdout",
384
  "output_type": "stream",
385
  "text": [
386
- "clouded leopard TensorBase(2) TensorBase([3.5035e-05, 2.8548e-06, 9.9938e-01, 1.8297e-06, 5.6521e-04,\n",
387
- " 1.3141e-06, 7.5178e-06, 1.0570e-05])\n",
388
- "{'african leopard': 3.5035314795095474e-05, 'cheetah': 2.8547888177854475e-06, 'clouded leopard': 0.9993757605552673, 'cougar': 1.8296907455805922e-06, 'jaguar': 0.0005652108229696751, 'lion': 1.314112978434423e-06, 'snow leopard': 7.517839094361989e-06, 'tiger': 1.0569940059212968e-05}\n"
389
  ]
390
  },
391
  {
@@ -429,9 +423,7 @@
429
  "name": "stdout",
430
  "output_type": "stream",
431
  "text": [
432
- "snow leopard TensorBase(6) TensorBase([1.9796e-07, 5.2659e-07, 1.7047e-04, 2.0246e-07, 1.5801e-08,\n",
433
- " 5.4288e-06, 9.9982e-01, 6.8012e-09])\n",
434
- "{'african leopard': 1.9796296157892357e-07, 'cheetah': 5.265908384899376e-07, 'clouded leopard': 0.00017047168512362987, 'cougar': 2.024643492859468e-07, 'jaguar': 1.5801049357833108e-08, 'lion': 5.4287702369038016e-06, 'snow leopard': 0.9998231530189514, 'tiger': 6.801158747293812e-09}\n"
435
  ]
436
  }
437
  ],
@@ -446,124 +438,57 @@
446
  },
447
  {
448
  "cell_type": "code",
449
- "execution_count": 6,
450
  "id": "a48e7483-c04b-4048-a1ae-34a8c7986a57",
451
  "metadata": {},
452
  "outputs": [
453
- {
454
- "name": "stderr",
455
- "output_type": "stream",
456
- "text": [
457
- "/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/inputs.py:256: UserWarning: Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your component from gradio.components\n",
458
- " warnings.warn(\n",
459
- "/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/deprecation.py:40: UserWarning: `optional` parameter is deprecated, and it has no effect\n",
460
- " warnings.warn(value)\n",
461
- "/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/inputs.py:216: UserWarning: Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your component from gradio.components\n",
462
- " warnings.warn(\n",
463
- "/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/outputs.py:196: UserWarning: Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components\n",
464
- " warnings.warn(\n",
465
- "/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/deprecation.py:40: UserWarning: The 'type' parameter has been deprecated. Use the Number component instead.\n",
466
- " warnings.warn(value)\n"
467
- ]
468
- },
469
  {
470
  "name": "stdout",
471
  "output_type": "stream",
472
  "text": [
473
  "Running on local URL: http://127.0.0.1:7860\n",
474
- "Running on public URL: https://9569b03a-5208-4edb.gradio.live\n",
475
  "\n",
476
- "This share link expires in 72 hours. For free permanent hosting and GPU upgrades (NEW!), check out Spaces: https://huggingface.co/spaces\n"
477
- ]
478
- },
479
- {
480
- "data": {
481
- "text/html": [
482
- "\n",
483
- "<style>\n",
484
- " /* Turns off some styling */\n",
485
- " progress {\n",
486
- " /* gets rid of default border in Firefox and Opera. */\n",
487
- " border: none;\n",
488
- " /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
489
- " background-size: auto;\n",
490
- " }\n",
491
- " progress:not([value]), progress:not([value])::-webkit-progress-bar {\n",
492
- " background: repeating-linear-gradient(45deg, #7e7e7e, #7e7e7e 10px, #5c5c5c 10px, #5c5c5c 20px);\n",
493
- " }\n",
494
- " .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
495
- " background: #F44336;\n",
496
- " }\n",
497
- "</style>\n"
498
- ],
499
- "text/plain": [
500
- "<IPython.core.display.HTML object>"
501
- ]
502
- },
503
- "metadata": {},
504
- "output_type": "display_data"
505
- },
506
- {
507
- "data": {
508
- "text/html": [],
509
- "text/plain": [
510
- "<IPython.core.display.HTML object>"
511
- ]
512
- },
513
- "metadata": {},
514
- "output_type": "display_data"
515
- },
516
- {
517
- "name": "stdout",
518
- "output_type": "stream",
519
- "text": [
520
- "snow leopard TensorBase(6) TensorBase([1.9796e-07, 5.2659e-07, 1.7047e-04, 2.0246e-07, 1.5801e-08,\n",
521
- " 5.4288e-06, 9.9982e-01, 6.8012e-09])\n"
522
  ]
523
  },
524
  {
525
- "data": {
526
- "text/html": [
527
- "\n",
528
- "<style>\n",
529
- " /* Turns off some styling */\n",
530
- " progress {\n",
531
- " /* gets rid of default border in Firefox and Opera. */\n",
532
- " border: none;\n",
533
- " /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
534
- " background-size: auto;\n",
535
- " }\n",
536
- " progress:not([value]), progress:not([value])::-webkit-progress-bar {\n",
537
- " background: repeating-linear-gradient(45deg, #7e7e7e, #7e7e7e 10px, #5c5c5c 10px, #5c5c5c 20px);\n",
538
- " }\n",
539
- " .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
540
- " background: #F44336;\n",
541
- " }\n",
542
- "</style>\n"
543
- ],
544
- "text/plain": [
545
- "<IPython.core.display.HTML object>"
546
- ]
547
- },
548
- "metadata": {},
549
- "output_type": "display_data"
550
- },
551
- {
552
- "data": {
553
- "text/html": [],
554
- "text/plain": [
555
- "<IPython.core.display.HTML object>"
556
- ]
557
- },
558
- "metadata": {},
559
- "output_type": "display_data"
560
- },
561
- {
562
- "name": "stdout",
563
  "output_type": "stream",
564
  "text": [
565
- "african leopard TensorBase(0) TensorBase([9.7809e-01, 1.9370e-03, 5.1859e-04, 1.8196e-05, 1.5251e-02,\n",
566
- " 1.8402e-04, 3.8208e-03, 1.8130e-04])\n"
567
  ]
568
  },
569
  {
@@ -603,14 +528,6 @@
603
  "metadata": {},
604
  "output_type": "display_data"
605
  },
606
- {
607
- "name": "stdout",
608
- "output_type": "stream",
609
- "text": [
610
- "tiger TensorBase(7) TensorBase([2.0140e-08, 3.2289e-10, 3.0278e-07, 1.7037e-07, 2.8471e-08,\n",
611
- " 3.1560e-08, 5.5170e-08, 1.0000e+00])\n"
612
- ]
613
- },
614
  {
615
  "data": {
616
  "text/html": [
@@ -648,14 +565,6 @@
648
  "metadata": {},
649
  "output_type": "display_data"
650
  },
651
- {
652
- "name": "stdout",
653
- "output_type": "stream",
654
- "text": [
655
- "cheetah TensorBase(1) TensorBase([2.9325e-08, 9.9999e-01, 1.2872e-09, 1.3284e-05, 3.6218e-08,\n",
656
- " 6.6378e-07, 1.2428e-08, 7.0062e-09])\n"
657
- ]
658
- },
659
  {
660
  "data": {
661
  "text/html": [
@@ -697,84 +606,47 @@
697
  "name": "stdout",
698
  "output_type": "stream",
699
  "text": [
700
- "clouded leopard TensorBase(2) TensorBase([3.5035e-05, 2.8548e-06, 9.9938e-01, 1.8297e-06, 5.6521e-04,\n",
701
- " 1.3141e-06, 7.5178e-06, 1.0570e-05])\n"
702
  ]
703
  },
704
  {
705
  "data": {
706
- "text/html": [
707
- "\n",
708
- "<style>\n",
709
- " /* Turns off some styling */\n",
710
- " progress {\n",
711
- " /* gets rid of default border in Firefox and Opera. */\n",
712
- " border: none;\n",
713
- " /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
714
- " background-size: auto;\n",
715
- " }\n",
716
- " progress:not([value]), progress:not([value])::-webkit-progress-bar {\n",
717
- " background: repeating-linear-gradient(45deg, #7e7e7e, #7e7e7e 10px, #5c5c5c 10px, #5c5c5c 20px);\n",
718
- " }\n",
719
- " .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
720
- " background: #F44336;\n",
721
- " }\n",
722
- "</style>\n"
723
- ],
724
- "text/plain": [
725
- "<IPython.core.display.HTML object>"
726
- ]
727
- },
728
- "metadata": {},
729
- "output_type": "display_data"
730
- },
731
- {
732
- "data": {
733
- "text/html": [],
734
- "text/plain": [
735
- "<IPython.core.display.HTML object>"
736
- ]
737
  },
 
738
  "metadata": {},
739
- "output_type": "display_data"
740
- },
741
- {
742
- "name": "stdout",
743
- "output_type": "stream",
744
- "text": [
745
- "cougar TensorBase(3) TensorBase([7.7202e-04, 9.6453e-05, 3.6239e-04, 9.9550e-01, 5.8073e-04,\n",
746
- " 1.0296e-03, 1.6978e-04, 1.4883e-03])\n",
747
- "Keyboard interruption in main thread... closing server.\n",
748
- "Killing tunnel 127.0.0.1:7860 <> https://9569b03a-5208-4edb.gradio.live\n"
749
- ]
750
  }
751
  ],
752
  "source": [
753
  "#| export\n",
754
- "image = gr.inputs.Image()\n",
755
- "model = gr.inputs.Dropdown(choices=models)\n",
756
- "label = gr.outputs.Label()\n",
757
  "example_images = [ 'cheetah.jpg', 'jaguar.jpg', 'tiger.jpg', 'cougar.jpg', 'lion.jpg', 'african leopard.jpg', 'clouded leopard.jpg', 'snow leopard.jpg' ]\n",
758
- "example_models = [] #list(learners.values())\n",
759
- "intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=example_images, title=title, description=description )\n",
760
- "if __name__ == \"__main__\":\n",
761
- " intf.launch(debug=True, inline=False)\n"
762
  ]
763
  },
764
  {
765
  "cell_type": "code",
766
- "execution_count": 8,
767
  "id": "cab071f9-7c3b-4b35-a0d1-3687731ffce5",
768
  "metadata": {},
769
- "outputs": [
770
- {
771
- "name": "stdout",
772
- "output_type": "stream",
773
- "text": [
774
- "Export successful\n"
775
- ]
776
- }
777
- ],
778
  "source": [
779
  "import nbdev\n",
780
  "nbdev.export.nb_export('app.ipynb', './')\n",
 
20
  "#| export\n",
21
  "from fastai.vision.all import *\n",
22
  "import gradio as gr\n",
23
+ "import warnings\n",
24
+ "warnings.filterwarnings('ignore')\n",
25
+ "\n",
26
  "title = \"FastAI - Big Cats Classifier\"\n",
27
  "description = \"Classify big cats using all Resnet models available pre-trained in FastAI\""
28
  ]
 
44
  "}\n",
45
  "models = list(learners.keys())\n",
46
  "\n",
47
+ "active_model = learners[\"resnet-18\"]\n"
48
  ]
49
  },
50
  {
 
56
  "source": [
57
  "#| export\n",
58
  " \n",
59
+ "def classify_image(img):\n",
60
+ " learn = load_learner(active_model)\n",
61
  " pred,idx,probs = learn.predict(img)\n",
62
+ " return dict(zip(learn.dls.vocab, map(float, probs)))\n",
63
+ "\n",
64
+ "def select_model(model_name):\n",
65
+ " if model_name not in models:\n",
66
+ " model_name = \"resnet-18\"\n",
67
+ " active_model = learners[model_name]\n",
68
+ " return model_name\n"
69
  ]
70
  },
71
  {
 
115
  "name": "stdout",
116
  "output_type": "stream",
117
  "text": [
118
+ "{'african leopard': 0.0005852991016581655, 'cheetah': 0.9993988275527954, 'clouded leopard': 1.7600793000838166e-07, 'cougar': 6.112059963925276e-06, 'jaguar': 7.491902579204179e-06, 'lion': 1.3097942428430542e-06, 'snow leopard': 6.794325599912554e-07, 'tiger': 1.22832446436405e-07}\n"
119
  ]
120
  },
121
  {
 
159
  "name": "stdout",
160
  "output_type": "stream",
161
  "text": [
162
+ "{'african leopard': 0.2962114214897156, 'cheetah': 2.706606210267637e-05, 'clouded leopard': 0.0008470952161587775, 'cougar': 1.0193979505856987e-05, 'jaguar': 0.701975405216217, 'lion': 1.3766093616141006e-05, 'snow leopard': 0.0008549779886379838, 'tiger': 6.007726915413514e-05}\n"
163
  ]
164
  },
165
  {
 
203
  "name": "stdout",
204
  "output_type": "stream",
205
  "text": [
206
+ "{'african leopard': 2.0210626061611947e-08, 'cheetah': 1.6748231246310752e-08, 'clouded leopard': 1.1174745395692298e-06, 'cougar': 2.63490710494807e-06, 'jaguar': 2.399448703727103e-06, 'lion': 6.196571433747522e-08, 'snow leopard': 2.4245096028607804e-06, 'tiger': 0.9999912977218628}\n"
207
  ]
208
  },
209
  {
 
247
  "name": "stdout",
248
  "output_type": "stream",
249
  "text": [
250
+ "{'african leopard': 9.39465026021935e-05, 'cheetah': 0.00021114452101755887, 'clouded leopard': 8.688175876159221e-05, 'cougar': 0.9761292934417725, 'jaguar': 7.082346655806759e-06, 'lion': 0.02333180606365204, 'snow leopard': 0.00011577722762012854, 'tiger': 2.4006889361771755e-05}\n"
251
  ]
252
  },
253
  {
 
291
  "name": "stdout",
292
  "output_type": "stream",
293
  "text": [
294
+ "{'african leopard': 1.3545766286426897e-08, 'cheetah': 2.635677674334147e-06, 'clouded leopard': 7.659965994832874e-09, 'cougar': 9.957815017003213e-09, 'jaguar': 1.497639772196635e-07, 'lion': 0.9999957084655762, 'snow leopard': 1.294516778216348e-07, 'tiger': 1.2779944427165901e-06}\n"
295
  ]
296
  },
297
  {
 
335
  "name": "stdout",
336
  "output_type": "stream",
337
  "text": [
338
+ "{'african leopard': 0.024091463536024094, 'cheetah': 0.0014163728337734938, 'clouded leopard': 0.008692733943462372, 'cougar': 0.0010448594111949205, 'jaguar': 0.7156786322593689, 'lion': 0.017859801650047302, 'snow leopard': 0.22819218039512634, 'tiger': 0.0030239589978009462}\n"
339
  ]
340
  },
341
  {
 
379
  "name": "stdout",
380
  "output_type": "stream",
381
  "text": [
382
+ "{'african leopard': 7.144178198359441e-06, 'cheetah': 3.725538704202336e-07, 'clouded leopard': 0.9994736313819885, 'cougar': 6.0378228226909414e-05, 'jaguar': 3.279747033957392e-05, 'lion': 1.1806019273308266e-07, 'snow leopard': 0.0003000575816258788, 'tiger': 0.0001255277602467686}\n"
383
  ]
384
  },
385
  {
 
423
  "name": "stdout",
424
  "output_type": "stream",
425
  "text": [
426
 
427
  ]
428
  }
429
  ],
 
438
  },
439
  {
440
  "cell_type": "code",
441
+ "execution_count": 19,
442
  "id": "a48e7483-c04b-4048-a1ae-34a8c7986a57",
443
  "metadata": {},
444
  "outputs": [
445
  {
446
  "name": "stdout",
447
  "output_type": "stream",
448
  "text": [
449
  "Running on local URL: http://127.0.0.1:7860\n",
 
450
  "\n",
451
+ "To create a public link, set `share=True` in `launch()`.\n"
452
  ]
453
  },
454
  {
455
+ "name": "stderr",
456
  "output_type": "stream",
457
  "text": [
458
+ "Traceback (most recent call last):\n",
459
+ " File \"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/routes.py\", line 321, in run_predict\n",
460
+ " output = await app.blocks.process_api(\n",
461
+ " File \"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/blocks.py\", line 1015, in process_api\n",
462
+ " result = await self.call_function(fn_index, inputs, iterator, request)\n",
463
+ " File \"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/blocks.py\", line 856, in call_function\n",
464
+ " prediction = await anyio.to_thread.run_sync(\n",
465
+ " File \"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/anyio/to_thread.py\", line 31, in run_sync\n",
466
+ " return await get_asynclib().run_sync_in_worker_thread(\n",
467
+ " File \"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/anyio/_backends/_asyncio.py\", line 937, in run_sync_in_worker_thread\n",
468
+ " return await future\n",
469
+ " File \"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/anyio/_backends/_asyncio.py\", line 867, in run\n",
470
+ " result = context.run(func, *args)\n",
471
+ " File \"/var/folders/jk/w8lkkz7n40s81208_5_qd5_80000gn/T/ipykernel_3681/233086315.py\", line 5, in classify_image\n",
472
+ " pred,idx,probs = learn.predict(img)\n",
473
+ " File \"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/fastai/learner.py\", line 312, in predict\n",
474
+ " dl = self.dls.test_dl([item], rm_type_tfms=rm_type_tfms, num_workers=0)\n",
475
+ " File \"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/fastai/data/core.py\", line 532, in test_dl\n",
476
+ " test_ds = test_set(self.valid_ds, test_items, rm_tfms=rm_type_tfms, with_labels=with_labels\n",
477
+ " File \"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/fastai/data/core.py\", line 511, in test_set\n",
478
+ " if rm_tfms is None: rm_tfms = [tl.infer_idx(get_first(test_items)) for tl in test_tls]\n",
479
+ " File \"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/fastai/data/core.py\", line 511, in <listcomp>\n",
480
+ " if rm_tfms is None: rm_tfms = [tl.infer_idx(get_first(test_items)) for tl in test_tls]\n",
481
+ " File \"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/fastai/data/core.py\", line 405, in infer_idx\n",
482
+ " assert idx < len(self.types), f\"Expected an input of type in \\n{pretty_types}\\n but got {type(x)}\"\n",
483
+ "AssertionError: Expected an input of type in \n",
484
+ " - <class 'pathlib.PosixPath'>\n",
485
+ " - <class 'pathlib.Path'>\n",
486
+ " - <class 'str'>\n",
487
+ " - <class 'torch.Tensor'>\n",
488
+ " - <class 'numpy.ndarray'>\n",
489
+ " - <class 'bytes'>\n",
490
+ " - <class 'fastai.vision.core.PILImage'>\n",
491
+ " but got <class 'NoneType'>\n"
492
  ]
493
  },
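The traceback above is fastai's `predict` rejecting a `None` input, which is what the Blocks app passes when the Classify button is clicked before an image has been picked. A hedged sketch of a guard inside `classify_image`, assuming the same signature used in this commit; illustrative only, not part of the commit:

```python
# Hypothetical guard: skip prediction when no image has been provided.
def classify_image(img):
    if img is None:
        return None  # leaves the Label output empty (exact behavior varies by gradio version)
    learn = load_learner(active_model)
    pred, idx, probs = learn.predict(img)
    return dict(zip(learn.dls.vocab, map(float, probs)))
```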
494
  {
 
528
  "metadata": {},
529
  "output_type": "display_data"
530
  },
531
  {
532
  "data": {
533
  "text/html": [
 
565
  "metadata": {},
566
  "output_type": "display_data"
567
  },
568
  {
569
  "data": {
570
  "text/html": [
 
606
  "name": "stdout",
607
  "output_type": "stream",
608
  "text": [
609
+ "Keyboard interruption in main thread... closing server.\n"
 
610
  ]
611
  },
612
  {
613
  "data": {
614
+ "text/plain": []
615
  },
616
+ "execution_count": 19,
617
  "metadata": {},
618
+ "output_type": "execute_result"
619
  }
620
  ],
621
  "source": [
622
  "#| export\n",
623
  "example_images = [ 'cheetah.jpg', 'jaguar.jpg', 'tiger.jpg', 'cougar.jpg', 'lion.jpg', 'african leopard.jpg', 'clouded leopard.jpg', 'snow leopard.jpg' ]\n",
624
+ "\n",
625
+ "demo = gr.Blocks()\n",
626
+ "with demo:\n",
627
+ " with gr.Column(variant=\"panel\"):\n",
628
+ " image = gr.inputs.Image(label=\"Pick an image\")\n",
629
+ " model = gr.inputs.Dropdown(label=\"Select a model\", choices=models)\n",
630
+ " model.change(fn=select_model, inputs=model, outputs=None)\n",
631
+ " btnClassify = gr.Button(\"Classify\")\n",
632
+ " with gr.Column(variant=\"panel\"):\n",
633
+ " result = gr.outputs.Label(label=\"Result\")\n",
634
+ " \n",
635
+ " btnClassify.click(fn=classify_image, inputs=image, outputs=result)\n",
636
+ " img_gallery = gr.Examples(examples=example_images, inputs=image)\n",
637
+ "\n",
638
+ "demo.launch(debug=True, inline=False)\n",
639
+ " # intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=example_images, title=title, description=description )\n",
640
+ " # if __name__ == \"__main__\":\n",
641
+ " # intf.launch(debug=True, inline=False)\n"
642
  ]
643
  },
644
  {
645
  "cell_type": "code",
646
+ "execution_count": null,
647
  "id": "cab071f9-7c3b-4b35-a0d1-3687731ffce5",
648
  "metadata": {},
649
+ "outputs": [],
650
  "source": [
651
  "import nbdev\n",
652
  "nbdev.export.nb_export('app.ipynb', './')\n",
app.py CHANGED
@@ -1,12 +1,15 @@
1
  # AUTOGENERATED! DO NOT EDIT! File to edit: app.ipynb.
2
 
3
  # %% auto 0
4
- __all__ = ['title', 'description', 'learners', 'models', 'image', 'model', 'label', 'example_images', 'example_models', 'intf',
5
- 'classify_image']
6
 
7
  # %% app.ipynb 1
8
  from fastai.vision.all import *
9
  import gradio as gr
10
  title = "FastAI - Big Cats Classifier"
11
  description = "Classify big cats using all Resnet models available pre-trained in FastAI"
12
 
@@ -20,23 +23,40 @@ learners = {
20
  }
21
  models = list(learners.keys())
22
 
23
-
 
24
 
25
  # %% app.ipynb 3
26
- def classify_image(img, model_file="resnet-101"):
27
- learn = load_learner(learners[model_file])
28
  pred,idx,probs = learn.predict(img)
29
- print(pred, idx, probs)
30
  return dict(zip(learn.dls.vocab, map(float, probs)))
31
 
32
 
33
  # %% app.ipynb 5
34
- image = gr.inputs.Image()
35
- model = gr.inputs.Dropdown(choices=models)
36
- label = gr.outputs.Label()
37
  example_images = [ 'cheetah.jpg', 'jaguar.jpg', 'tiger.jpg', 'cougar.jpg', 'lion.jpg', 'african leopard.jpg', 'clouded leopard.jpg', 'snow leopard.jpg' ]
38
- example_models = [] #list(learners.values())
39
- intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=example_images, title=title, description=description )
40
- if __name__ == "__main__":
41
- intf.launch(debug=True, inline=False)
42
 
 
1
  # AUTOGENERATED! DO NOT EDIT! File to edit: app.ipynb.
2
 
3
  # %% auto 0
4
+ __all__ = ['title', 'description', 'learners', 'models', 'active_model', 'example_images', 'demo', 'classify_image',
5
+ 'select_model']
6
 
7
  # %% app.ipynb 1
8
  from fastai.vision.all import *
9
  import gradio as gr
10
+ import warnings
11
+ warnings.filterwarnings('ignore')
12
+
13
  title = "FastAI - Big Cats Classifier"
14
  description = "Classify big cats using all Resnet models available pre-trained in FastAI"
15
 
 
23
  }
24
  models = list(learners.keys())
25
 
26
+ active_model = learners["resnet-18"]
27
+
28
 
29
  # %% app.ipynb 3
30
+ def classify_image(img):
31
+ learn = load_learner(active_model)
32
  pred,idx,probs = learn.predict(img)
 
33
  return dict(zip(learn.dls.vocab, map(float, probs)))
34
 
35
+ def select_model(model_name):
36
+ if model_name not in models:
37
+ model_name = "resnet-18"
38
+ active_model = learners[model_name]
39
+ return model_name
40
+
41
 
42
  # %% app.ipynb 5
 
43
  example_images = [ 'cheetah.jpg', 'jaguar.jpg', 'tiger.jpg', 'cougar.jpg', 'lion.jpg', 'african leopard.jpg', 'clouded leopard.jpg', 'snow leopard.jpg' ]
44
+
45
+ demo = gr.Blocks()
46
+ with demo:
47
+ with gr.Column(variant="panel"):
48
+ image = gr.inputs.Image(label="Pick an image")
49
+ model = gr.inputs.Dropdown(label="Select a model", choices=models)
50
+ model.change(fn=select_model, inputs=model, outputs=None)
51
+ btnClassify = gr.Button("Classify")
52
+ with gr.Column(variant="panel"):
53
+ result = gr.outputs.Label(label="Result")
54
+
55
+ btnClassify.click(fn=classify_image, inputs=image, outputs=result)
56
+ img_gallery = gr.Examples(examples=example_images, inputs=image)
57
+
58
+ demo.launch(debug=True, inline=False)
59
+ # intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=example_images, title=title, description=description )
60
+ # if __name__ == "__main__":
61
+ # intf.launch(debug=True, inline=False)
62
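A side note on the new layout: `title` and `description` are still defined and exported, but the Blocks version never passes them anywhere, whereas the old `gr.Interface` call rendered both. A hedged sketch of surfacing them with `gr.Markdown` inside the same layout; illustrative only, not part of the commit:

```python
# Hypothetical addition: show the existing title/description at the top of the Blocks UI.
demo = gr.Blocks()
with demo:
    gr.Markdown(f"# {title}")
    gr.Markdown(description)
    # ... rest of the layout as committed (image, dropdown, button, result, examples) ...
```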