Ailyth commited on
Commit
b4354e4
·
1 Parent(s): 7b7480f

first push

Browse files
app.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# UI framework and HF inference pipeline.
import gradio as gr
from timeit import default_timer as timer
from transformers import pipeline

# Local model directories committed alongside this app (Swin image classifiers).
# m1 -> 3 labels (zhazu / versailles / cuisine), m2 -> 2 labels (zhazu / cuisine),
# per the id2label maps in models/*/config.json.
m1='models/3labels'
m2='models/2labels'
# Choices for the model-selection radio below; m2 is also its default value.
modelList=[m2,m1]
7
# Cache of model path -> loaded pipeline, so each model is loaded from disk
# only once instead of on every button click (the original rebuilt the
# pipeline per call, paying the full model-load cost each time).
PIPE_CACHE = {}

def classifier(modelName, img):
    """Classify an image with the selected model and time the prediction.

    Parameters:
        modelName: path of the local model directory (one of ``modelList``).
        img: image input as produced by the gradio Image component (filepath).

    Returns:
        (result, predTime): ``result`` maps a human-readable Chinese label to
        its score, ``predTime`` is the elapsed wall-clock time in seconds,
        rounded to 4 decimals.
    """
    startTime = timer()
    # Load lazily and reuse; only the first call for a given model pays the load cost.
    if modelName not in PIPE_CACHE:
        PIPE_CACHE[modelName] = pipeline(task="image-classification", model=modelName)
    pipe = PIPE_CACHE[modelName]
    preds = pipe(img)
    # Map raw model labels (see models/*/config.json id2label) to display names.
    result = {}
    for pred in preds:
        if pred["label"] == "zhazu":
            result["炸组"] = pred["score"]
        elif pred["label"] == "versailles":
            result["凡尔赛"] = pred["score"]
        else:
            # "cuisine" (and any other label) is shown as "normal".
            result["正常"] = pred["score"]
    endTime = timer()
    predTime = round(endTime - startTime, 4)
    return result, predTime
26
# Page styling injected into gr.Blocks(css=...): checkerboard background on
# #main, a centered 700px-wide container, translucent button, bordered widgets.
# NOTE(review): the nested ":hover{...}" inside the #btn rule is CSS Nesting
# syntax, not plain CSS (plain CSS would need "#btn:hover { ... }") — confirm
# it renders as intended in target browsers.
css='''
#main {background-color: #ffffff;opacity: 0.8;background-image: repeating-linear-gradient(45deg, #edffe1 25%, transparent 25%, transparent 75%, #edffe1 75%, #edffe1), repeating-linear-gradient(45deg, #edffe1 25%, #ffffff 25%, #ffffff 75%, #edffe1 75%, #edffe1);
background-position: 0 0, 40px 40px;background-size: 80px 80px;}
#mainContainer {max-width: 700px; margin-left: auto; margin-right: auto;background-color:transparent;}
#btn {border: 2px solid #3ed6e500; margin-left: auto; margin-right: auto;background-color:#3ed6e500;border-radius: 5px;
:hover{
color: #92ccd8; } }
#bg {border:2px solid #888;background-color:#fff;border-radius: 5px;}
'''
35
# Build and launch the gradio UI: banner + instructions, image upload, model
# selector, submit button, and the two outputs wired to classifier().
APP = gr.Blocks(css=css)
# NOTE(review): `encrypt` was removed in newer gradio releases; this is a
# harmless plain attribute assignment there — confirm against the pinned version.
APP.encrypt = False
with APP:
    with gr.Column(elem_id="main"):
        with gr.Column(elem_id="mainContainer"):
            # Banner image and user-facing instructions (text kept verbatim).
            gr.HTML('''
<div align=center>
<img src="https://huggingface.co/Ailyth/2_Labels/resolve/main/banner.png"/>
</div><br>
<p style="font-size:12.5px">🎆这是一个可以给烹饪作品打分的工具,以豆瓣炸厨房组热门/精华帖中的作品为标准<br>
😂功能主要是判断烹饪作品是否“炸组风”<br>
当然结果并不十分严谨,纯玩耍用
<br><br/>
<b>使用方法</b><br>
点击下面输入框即可上传图片,等待片刻后即可出结果。其中有两个模型,分别可判断三种标签(炸组、正常、凡尔赛)和两种标签(炸组,正常)。<br>
希望大家都做饭愉快,吃的开心。</p>

''')
            # Inputs: the uploaded image (passed to the pipeline as a file path)
            # and the model choice; defaults to the 2-label model (m2).
            imgUpload = gr.components.Image(type="filepath", label="选择图片", elem_id="bg")
            modelSelect = gr.components.Radio(choices=modelList, label="选择预测模型:(第一个模型是两个分类,第二个是三个分类)", value=m2, elem_id="bg")

            btn = gr.Button(value='💥提交', elem_id="btn")

            # Outputs: label scores (at most 3 classes exist) and elapsed seconds.
            predResult = gr.components.Label(num_top_classes=3, label="预测结果", elem_id="bg")
            predTime = gr.Number(label="实际预测耗时 (秒)", elem_id="bg")
            btn.click(classifier, inputs=[modelSelect, imgUpload], outputs=[predResult, predTime])
            # Fixed broken markup: original emitted "<h> 一些补充</4>" (invalid
            # open tag and garbage close tag); now a proper <h4> heading.
            gr.HTML('''
<br/>
<h4> 一些补充</h4>
<p>由于食物本身是一个复杂的集合概念,失败的烹饪作品和成功的烹饪作品又属于其子集,都有很多特征,判断起来很复杂,加上本功能所用的模型训练样本有限,所有检测结果经常翻车。</p>''')

            gr.HTML('''<div align=center><img id="visitor-badge" alt="visitor badge" src="https://visitor-badge.laobi.icu/badge?page_id=Ailyth/ZhazuClassification" /></div>''')
APP.launch(debug=True)
models/2labels/config.json ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "AutoTrain",
3
+ "architectures": [
4
+ "SwinForImageClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.0,
7
+ "depths": [
8
+ 2,
9
+ 2,
10
+ 18,
11
+ 2
12
+ ],
13
+ "drop_path_rate": 0.1,
14
+ "embed_dim": 128,
15
+ "encoder_stride": 32,
16
+ "hidden_act": "gelu",
17
+ "hidden_dropout_prob": 0.0,
18
+ "hidden_size": 1024,
19
+ "id2label": {
20
+ "0": "cuisine",
21
+ "1": "zhazu"
22
+ },
23
+ "image_size": 224,
24
+ "initializer_range": 0.02,
25
+ "label2id": {
26
+ "cuisine": "0",
27
+ "zhazu": "1"
28
+ },
29
+ "layer_norm_eps": 1e-05,
30
+ "max_length": 128,
31
+ "mlp_ratio": 4.0,
32
+ "model_type": "swin",
33
+ "num_channels": 3,
34
+ "num_heads": [
35
+ 4,
36
+ 8,
37
+ 16,
38
+ 32
39
+ ],
40
+ "num_layers": 4,
41
+ "padding": "max_length",
42
+ "patch_size": 4,
43
+ "path_norm": true,
44
+ "problem_type": "single_label_classification",
45
+ "qkv_bias": true,
46
+ "torch_dtype": "float32",
47
+ "transformers_version": "4.25.1",
48
+ "use_absolute_embeddings": false,
49
+ "window_size": 7
50
+ }
models/2labels/preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_normalize": true,
3
+ "do_rescale": true,
4
+ "do_resize": true,
5
+ "feature_extractor_type": "ViTFeatureExtractor",
6
+ "image_mean": [
7
+ 0.485,
8
+ 0.456,
9
+ 0.406
10
+ ],
11
+ "image_processor_type": "ViTImageProcessor",
12
+ "image_std": [
13
+ 0.229,
14
+ 0.224,
15
+ 0.225
16
+ ],
17
+ "resample": 3,
18
+ "rescale_factor": 0.00392156862745098,
19
+ "size": {
20
+ "height": 224,
21
+ "width": 224
22
+ }
23
+ }
models/2labels/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:06fbd6d85bd24d4cd530cc94b50e92797ae5e97489227c1b62fb091525c87d9c
3
+ size 347599761
models/3labels/config.json ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "AutoTrain",
3
+ "architectures": [
4
+ "SwinForImageClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.0,
7
+ "depths": [
8
+ 2,
9
+ 2,
10
+ 18,
11
+ 2
12
+ ],
13
+ "drop_path_rate": 0.1,
14
+ "embed_dim": 128,
15
+ "encoder_stride": 32,
16
+ "hidden_act": "gelu",
17
+ "hidden_dropout_prob": 0.0,
18
+ "hidden_size": 1024,
19
+ "id2label": {
20
+ "0": "cuisine",
21
+ "1": "versailles",
22
+ "2": "zhazu"
23
+ },
24
+ "image_size": 224,
25
+ "initializer_range": 0.02,
26
+ "label2id": {
27
+ "cuisine": "0",
28
+ "versailles": "1",
29
+ "zhazu": "2"
30
+ },
31
+ "layer_norm_eps": 1e-05,
32
+ "max_length": 128,
33
+ "mlp_ratio": 4.0,
34
+ "model_type": "swin",
35
+ "num_channels": 3,
36
+ "num_heads": [
37
+ 4,
38
+ 8,
39
+ 16,
40
+ 32
41
+ ],
42
+ "num_layers": 4,
43
+ "padding": "max_length",
44
+ "patch_size": 4,
45
+ "path_norm": true,
46
+ "problem_type": "single_label_classification",
47
+ "qkv_bias": true,
48
+ "torch_dtype": "float32",
49
+ "transformers_version": "4.25.1",
50
+ "use_absolute_embeddings": false,
51
+ "window_size": 7
52
+ }
models/3labels/preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_normalize": true,
3
+ "do_rescale": true,
4
+ "do_resize": true,
5
+ "feature_extractor_type": "ViTFeatureExtractor",
6
+ "image_mean": [
7
+ 0.485,
8
+ 0.456,
9
+ 0.406
10
+ ],
11
+ "image_processor_type": "ViTImageProcessor",
12
+ "image_std": [
13
+ 0.229,
14
+ 0.224,
15
+ 0.225
16
+ ],
17
+ "resample": 3,
18
+ "rescale_factor": 0.00392156862745098,
19
+ "size": {
20
+ "height": 224,
21
+ "width": 224
22
+ }
23
+ }
models/3labels/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cedc07f507204b184b2dba718e9759fec6963b5bb2716f88bb60f83d5f7b43c4
3
+ size 347603857
models/banner.png ADDED
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ gradio
2
+ torch
3
+ numpy
4
+ transformers
5
+
samples/1.jpg ADDED
samples/2.png ADDED
samples/3.png ADDED
samples/4.jpg ADDED
samples/banner.png ADDED