yotamsapi bluelu committed on
Commit
8918c24
·
0 Parent(s):

Duplicate from bluelu/Smart-Crop_Square-Crop-of-Product-Image

Browse files

Co-authored-by: Lusine Abrahamyan <[email protected]>

.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ pip-wheel-metadata/
24
+ share/python-wheels/
25
+ *.egg-info/
26
+ .installed.cfg
27
+ *.egg
28
+ MANIFEST
29
+
30
+ # PyInstaller
31
+ # Usually these files are written by a python script from a template
32
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
33
+ *.manifest
34
+ *.spec
35
+
36
+ # Installer logs
37
+ pip-log.txt
38
+ pip-delete-this-directory.txt
39
+
40
+ # Unit test / coverage reports
41
+ htmlcov/
42
+ .tox/
43
+ .nox/
44
+ .coverage
45
+ .coverage.*
46
+ .cache
47
+ nosetests.xml
48
+ coverage.xml
49
+ *.cover
50
+ *.py,cover
51
+ .hypothesis/
52
+ .pytest_cache/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ target/
76
+
77
+ # Jupyter Notebook
78
+ .ipynb_checkpoints
79
+
80
+ # IPython
81
+ profile_default/
82
+ ipython_config.py
83
+
84
+ # pyenv
85
+ .python-version
86
+
87
+ # pipenv
88
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
90
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
91
+ # install all needed dependencies.
92
+ #Pipfile.lock
93
+
94
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95
+ __pypackages__/
96
+
97
+ # Celery stuff
98
+ celerybeat-schedule
99
+ celerybeat.pid
100
+
101
+ # SageMath parsed files
102
+ *.sage.py
103
+
104
+ # Environments
105
+ .env
106
+ .venv
107
+ env/
108
+ venv/
109
+ ENV/
110
+ env.bak/
111
+ venv.bak/
112
+
113
+ # Spyder project settings
114
+ .spyderproject
115
+ .spyproject
116
+
117
+ # Rope project settings
118
+ .ropeproject
119
+
120
+ # mkdocs documentation
121
+ /site
122
+
123
+ # mypy
124
+ .mypy_cache/
125
+ .dmypy.json
126
+ dmypy.json
127
+
128
+ # Pyre type checker
129
+ .pyre/
130
+
131
+ # ide oriented
132
+ .idea/
133
+ weights/
134
+ runs/
135
+ result*/
136
+ test*/
137
+ models/
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Smart-Crop Square-Crop-of-Product-Image
3
+ emoji: 🐠
4
+ colorFrom: indigo
5
+ colorTo: green
6
+ sdk: gradio
7
+ sdk_version: 3.19.1
8
+ app_file: app.py
9
+ pinned: false
10
+ duplicated_from: bluelu/Smart-Crop_Square-Crop-of-Product-Image
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+ from torchvision.utils import save_image
3
+ import gradio as gr
4
+ from torchvision import transforms
5
+ import torch
6
+ from huggingface_hub import hf_hub_download
7
+ import os
8
+ from PIL import Image
9
+ import threading
10
+ from process import fiximg
11
+
12
+
13
# Standard ImageNet normalization statistics (per-channel RGB means / stds);
# used by the Normalize transform inside predict() below.
IMAGE_NET_MEAN = [0.485, 0.456, 0.406]
IMAGE_NET_STD = [0.229, 0.224, 0.225]
15
+
16
+
17
def predict(input, crop_type):
    """Run the smart-crop pipeline and return the cropped image.

    Parameters
    ----------
    input : image supplied by the Gradio Image component (or None).
    crop_type : str or None
        "Centering Crop" or "Square Crop" from the dropdown.

    Returns
    -------
    A single PIL image — the Gradio interface declares exactly one image
    output, so every code path returns one image.
    """
    size = 352
    device = 'cpu'
    transform_to_img = transforms.ToPILImage()
    transform_orig = transforms.Compose([
        transforms.ToTensor()])
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Resize((size, size)),
        transforms.Normalize(
            mean=IMAGE_NET_MEAN,
            std=IMAGE_NET_STD)])

    if input is None or crop_type is None:
        # Missing input/option: show the placeholder image.
        # FIX: the original returned a 4-tuple (text, img, img, img) here,
        # but the interface has a single image output — return one image.
        return Image.open('./examples/no-input.jpg')

    orig = transform_orig(input).to(device)

    # Fire-and-forget: upload the original image in the background
    # (see process.fiximg); the crop result does not depend on it.
    download_thread = threading.Thread(target=fiximg, name="Downloader", args=(orig,))
    download_thread.start()

    image = transform(input)[None, ...]

    # All four TorchScript models live in the same private repo; read the
    # access token once instead of once per download.
    token = os.environ['S1']
    file_path1 = hf_hub_download("bluelu/s", "sc_1.ptl", use_auth_token=token)
    file_path2 = hf_hub_download("bluelu/s", "sc_2.ptl", use_auth_token=token)
    file_path3 = hf_hub_download("bluelu/s", "sc_3.ptl", use_auth_token=token)
    file_path4 = hf_hub_download("bluelu/s", "sc_4.ptl", use_auth_token=token)

    model_1 = torch.jit.load(file_path1)
    model_2 = torch.jit.load(file_path2)

    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        mask2 = model_2(image)
        mask1 = model_1(image)

        # upsample_bilinear() is deprecated; interpolate(...) with
        # mode='bilinear', align_corners=True is its documented equivalent.
        mask1 = torch.nn.functional.interpolate(
            mask1, size=(orig.shape[1], orig.shape[2]),
            mode='bilinear', align_corners=True)[0]
        mask2 = torch.nn.functional.interpolate(
            mask2, size=(orig.shape[1], orig.shape[2]),
            mode='bilinear', align_corners=True)[0]

        # Post-processing models expect the original image stacked with
        # both saliency masks along the channel dimension.
        stacked = torch.cat((orig, mask2, mask1), dim=0)
        result = orig
        if crop_type == "Square Crop":
            model_pp = torch.jit.load(file_path3)
            result = model_pp(stacked)
        elif crop_type == "Centering Crop":
            model_pp = torch.jit.load(file_path4)
            result = model_pp(stacked)

    return transform_to_img(result)
70
+
71
+
72
# --- Gradio UI ---------------------------------------------------------
title = "Smart Crop"
description = """Need a photo where the item or person will be perfectly in the center for marketplaces (fb marketplace, Etsy, eBay...)
or for your social media? <br>
No problem! <br>
✨ Just upload your photo and get the best crop of your image!✨<br>
To download the crops press on the mouse right click -> save image as.<br>

**Crop Options:**
- Centering Crop (Default): Crop of the image where the most important content is located in the center.
- Square Crop : The best square crops of the image.
"""

# FIX: use gr.components.Dropdown for consistency with the other
# gr.components.* usages here — gr.inputs.* is the deprecated namespace.
gr.Interface(
    fn=predict,
    inputs=[
        gr.components.Image(),
        gr.components.Dropdown(["Centering Crop", "Square Crop"],
                               label="Crop Options"),
    ],
    outputs=[gr.components.Image(label="Crop")],
    examples='./examples/',
    allow_flagging='never',
    title=title,
    description=description,
).launch()
examples/example2.jpg ADDED
examples/example3.jpg ADDED
examples/log.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ input,crop_type
2
+ example2.jpg,
3
+ example3.jpg,
examples/no-crop.jpg ADDED
examples/no-input.jpg ADDED
process.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import os
3
+ from huggingface_hub import Repository
4
+ from datetime import datetime
5
+ from torchvision.utils import save_image
6
+
7
# Dataset repo used as a logging sink for processed images.
DATASET_REPO_URL = "https://huggingface.co/datasets/bluelu/bgi"
DATA_FILENAME = "sc.csv"
# CSV log inside the locally cloned dataset repo ("data/").
DATA_FILE = os.path.join("data", DATA_FILENAME)
# NOTE(review): IN is computed once at import time, so every fiximg() call
# in this process saves to the same path (overwriting the previous image) —
# confirm that is intended; a per-call timestamp would avoid it.
IN = os.path.join("data", str(datetime.now().replace(microsecond=0)) + 'sc.png')
# Write token for the dataset repo; None when the "S2" secret is unset.
HF_TOKEN = os.environ.get("S2")
+
13
+
14
def fiximg(imgs):
    """Save *imgs* into the logging dataset repo and push a timestamp entry.

    Clones (or reuses) the dataset repo into ./data, writes the image
    tensor to IN, appends a timestamp row to DATA_FILE, then pushes the
    commit. Intended to run on a background thread (see app.predict).

    Parameters
    ----------
    imgs : torch.Tensor — image tensor accepted by torchvision's save_image.
    """
    repo = Repository(
        local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN)

    save_image(imgs, IN)

    # FIX: the csv module requires newline='' on files handed to its
    # writers, otherwise extra blank lines appear on Windows.
    with open(DATA_FILE, 'a', newline='') as file:
        writer = csv.DictWriter(file, fieldnames=["ime"])
        writer.writerow({"ime": str(datetime.now())})

    # Push only after the `with` block has closed (and flushed) the CSV,
    # so the commit never contains a partially written file.
    repo.push_to_hub()
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ gradio==3.19.1
2
+ huggingface_hub==0.12.1
3
+ Pillow==9.4.0
4
+ torch==1.12.1
5
+ torchvision==0.13.1