Commit d022305 · Muhammad Naufal Rizqullah committed
1 Parent(s): 366238b

first commit
Files changed:
- .dockerignore +89 -0
- .gitignore +30 -0
- Dockerfile +22 -0
- __init__.py +0 -0
- data/44158_3_efficientnet_b2.pth +3 -0
- data/class_names.txt +90 -0
- main.py +15 -0
- requirements.txt +5 -0
- routers/__init__.py +0 -0
- routers/api.py +6 -0
- routers/v1/__init__.py +0 -0
- routers/v1/predict.py +59 -0
- routers/v1/wrapper_v1.py +7 -0
- schemas/__init__.py +0 -0
- schemas/output_44158.py +7 -0
- utils/__init__.py +0 -0
- utils/helper.py +11 -0
- utils/model.py +49 -0
.dockerignore
ADDED
@@ -0,0 +1,89 @@
# Git
.git
.gitignore
.gitattributes


# CI
.codeclimate.yml
.travis.yml
.taskcluster.yml

# Docker
docker-compose.yml
Dockerfile
.docker
.dockerignore

# Byte-compiled / optimized / DLL files
**/__pycache__/
**/*.py[cod]

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml

# Translations
*.mo
*.pot

# Django stuff:
*.log

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Virtual environment
.env
.venv/
venv/

# PyCharm
.idea

# Python mode for VIM
.ropeproject
**/.ropeproject

# Vim swap files
**/*.swp

# VS Code
.vscode/
.gitignore
ADDED
@@ -0,0 +1,30 @@
.idea
.ipynb_checkpoints
.mypy_cache
.vscode
__pycache__
.pytest_cache
htmlcov
dist
site
.coverage
coverage.xml
.netlify
test.db
log.txt
Pipfile.lock
env3.*
env
docs_build
site_build
venv
docs.zip
archive.zip

# vim temporary files
*~
.*.sw?
.cache

# macOS
.DS_Store
Dockerfile
ADDED
@@ -0,0 +1,22 @@
FROM python:3.11
ENV HF_PORT=7860

WORKDIR /code

# Expose port 7860 (the Hugging Face Spaces default)
EXPOSE ${HF_PORT}

# Install the dependencies first
# The "./requirements.txt" path is relative to the Dockerfile being executed (so it is already inside src)
COPY ./requirements.txt /code/requirements.txt

RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# Copy all required files
# The COPY instruction creates the destination folder (here "src") if it does not exist.
COPY . /code/src

# The working directory is /code and the code lives in /code/src,
# so the FastAPI app is referenced as "src.main:app" in the run command.
CMD ["uvicorn", "src.main:app", "--host", "0.0.0.0", "--port", "7860"]
__init__.py
ADDED
File without changes
data/44158_3_efficientnet_b2.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6d8b0bfcb84ceb12c3a7081538ad22eb52252ec9375c2b98e737e9ae08558f34
size 31772794
data/class_names.txt
ADDED
@@ -0,0 +1,90 @@
a
ba
be
bi
bo
bu
ca
ce
ci
co
cu
da
de
di
do
du
e
fa
fe
fi
fo
fu
ga
ge
gi
go
gu
ha
he
hi
ho
hu
i
ja
je
ji
jo
ju
ka
ke
ki
ko
ku
la
le
li
lo
lu
ma
me
mi
mo
mu
na
ne
ni
no
nu
o
pa
pe
pi
po
pu
ra
re
ri
ro
ru
sa
se
si
so
su
ta
te
ti
to
tu
u
wa
we
wi
wo
wu
ya
ye
yi
yo
yu
main.py
ADDED
@@ -0,0 +1,15 @@
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse

from src.routers import api


app = FastAPI()

# Include the main wrapper API router.
app.include_router(api.main_api)


@app.get("/", response_class=HTMLResponse)
def home(request: Request):
    return f"Congratulations! Your API is working as expected. Now head over to <a href='{request.url}docs' target='_blank'>{request.url}docs</a>"
requirements.txt
ADDED
@@ -0,0 +1,5 @@
fastapi
uvicorn[standard]
torch
torchvision
python-multipart
routers/__init__.py
ADDED
File without changes
routers/api.py
ADDED
@@ -0,0 +1,6 @@
from fastapi import APIRouter
from src.routers.v1 import wrapper_v1

main_api = APIRouter()

main_api.include_router(wrapper_v1.router)
routers/v1/__init__.py
ADDED
File without changes
routers/v1/predict.py
ADDED
@@ -0,0 +1,59 @@
import torch

from fastapi import APIRouter, UploadFile
from PIL import Image
from timeit import default_timer as timer

from src.utils.model import create_effnetb2_model
from src.utils.helper import load_class_names
from src.schemas.output_44158 import OutputBase


router = APIRouter()

@router.post("/predict", response_model=OutputBase)
def inference(greetings: str, im: UploadFile):

    # Load the class names
    class_names = load_class_names()

    # Read the uploaded image into PIL format
    image = Image.open(im.file).convert('RGB')

    # Get the model and transforms, and time how long they take to load.
    start_time = timer()
    model, transforms = create_effnetb2_model(len(class_names))
    end_time = timer()
    print(f"[INFO] Total Loading model time: {end_time - start_time:.3f} seconds")

    # Inference time start
    inference_start_time = timer()

    # Apply the model's transforms and add a batch dimension
    img = torch.unsqueeze(transforms(image), dim=0)

    # Put the model into evaluation mode and turn on inference mode
    model.eval()
    with torch.inference_mode():
        # No need to move the data to a GPU; everything runs on the CPU by default.
        pred_logits = model(img)
        pred_probs = torch.softmax(pred_logits, dim=1)

    # Create the prediction label
    predicted_label_index = torch.argmax(pred_probs).item()  # argmax over all elements returns a single index
    predicted_label = class_names[predicted_label_index]

    # Inference time end
    inference_end_time = timer()

    # Get the probability of the predicted class
    probability_pred = pred_probs[0][predicted_label_index].item()

    return OutputBase(
        message=greetings,
        class_predicted=predicted_label,
        prob=probability_pred,
        inference_time=f"{inference_end_time - inference_start_time:.3f} Seconds"
    )
routers/v1/wrapper_v1.py
ADDED
@@ -0,0 +1,7 @@
from fastapi import APIRouter

from src.routers.v1 import predict

router = APIRouter(prefix="/v1")

router.include_router(predict.router)
schemas/__init__.py
ADDED
File without changes
schemas/output_44158.py
ADDED
@@ -0,0 +1,7 @@
from pydantic import BaseModel

class OutputBase(BaseModel):
    message: str
    class_predicted: str
    prob: float
    inference_time: str
utils/__init__.py
ADDED
File without changes
utils/helper.py
ADDED
@@ -0,0 +1,11 @@
from pathlib import Path

def load_class_names():

    file = "class_names.txt"
    path_file = Path("./src/data") / file

    with open(path_file, "r") as f:
        class_names = [item.strip() for item in f.readlines()]

    return class_names
utils/model.py
ADDED
@@ -0,0 +1,49 @@
import torchvision
import torch
import functools

from torch import nn
from pathlib import Path

@functools.cache
def create_effnetb2_model(num_class: int):
    """Create a PyTorch EfficientNetB2 model.

    Builds EfficientNetB2 as a frozen feature extractor whose
    output head can be customized to the required number of classes.

    Args:
        num_class: The number of classes for the output (head) of the model.

    Returns:
        A tuple of (model, transforms) for EfficientNetB2.

    """

    # Get the pretrained EfficientNetB2 weights
    weights_effnetb2 = torchvision.models.EfficientNet_B2_Weights.IMAGENET1K_V1

    # Get the transforms used by EfficientNetB2
    transforms = weights_effnetb2.transforms()

    # Build the model
    model = torchvision.models.efficientnet_b2(weights=weights_effnetb2)

    # Freeze all layers
    for param in model.parameters():
        param.requires_grad = False

    # Replace the classification head with a custom output layer
    model.classifier = nn.Sequential(
        nn.Dropout(p=0.3, inplace=True),
        nn.Linear(in_features=1408, out_features=num_class)
    )

    # Load the trained weights
    path_model_weights = Path("./src/data") / "44158_3_efficientnet_b2.pth"
    model.load_state_dict(
        torch.load(path_model_weights, map_location=torch.device("cpu"))
    )

    return model, transforms