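"""Streamlit app that predicts a person's age range from an uploaded photo,
using the ViT classifier `nateraw/vit-age-classifier` from the Hugging Face Hub.

Run locally with: streamlit run app.py  (filename assumed)
"""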
import streamlit as st
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTForImageClassification
# Init model and transforms (cached so they only load once per session)
@st.cache_resource
def get_model_transformers():
    model = ViTForImageClassification.from_pretrained('nateraw/vit-age-classifier')
    transforms = ViTImageProcessor.from_pretrained('nateraw/vit-age-classifier')
    return model, transforms
st.title("Let's predict your age!")
uploaded_file = st.file_uploader("Upload an image of the person whose age you want to predict.", type=["jpg", "jpeg", "png", "gif", "webp"])
if uploaded_file:
    st.write(f'Uploaded file name: {uploaded_file.name}')
    st.image(uploaded_file, caption="Uploaded Image", use_column_width=True)

    # Read the upload in as a PIL image; convert to RGB so palette/alpha
    # images (e.g. PNG, GIF) don't break the image processor
    im = Image.open(uploaded_file).convert('RGB')
    model, transforms = get_model_transformers()

    # Transform our image and pass it through the model (no gradients needed at inference)
    inputs = transforms(im, return_tensors='pt')
    with torch.no_grad():
        output = model(**inputs)

    # Predicted class probabilities, then the top-5 classes, highest first
    proba = output.logits.softmax(1)
    values, indices = torch.topk(proba, k=5)
    result_dict = {model.config.id2label[i.item()]: v.item() for i, v in zip(indices[0], values[0])}
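    # Illustrative shape of result_dict (values are made up; keys come from the
    # model's id2label age buckets): {'20-29': 0.71, '30-39': 0.15, ...}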
    # topk returns classes sorted by probability, so the first key is the top prediction
    first_result = list(result_dict.keys())[0]
    print(f'predicted result: {result_dict}')
    print(f'first_result: {first_result}')

    st.header('Result')
    st.subheader(f'The predicted age range is {first_result}')
    for key, value in result_dict.items():
        st.write(f'{key}: {value * 100:.2f}%')
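    # Note: only the top-k classes are listed, so the percentages shown
    # need not sum to 100%.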