import streamlit as st
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTForImageClassification

# Load the age-classification model and its image processor once, cached across Streamlit reruns
@st.cache_resource
def get_model_transformers():
    model = ViTForImageClassification.from_pretrained('nateraw/vit-age-classifier')
    transforms = ViTImageProcessor.from_pretrained('nateraw/vit-age-classifier')
    return model, transforms

st.title("Let's predict your age!")

uploaded_file = st.file_uploader("Upload an image of the person whose age you want to predict.", type=["jpg", "jpeg", "png", "gif", "webp"])

if uploaded_file:
    st.write(f'Uploaded file name: {uploaded_file.name}')
    st.image(uploaded_file, caption="Uploaded Image", use_column_width=True)
    
    # Read the uploaded file as a PIL image (convert to RGB so PNG/GIF/WebP images with alpha or palettes work)
    im = Image.open(uploaded_file).convert('RGB')

    model, transforms = get_model_transformers()

    # Preprocess the image and pass it through the model (no gradients needed for inference)
    inputs = transforms(im, return_tensors='pt')
    with torch.no_grad():
        output = model(**inputs)

    # Predicted Class probabilities
    proba = output.logits.softmax(1)

    values, indices = torch.topk(proba, k=5)
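    # values/indices hold the five highest class probabilities and their class ids, sorted in descending order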

    # Map the top-k class indices to their human-readable age-range labels and probabilities
    result_dict = {model.config.id2label[i.item()]: v.item() for i, v in zip(indices.numpy()[0], values.detach().numpy()[0])}
    # The first key is the most likely age range
    first_result = list(result_dict.keys())[0]

    # Debug output to the server console
    print(f'predicted result: {result_dict}')
    print(f'first_result: {first_result}')

    st.header('Result')
    st.subheader(f'The predicted age range is {first_result}')

    # Show the full probability breakdown across the predicted age ranges
    for key, value in result_dict.items():
        st.write(f'{key}: {value * 100:.2f}%')