|
import streamlit as st |
|
from PIL import Image |
|
import numpy as np |
|
import torch |
|
|
|
import open_clip |
|
|
|
|
|
|
|
# Path to the precomputed .npz archive of reference embeddings used by the
# KNN step (filename encodes the dataset snapshot and the CLIP model used).
knnpath = '20241204-ams-no-env-open_clip_ViT-H-14-378-quickgelu.npz'

# OpenCLIP architecture identifier and matching pretrained-weights tag.
clip_model_name = 'ViT-H-14-378-quickgelu'

pretrained_name = 'dfn5b'


# Streamlit page configuration — must run before any other st.* call.
st.set_page_config(
    page_title="Percept",
    layout="wide"
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@st.cache_resource
def load_model():
    """Load the OpenCLIP model and return (model, preprocess, tokenizer).

    Cached with st.cache_resource so the large model is built only once
    per Streamlit server process and shared across sessions.
    """
    model, _, preprocess = open_clip.create_model_and_transforms(
        clip_model_name, pretrained=pretrained_name
    )
    # This app only runs inference; disable dropout / batch-norm training
    # behavior so embeddings are deterministic.
    model.eval()
    tokenizer = open_clip.get_tokenizer(clip_model_name)
    return model, preprocess, tokenizer
|
|
|
def process_image(image, preprocess):
    """Turn *image* into a batched tensor ready for CLIP encoding.

    Args:
        image: A PIL-style image object, or a URL string to download the
            image from.
        preprocess: The OpenCLIP preprocessing transform returned by
            ``open_clip.create_model_and_transforms``.

    Returns:
        A tensor with a leading batch dimension, shape ``(1, ...)``.
    """
    if isinstance(image, str):
        # Bug fix: the original used `requests` and `BytesIO` which were
        # never imported anywhere in this file, so the URL path raised
        # NameError. Use the standard library instead.
        from io import BytesIO
        from urllib.request import urlopen

        with urlopen(image) as response:
            image = Image.open(BytesIO(response.read()))

    # CLIP preprocessing expects 3-channel RGB input (handles e.g. RGBA/L).
    if image.mode != 'RGB':
        image = image.convert('RGB')

    # unsqueeze(0) adds the batch dimension the model expects.
    processed_image = preprocess(image).unsqueeze(0)
    return processed_image
|
|
|
@st.cache_resource
def load_knn():
    """Load the precomputed KNN embedding archive from disk (cached once)."""
    archive = np.load(knnpath)
    return archive
|
|
|
def main():
    """Streamlit entry point: load models, accept an image upload, and
    display the shape of the normalized CLIP embedding."""
    st.title("Percept: Human Perception of Street View Image Analyzer")

    try:
        with st.spinner('Loading CLIP model... This may take a moment.'):
            model, preprocess, tokenizer = load_model()
            device = "cuda" if torch.cuda.is_available() else "cpu"
            model = model.to(device)
    except Exception as e:
        st.error(f"Error loading model: {str(e)}")
        st.info("Please make sure you have enough memory and the correct dependencies installed.")
        # Bug fix: the original fell through after this error, so the code
        # below raised NameError on the undefined `model`/`preprocess`/
        # `device`. Stop rendering the rest of the page instead.
        return

    with st.spinner('Loading KNN model... This may take a moment.'):
        knn = load_knn()
        st.write(knn['walkability_vecs'].shape)

    file = st.file_uploader('Upload An Image')

    if file:
        try:
            image = Image.open(file)

            st.image(image, caption="Uploaded Image", use_column_width=True)

            with st.spinner('Processing image...'):
                processed_image = process_image(image, preprocess)
                processed_image = processed_image.to(device)

                # Inference only — no gradients needed.
                with torch.no_grad():
                    vec = model.encode_image(processed_image)

                # L2-normalize so cosine similarity against the KNN vectors
                # reduces to a plain dot product.
                vec /= vec.norm(dim=-1, keepdim=True)
                st.write(vec.shape)
        except Exception as e:
            st.error(f"Error processing image: {str(e)}")
|
|
|
# Run the app only when executed as a script (streamlit run app.py).
if __name__ == "__main__":

    main()
|
|