Create app.py
app.py
ADDED
@@ -0,0 +1,83 @@
import os
from io import BytesIO

import requests
import streamlit as st
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from fastai.vision import open_image, load_learner
from fastai.callbacks.hooks import hook_outputs

# base_loss and gram_matrix are referenced by FeatureLoss below but were never
# defined or imported in the original file. The definitions here follow the
# fastai v1 super-resolution lesson and are assumed to match the ones used at
# training time.
base_loss = F.l1_loss

def gram_matrix(x):
    n, c, h, w = x.size()
    x = x.view(n, c, -1)
    return (x @ x.transpose(1, 2)) / (c * h * w)

# Define the FeatureLoss class (it must exist in this module so the pickled
# learner, which references it, can be unpickled)
class FeatureLoss(nn.Module):
    def __init__(self, m_feat, layer_ids, layer_wgts):
        super().__init__()
        self.m_feat = m_feat
        self.loss_features = [self.m_feat[i] for i in layer_ids]
        self.hooks = hook_outputs(self.loss_features, detach=False)
        self.wgts = layer_wgts
        self.metric_names = (['pixel']
                             + [f'feat_{i}' for i in range(len(layer_ids))]
                             + [f'gram_{i}' for i in range(len(layer_ids))])

    def make_features(self, x, clone=False):
        self.m_feat(x)
        return [(o.clone() if clone else o) for o in self.hooks.stored]

    def forward(self, input, target):
        out_feat = self.make_features(target, clone=True)
        in_feat = self.make_features(input)
        self.feat_losses = [base_loss(input, target)]
        self.feat_losses += [base_loss(f_in, f_out) * w
                             for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
        self.feat_losses += [base_loss(gram_matrix(f_in), gram_matrix(f_out)) * w ** 2 * 5e3
                             for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
        self.metrics = dict(zip(self.metric_names, self.feat_losses))
        return sum(self.feat_losses)

    def __del__(self): self.hooks.remove()

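# For reference only: at training time a FeatureLoss like this is typically
# built on frozen VGG-16 features, as in the fastai v1 super-resolution
# lesson. The exact layers and weights used for image2sketch are not recorded
# in this file, so the values below are illustrative, not authoritative:
#
#   vgg_m = vgg16_bn(True).features.eval()
#   requires_grad(vgg_m, False)
#   blocks = [i - 1 for i, o in enumerate(children(vgg_m))
#             if isinstance(o, nn.MaxPool2d)]
#   feat_loss = FeatureLoss(vgg_m, blocks[2:5], [5, 15, 2])
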
def add_margin(pil_img, top, right, bottom, left, color):
    width, height = pil_img.size
    new_width = width + right + left
    new_height = height + top + bottom
    result = Image.new(pil_img.mode, (new_width, new_height), color)
    result.paste(pil_img, (left, top))
    return result

def inference(image_path_or_url, learn):
    if image_path_or_url.startswith(('http://', 'https://')):
        response = requests.get(image_path_or_url)
        img = Image.open(BytesIO(response.content)).convert("RGB")
    else:
        img = Image.open(image_path_or_url).convert("RGB")

    # Pad the input with a white border before prediction
    im_new = add_margin(img, 250, 250, 250, 250, (255, 255, 255))
    im_new.save("test.jpg", quality=95)
    img = open_image("test.jpg")
    p, img_hr, b = learn.predict(img)
    # img_hr is assumed to be the model's CHW output tensor with values in
    # [0, 1], as in the fastai super-resolution lesson; convert it to a PIL
    # image so st.image can display it
    arr = (img_hr.detach().cpu().clamp(0, 1).permute(1, 2, 0).numpy() * 255).astype('uint8')
    return Image.fromarray(arr)

# Streamlit application
st.title("Image Inference with Fastai")

# Download the model file from the Hugging Face repository
model_url = "https://huggingface.co/Hammad712/image2sketch/resolve/main/image2sketch.pkl"
model_file_path = 'image2sketch.pkl'

if not os.path.exists(model_file_path):
    with st.spinner('Downloading model...'):
        response = requests.get(model_url)
        with open(model_file_path, 'wb') as f:
            f.write(response.content)
    st.success('Model downloaded successfully!')

# Load the trained model (fastai v1's load_learner takes a directory and a
# file name, not a single path)
learn = load_learner('.', model_file_path)

# Input for image URL or path
image_path_or_url = st.text_input("Enter image path or URL", "")

# Run inference button
if st.button("Run Inference"):
    if image_path_or_url:
        with st.spinner('Processing...'):
            high_res_image = inference(image_path_or_url, learn)
        st.image(high_res_image, caption='High Resolution Image', use_column_width=True)
    else:
        st.error("Please enter a valid image path or URL.")
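
A quick way to smoke-test the pipeline outside the Streamlit UI is to import the module and call inference directly. This is a minimal sketch, assuming the dependencies (fastai 1.x, torch, streamlit, pillow, requests) are installed and image2sketch.pkl is already in the working directory; sketch_input.jpg is a hypothetical local test image, and the Streamlit calls executed on import are harmless outside `streamlit run`:

    # smoke_test.py -- exercise app.py's inference pipeline from plain Python
    import app  # importing runs app.py top to bottom, so app.learn is loaded

    result = app.inference('sketch_input.jpg', app.learn)  # path or http(s) URL
    result.save('sketch_output.jpg')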