AshanGimhana committed
Commit ed36d0c · Parent(s): 1a1a664
Update app.py
app.py CHANGED
@@ -1,8 +1,10 @@
 import os
 import subprocess
 
+# Install Gradio
 os.system("pip install gradio==3.50")
 
+# Create and run setup script for CUDA environment variables
 with open('setup.sh', 'w') as f:
     f.write('''#!/bin/bash
 export CUDA_HOME=/usr/local/cuda
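Side note on the install step: app.py already imports subprocess, so the same pinned install could avoid os.system, whose exit code is ignored here. A minimal sketch of that alternative, not part of this commit (only the gradio==3.50 pin is taken from the diff):

import subprocess
import sys

# Install the pinned Gradio version with the interpreter running the app;
# check_call raises CalledProcessError if pip fails instead of failing silently.
subprocess.check_call([sys.executable, "-m", "pip", "install", "gradio==3.50"])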
@@ -19,19 +21,19 @@ os.system('./setup.sh')
 import torch
 
 # Check CUDA availability
-
-
+is_cuda_available = torch.cuda.is_available()
+device = 'cuda' if is_cuda_available else 'cpu'
+print("CUDA available:", is_cuda_available)
 
 from argparse import Namespace
 import pprint
 import numpy as np
 from PIL import Image
-import torch
 import torchvision.transforms as transforms
 import cv2
-import
+import dlib
 import matplotlib.pyplot as plt
-import gradio as gr
+import gradio as gr
 from tensorflow.keras.preprocessing.image import img_to_array
 from huggingface_hub import hf_hub_download, login
 from datasets.augmentations import AgeTransformer
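The three added lines above set up the device-fallback pattern that the rest of the commit reuses: pick 'cuda' when a GPU is visible, otherwise 'cpu', then pass that one string to torch.load, net.to() and the per-tensor .to() calls. A minimal self-contained sketch of the same pattern (the tensor and module below are illustrative placeholders, not objects from app.py):

import torch

is_cuda_available = torch.cuda.is_available()
device = 'cuda' if is_cuda_available else 'cpu'
print("CUDA available:", is_cuda_available)

# Any module or tensor can then be moved with the same device string.
example_input = torch.zeros(1, 3, 256, 256).to(device)   # placeholder tensor
example_module = torch.nn.Identity().to(device)          # placeholder module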
@@ -67,14 +69,14 @@ EXPERIMENT_DATA_ARGS = {
 }
 EXPERIMENT_ARGS = EXPERIMENT_DATA_ARGS[EXPERIMENT_TYPE]
 model_path = EXPERIMENT_ARGS['model_path']
-ckpt = torch.load(model_path, map_location=
+ckpt = torch.load(model_path, map_location=device)
 opts = ckpt['opts']
 pprint.pprint(opts)
 opts['checkpoint_path'] = model_path
 opts = Namespace(**opts)
 net = pSp(opts)
 net.eval()
-net.
+net.to(device)  # Send model to the appropriate device (GPU or CPU)
 
 print('Model successfully loaded!')
 
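Passing map_location=device is what lets the same checkpoint load on a CPU-only Space: a checkpoint whose tensors were saved on a GPU otherwise fails to deserialize when CUDA is unavailable. A minimal sketch of the load-then-move pattern (the checkpoint path and module are placeholders; only the 'opts' key is taken from the diff above):

import torch
from argparse import Namespace

device = 'cuda' if torch.cuda.is_available() else 'cpu'
ckpt = torch.load("checkpoint.pt", map_location=device)  # placeholder path; remaps GPU-saved tensors
opts = Namespace(**ckpt['opts'])                          # per the diff, the checkpoint carries an 'opts' dict
model = torch.nn.Identity().to(device).eval()             # placeholder module standing in for pSp(opts)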
@@ -149,9 +151,9 @@ def apply_aging(image, target_age):
     results = []
     for age_transformer in age_transformers:
         with torch.no_grad():
-            input_image_age = [age_transformer(input_image.cpu()).to(
+            input_image_age = [age_transformer(input_image.cpu()).to(device)]  # Move to appropriate device
             input_image_age = torch.stack(input_image_age)
-            result_tensor = net(input_image_age.to(
+            result_tensor = net(input_image_age.to(device).float(), randomize_noise=False, resize=False)[0]
             result_image = tensor2im(result_tensor)
             results.append(np.array(result_image))
     final_result = results[0]
@@ -184,10 +186,13 @@ def process_image(uploaded_image):
 
 iface = gr.Interface(
     fn=process_image,
-    inputs=gr.Image(type="pil"),
-    outputs=[
-
-
+    inputs=gr.Image(type="pil", label="Upload an Image"),
+    outputs=[
+        gr.Image(label="Image with Good Teeth"),
+        gr.Image(label="Image with Bad Teeth")
+    ],
+    title="Age Transformation and Teeth Replacement",
+    description="Upload an image and apply aging effects while replacing teeth."
 )
 
-iface.launch(
+iface.launch(share=True)
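For reference, a stripped-down sketch of the Gradio 3.x pattern the new interface follows (one PIL image in, two labelled images out, public share link on launch); dummy_process is a stand-in for the app's process_image, and the output labels here are illustrative:

import gradio as gr
import numpy as np

def dummy_process(image):
    # Stand-in for process_image: returns two copies of the uploaded image
    arr = np.array(image)
    return arr, arr

iface = gr.Interface(
    fn=dummy_process,
    inputs=gr.Image(type="pil", label="Upload an Image"),
    outputs=[gr.Image(label="Output A"), gr.Image(label="Output B")],
)
iface.launch(share=True)  # share=True requests a temporary public URL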