jiten6555 committed on
Commit
1c3feda
·
verified ·
1 Parent(s): 68a1a0c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -24
app.py CHANGED
@@ -8,20 +8,39 @@ import cv2
8
 
9
  class RobustDepthTo3DConverter:
10
  def __init__(self):
11
- # Load MiDaS model with explicit configuration
12
  self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
13
 
14
- # Use a specific version and add error handling
15
  try:
16
- self.model = torch.hub.load("intel-isl/MiDaS", "DPT_Small", pretrained=True)
 
 
 
 
 
 
 
 
 
 
 
 
17
  self.model.to(self.device)
18
  self.model.eval()
 
 
 
 
 
 
 
19
  except Exception as e:
20
- print(f"Model loading error: {e}")
21
  self.model = None
22
 
23
  # Create transformation pipeline
24
  self.transform = transforms.Compose([
 
25
  transforms.ToTensor(),
26
  transforms.Normalize(
27
  mean=[0.485, 0.456, 0.406],
@@ -37,20 +56,6 @@ class RobustDepthTo3DConverter:
37
  if not isinstance(input_image, Image.Image):
38
  input_image = Image.fromarray(input_image)
39
 
40
- # Resize image with aspect ratio preservation
41
- max_size = 800
42
- width, height = input_image.size
43
-
44
- # Calculate new dimensions while preserving aspect ratio
45
- if width > height:
46
- new_width = max_size
47
- new_height = int(height * (max_size / width))
48
- else:
49
- new_height = max_size
50
- new_width = int(width * (max_size / height))
51
-
52
- input_image = input_image.resize((new_width, new_height), Image.LANCZOS)
53
-
54
  # Convert to RGB if needed
55
  if input_image.mode != 'RGB':
56
  input_image = input_image.convert('RGB')
@@ -62,7 +67,7 @@ class RobustDepthTo3DConverter:
62
  More robust depth estimation
63
  """
64
  if self.model is None:
65
- raise ValueError("MiDaS model not properly initialized")
66
 
67
  try:
68
  # Preprocess image
@@ -96,7 +101,7 @@ class RobustDepthTo3DConverter:
96
 
97
  def create_point_cloud(self, image, depth_map):
98
  """
99
- Create point cloud with enhanced error handling and adaptive sampling
100
  """
101
  if depth_map is None:
102
  return None
@@ -170,6 +175,10 @@ class RobustDepthTo3DConverter:
170
  """
171
  Enhanced full pipeline with comprehensive error handling
172
  """
 
 
 
 
173
  try:
174
  # Preprocess and validate input
175
  input_image = self.preprocess_image(input_image)
@@ -198,7 +207,7 @@ class RobustDepthTo3DConverter:
198
 
199
  except Exception as e:
200
  print(f"Full pipeline error: {e}")
201
- return f"Error during conversion: {str(e)}"
202
 
203
  def create_huggingface_space():
204
  # Initialize converter
@@ -206,13 +215,15 @@ def create_huggingface_space():
206
 
207
  def convert_image(input_image):
208
  try:
 
 
 
 
209
  output_model = converter.process_image(input_image)
210
- if output_model.startswith("Error"):
211
- raise ValueError(output_model)
212
  return output_model
213
  except Exception as e:
214
  print(f"Conversion error: {e}")
215
- return str(e)
216
 
217
  # Gradio Interface
218
  iface = gr.Interface(
 
8
 
9
  class RobustDepthTo3DConverter:
10
  def __init__(self):
11
+ # Load MiDaS model with explicit configuration and error handling
12
  self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
13
 
 
14
  try:
15
+ # Try multiple methods to load the model
16
+ try:
17
+ # Method 1: Using torch hub with explicit version
18
+ self.model = torch.hub.load("intel-isl/MiDaS", "MiDaS_small", pretrained=True)
19
+ except Exception as e1:
20
+ try:
21
+ # Method 2: Alternative loading
22
+ self.model = torch.hub.load("intel-isl/MiDaS", "DPT_Small", pretrained=True)
23
+ except Exception as e2:
24
+ # If both methods fail, raise a comprehensive error
25
+ raise ValueError(f"Model loading failed. Errors: {e1}, {e2}")
26
+
27
+ # Move model to device and set to eval mode
28
  self.model.to(self.device)
29
  self.model.eval()
30
+
31
+ # Verify model is loaded correctly
32
+ if self.model is None:
33
+ raise ValueError("Model initialization failed: model is None")
34
+
35
+ print("MiDaS model successfully initialized")
36
+
37
  except Exception as e:
38
+ print(f"Critical model initialization error: {e}")
39
  self.model = None
40
 
41
  # Create transformation pipeline
42
  self.transform = transforms.Compose([
43
+ transforms.Resize((256, 256)), # Standardize input size
44
  transforms.ToTensor(),
45
  transforms.Normalize(
46
  mean=[0.485, 0.456, 0.406],
 
56
  if not isinstance(input_image, Image.Image):
57
  input_image = Image.fromarray(input_image)
58
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
  # Convert to RGB if needed
60
  if input_image.mode != 'RGB':
61
  input_image = input_image.convert('RGB')
 
67
  More robust depth estimation
68
  """
69
  if self.model is None:
70
+ raise ValueError("MiDaS model not properly initialized. Check model loading.")
71
 
72
  try:
73
  # Preprocess image
 
101
 
102
  def create_point_cloud(self, image, depth_map):
103
  """
104
+ Create point cloud with enhanced error handling
105
  """
106
  if depth_map is None:
107
  return None
 
175
  """
176
  Enhanced full pipeline with comprehensive error handling
177
  """
178
+ # First, check if model is initialized
179
+ if self.model is None:
180
+ raise ValueError("MiDaS model initialization failed. Cannot process image.")
181
+
182
  try:
183
  # Preprocess and validate input
184
  input_image = self.preprocess_image(input_image)
 
207
 
208
  except Exception as e:
209
  print(f"Full pipeline error: {e}")
210
+ raise # Re-raise the exception to be caught in the Gradio interface
211
 
212
  def create_huggingface_space():
213
  # Initialize converter
 
215
 
216
  def convert_image(input_image):
217
  try:
218
+ # Check model initialization before processing
219
+ if converter.model is None:
220
+ raise ValueError("MiDaS model failed to initialize. Cannot process image.")
221
+
222
  output_model = converter.process_image(input_image)
 
 
223
  return output_model
224
  except Exception as e:
225
  print(f"Conversion error: {e}")
226
+ raise gr.Error(f"Conversion failed: {str(e)}")
227
 
228
  # Gradio Interface
229
  iface = gr.Interface(