gaur3009 committed on
Commit
519ee0e
·
verified ·
1 Parent(s): 7d54250

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -2
app.py CHANGED
@@ -4,22 +4,28 @@ from torchvision import models, transforms
4
  from PIL import Image, ImageEnhance, ImageDraw
5
  import numpy as np
6
 
 
7
# Detector: Faster R-CNN with a ResNet-50 FPN backbone, pre-trained on COCO.
# Loaded once at module import and switched to inference mode.
model = models.detection.fasterrcnn_resnet50_fpn(
    weights=models.detection.FasterRCNN_ResNet50_FPN_Weights.COCO_V1
)
model.eval()

# Preprocessing: convert a PIL image to a float tensor scaled to [0, 1];
# the detection model normalizes internally, so no extra steps are needed.
transform = transforms.Compose([transforms.ToTensor()])
13
 
 
14
  def detect_dress(image):
15
  image_tensor = transform(image).unsqueeze(0)
16
  with torch.no_grad():
17
  outputs = model(image_tensor)
18
 
19
  boxes = outputs[0]['boxes'].numpy()
 
20
  scores = outputs[0]['scores'].numpy()
21
- threshold = 0.8
22
- dress_boxes = [box for box, score in zip(boxes, scores) if score > threshold]
 
 
23
 
24
  draw = ImageDraw.Draw(image)
25
  for box in dress_boxes:
@@ -27,6 +33,7 @@ def detect_dress(image):
27
 
28
  return image, dress_boxes
29
 
 
30
def crop_image(image, box):
    """Return the rectangular region of *image* bounded by *box*.

    *box* is a (left, upper, right, lower) sequence, as accepted by
    ``PIL.Image.Image.crop``.
    """
    region = image.crop(box)
    return region
32
 
def adjust_color(image, factor):
    """Return *image* with its color saturation scaled by *factor*.

    ``factor == 1.0`` leaves the image unchanged; ``0.0`` yields grayscale;
    values above 1.0 boost saturation (PIL ``ImageEnhance.Color`` semantics).
    """
    return ImageEnhance.Color(image).enhance(factor)
36
 
 
37
  def process_image(image, edit_type, factor):
38
  detected_image, boxes = detect_dress(image)
39
 
@@ -50,6 +58,7 @@ def process_image(image, edit_type, factor):
50
 
51
  return edited_image, "Edit applied."
52
 
 
53
  iface = gr.Interface(
54
  fn=process_image,
55
  inputs=[
@@ -64,4 +73,5 @@ iface = gr.Interface(
64
  live=True
65
  )
66
 
 
67
  iface.launch()
 
4
  from PIL import Image, ImageEnhance, ImageDraw
5
  import numpy as np
6
 
7
+ # Load pre-trained Faster R-CNN model with updated weights parameter
8
  model = models.detection.fasterrcnn_resnet50_fpn(weights=models.detection.FasterRCNN_ResNet50_FPN_Weights.COCO_V1)
9
  model.eval()
10
 
11
+ # Image transformation
12
  transform = transforms.Compose([
13
  transforms.ToTensor()
14
  ])
15
 
16
+ # Detection function
17
  def detect_dress(image):
18
  image_tensor = transform(image).unsqueeze(0)
19
  with torch.no_grad():
20
  outputs = model(image_tensor)
21
 
22
  boxes = outputs[0]['boxes'].numpy()
23
+ labels = outputs[0]['labels'].numpy()
24
  scores = outputs[0]['scores'].numpy()
25
+
26
+ # NOTE: COCO has no 'dress' class; label 1 is 'person', so this keeps
+ # person detections as a rough proxy for clothed figures — confirm intent.
27
+ threshold = 0.5
28
+ dress_boxes = [box for box, label, score in zip(boxes, labels, scores) if label == 1 and score > threshold]
29
 
30
  draw = ImageDraw.Draw(image)
31
  for box in dress_boxes:
 
33
 
34
  return image, dress_boxes
35
 
36
+ # Image editing functions
37
  def crop_image(image, box):
38
  return image.crop(box)
39
 
 
41
  enhancer = ImageEnhance.Color(image)
42
  return enhancer.enhance(factor)
43
 
44
+ # Gradio interface function
45
  def process_image(image, edit_type, factor):
46
  detected_image, boxes = detect_dress(image)
47
 
 
58
 
59
  return edited_image, "Edit applied."
60
 
61
+ # Create Gradio interface
62
  iface = gr.Interface(
63
  fn=process_image,
64
  inputs=[
 
73
  live=True
74
  )
75
 
76
+ # Launch the Gradio interface
77
  iface.launch()