giorgio-caparvi committed on
Commit
ddf8ebd
·
1 Parent(s): 7ff3983

saving sketch in fs

Browse files
api/app.py CHANGED
@@ -21,6 +21,10 @@ def index():
21
 
22
  @app.route('/generate-design', methods=['GET','POST'])
23
  def generate_design():
 
 
 
 
24
  try:
25
  print("### Inizio generate_design ###") # Stampa per il debug
26
 
@@ -45,12 +49,17 @@ def generate_design():
45
  print(f"### Errore nell'apertura dell'immagine: {str(e)} ###") # Stampa per il debug
46
  return f"Failed to open the image: {str(e)}", 400
47
 
48
- # Creazione buffer di immagine
49
- img_sketch_buffer = io.BytesIO()
50
- image.save(img_sketch_buffer, format='JPEG')
51
- img_sketch_buffer.seek(0)
52
- print("### Immagine salvata nel buffer ###") # Stampa per il debug
53
 
 
 
 
 
 
 
 
 
 
 
54
  # Argomenti per eval.main
55
  sys.argv = [
56
  'eval.py',
@@ -69,7 +78,7 @@ def generate_design():
69
 
70
  # Esecuzione del modello
71
  try:
72
- final_image = eval.main(img_sketch_buffer, json_data_from_req)
73
  print("### eval.main eseguito con successo ###") # Stampa per il debug
74
  except AttributeError as e:
75
  print(f"### AttributeError: {str(e)} ###") # Stampa per il debug
 
21
 
22
  @app.route('/generate-design', methods=['GET','POST'])
23
  def generate_design():
24
+
25
+ image_save_dir = "/api/model/assets/data/vitonhd/test/im_sketch"
26
+ text_file_path = "/api/model/assets/data/vitonhd/test/test_paired.txt"
27
+
28
  try:
29
  print("### Inizio generate_design ###") # Stampa per il debug
30
 
 
49
  print(f"### Errore nell'apertura dell'immagine: {str(e)} ###") # Stampa per il debug
50
  return f"Failed to open the image: {str(e)}", 400
51
 
 
 
 
 
 
52
 
53
+ # Saving sketch in filesystem
54
+ model_key = list(json_data_from_req["MODEL"].keys())[0] # Es: "03191"
55
+ image_filename = f"{model_key}_00.jpg" # Nome file immagine es: "03191_00.jpg"
56
+ image_save_path = os.path.join(image_save_dir, image_filename)
57
+ image.save(image_save_path, format='JPEG')
58
+
59
+ with open(text_file_path, "w") as text_file:
60
+ text_file.write(f"{image_filename} {image_filename}\n")
61
+
62
+
63
  # Argomenti per eval.main
64
  sys.argv = [
65
  'eval.py',
 
78
 
79
  # Esecuzione del modello
80
  try:
81
+ final_image = eval.main(json_data_from_req)
82
  print("### eval.main eseguito con successo ###") # Stampa per il debug
83
  except AttributeError as e:
84
  print(f"### AttributeError: {str(e)} ###") # Stampa per il debug
api/model/src/datasets/vitonhd.py CHANGED
@@ -6,7 +6,6 @@ import os
6
  import pathlib
7
  import random
8
  import sys
9
- import io
10
  from typing import Tuple
11
 
12
  PROJECT_ROOT = pathlib.Path(__file__).absolute().parents[2].absolute()
@@ -27,11 +26,10 @@ class VitonHDDataset(data.Dataset):
27
  self,
28
  dataroot_path: str,
29
  phase: str,
30
- im_sketch_buffer_from_request,
31
- json_from_req,
32
  tokenizer,
 
33
  radius=5,
34
- caption_folder='captions.json', #######################################################3
35
  sketch_threshold_range: Tuple[int, int] = (20, 127),
36
  order: str = 'paired',
37
  outputlist: Tuple[str] = ('c_name', 'im_name', 'image', 'im_cloth', 'shape', 'pose_map',
@@ -51,7 +49,6 @@ class VitonHDDataset(data.Dataset):
51
  self.width = size[1]
52
  self.radius = radius
53
  self.tokenizer = tokenizer
54
- self.im_sketch_buffer_from_request = im_sketch_buffer_from_request
55
  self.json_from_req = json_from_req
56
  self.transform = transforms.Compose([
57
  transforms.ToTensor(),
@@ -74,20 +71,18 @@ class VitonHDDataset(data.Dataset):
74
 
75
  assert all(x in possible_outputs for x in outputlist)
76
 
 
 
 
 
 
 
77
  # Load Captions
78
  model_data = self.json_from_req.get('MODEL', {}) # Safely get the 'MODEL' key, default to an empty dictionary if it doesn't exist
79
  # Filter captions based on the length requirement (3 or more items)
80
  self.captions_dict = {k: v for k, v in model_data.items() if len(v) >= 3}
81
- '''
82
- with open(os.path.join(self.dataroot, self.caption_folder)) as f:
83
- # self.captions_dict = json.load(f)['items']
84
- self.captions_dict = json.load(f)
85
- self.captions_dict = {k: v for k, v in self.captions_dict.items() if len(v) >= 3}
86
-
87
 
88
  dataroot = self.dataroot
89
-
90
-
91
  if phase == 'train':
92
  filename = os.path.join(dataroot, f"{phase}_pairs.txt")
93
  else:
@@ -111,10 +106,10 @@ class VitonHDDataset(data.Dataset):
111
  im_names.append(im_name)
112
  c_names.append(c_name)
113
  dataroot_names.append(dataroot)
114
- '''
115
- self.im_names = []
116
- self.c_names = []
117
- self.dataroot_names = []
118
 
119
  def __getitem__(self, index):
120
  """
@@ -124,14 +119,14 @@ class VitonHDDataset(data.Dataset):
124
  :return: dict containing dataset samples
125
  :rtype: dict
126
  """
127
- c_name = list(self.captions_dict.keys())[0] + "_00.jpg" # self.c_names[index]
128
- im_name = list(self.captions_dict.keys())[0] + "_00.jpg" #self.im_names[index]
129
- print(im_name)
130
  dataroot = self.dataroot_names[index]
131
- #dataroot = "./assets/data/vitonhd"
132
 
133
  sketch_threshold = random.randint(self.sketch_threshold_range[0], self.sketch_threshold_range[1])
134
 
 
 
135
  if "captions" in self.outputlist or "original_captions" in self.outputlist:
136
  captions = self.captions_dict[c_name.split('_')[0]]
137
  # take a random caption if there are multiple
@@ -160,7 +155,7 @@ class VitonHDDataset(data.Dataset):
160
  image = self.transform(image) # [-1,1]
161
 
162
  if "im_sketch" in self.outputlist:
163
- '''# Person image
164
  # im_sketch = Image.open(os.path.join(dataroot, 'im_sketch', c_name.replace(".jpg", ".png")))
165
  if self.order == 'unpaired':
166
  im_sketch = Image.open(
@@ -168,20 +163,16 @@ class VitonHDDataset(data.Dataset):
168
  os.path.splitext(im_name)[0] + '_' + c_name.replace(".jpg", ".png")))
169
  elif self.order == 'paired':
170
  im_sketch = Image.open(os.path.join(dataroot, self.phase, 'im_sketch', im_name.replace(".jpg", ".jpeg")))
171
- # define a transform to convert the image to grayscale
172
  transform = transforms.Grayscale()
 
173
  # apply the above transform on the image
174
  im_sketch = transform(im_sketch)
175
  else:
176
  raise ValueError(
177
  f"Order should be either paired or unpaired"
178
- )'''
179
- #im_sketch = Image.open(self.im_sketch_buffer_from_request)
180
- im_sketch = Image.open(io.BytesIO(self.im_sketch_buffer_from_request))
181
- # define a transform to convert the image to grayscale
182
- transform = transforms.Grayscale()
183
- # apply the above transform on the image
184
- im_sketch = transform(im_sketch)
185
  im_sketch = im_sketch.resize((self.width, self.height))
186
  im_sketch = ImageOps.invert(im_sketch)
187
  # threshold grayscale pil image
@@ -405,4 +396,4 @@ class VitonHDDataset(data.Dataset):
405
  return result
406
 
407
  def __len__(self):
408
- return len(self.c_names)
 
6
  import pathlib
7
  import random
8
  import sys
 
9
  from typing import Tuple
10
 
11
  PROJECT_ROOT = pathlib.Path(__file__).absolute().parents[2].absolute()
 
26
  self,
27
  dataroot_path: str,
28
  phase: str,
 
 
29
  tokenizer,
30
+ json_from_req,
31
  radius=5,
32
+ caption_folder='captions.json',
33
  sketch_threshold_range: Tuple[int, int] = (20, 127),
34
  order: str = 'paired',
35
  outputlist: Tuple[str] = ('c_name', 'im_name', 'image', 'im_cloth', 'shape', 'pose_map',
 
49
  self.width = size[1]
50
  self.radius = radius
51
  self.tokenizer = tokenizer
 
52
  self.json_from_req = json_from_req
53
  self.transform = transforms.Compose([
54
  transforms.ToTensor(),
 
71
 
72
  assert all(x in possible_outputs for x in outputlist)
73
 
74
+ # Load Captions
75
+ #with open(os.path.join(self.dataroot, self.caption_folder)) as f:
76
+ # # self.captions_dict = json.load(f)['items']
77
+ # self.captions_dict = json.load(f)
78
+ #self.captions_dict = {k: v for k, v in self.captions_dict.items() if len(v) >= 3}
79
+
80
  # Load Captions
81
  model_data = self.json_from_req.get('MODEL', {}) # Safely get the 'MODEL' key, default to an empty dictionary if it doesn't exist
82
  # Filter captions based on the length requirement (3 or more items)
83
  self.captions_dict = {k: v for k, v in model_data.items() if len(v) >= 3}
 
 
 
 
 
 
84
 
85
  dataroot = self.dataroot
 
 
86
  if phase == 'train':
87
  filename = os.path.join(dataroot, f"{phase}_pairs.txt")
88
  else:
 
106
  im_names.append(im_name)
107
  c_names.append(c_name)
108
  dataroot_names.append(dataroot)
109
+
110
+ self.im_names = im_names
111
+ self.c_names = c_names
112
+ self.dataroot_names = dataroot_names
113
 
114
  def __getitem__(self, index):
115
  """
 
119
  :return: dict containing dataset samples
120
  :rtype: dict
121
  """
122
+ c_name = self.c_names[index]
123
+ im_name = self.im_names[index]
 
124
  dataroot = self.dataroot_names[index]
 
125
 
126
  sketch_threshold = random.randint(self.sketch_threshold_range[0], self.sketch_threshold_range[1])
127
 
128
+ print("##############################", self.captions_dict)
129
+ print("$$$$$$$$$$ CLOTH NAME",self.c_names)
130
  if "captions" in self.outputlist or "original_captions" in self.outputlist:
131
  captions = self.captions_dict[c_name.split('_')[0]]
132
  # take a random caption if there are multiple
 
155
  image = self.transform(image) # [-1,1]
156
 
157
  if "im_sketch" in self.outputlist:
158
+ # Person image
159
  # im_sketch = Image.open(os.path.join(dataroot, 'im_sketch', c_name.replace(".jpg", ".png")))
160
  if self.order == 'unpaired':
161
  im_sketch = Image.open(
 
163
  os.path.splitext(im_name)[0] + '_' + c_name.replace(".jpg", ".png")))
164
  elif self.order == 'paired':
165
  im_sketch = Image.open(os.path.join(dataroot, self.phase, 'im_sketch', im_name.replace(".jpg", ".jpeg")))
166
+ # define a transform to convert the image to grayscale
167
  transform = transforms.Grayscale()
168
+
169
  # apply the above transform on the image
170
  im_sketch = transform(im_sketch)
171
  else:
172
  raise ValueError(
173
  f"Order should be either paired or unpaired"
174
+ )
175
+
 
 
 
 
 
176
  im_sketch = im_sketch.resize((self.width, self.height))
177
  im_sketch = ImageOps.invert(im_sketch)
178
  # threshold grayscale pil image
 
396
  return result
397
 
398
  def __len__(self):
399
+ return len(self.c_names)
api/model/src/eval.py CHANGED
@@ -30,7 +30,7 @@ os.environ["TOKENIZERS_PARALLELISM"] = "true"
30
  os.environ["WANDB_START_METHOD"] = "thread"
31
 
32
 
33
- def main(im_sketch_buffer_from_request: io.BytesIO, json_from_req: dict) -> None:
34
  args = eval_parse_args()
35
  accelerator = Accelerator(
36
  mixed_precision=args.mixed_precision,
@@ -92,7 +92,6 @@ def main(im_sketch_buffer_from_request: io.BytesIO, json_from_req: dict) -> None
92
  radius=5,
93
  tokenizer=tokenizer,
94
  size=(512, 384),
95
- im_sketch_buffer_from_request=im_sketch_buffer_from_request,
96
  json_from_req=json_from_req
97
  )
98
  else:
 
30
  os.environ["WANDB_START_METHOD"] = "thread"
31
 
32
 
33
+ def main(json_from_req: dict) -> None:
34
  args = eval_parse_args()
35
  accelerator = Accelerator(
36
  mixed_precision=args.mixed_precision,
 
92
  radius=5,
93
  tokenizer=tokenizer,
94
  size=(512, 384),
 
95
  json_from_req=json_from_req
96
  )
97
  else: