patrickbdevaney committed on
Commit 7fbcaaf · verified · 1 Parent(s): 4387c36

Update app.py

Files changed (1):
  app.py  +7 -8
app.py CHANGED
@@ -33,7 +33,7 @@ parsed_descriptions_queue = deque()
 
 # Usage limits
 MAX_DESCRIPTIONS = 30
-MAX_IMAGES = 1  # Generate only 1 image
+MAX_IMAGES = 2  # Limit to 2 images
 
 # Preload models and checkpoints
 print("Preloading models and checkpoints...")
@@ -133,15 +133,14 @@ def generate_descriptions(user_prompt, seed_words_input, batch_size=100, max_ite
     return list(parsed_descriptions_queue)
 
 @spaces.GPU(duration=120)
-def generate_images(parsed_descriptions, max_iterations=1):  # Set max_iterations to 1
-    if len(parsed_descriptions) < MAX_IMAGES:
-        prompts = parsed_descriptions
-    else:
-        prompts = [parsed_descriptions.pop(0) for _ in range(MAX_IMAGES)]
+def generate_images(parsed_descriptions, max_iterations=2):  # Set max_iterations to 2
+    # Limit the number of descriptions passed to the image generator to 2
+    if len(parsed_descriptions) > MAX_IMAGES:
+        parsed_descriptions = parsed_descriptions[:MAX_IMAGES]
 
     images = []
-    for prompt in prompts:
-        images.extend(pipe(prompt, num_images=1, num_inference_steps=max_iterations, height=512, width=512).images)  # Set resolution to 512 x 512
+    for prompt in parsed_descriptions:
+        images.extend(pipe(prompt, num_inference_steps=max_iterations, height=512, width=512).images)  # Set resolution to 512 x 512
 
     return images
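
For context, here is a minimal, GPU-free sketch (not part of the commit) of how the revised generate_images behaves. The FakePipe class and the sample prompts are hypothetical stand-ins for the preloaded pipeline used in app.py; only the call shape (num_inference_steps, height, width, and the .images result) is mirrored, and the @spaces.GPU decorator is dropped here.

# Sketch only: FakePipe is a hypothetical stand-in for the preloaded
# pipeline referenced as `pipe` in app.py.
from types import SimpleNamespace

MAX_IMAGES = 2  # Limit to 2 images, as in the commit


class FakePipe:
    """Mimics pipe(prompt, num_inference_steps=..., height=..., width=...).images."""

    def __call__(self, prompt, num_inference_steps, height, width):
        # Return one placeholder "image" per call, tagged with the prompt.
        return SimpleNamespace(
            images=[f"image<{prompt}, steps={num_inference_steps}, {width}x{height}>"]
        )


pipe = FakePipe()


def generate_images(parsed_descriptions, max_iterations=2):
    # Limit the number of descriptions passed to the image generator
    if len(parsed_descriptions) > MAX_IMAGES:
        parsed_descriptions = parsed_descriptions[:MAX_IMAGES]

    images = []
    for prompt in parsed_descriptions:
        images.extend(
            pipe(prompt, num_inference_steps=max_iterations, height=512, width=512).images
        )
    return images


if __name__ == "__main__":
    # Five descriptions in, only the first two are rendered.
    out = generate_images([f"desc {i}" for i in range(5)])
    print(len(out))  # -> 2

One design note visible in the diff: truncating with a slice (parsed_descriptions[:MAX_IMAGES]) replaces the earlier pop(0) loop, so the caller's list is no longer mutated, and dropping the num_images argument leaves the pipeline generating its default number of images per prompt (presumably one).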