Update app.py
app.py CHANGED

@@ -32,7 +32,7 @@ parsed_descriptions_queue = deque()
 
 # Usage limits
 MAX_DESCRIPTIONS = 30
-MAX_IMAGES =
+MAX_IMAGES = 10
 
 def initialize_diffusers():
     from optimum.quanto import freeze, qfloat8, quantize
@@ -88,7 +88,7 @@ def parse_descriptions(text):
     return descriptions
 
 @spaces.GPU
-def generate_descriptions(user_prompt, seed_words_input, batch_size=100, max_iterations=
+def generate_descriptions(user_prompt, seed_words_input, batch_size=100, max_iterations=3):
     descriptions = []
     description_queue = deque()
     iteration_count = 0
@@ -102,36 +102,36 @@ def generate_descriptions(user_prompt, seed_words_input, batch_size=100, max_ite
 
     seed_words.extend(re.findall(r'"(.*?)"', seed_words_input))
 
-
-
-
-
-
+    for _ in range(2):  # Perform two iterations
+        while iteration_count < max_iterations and len(parsed_descriptions_queue) < MAX_DESCRIPTIONS:
+            available_subjects = [word for word in seed_words if word not in used_words]
+            if not available_subjects:
+                print("No more available subjects to use.")
+                break
 
-
-
-
-        if generated_description:
-            clean_description = generated_description.encode('ascii', 'ignore').decode('ascii')
-            description_queue.append({'subject': subject, 'description': clean_description})
+            subject = random.choice(available_subjects)
+            generated_description = generate_description_prompt(subject, user_prompt, text_generator)
 
-
+            if generated_description:
+                clean_description = generated_description.encode('ascii', 'ignore').decode('ascii')
+                description_queue.append({'subject': subject, 'description': clean_description})
 
-
-
-            if iteration_count % 3 == 0:
+                print(f"Generated description for subject '{subject}': {clean_description}")
+
+                used_words.add(subject)
+                seed_words.append(clean_description)
 
                 parsed_descriptions = parse_descriptions(clean_description)
                 parsed_descriptions_queue.extend(parsed_descriptions)
 
-
+            iteration_count += 1
 
     return list(parsed_descriptions_queue)
 
 @spaces.GPU(duration=120)
-def generate_images(parsed_descriptions):
+def generate_images(parsed_descriptions, max_iterations=10):
     pipe = initialize_diffusers()
-
+
     if len(parsed_descriptions) < MAX_IMAGES:
         prompts = parsed_descriptions
     else:
@@ -139,20 +139,20 @@ def generate_images(parsed_descriptions):
 
     images = []
     for prompt in prompts:
-        images.extend(pipe(prompt, num_images=1).images)
+        images.extend(pipe(prompt, num_images=1, num_inference_steps=max_iterations, height=1024, width=1024).images)  # Define the resolution here
 
     return images
 
 def combined_function(user_prompt, seed_words_input):
     parsed_descriptions = generate_descriptions(user_prompt, seed_words_input)
     images = generate_images(parsed_descriptions)
-    return images
+    return parsed_descriptions, images
 
 if __name__ == '__main__':
     interface = gr.Interface(
         fn=combined_function,
         inputs=[gr.Textbox(lines=2, placeholder="Enter a prompt for descriptions..."), gr.Textbox(lines=2, placeholder='Enter seed words in quotes, e.g., "cat", "dog", "sunset"...')],
-        outputs=gr.Gallery()
+        outputs=[gr.Textbox(label="Generated Descriptions"), gr.Gallery(label="Generated Images")]
     )
 
     interface.launch()
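Note: the loop added in this commit calls generate_description_prompt(subject, user_prompt, text_generator), a helper that lies outside the changed hunks. A minimal, hypothetical sketch of such a helper, assuming text_generator is a Hugging Face transformers text-generation pipeline; the prompt wording and token budget below are illustrative, not taken from the Space:

def generate_description_prompt(subject, user_prompt, text_generator):
    # Hypothetical helper: build a prompt around the chosen subject and let the
    # text-generation pipeline produce a short description for it.
    prompt = f"{user_prompt}\nWrite a short, vivid description of: {subject}\n"
    outputs = text_generator(prompt, max_new_tokens=120, return_full_text=False)
    # The pipeline returns a list of dicts; take the first generated continuation.
    return outputs[0]["generated_text"].strip()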
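Likewise, initialize_diffusers() is only partially visible here (the optimum.quanto import in the first hunk). A minimal sketch of one way it could be implemented, assuming a diffusers FluxPipeline whose transformer is quantized to qfloat8 and frozen; the checkpoint id, dtype, and device placement are assumptions, not taken from the commit:

import torch
from diffusers import FluxPipeline

def initialize_diffusers():
    from optimum.quanto import freeze, qfloat8, quantize  # as imported in the diff

    # Assumed checkpoint; the actual Space may load a different model.
    pipe = FluxPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
    )
    # Quantize the transformer weights to float8 and freeze them to reduce GPU memory.
    quantize(pipe.transformer, weights=qfloat8)
    freeze(pipe.transformer)
    return pipe.to("cuda")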