patrickbdevaney committed on
Commit
e66b780
·
verified ·
1 Parent(s): a5c135c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -6
app.py CHANGED
@@ -1,4 +1,3 @@
1
- import spaces # beginn
2
  import torch.multiprocessing as mp
3
  import torch
4
  import os
@@ -8,6 +7,7 @@ from collections import deque
8
  from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
9
  import gradio as gr
10
  from accelerate import Accelerator
 
11
 
12
  # Check if the start method has already been set
13
  if mp.get_start_method(allow_none=True) != 'spawn':
@@ -129,7 +129,9 @@ def generate_descriptions(user_prompt, seed_words_input, batch_size=100, max_ite
129
  return list(parsed_descriptions_queue)
130
 
131
  @spaces.GPU(duration=120)
132
- def generate_images(parsed_descriptions, pipe):
 
 
133
  if len(parsed_descriptions) < MAX_IMAGES:
134
  prompts = parsed_descriptions
135
  else:
@@ -143,13 +145,10 @@ def generate_images(parsed_descriptions, pipe):
143
 
144
  def combined_function(user_prompt, seed_words_input):
145
  parsed_descriptions = generate_descriptions(user_prompt, seed_words_input)
146
- pipe = initialize_diffusers()
147
- images = generate_images(parsed_descriptions, pipe)
148
  return images
149
 
150
  if __name__ == '__main__':
151
- torch.cuda.init()
152
-
153
  interface = gr.Interface(
154
  fn=combined_function,
155
  inputs=[gr.Textbox(lines=2, placeholder="Enter a prompt for descriptions..."), gr.Textbox(lines=2, placeholder='Enter seed words in quotes, e.g., "cat", "dog", "sunset"...')],
 
 
1
  import torch.multiprocessing as mp
2
  import torch
3
  import os
 
7
  from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
8
  import gradio as gr
9
  from accelerate import Accelerator
10
+ import spaces
11
 
12
  # Check if the start method has already been set
13
  if mp.get_start_method(allow_none=True) != 'spawn':
 
129
  return list(parsed_descriptions_queue)
130
 
131
  @spaces.GPU(duration=120)
132
+ def generate_images(parsed_descriptions):
133
+ pipe = initialize_diffusers()
134
+
135
  if len(parsed_descriptions) < MAX_IMAGES:
136
  prompts = parsed_descriptions
137
  else:
 
145
 
146
  def combined_function(user_prompt, seed_words_input):
147
  parsed_descriptions = generate_descriptions(user_prompt, seed_words_input)
148
+ images = generate_images(parsed_descriptions)
 
149
  return images
150
 
151
  if __name__ == '__main__':
 
 
152
  interface = gr.Interface(
153
  fn=combined_function,
154
  inputs=[gr.Textbox(lines=2, placeholder="Enter a prompt for descriptions..."), gr.Textbox(lines=2, placeholder='Enter seed words in quotes, e.g., "cat", "dog", "sunset"...')],