remove zeroGPU duration and modify text2image parameters
app.py CHANGED
@@ -107,7 +107,7 @@ def translate_to_english(text):
 
     return chat_reply.choices[0].message.content
 
-@spaces.GPU
+@spaces.GPU
 def generate_images(history, conversation_times, last_genimg_times, generated_images):
     if generated_images is not None and last_genimg_times == conversation_times:
         return conversation_times, last_genimg_times, *generated_images  # return directly if the images have already been generated
@@ -115,7 +115,7 @@ def generate_images(history, conversation_times, last_genimg_times, generated_images):
     user_story = " ".join([h[0] for h in history])
     prompt = translate_to_english(user_story)
 
-    neg_prompt = "dark, realistic,
+    neg_prompt = "dark, realistic, words, sentence, text, Low quality, error, extra, nude, duplicate, ugly"
 
     seeds = np.random.randint(0, 100000, 4)
     generator = [torch.Generator().manual_seed(int(i)) for i in seeds]
@@ -123,12 +123,12 @@ def generate_images(history, conversation_times, last_genimg_times, generated_images):
     last_genimg_times = conversation_times
 
     for i in range(4):
-        img = pipe_t2i("style of Milton Glaser,
+        img = pipe_t2i("style of Milton Glaser, healing image, "+prompt,
                        negative_prompt=neg_prompt,
                        height=720, width=512,
                        generator=generator[i],
                        num_inference_steps=40,
-                       guidance_scale=
+                       guidance_scale=10,
                        ).images[0]
         images.append(img)
 
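For context, a minimal sketch of generate_images as it reads after this commit. The removed lines are cut off in the diff above, so the previous values are not reproduced here; per the commit title, the old decorator presumably carried the explicit duration= argument that @spaces.GPU accepts on ZeroGPU Spaces, and the old prompt, negative prompt, and guidance_scale differed. pipe_t2i and translate_to_english are defined elsewhere in app.py; the imports, the images list initialization, and the final return statement below are assumptions, since those lines fall outside the hunks shown.

# Sketch only, not the verbatim file: generate_images after this commit.
# pipe_t2i is assumed to be a diffusers text-to-image pipeline built earlier in app.py.
import numpy as np
import torch
import spaces

@spaces.GPU  # explicit ZeroGPU duration removed; the default allocation is used
def generate_images(history, conversation_times, last_genimg_times, generated_images):
    # Skip regeneration when images for this conversation turn already exist.
    if generated_images is not None and last_genimg_times == conversation_times:
        return conversation_times, last_genimg_times, *generated_images

    user_story = " ".join([h[0] for h in history])
    prompt = translate_to_english(user_story)  # defined earlier in app.py

    neg_prompt = ("dark, realistic, words, sentence, text, Low quality, "
                  "error, extra, nude, duplicate, ugly")

    # Four independent seeds so each generated image differs.
    seeds = np.random.randint(0, 100000, 4)
    generator = [torch.Generator().manual_seed(int(i)) for i in seeds]

    images = []  # assumed initialization; this line sits outside the hunks above
    last_genimg_times = conversation_times

    for i in range(4):
        img = pipe_t2i("style of Milton Glaser, healing image, " + prompt,
                       negative_prompt=neg_prompt,
                       height=720, width=512,
                       generator=generator[i],
                       num_inference_steps=40,
                       guidance_scale=10,
                       ).images[0]
        images.append(img)

    # Assumed return shape, mirroring the early-return branch; not shown in the hunks.
    return conversation_times, last_genimg_times, *images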