aiqtech committed · Commit 1e991a3 · Parent: eff7695

Update app.py

Files changed (1): app.py (+29 -33)
app.py CHANGED
@@ -1,5 +1,4 @@
 import random
-
 import gradio as gr
 import numpy as np
 import torch
@@ -7,6 +6,7 @@ import spaces
 from diffusers import FluxPipeline
 from PIL import Image
 from diffusers.utils import export_to_gif
+from transformers import pipeline

 HEIGHT = 256
 WIDTH = 1024
@@ -18,29 +18,32 @@ pipe = FluxPipeline.from_pretrained(
     torch_dtype=torch.bfloat16
 ).to(device)

+translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
+
 def split_image(input_image, num_splits=4):
-    # Create a list to store the output images
     output_images = []
-
-    # Split the image into four 256x256 sections
     for i in range(num_splits):
         left = i * 256
         right = (i + 1) * 256
         box = (left, 0, right, 256)
         output_images.append(input_image.crop(box))
-
     return output_images

+def translate_to_english(text):
+    return translator(text)[0]['translation_text']
+
 @spaces.GPU(duration=190)
 def predict(prompt, seed=42, randomize_seed=False, guidance_scale=5.0, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
+    if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
+        prompt = translate_to_english(prompt)
+
     prompt_template = f"""
-    A side by side 4 frame image showing consecutive stills from a looped gif moving from left to right.
-    The gif is of {prompt}.
+    A side by side 4 frame image showing consecutive stills from a looped gif moving from left to right. The gif is of {prompt}.
     """
-
+
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
-
+
     image = pipe(
         prompt=prompt_template,
         guidance_scale=guidance_scale,
@@ -50,60 +53,52 @@ def predict(prompt, seed=42, randomize_seed=False, guidance_scale=5.0, num_infer
         height=HEIGHT,
         width=WIDTH
     ).images[0]
-
+
     return export_to_gif(split_image(image, 4), "flux.gif", fps=4), image, seed

-demo = gr.Interface(fn=predict, inputs="text", outputs="image")
-
 css = """
-footer {
-    visibility: hidden;
-}
+footer { visibility: hidden;}
 """

-
 examples = [
-    "a cat waving its paws in the air",
-    "a panda moving their hips from side to side",
-    "a flower going through the process of blooming"
+    "고양이가 공중에서 발을 흔드는 모습",
+    "팬더가 엉덩이를 좌우로 흔드는 모습",
+    "꽃이 피어나는 과정"
 ]

 with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
     with gr.Column(elem_id="col-container"):
-
         with gr.Row():
-            prompt = gr.Text(label="Prompt", show_label=False, max_lines=1, placeholder="Enter your prompt")
-            submit = gr.Button("Submit", scale=0)
-
+            prompt = gr.Text(label="프롬프트", show_label=False, max_lines=1, placeholder="프롬프트를 입력하세요")
+            submit = gr.Button("제출", scale=0)
         output = gr.Image(label="GIF", show_label=False)
-        output_stills = gr.Image(label="stills", show_label=False, elem_id="stills")
-        with gr.Accordion("Advanced Settings", open=False):
+        output_stills = gr.Image(label="스틸 이미지", show_label=False, elem_id="stills")
+
+        with gr.Accordion("고급 설정", open=False):
             seed = gr.Slider(
-                label="Seed",
+                label="시드",
                 minimum=0,
                 maximum=MAX_SEED,
                 step=1,
                 value=0,
             )
-
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
+            randomize_seed = gr.Checkbox(label="시드 무작위화", value=True)
             with gr.Row():
                 guidance_scale = gr.Slider(
-                    label="Guidance Scale",
+                    label="가이던스 스케일",
                     minimum=1,
                     maximum=15,
                     step=0.1,
                     value=3.5,
                 )
                 num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
+                    label="추론 단계 수",
                     minimum=1,
                     maximum=50,
                     step=1,
                     value=28,
                 )
-
+
         gr.Examples(
             examples=examples,
             fn=predict,
@@ -111,11 +106,12 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
             outputs=[output, output_stills, seed],
             cache_examples="lazy"
         )
+
        gr.on(
            triggers=[submit.click, prompt.submit],
            fn=predict,
            inputs=[prompt, seed, randomize_seed, guidance_scale, num_inference_steps],
-            outputs = [output, output_stills, seed]
+            outputs=[output, output_stills, seed]
        )

 demo.launch()
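For reference, a minimal standalone sketch of the translation path this commit adds: detect Hangul characters in the prompt and run it through the Helsinki-NLP/opus-mt-ko-en checkpoint before the text reaches the diffusion pipeline. The model name and the two Unicode ranges come from the diff above; the has_korean helper, the sample prompt, and the printed translation are illustrative only, not part of the Space.

from transformers import pipeline

# Korean -> English translator, same checkpoint the commit loads at module level.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

def has_korean(text: str) -> bool:
    # Hangul Compatibility Jamo (U+3131..U+318E) or Hangul Syllables (U+AC00..U+D7A3),
    # the same two ranges checked inside predict().
    return any('\u3131' <= ch <= '\u318E' or '\uAC00' <= ch <= '\uD7A3' for ch in text)

def translate_to_english(text: str) -> str:
    # The translation pipeline returns a list of dicts with a 'translation_text' key.
    return translator(text)[0]['translation_text']

prompt = "고양이가 공중에서 발을 흔드는 모습"  # first example from the updated app
if has_korean(prompt):
    prompt = translate_to_english(prompt)
print(prompt)  # e.g. a sentence along the lines of "A cat waving its paws in the air"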
 
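The animation trick itself is unchanged by this commit: FLUX renders one 1024x256 strip that the prompt template describes as four consecutive stills, split_image() cuts it into 256x256 tiles, and diffusers' export_to_gif() writes them out at 4 fps, exactly as predict() does. Below is a minimal sketch of that strip-to-GIF step with a synthetic PIL image standing in for the generated strip; the gray-level fill is only there to make the frames distinguishable.

from PIL import Image
from diffusers.utils import export_to_gif

HEIGHT, WIDTH = 256, 1024

def split_image(input_image, num_splits=4):
    # Cut the wide strip into num_splits 256x256 frames, left to right.
    output_images = []
    for i in range(num_splits):
        left = i * 256
        right = (i + 1) * 256
        output_images.append(input_image.crop((left, 0, right, 256)))
    return output_images

# Synthetic stand-in for pipe(...).images[0]: four tiles with different gray levels.
strip = Image.new("RGB", (WIDTH, HEIGHT))
for i in range(4):
    tile = Image.new("RGB", (256, 256), (64 * i, 64 * i, 64 * i))
    strip.paste(tile, (i * 256, 0))

frames = split_image(strip, 4)
gif_path = export_to_gif(frames, "flux.gif", fps=4)  # same call as in predict()
print(gif_path, len(frames))  # expected: flux.gif 4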