jeremyrmanning committed
Commit 2dd7160 · 1 Parent(s): 5b4009e

create initial app


based (heavily!) on https://huggingface.co/spaces/osanseviero/latent-video

Files changed (1)
  1. app.py +84 -0
app.py ADDED
@@ -0,0 +1,84 @@
+ import warnings
+ import gradio as gr
+ from transformers import pipeline
+ import io, base64
+ from PIL import Image
+ import numpy as np
+ import tensorflow as tf
+ import mediapy
+ import os
+ import sys
+ from huggingface_hub import snapshot_download
+
+ # CREDIT: this demo is based *heavily* on https://huggingface.co/spaces/osanseviero/latent-video
+
+ # Load the latent diffusion text-to-image Space as a callable interface
+ with warnings.catch_warnings():
+     warnings.simplefilter('ignore')
+     image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
+
+ # Fetch Google's FILM frame-interpolation code and make it importable
+ os.system("git clone https://github.com/google-research/frame-interpolation")
+ sys.path.append("frame-interpolation")
+ from eval import interpolator, util
+
+ ffmpeg_path = util.get_ffmpeg_path()
+ mediapy.set_ffmpeg(ffmpeg_path)
+
+ # Download the pretrained FILM weights and build the frame interpolator
+ model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
+ interpolator = interpolator.Interpolator(model, None)
+
+ # NOTE: this helper is unused by the UI below and assumes a `story_gen`
+ # text-generation pipeline that is never defined in this file.
+ def generate_story(choice, input_text):
+     query = "<BOS> <{0}> {1}".format(choice, input_text)
+
+     print(query)
+     generated_text = story_gen(query)
+     generated_text = generated_text[0]['generated_text']
+     generated_text = generated_text.split('> ')[2]
+
+     return generated_text
+
+ def generate_images(text, width=256, height=256, steps=50, num_images=1,
+                     diversity=4):
+
+     image_bytes = image_gen(text, steps, width, height, num_images, diversity)
+
+     # Algo from spaces/Gradio-Blocks/latent_gpt2_story/blob/main/app.py
+     generated_images = []
+     for image in image_bytes[1]:
+         image_str = image[0]
+         image_str = image_str.replace("data:image/png;base64,", "")
+         decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
+         img = Image.open(io.BytesIO(decoded_bytes))
+         generated_images.append(img)
+
+     return generated_images
+
+ def generate_interpolation(*text, n=4):
+     # *text collects the five prompt strings passed in from the Gradio inputs
+     generated_images = []
+     for t in text:
+         generated_images.extend(generate_images(t))
+
+     # Save each generated image to disk as a key frame
+     frames = []
+     for i, g in enumerate(generated_images):
+         frames.append(f'frame_{i}.png')
+         g.save(frames[-1])
+
+     # Recursively interpolate between key frames with FILM, then render a video
+     frames = list(util.interpolate_recursively_from_files(frames, n, interpolator))
+
+     mediapy.write_video("out.mp4", frames, fps=7)
+
+     return "out.mp4"
+
+ demo = gr.Blocks()
+
+ with demo:
+     x1 = gr.Textbox(placeholder='brain', label='Text 1')
+     x2 = gr.Textbox(placeholder='salmon', label='Text 2')
+     x3 = gr.Textbox(placeholder='racecar', label='Text 3')
+     x4 = gr.Textbox(placeholder='iguana riding a motorcycle', label='Text 4')
+     x5 = gr.Textbox(placeholder='computer in space', label='Text 5')
+
+     button_gen_video = gr.Button("Generate Video")
+     output_interpolation = gr.Video(label="Generated Video")
+     button_gen_video.click(fn=generate_interpolation, inputs=[x1, x2, x3, x4, x5], outputs=output_interpolation)
+
+ demo.launch(debug=True, enable_queue=True)