import warnings
import gradio as gr
from transformers import pipeline
import io, base64
from PIL import Image
import numpy as np
import tensorflow as tf
import mediapy
import os
import sys
from huggingface_hub import snapshot_download

#CREDIT: this demo is based *heavily* on https://huggingface.co/spaces/osanseviero/latent-video

with warnings.catch_warnings():
  warnings.simplefilter('ignore')
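  # Load the hosted latent-diffusion Space as a callable interface for text-to-image generation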
  image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")

  os.system("git clone https://github.com/google-research/frame-interpolation")
  sys.path.append("frame-interpolation")
  from eval import interpolator, util
  
ffmpeg_path = util.get_ffmpeg_path()
mediapy.set_ffmpeg(ffmpeg_path)

# Download the pre-trained FILM (Frame Interpolation for Large Motion) weights and build the interpolator
model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
film_interpolator = interpolator.Interpolator(model, None)
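
# NOTE: generate_story() below calls a text-generation pipeline named `story_gen`,
# which is never defined in this file, so that function cannot run as-is. A minimal
# sketch of the missing definition (the model name here is a placeholder, not
# necessarily what the original Space used):
# story_gen = pipeline("text-generation", model="gpt2")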


def generate_story(choice, input_text):
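    """Prepend '<BOS> <genre>' control tokens to the prompt, run story_gen, and return the continuation with the control tokens stripped."""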
    query = "<BOS> <{0}> {1}".format(choice, input_text)
    
    print(query)
    generated_text = story_gen(query)
    generated_text = generated_text[0]['generated_text']
    generated_text = generated_text.split('> ')[2]
    
    return generated_text


def generate_images(text, width=256, height=256, steps=50, num_images=1, diversity=4):
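    """Query the hosted latent-diffusion Space and decode its base64-encoded PNG results into PIL images."""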

    image_bytes = image_gen(text, steps, width, height, num_images, diversity)
    
    # Algo from spaces/Gradio-Blocks/latent_gpt2_story/blob/main/app.py
    generated_images = []
    for image in image_bytes[1]:
        image_str = image[0]
        image_str = image_str.replace("data:image/png;base64,","")
        decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
        img = Image.open(io.BytesIO(decoded_bytes))
        generated_images.append(img)
        
    return generated_images


def generate_interpolation(*text, n=4):
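    """Generate one image per prompt, save them as frames, interpolate between them with FILM, and write the result to out.mp4."""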
    generated_images = []
    for t in text:
      generated_images.extend(generate_images(t))
    
    frames = []
    for i, g in enumerate(generated_images):
      frames.append(f'frame_{i}.png')
      g.save(frames[-1])
    
    frames = list(util.interpolate_recursively_from_files(frames, n, film_interpolator))

    mediapy.write_video("out.mp4", frames, fps=7)
    
    return "out.mp4"


demo = gr.Blocks()  
with demo:
    x1 = gr.Textbox(placeholder='brain', label='Text 1')
    x2 = gr.Textbox(placeholder='salmon', label='Text 2')
    x3 = gr.Textbox(placeholder='racecar', label='Text 3')
    x4 = gr.Textbox(placeholder='iguana riding a motorcycle', label='Text 4')
    x5 = gr.Textbox(placeholder='computer in space', label='Text 5')
  
    button_gen_video = gr.Button("Generate Video")
    output_interpolation = gr.Video(label="Generated Video")
    button_gen_video.click(fn=generate_interpolation, inputs=[x1, x2, x3, x4, x5], outputs=output_interpolation)

demo.launch(debug=True, enable_queue=True)