import warnings
import gradio as gr
from transformers import pipeline
import io, base64
from PIL import Image
import numpy as np
import tensorflow as tf
import mediapy
import os
import sys
from huggingface_hub import snapshot_download

# CREDIT: this demo is based *heavily* on https://huggingface.co/spaces/osanseviero/latent-video
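#
# Pipeline: each comma-separated prompt is rendered to an image by the
# multimodalart/latentdiffusion Space, the resulting frames are smoothly
# interpolated with Google's FILM frame-interpolation model, and the frames
# are written out as an mp4 with mediapy.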

with warnings.catch_warnings():
  warnings.simplefilter('ignore')
  # Load the multimodalart/latentdiffusion Space as a callable text-to-image interface.
  image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")

  # Fetch Google's FILM frame-interpolation code and make it importable.
  os.system("git clone https://github.com/google-research/frame-interpolation")
  sys.path.append("frame-interpolation")
  from eval import interpolator as interpolator_lib, util

# Configure mediapy with the ffmpeg path reported by the FILM utilities.
ffmpeg_path = util.get_ffmpeg_path()
mediapy.set_ffmpeg(ffmpeg_path)

# Download the FILM-style model weights and build the frame interpolator.
model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
interpolator = interpolator_lib.Interpolator(model, None)


# Text-generation pipeline used by generate_story(). NOTE: the exact model is an
# assumption; any GPT-2 story generator that accepts "<BOS> <genre> ..." prompts
# (e.g. pranavpsv/gpt2-genre-story-generator) will work here. This helper is not
# wired into the Blocks UI below.
story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator")


def generate_story(choice, input_text):
    # Build a genre-conditioned prompt such as "<BOS> <sci_fi> A robot wakes up".
    query = "<BOS> <{0}> {1}".format(choice, input_text)
    print(query)

    generated_text = story_gen(query)[0]['generated_text']
    # Drop the "<BOS> <genre>" prefix and keep only the story text.
    generated_text = generated_text.split('> ')[2]

    return generated_text


def generate_images(text, width=256, height=256, steps=50, num_images=1, diversity=4):
    # Ask the latentdiffusion Space for images; output index [1] holds the
    # generated images as base64-encoded data URLs.
    image_bytes = image_gen(text, steps, width, height, num_images, diversity)

    # Decoding approach from spaces/Gradio-Blocks/latent_gpt2_story/blob/main/app.py
    generated_images = []
    for image in image_bytes[1]:
        image_str = image[0]
        image_str = image_str.replace("data:image/png;base64,", "")
        decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
        generated_images.append(Image.open(io.BytesIO(decoded_bytes)))

    return generated_images


def generate_interpolation(text, fps=10, times_to_interpolate=4):
    # Generate one image per comma-separated prompt.
    generated_images = []
    for prompt in text.split(','):
        generated_images.extend(generate_images(prompt.strip()))

    # Write the frames to disk; the FILM utilities operate on image files.
    frames = []
    for i, img in enumerate(generated_images):
        frames.append(f'frame_{i}.png')
        img.save(frames[-1])

    # times_to_interpolate controls the FILM recursion depth: 2**n - 1 intermediate
    # frames are synthesized between each pair of input frames (4 is an arbitrary default).
    vid = list(util.interpolate_recursively_from_files(frames, times_to_interpolate, interpolator))

    mediapy.write_video("out.mp4", vid, fps=fps)
    return "out.mp4"


demo = gr.Blocks()

with demo:
  text = gr.Textbox(placeholder='human, human head, brain, brain in a computer, humanoid robot', label='Input a comma-separated list of terms:')
  button = gr.Button("Generate Video")
  output = gr.Video(label="Generated Video")
        
  button.click(fn=generate_interpolation, inputs=text, outputs=output)  

demo.launch(debug=True, enable_queue=True)