CHEN11102 committed on
Commit
bcc193e
1 Parent(s): 14d4726

Upload 10 files

Files changed (10)
  1. README.md +8 -6
  2. app.py +93 -0
  3. cat1.jpeg +0 -0
  4. cat2.jpeg +0 -0
  5. cat3.jpeg +0 -0
  6. cat4.jpeg +0 -0
  7. index.html +18 -56
  8. packages.txt +1 -0
  9. requirements.txt +12 -0
  10. style.css +28 -0
README.md CHANGED
@@ -1,10 +1,12 @@
 ---
-title: Sportmodel1
-emoji: 🖼️
-colorFrom: yellow
-colorTo: red
-sdk: static
+title: Frame Interpolation
+emoji: 🐢
+colorFrom: blue
+colorTo: gray
+sdk: gradio
+sdk_version: 3.1.4
+app_file: app.py
 pinned: false
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py ADDED
@@ -0,0 +1,93 @@
+import os
+import sys
+import numpy as np
+import tensorflow as tf
+import mediapy
+from PIL import Image
+import gradio as gr
+from huggingface_hub import snapshot_download
+
+# Clone the FILM repository and add it to the import path
+os.system("git clone https://github.com/google-research/frame-interpolation")
+sys.path.append("frame-interpolation")
+
+# Import after appending the path
+from eval import interpolator, util
+
+def load_model(model_name):
+    model = interpolator.Interpolator(snapshot_download(repo_id=model_name), None)
+    return model
+
+model_names = [
+    "akhaliq/frame-interpolation-film-style",
+    "NimaBoscarino/frame-interpolation_film_l1",
+    "NimaBoscarino/frame_interpolation_film_vgg",
+]
+
+models = {model_name: load_model(model_name) for model_name in model_names}
+
+ffmpeg_path = util.get_ffmpeg_path()
+mediapy.set_ffmpeg(ffmpeg_path)
+
+def resize(width, img):
+    img = Image.fromarray(img)
+    wpercent = (width / float(img.size[0]))
+    hsize = int((float(img.size[1]) * float(wpercent)))
+    img = img.resize((width, hsize), Image.LANCZOS)
+    return img
+
+def resize_and_crop(img_path, size, crop_origin="middle"):
+    img = Image.open(img_path)
+    img = img.resize(size, Image.LANCZOS)
+    return img
+
+def resize_img(img1, img2_path):
+    img_target_size = Image.open(img1)
+    img_to_resize = resize_and_crop(
+        img2_path,
+        (img_target_size.size[0], img_target_size.size[1]),  # set width and height to match img1
+        crop_origin="middle"
+    )
+    img_to_resize.save('resized_img2.png')
+
+def predict(frame1, frame2, times_to_interpolate, model_name):
+    model = models[model_name]
+
+    frame1 = resize(1080, frame1)
+    frame2 = resize(1080, frame2)
+
+    frame1.save("test1.png")
+    frame2.save("test2.png")
+
+    resize_img("test1.png", "test2.png")
+    input_frames = ["test1.png", "resized_img2.png"]
+
+    frames = list(
+        util.interpolate_recursively_from_files(
+            input_frames, int(times_to_interpolate), model))  # cast: gr.Number returns a float
+
+    mediapy.write_video("out.mp4", frames, fps=30)
+    return "out.mp4"
+
+title = "Sports model"
+description = "Wechat:Liesle1"
+article = ""
+examples = [
+    ['cat3.jpeg', 'cat4.jpeg', 2, model_names[0]],
+    ['cat1.jpeg', 'cat2.jpeg', 2, model_names[1]],
+]
+
+gr.Interface(
+    fn=predict,
+    inputs=[
+        gr.Image(label="First Frame"),
+        gr.Image(label="Second Frame"),
+        gr.Number(label="Times to Interpolate", value=2),
+        gr.Dropdown(label="Model", choices=model_names),
+    ],
+    outputs=gr.Video(label="Interpolated Frames"),
+    title=title,
+    description=description,
+    article=article,
+    examples=examples,
+).launch()
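
Note: the Space wires predict() into gr.Interface, so the intended way to exercise it is through the UI or the bundled examples. For a quick check outside Gradio, a minimal sketch along the lines below reuses the same calls that app.py makes; it is not part of the commit and assumes the frame-interpolation repo has already been cloned alongside the file and that both input images share the same resolution (which is why app.py resizes the second frame to match the first).

# Hedged standalone sketch, not part of the commit; mirrors app.py's own calls.
import sys
sys.path.append("frame-interpolation")

import mediapy
from huggingface_hub import snapshot_download
from eval import interpolator, util

model = interpolator.Interpolator(
    snapshot_download(repo_id="akhaliq/frame-interpolation-film-style"), None)

mediapy.set_ffmpeg(util.get_ffmpeg_path())
frames = list(util.interpolate_recursively_from_files(
    ["cat1.jpeg", "cat2.jpeg"], 2, model))  # 2 rounds of recursive interpolation
mediapy.write_video("out.mp4", frames, fps=30)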
cat1.jpeg ADDED
cat2.jpeg ADDED
cat3.jpeg ADDED
cat4.jpeg ADDED
index.html CHANGED
@@ -1,57 +1,19 @@
-<!DOCTYPE html>
+<!doctype html>
 <html>
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-    <title>Gradio-Lite: Serverless Gradio Running Entirely in Your Browser</title>
-    <meta name="description" content="Gradio-Lite: Serverless Gradio Running Entirely in Your Browser">
-
-    <script type="module" crossorigin src="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.js"></script>
-    <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.css" />
-
-    <style>
-      html, body {
-        margin: 0;
-        padding: 0;
-        height: 100%;
-      }
-    </style>
-  </head>
-  <body>
-    <gradio-lite>
-      <gradio-file name="app.py" entrypoint>
-        import gradio as gr
-
-        from filters import as_gray
-
-        def process(input_image):
-            output_image = as_gray(input_image)
-            return output_image
-
-        demo = gr.Interface(
-            process,
-            "image",
-            "image",
-            examples=["lion.jpg", "logo.png"],
-        )
-
-        demo.launch()
-      </gradio-file>
-
-      <gradio-file name="filters.py">
-        from skimage.color import rgb2gray
-
-        def as_gray(image):
-            return rgb2gray(image)
-      </gradio-file>
-
-      <gradio-file name="lion.jpg" url="https://raw.githubusercontent.com/gradio-app/gradio/main/gradio/test_data/lion.jpg" />
-      <gradio-file name="logo.png" url="https://raw.githubusercontent.com/gradio-app/gradio/main/guides/assets/logo.png" />
-
-      <gradio-requirements>
-        # Same syntax as requirements.txt
-        scikit-image
-      </gradio-requirements>
-    </gradio-lite>
-  </body>
-</html>
+  <head>
+    <meta charset="utf-8" />
+    <meta name="viewport" content="width=device-width" />
+    <title>My static Space</title>
+    <link rel="stylesheet" href="style.css" />
+  </head>
+  <body>
+    <div class="card">
+      <h1>Welcome to your static Space!</h1>
+      <p>You can modify this app directly by editing <i>index.html</i> in the Files and versions tab.</p>
+      <p>
+        Also don't forget to check the
+        <a href="https://huggingface.co/docs/hub/spaces" target="_blank">Spaces documentation</a>.
+      </p>
+    </div>
+  </body>
+</html>
packages.txt ADDED
@@ -0,0 +1 @@
+ffmpeg
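
packages.txt lists system packages that Spaces installs with apt at build time; ffmpeg is the encoder mediapy needs to write out.mp4. A small hedged check (not part of the commit) to confirm the binary is visible to the app:

# Hypothetical sanity check: is the ffmpeg installed via packages.txt on PATH?
import shutil
print(shutil.which("ffmpeg") or "ffmpeg not found - check packages.txt")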
requirements.txt ADDED
@@ -0,0 +1,12 @@
+tensorflow>=2.6.2  # The latest should include tensorflow-gpu
+tensorflow-datasets>=4.4.0
+tensorflow-addons>=0.15.0
+absl-py>=0.12.0
+gin-config>=0.5.0
+parameterized>=0.8.1
+mediapy>=1.0.3
+scikit-image>=0.19.1
+apache-beam>=2.34.0
+google-cloud-bigquery-storage>=1.1.0  # Suppresses a harmless error from beam
+natsort>=8.1.0
+image-tools
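
The comment on the tensorflow pin assumes the wheel includes GPU support; whether the Space's runtime actually exposes a GPU is worth confirming, since FILM inference on CPU is slow. A hedged one-off check (not part of the commit):

# Hypothetical check: report the installed TensorFlow version and any visible GPUs.
import tensorflow as tf
print(tf.__version__, tf.config.list_physical_devices("GPU"))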
style.css ADDED
@@ -0,0 +1,28 @@
+body {
+  padding: 2rem;
+  font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
+}
+
+h1 {
+  font-size: 16px;
+  margin-top: 0;
+}
+
+p {
+  color: rgb(107, 114, 128);
+  font-size: 15px;
+  margin-bottom: 10px;
+  margin-top: 5px;
+}
+
+.card {
+  max-width: 620px;
+  margin: 0 auto;
+  padding: 16px;
+  border: 1px solid lightgray;
+  border-radius: 16px;
+}
+
+.card p:last-child {
+  margin-bottom: 0;
+}