reshav1 committed
Commit 5d551ae · verified · Parent(s): 4b392f5

Update app.py

Files changed (1)
  1. app.py +29 -5
app.py CHANGED
@@ -1,6 +1,30 @@
- from transformers import pipeline
- generator = pipeline("text-generation", model="gpt2")
- prompt = "Once upon a time, in a galaxy far, far away"
- generated_text = generator(prompt, max_length=100, num_return_sequences=3)

- print(generated_text)
+ import torch
+ from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
+ from diffusers.utils import export_to_video
+ import streamlit as st

+ # Title and User Input
+ st.title("Text-to-Video with Streamlit")
+ prompt = st.text_input("Enter your text prompt:", "Spiderman is surfing")
+
+ # Button to trigger generation
+ if st.button("Generate Video"):
+
+     # Ensure you have 'accelerate' version 0.17.0 or higher (see previous explanation)
+     import accelerate
+     if accelerate.__version__ < "0.17.0":
+         st.warning("Please upgrade 'accelerate' to version 0.17.0 or higher for CPU offloading.")
+     else:
+         with st.spinner("Generating video..."):
+             pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w",
+                                                      torch_dtype=torch.float16,
+                                                      variant="fp16",
+                                                      device="cpu")  # Force CPU usage
+             pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
+             pipe.enable_model_cpu_offload()  # Assuming 'accelerate' is updated
+
+             video_frames = pipe(prompt, num_inference_steps=25).frames
+             video_path = export_to_video(video_frames)
+
+             # Display the video in the Streamlit app
+             st.video(video_path)
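
As committed, the new app.py has a few likely runtime problems: DiffusionPipeline.from_pretrained() does not document a device keyword (placement is normally done with .to() or device_map), many CPU ops do not support float16, enable_model_cpu_offload() is meant for shuttling sub-models between a GPU and the CPU and does little on a CPU-only machine, and comparing version strings lexicographically can misorder releases (for example, "0.9.0" sorts after "0.17.0"). The code below is a hypothetical cleanup sketch, not part of this commit: it keeps the same Streamlit flow and the same cerspense/zeroscope_v2_576w checkpoint, but picks dtype and device placement from torch.cuda.is_available() and parses versions with packaging.

import numpy as np
import torch
import streamlit as st
from packaging import version
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video

st.title("Text-to-Video with Streamlit")
prompt = st.text_input("Enter your text prompt:", "Spiderman is surfing")

if st.button("Generate Video"):
    import accelerate

    # Compare versions numerically; plain string comparison is unreliable.
    if version.parse(accelerate.__version__) < version.parse("0.17.0"):
        st.warning("Please upgrade 'accelerate' to 0.17.0 or higher for CPU offloading.")
    else:
        with st.spinner("Generating video..."):
            use_cuda = torch.cuda.is_available()
            # fp16 weights are only practical on a GPU; fall back to fp32 on CPU.
            dtype = torch.float16 if use_cuda else torch.float32

            pipe = DiffusionPipeline.from_pretrained(
                "cerspense/zeroscope_v2_576w",
                torch_dtype=dtype,
            )
            pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

            if use_cuda:
                # Keep only the active sub-model on the GPU (needs accelerate >= 0.17.0).
                pipe.enable_model_cpu_offload()
            else:
                # from_pretrained() already loads on the CPU; .to("cpu") is only for
                # clarity. Expect CPU generation to be very slow.
                pipe.to("cpu")

            video_frames = pipe(prompt, num_inference_steps=25).frames
            # Depending on the diffusers version, frames may come back batched
            # (one entry per prompt); unwrap the first batch element if so.
            if isinstance(video_frames, np.ndarray) and video_frames.ndim == 5:
                video_frames = video_frames[0]
            elif isinstance(video_frames, (list, tuple)) and len(video_frames) and isinstance(video_frames[0], (list, tuple)):
                video_frames = list(video_frames[0])
            video_path = export_to_video(video_frames)

            st.video(video_path)

With torch, diffusers, accelerate (0.17.0 or newer), and streamlit installed, the app starts with: streamlit run app.py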