kadirnar committed on
Commit 4a00548
1 Parent(s): b9f214e

Update app.py

Files changed (1)
  1. app.py +69 -115
app.py CHANGED
@@ -1,117 +1,71 @@
-import gradio as gr
-from huggingface_hub import hf_hub_download
+from typing import List
+from setuptools import find_packages, setup
 import subprocess
-import tempfile
-import shutil
-import os
-import spaces
 
-from transformers import T5ForConditionalGeneration, T5Tokenizer
-import os
-
-subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
-def install_apex():
-    subprocess.run([
-        'pip', 'install', '-v', '--disable-pip-version-check', '--no-cache-dir',
-        '--no-build-isolation', 'git+https://github.com/NVIDIA/apex.git'
-    ], check=True)
-
-try:
-    import apex
-except ModuleNotFoundError:
-    print("Installing Apex...")
-    install_apex()
-    import apex  # Try to import Apex again after installation
-
-
-def download_t5_model(model_id, save_directory):
-    # Download the model and its tokenizer
-    model = T5ForConditionalGeneration.from_pretrained(model_id)
-    tokenizer = T5Tokenizer.from_pretrained(model_id)
-
-    # Save the model and tokenizer to the specified directory
-    if not os.path.exists(save_directory):
-        os.makedirs(save_directory)
-    model.save_pretrained(save_directory)
-    tokenizer.save_pretrained(save_directory)
-
-# Model ID and the directory to save it to
-model_id = "DeepFloyd/t5-v1_1-xxl"
-save_directory = "pretrained_models/t5_ckpts/t5-v1_1-xxl"
-
-# Download the model
-download_t5_model(model_id, save_directory)
-
-def download_model(repo_id, model_name):
-    model_path = hf_hub_download(repo_id=repo_id, filename=model_name)
-    return model_path
-
-import glob
-
-@spaces.GPU
-def run_inference(model_name, prompt_text):
-    repo_id = "hpcai-tech/Open-Sora"
-
-    # Map model names to their respective configuration files
-    config_mapping = {
-        "OpenSora-v1-16x256x256.pth": "configs/opensora/inference/16x256x256.py",
-        "OpenSora-v1-HQ-16x256x256.pth": "configs/opensora/inference/16x512x512.py",
-        "OpenSora-v1-HQ-16x512x512.pth": "configs/opensora/inference/64x512x512.py"
-    }
-
-    config_path = config_mapping[model_name]
-    ckpt_path = download_model(repo_id, model_name)
-
-    # Save prompt_text to a temporary text file
-    prompt_file = tempfile.NamedTemporaryFile(delete=False, suffix=".txt", mode='w')
-    prompt_file.write(prompt_text)
-    prompt_file.close()
-
-    with open(config_path, 'r') as file:
-        config_content = file.read()
-    config_content = config_content.replace('prompt_path = "./assets/texts/t2v_samples.txt"', f'prompt_path = "{prompt_file.name}"')
-
-    with tempfile.NamedTemporaryFile('w', delete=False, suffix='.py') as temp_file:
-        temp_file.write(config_content)
-        temp_config_path = temp_file.name
-
-    cmd = [
-        "torchrun", "--standalone", "--nproc_per_node", "1",
-        "scripts/inference.py", temp_config_path,
-        "--ckpt-path", ckpt_path
-    ]
-    subprocess.run(cmd)
-
-    save_dir = "./outputs/samples/"  # e.g., the save directory used by inference.py
-    list_of_files = glob.glob(f'{save_dir}/*')
-    if list_of_files:
-        latest_file = max(list_of_files, key=os.path.getctime)
-        return latest_file
-    else:
-        print("No files found in the output directory.")
-        return None
-
-    # Clean up the temporary files
-    os.remove(temp_file.name)
-    os.remove(prompt_file.name)
-
-def main():
-    gr.Interface(
-        fn=run_inference,
-        inputs=[
-            gr.Dropdown(choices=[
-                "OpenSora-v1-16x256x256.pth",
-                "OpenSora-v1-HQ-16x256x256.pth",
-                "OpenSora-v1-HQ-16x512x512.pth"
-            ],
-                value="OpenSora-v1-16x256x256.pth",
-                label="Model Selection"),
-            gr.Textbox(label="Prompt Text", value="Enter prompt text here")
-        ],
-        outputs=gr.Video(label="Output Video"),
-        title="Open-Sora Inference",
-        description="Run Open-Sora Inference with Custom Parameters",
-    ).launch()
-
-if __name__ == "__main__":
-    main()
+def fetch_requirements(path) -> List[str]:
+    """
+    This function reads the requirements file.
+
+    Args:
+        path (str): the path to the requirements file.
+
+    Returns:
+        The lines in the requirements file.
+    """
+    with open(path, "r") as fd:
+        return [r.strip() for r in fd.readlines()]
+
+def fetch_readme() -> str:
+    """
+    This function reads the README.md file in the current directory.
+
+    Returns:
+        The contents of the README file.
+    """
+    with open("README.md", encoding="utf-8") as f:
+        return f.read()
+
+setup(
+    name="opensora",
+    version="1.0.0",
+    packages=find_packages(
+        exclude=(
+            "assets",
+            "configs",
+            "docs",
+            "outputs",
+            "pretrained_models",
+            "scripts",
+            "tests",
+            "tools",
+            "*.egg-info",
+        )
+    ),
+    description="Democratizing Efficient Video Production for All",
+    long_description=fetch_readme(),
+    long_description_content_type="text/markdown",
+    license="Apache Software License 2.0",
+    install_requires=fetch_requirements("requirements.txt"),
+    python_requires=">=3.6",
+    classifiers=[
+        "Programming Language :: Python :: 3",
+        "License :: OSI Approved :: Apache Software License",
+        "Environment :: GPU :: NVIDIA CUDA",
+        "Topic :: Scientific/Engineering :: Artificial Intelligence",
+        "Topic :: System :: Distributed Computing",
+    ],
+)
+
+install_options = [
+    "--disable-pip-version-check",
+    "--no-cache-dir",
+    "--no-build-isolation",
+    "--config-settings", "--build-option=--cpp_ext",
+    "--config-settings", "--build-option=--cuda_ext"
+]
+
+subprocess.run(
+    ["pip", "install", "-v"] + install_options + ["git+https://github.com/kadirnar/apex.git"],
+    check=True,
+    capture_output=True
+)
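
A note on the new Apex install step: because subprocess.run is called with capture_output=True and check=True, a failed build raises CalledProcessError while pip's log stays hidden in the captured buffers. A minimal sketch of a wrapper that surfaces the log on failure (the pip_install helper is illustrative, not part of this commit):

import subprocess

def pip_install(args: list) -> None:
    # Run pip with the given arguments; if the install fails, echo the
    # captured output so the Apex build error is visible in the logs.
    try:
        subprocess.run(
            ["pip", "install", "-v"] + args,
            check=True,
            capture_output=True,
            text=True,
        )
    except subprocess.CalledProcessError as err:
        print(err.stdout)
        print(err.stderr)
        raise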