arif670 committed
Commit 6fb8958 · verified · 1 Parent(s): bedec2e

Update app.py

Files changed (1)
  1. app.py (+21 -8)
app.py CHANGED
@@ -1,21 +1,31 @@
 import os
+# Must be at VERY TOP before any imports
+os.environ['MPLCONFIGDIR'] = '/tmp/matplotlib'
+os.environ['FONTCONFIG_PATH'] = '/tmp/fontconfig'
+
+import torch  # Import torch immediately after environment vars
 import gradio as gr
 import tempfile
+import logging
 from models import load_models
 from video_generator import generate_video_pipeline
 
-# Configure environment variables first
-os.environ['MPLCONFIGDIR'] = '/tmp/matplotlib'
-os.environ['FONTCONFIG_PATH'] = '/tmp/fontconfig'
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
 
-# Force CPU mode if no GPU available
+# Early hardware detection
 if not torch.cuda.is_available():
+    logger.warning("CUDA not available - Using CPU with reduced performance")
     torch.set_num_threads(4)
     os.environ['CUDA_VISIBLE_DEVICES'] = ''
-    logger.warning("Running in CPU-only mode - expect slower generation")
 
-# Load models at startup
-text_to_image, image_to_video, tts_model = load_models()
+# Load models after hardware config
+try:
+    text_to_image, image_to_video, tts_model = load_models()
+except Exception as e:
+    logger.error(f"Model loading failed: {str(e)}")
+    raise
 
 def generate_video(prompt, duration=5, fps=24):
     with tempfile.TemporaryDirectory() as tmpdir:
@@ -31,7 +41,10 @@ def generate_video(prompt, duration=5, fps=24):
             )
             return video_path
         except Exception as e:
-            raise gr.Error(f"Video generation failed: {str(e)}")
+            logger.error(f"Generation error: {str(e)}")
+            raise gr.Error("Video generation failed - check logs")
+
+# Rest of Gradio interface remains unchanged
 
 with gr.Blocks(title="AI Video Generator") as app:
     gr.Markdown("# 🎥 AI Video Generator")
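
Reassembled from the two hunks above, the updated generate_video reads roughly as follows. Only the lines visible in the diff are certain; lines 32-40 of the new file are not shown, so the generate_video_pipeline arguments (prompt, duration, fps, output_dir) are assumptions for illustration.

# Sketch of generate_video as it stands after this commit. The except branch
# and the logging setup come from the diff; the generate_video_pipeline
# arguments are assumed, since the middle of the function is outside the diff.
import logging
import tempfile

import gradio as gr

from video_generator import generate_video_pipeline

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def generate_video(prompt, duration=5, fps=24):
    with tempfile.TemporaryDirectory() as tmpdir:
        try:
            # Hypothetical call signature for the unchanged middle of the
            # function; the real arguments live in the lines this diff omits.
            video_path = generate_video_pipeline(
                prompt=prompt,
                duration=duration,
                fps=fps,
                output_dir=tmpdir,
            )
            return video_path
        except Exception as e:
            # New in this commit: log the full error server-side and raise a
            # generic gr.Error so the UI no longer leaks exception details.
            logger.error(f"Generation error: {str(e)}")
            raise gr.Error("Video generation failed - check logs")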
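
The added comment notes that the rest of the Gradio interface is unchanged, and the diff shows only its opening two lines. A minimal wiring consistent with that header might look like the sketch below; every component name (prompt_input, duration_slider, fps_slider, generate_btn, video_output) and the launch call are illustrative assumptions, not code from the repository.

# Illustrative sketch only: the actual interface is outside this diff.
import gradio as gr

with gr.Blocks(title="AI Video Generator") as app:
    gr.Markdown("# 🎥 AI Video Generator")
    prompt_input = gr.Textbox(label="Prompt")                                 # assumed
    duration_slider = gr.Slider(1, 30, value=5, label="Duration (seconds)")   # assumed
    fps_slider = gr.Slider(8, 60, value=24, step=1, label="FPS")              # assumed
    generate_btn = gr.Button("Generate")                                      # assumed
    video_output = gr.Video(label="Generated video")                          # assumed

    # generate_video is the function defined earlier in app.py
    generate_btn.click(
        fn=generate_video,
        inputs=[prompt_input, duration_slider, fps_slider],
        outputs=video_output,
    )

if __name__ == "__main__":
    app.launch()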