saranbalan committed
Commit ef96a1f · verified · Parent: f08ad33

Update app.py

Files changed (1): app.py (+3 -3)
app.py CHANGED
@@ -5,7 +5,6 @@ from groq import Groq
 from deep_translator import GoogleTranslator
 from diffusers import StableDiffusionPipeline
 import torch
-import huggingface_hub
 from huggingface_hub import login
 
 # Set up Groq API key
@@ -28,7 +27,7 @@ except Exception as e:
 # Set device: CUDA if available, else CPU
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
-# Load Whisper model (if using locally, else use API as in original code)
+# Load Whisper model
 whisper_model = whisper.load_model("base")
 
 # Model IDs for Stable Diffusion pipelines
@@ -66,10 +65,11 @@ def process_audio(audio_path, image_option):
     if image_option == "Generate Image":
         try:
             # Use the Hugging Face API key to load the restricted model for image generation
-            pipe = StableDiffusionPipeline.from_pretrained(restricted_model_id, torch_dtype=torch.float16, use_auth_token=HF_API_KEY)
+            pipe = StableDiffusionPipeline.from_pretrained(restricted_model_id, torch_dtype=torch.float16, token=HF_API_KEY)
             pipe = pipe.to(device)
             image = pipe(translation).images[0]
         except Exception as e:
+            print(f"Image generation error: {str(e)}")  # Add error logging
             return tamil_text, translation, f"An error occurred during image generation: {str(e)}", None
 
     return tamil_text, translation, image
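
For context, here is a minimal, self-contained sketch of the authenticated loading pattern this commit moves to: `token=` instead of the deprecated `use_auth_token=` when pulling a gated Stable Diffusion checkpoint. `HF_API_KEY` and `restricted_model_id` below are placeholders standing in for the values defined elsewhere in app.py, and the prompt is illustrative only.

```python
# Hedged sketch of the post-commit pattern; placeholder values, not app.py's actual config.
import torch
from diffusers import StableDiffusionPipeline
from huggingface_hub import login

HF_API_KEY = "hf_..."        # placeholder access token with rights to the gated model
restricted_model_id = "..."  # placeholder gated model ID (set in app.py)

login(token=HF_API_KEY)      # authenticate the session once

# float16 weights; a CUDA device is assumed, as in app.py
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# `token=` is the current argument name; `use_auth_token=` is deprecated.
pipe = StableDiffusionPipeline.from_pretrained(
    restricted_model_id,
    torch_dtype=torch.float16,
    token=HF_API_KEY,
)
pipe = pipe.to(device)

try:
    image = pipe("a coastal village at sunrise").images[0]
    image.save("output.png")
except Exception as e:
    print(f"Image generation error: {e}")  # mirrors the logging line added in this commit
```

Either the `login()` call or the `token=` argument alone is typically enough for gated-model access; the sketch shows both only because app.py keeps the `login` import and also passes the token explicitly.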