nightey3s committed
Commit bdef09a · unverified · 1 Parent(s): f0a16ec

Add ZeroGPU integration

Files changed (3)
  1. .gitignore +1 -0
  2. profanity_detector.py +18 -0
  3. requirements.txt +2 -1
.gitignore ADDED
@@ -0,0 +1 @@
+.gradio
profanity_detector.py CHANGED
@@ -14,6 +14,14 @@ import queue
 from scipy.io.wavfile import write as write_wav
 from html import escape
 import traceback
+import spaces  # Required for Hugging Face ZeroGPU compatibility
+
+# ZeroGPU COMPATIBILITY NOTES:
+# The @spaces.GPU decorators throughout this code enable compatibility with Hugging Face ZeroGPU.
+# - They request GPU resources only when needed and release them after function completion
+# - They have no effect when running in local environments or standard GPU Spaces
+# - Custom durations can be specified for functions requiring longer processing times
+# - For local development, you'll need: pip install huggingface_hub[spaces]
 
 # Configure logging
 logging.basicConfig(
@@ -116,6 +124,10 @@ def load_models():
         logger.error(error_msg)
         return error_msg
 
+# ZeroGPU decorator: Requests GPU resources when function is called and releases them when completed.
+# This enables efficient GPU sharing in Hugging Face Spaces while having no effect in local environments.
+@spaces.GPU
+@spaces.GPU
 def detect_profanity(text: str, threshold: float = 0.5):
     """
     Detect profanity in text with adjustable threshold
@@ -195,6 +207,7 @@ def create_highlighted_text(text, profane_words):
     highlighted = re.sub(pattern, highlight_match, text, flags=re.IGNORECASE)
     return highlighted
 
+@spaces.GPU
 def rephrase_profanity(text):
     """
     Rephrase text containing profanity
@@ -248,6 +261,7 @@ def rephrase_profanity(text):
         logger.error(error_msg)
         return text  # Return original text if rephrasing fails
 
+@spaces.GPU
 def text_to_speech(text):
     """
     Convert text to speech using SpeechT5
@@ -337,6 +351,9 @@ def text_analysis(input_text, threshold=0.5):
         logger.error(error_msg)
         return error_msg, None, None
 
+# ZeroGPU decorator with custom duration: Allocates GPU for up to 120 seconds to handle longer audio processing.
+# Longer durations ensure processing isn't cut off, while shorter durations improve queue priority.
+@spaces.GPU(duration=120)
 def analyze_audio(audio_path, threshold=0.5):
     """
     Analyze audio for profanity with adjustable threshold
@@ -388,6 +405,7 @@ stream_results = {
     "audio_output": None
 }
 
+@spaces.GPU
 def process_stream_chunk(audio_chunk):
     """Process an audio chunk from the streaming interface"""
     global stream_results, processing_active
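For readers unfamiliar with the decorator pattern this commit introduces, the snippet below is a minimal, self-contained sketch of how @spaces.GPU is typically wired into a Gradio Space. The model name, function bodies, and app wiring are illustrative assumptions and are not part of this repository; only import spaces, @spaces.GPU, and @spaces.GPU(duration=...) mirror the usage added to profanity_detector.py above.

# Minimal ZeroGPU sketch (illustrative; model choice and app wiring are assumptions).
import gradio as gr
import spaces
import torch
from transformers import pipeline

# Load once at import time; a ZeroGPU Space has no GPU attached until a request arrives.
classifier = pipeline(
    "text-classification",
    model="distilbert-base-uncased-finetuned-sst-2-english",  # placeholder checkpoint
)

@spaces.GPU  # GPU is allocated when the call starts and released when it returns
def classify(text: str):
    device = "cuda" if torch.cuda.is_available() else "cpu"
    classifier.model.to(device)
    return classifier(text)[0]

@spaces.GPU(duration=120)  # request a longer slot for heavier work such as audio
def classify_batch(texts: list[str]):
    device = "cuda" if torch.cuda.is_available() else "cpu"
    classifier.model.to(device)
    return classifier(texts)

demo = gr.Interface(fn=classify, inputs="text", outputs="json")

if __name__ == "__main__":
    demo.launch()

On a ZeroGPU Space the decorated call is queued until a GPU slice is free; locally, or on a dedicated-GPU Space, the decorator is effectively a no-op, which matches the compatibility notes in the diff above.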
requirements.txt CHANGED
@@ -6,4 +6,5 @@ scipy
 torch
 transformers
 pillow
-sentencepiece
+sentencepiece
+spaces
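Because spaces is now a hard requirement, an environment without the package will fail at import spaces. A common defensive pattern, shown here as an assumption rather than something this commit implements, is to fall back to a no-op decorator when the package is unavailable locally:

# Hypothetical fallback for local development without the `spaces` package installed.
try:
    import spaces
    gpu = spaces.GPU  # supports both @gpu and @gpu(duration=...)
except ImportError:
    def gpu(func=None, **kwargs):
        # Mimic the decorator's two calling conventions while doing nothing.
        if callable(func):
            return func       # used as @gpu
        return lambda f: f    # used as @gpu(duration=...)

Functions decorated with this fallback behave identically on plain CPU machines and on ZeroGPU hardware, where the real spaces.GPU takes over.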