Hyeonsieun committed on
Commit
04a91ee
·
verified ·
1 Parent(s): 53a0ee6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -6
app.py CHANGED
@@ -10,17 +10,13 @@ import whisper
10
 
11
  import matplotlib as plt
12
 
13
- whisper_model = whisper.load_model('large-v2') # Whisper 모델을 불러오기
14
 
15
 
16
  path = "Hyeonsieun/NTtoGT_1epoch"
17
  tokenizer = T5Tokenizer.from_pretrained(path)
18
  model = T5ForConditionalGeneration.from_pretrained(path)
19
 
20
- BATCH_SIZE = 8
21
- FILE_LIMIT_MB = 1000
22
- YT_LENGTH_LIMIT_S = 3600 # limit to 1 hour YouTube files
23
-
24
  def do_correction(text, model, tokenizer):
25
  input_text = f"translate the text pronouncing the formula to a LaTeX equation: {text}"
26
  inputs = tokenizer.encode(
@@ -46,7 +42,13 @@ def do_correction(text, model, tokenizer):
46
  )
47
  return corrected_sentence
48
 
 
 
 
 
 
49
 
 
50
  pipe = pipeline(
51
  task="automatic-speech-recognition",
52
  model=MODEL_NAME,
@@ -179,4 +181,5 @@ yt_transcribe = gr.Interface(
179
  with demo:
180
  gr.TabbedInterface([mf_transcribe, file_transcribe, yt_transcribe], ["Microphone", "Audio file", "YouTube"])
181
 
182
- demo.launch(enable_queue=True)
 
 
10
 
11
  import matplotlib as plt
12
 
13
+ # whisper_model = whisper.load_model('large-v2') # Whisper 모델을 불러오기
14
 
15
 
16
  path = "Hyeonsieun/NTtoGT_1epoch"
17
  tokenizer = T5Tokenizer.from_pretrained(path)
18
  model = T5ForConditionalGeneration.from_pretrained(path)
19
 
 
 
 
 
20
  def do_correction(text, model, tokenizer):
21
  input_text = f"translate the text pronouncing the formula to a LaTeX equation: {text}"
22
  inputs = tokenizer.encode(
 
42
  )
43
  return corrected_sentence
44
 
45
+ # corrected_sentence = do_correction(sentence, model, tokenizer)
46
+
47
+
48
+ gr.Interface(fn=yt_do_correction, inputs="text", outputs="text")
49
+
50
 
51
+ '''
52
  pipe = pipeline(
53
  task="automatic-speech-recognition",
54
  model=MODEL_NAME,
 
181
  with demo:
182
  gr.TabbedInterface([mf_transcribe, file_transcribe, yt_transcribe], ["Microphone", "Audio file", "YouTube"])
183
 
184
+ demo.launch(enable_queue=True)
185
+ '''