ff98 committed on
Commit
0344777
·
1 Parent(s): ec31d89

question prompt added

Browse files
Files changed (1) hide show
  1. app.py +7 -5
app.py CHANGED
@@ -5,7 +5,7 @@ from PIL import Image
5
  import matplotlib.pyplot as plt
6
 
7
 
8
- def process_inputs(audio, option):
9
  # Process inputs and return results
10
  if option == "Translate":
11
  generated_text = generate_text_from_audio(audio), None
@@ -18,7 +18,7 @@ def process_inputs(audio, option):
18
  return "", text_classification(generated_text)
19
  elif option == "Ask a Question":
20
  generated_text = generate_text_from_audio(audio)
21
- return ask_ques_from_text(generated_text), None
22
 
23
  def generate_text_from_audio(audio):
24
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
@@ -78,14 +78,14 @@ def text_classification(text):
78
  return "classification_plot.png"
79
 
80
 
81
- def ask_ques_from_text(text):
82
  model_name = "deepset/roberta-base-squad2"
83
 
84
  # Get predictions
85
  nlp = pipeline('question-answering', model=model_name, tokenizer=model_name, device=0)
86
 
87
  QA_input = {
88
- 'question': 'who did not recognize?',
89
  'context': text # Your context text from audio_text_result
90
  }
91
 
@@ -98,9 +98,11 @@ demo = gr.Interface(
98
  fn=process_inputs,
99
  inputs=[
100
  gr.Audio(label="Upload audio in .mp3 format", type="filepath"), # Audio input
101
- gr.Dropdown(choices=["Translate", "Summarize", "text-classification", "Ask a Question"], label="Choose an Option")
 
102
  ],
103
  outputs=[gr.Textbox(label="Result"), gr.Image(label="Classification Plot")],
104
  )
105
 
 
106
  demo.launch()
 
5
  import matplotlib.pyplot as plt
6
 
7
 
8
+ def process_inputs(audio, option, question=None):
9
  # Process inputs and return results
10
  if option == "Translate":
11
  generated_text = generate_text_from_audio(audio), None
 
18
  return "", text_classification(generated_text)
19
  elif option == "Ask a Question":
20
  generated_text = generate_text_from_audio(audio)
21
+ return ask_ques_from_text(generated_text, question), None
22
 
23
  def generate_text_from_audio(audio):
24
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
 
78
  return "classification_plot.png"
79
 
80
 
81
+ def ask_ques_from_text(text, ques):
82
  model_name = "deepset/roberta-base-squad2"
83
 
84
  # Get predictions
85
  nlp = pipeline('question-answering', model=model_name, tokenizer=model_name, device=0)
86
 
87
  QA_input = {
88
+ 'question': ques,
89
  'context': text # Your context text from audio_text_result
90
  }
91
 
 
98
  fn=process_inputs,
99
  inputs=[
100
  gr.Audio(label="Upload audio in .mp3 format", type="filepath"), # Audio input
101
+ gr.Dropdown(choices=["Translate", "Summarize", "text-classification", "Ask a Question"], label="Choose an Option"),
102
+ gr.Textbox(label="Enter your question if you chose Ask a question in dropdown", placeholder="Enter your question here", visible=True)
103
  ],
104
  outputs=[gr.Textbox(label="Result"), gr.Image(label="Classification Plot")],
105
  )
106
 
107
+
108
  demo.launch()