Updated Q/A script
app.py CHANGED
@@ -122,6 +122,7 @@ def process_and_summarize(audio_file, translate, model_size, do_summarize=True):
 
 
 
+
 @spaces.GPU(duration=60)
 def answer_question(context, question):
     logger.info("Starting Q&A process")
@@ -137,18 +138,24 @@ def answer_question(context, question):
 
         outputs = qa_pipeline(messages, max_new_tokens=256)
 
-        # Extract the answer from the
-
-
-
-
+        # Extract the answer from the output
+        if isinstance(outputs, list) and len(outputs) > 0 and isinstance(outputs[0], dict):
+            for message in outputs:
+                if message.get('role') == 'assistant':
+                    answer = message.get('content', '')
+                    break
+            else:
+                answer = "No answer found in the model's response."
+        else:
+            answer = str(outputs)  # Fallback to string representation of outputs
 
         logger.info("Q&A process completed successfully")
-        return
+        return answer
     except Exception as e:
         logger.error(f"Q&A process failed: {str(e)}")
         logger.error(traceback.format_exc())
-        return f"Error occurred during Q&A process. Please try again. Error:
+        return f"Error occurred during Q&A process. Please try again. Error: {str(e)}"
+
 
 # Main interface
 with gr.Blocks() as iface:
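For reference, a minimal standalone sketch of the extraction step added in this commit, assuming qa_pipeline returns a flat list of chat-style {'role', 'content'} dicts (the exact output shape depends on the model and transformers version; sample_outputs and extract_answer below are hypothetical names used only for illustration):

# Sketch of the answer-extraction logic from the new answer_question code.
# Assumption: the pipeline returns a flat list of chat-style message dicts;
# sample_outputs is made-up data, not real model output.
sample_outputs = [
    {"role": "user", "content": "What is the recording about?"},
    {"role": "assistant", "content": "It summarizes the quarterly results."},
]

def extract_answer(outputs):
    # Return the first assistant reply, mirroring the loop in answer_question.
    if isinstance(outputs, list) and len(outputs) > 0 and isinstance(outputs[0], dict):
        for message in outputs:
            if message.get("role") == "assistant":
                return message.get("content", "")
        return "No answer found in the model's response."
    # Fallback: string representation of whatever the pipeline returned
    return str(outputs)

print(extract_answer(sample_outputs))  # -> It summarizes the quarterly results.

The committed version keeps a for/else block so the "No answer found" fallback is set only when the loop finishes without a break; the sketch above collapses that into early returns for brevity.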