Update app.py
app.py CHANGED
@@ -212,6 +212,7 @@ def send_to_model(prompt, model_selection, hf_model_choice, hf_custom_model, hf_
     try:
         with gr.Progress() as progress:
             progress(0, "Preparing to send to model...")
+            logging.info("sending to model preparation.")
 
             # Basic input validation
             if model_selection not in ["Clipboard only", "HuggingFace Inference", "Groq API", "OpenAI ChatGPT"]:
@@ -228,6 +229,7 @@ def send_to_model(prompt, model_selection, hf_model_choice, hf_custom_model, hf_
             # Call implementation with error handling
             progress(0.5, "Processing with model...")
             try:
+                logging.info("calling send_to_model_impl.")
                 summary, download_file = send_to_model_impl(
                     prompt=prompt.strip(),
                     model_selection=model_selection,
@@ -239,6 +241,7 @@ def send_to_model(prompt, model_selection, hf_model_choice, hf_custom_model, hf_
                     openai_api_key=openai_api_key,
                     openai_model_choice=openai_model_choice
                 )
+                logging.info("summary received: %s", summary)
 
                 if summary is None or not isinstance(summary, str):
                     return "Error: No response from model", None
@@ -262,6 +265,7 @@ def send_to_model(prompt, model_selection, hf_model_choice, hf_custom_model, hf_
 def send_to_model_impl(prompt, model_selection, hf_model_choice, hf_custom_model, hf_api_key,
                        groq_model_choice, groq_api_key, openai_api_key, openai_model_choice):
     """Implementation of model sending with improved error handling."""
+    logging.info("send to model impl commencing...")
 
     if model_selection == "Clipboard only":
         return "Text copied to clipboard. Use paste for processing.", None
@@ -281,6 +285,8 @@ def send_to_model_impl(prompt, model_selection, hf_model_choice, hf_custom_model
         return "Error: Invalid model selection", None
 
     # Validate response
+    logging.info("model told us: %s", summary)
+
     if not summary or not isinstance(summary, str):
         return "Error: Invalid response from model", None
 
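The logging.info(...) calls added above only produce visible output if the logging module is configured somewhere in the Space. A minimal sketch of such a setup is shown below; it assumes app.py does not already install a handler, and the level and format string are illustrative choices, not values taken from the repository.

import logging

# One-time root logger configuration, placed near the top of app.py so the
# logging.info(...) calls added in this commit are actually emitted.
# The INFO level and the format string are assumptions for illustration.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
)

# Lazy %-style formatting passes the value as an argument and lets the
# logging module interpolate it only when the record is emitted:
logging.info("summary received: %s", "example summary")

Supplying the value as a separate argument with a matching %s placeholder also avoids the formatting error the logging module reports when extra arguments are passed for a message that has no placeholder.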