Update app.py
app.py CHANGED
@@ -2,13 +2,22 @@ import gradio as gr
 from openai import OpenAI
 import base64
 import io
+import logging
+
+# Set up logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
 
 def solve_stem_problem(api_key, image, subject="math"):
     # Initialize OpenAI client with user-provided API key
-    client = OpenAI(
-        base_url="https://openrouter.ai/api/v1",
-        api_key=api_key,
-    )
+    try:
+        client = OpenAI(
+            base_url="https://openrouter.ai/api/v1",
+            api_key=api_key,
+        )
+    except Exception as e:
+        logger.error(f"Failed to initialize OpenAI client: {str(e)}")
+        return f"Error initializing API client: {str(e)}"
 
     # Define detective based on subject
     detectives = {
@@ -30,16 +39,18 @@
         encoded_image = base64.b64encode(img_byte_arr).decode('utf-8')
         image_url_data = f"data:image/png;base64,{encoded_image}"
     except Exception as e:
+        logger.error(f"Image encoding error: {str(e)}")
         return f"Error encoding image: {str(e)}"
 
-    # Call the
+    # Call the API with error handling
     try:
         completion = client.chat.completions.create(
             extra_headers={
                 "HTTP-Referer": "https://stem-sleuth.example.com",
                 "X-Title": "STEM Sleuth",
             },
-            model
+            # Using a more stable model (adjust based on OpenRouter's available models)
+            model="google/gemini-flash-1.5",
             messages=[
                 {
                     "role": "user",
@@ -57,15 +68,22 @@
             ]
         )
 
-        #
-        if
-
-
-
+        # Detailed response checking
+        if not completion.choices:
+            logger.warning("API returned no choices")
+            return "API returned no choices. Please check model availability or API key permissions."
+
+        if not completion.choices[0].message:
+            logger.warning("API returned no message content")
+            return "API returned no message content. Please try again or check the model."
+
+        solution = completion.choices[0].message.content
+        logger.info("Successfully retrieved solution")
+        return solution
+
     except Exception as e:
-
-
-    return solution
+        logger.error(f"API call failed: {str(e)}")
+        return f"Error calling API: {str(e)}. Please verify model availability or try again later."
 
 # Create Gradio interface
 with gr.Blocks() as app:
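The diff stops at the top of the Gradio interface, which is unchanged in this commit. For context, here is a minimal sketch of how the updated solve_stem_problem function is typically wired into such a Blocks app; the component names, labels, and layout below are assumptions for illustration, not the Space's actual interface.

# Illustrative wiring only -- the Space's real interface is not shown in this diff.
with gr.Blocks() as app:
    gr.Markdown("# STEM Sleuth")
    api_key_box = gr.Textbox(label="OpenRouter API key", type="password")
    image_input = gr.Image(label="Problem image", type="pil")
    subject_choice = gr.Dropdown(["math", "physics", "chemistry", "biology"], value="math", label="Subject")
    solution_output = gr.Markdown()
    solve_button = gr.Button("Solve")
    # solve_stem_problem returns either the solution text or an error string,
    # so a single output component can display both outcomes.
    solve_button.click(
        solve_stem_problem,
        inputs=[api_key_box, image_input, subject_choice],
        outputs=solution_output,
    )

app.launch()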