Commit f5c3d9c
Parent(s): 3ee7b82
refactor: improve code readability and structure in OpenAI integration tests and services, update requirements for consistency
Files changed:
- backend/app/api/v1/__pycache__/router.cpython-311.pyc +0 -0
- backend/app/api/v1/endpoints/__pycache__/images.cpython-311.pyc +0 -0
- backend/app/api/v1/endpoints/chat.py +101 -94
- backend/app/core/config.py +1 -1
- backend/app/main.py +63 -59
- backend/app/schemas/chat.py +1 -1
- backend/app/services/__pycache__/image_service.cpython-311.pyc +0 -0
- backend/app/services/image_service.py +7 -16
- backend/requirements.txt +2 -1
- test_openai.py +15 -14
backend/app/api/v1/__pycache__/router.cpython-311.pyc
CHANGED
Binary files a/backend/app/api/v1/__pycache__/router.cpython-311.pyc and b/backend/app/api/v1/__pycache__/router.cpython-311.pyc differ
backend/app/api/v1/endpoints/__pycache__/images.cpython-311.pyc
CHANGED
Binary files a/backend/app/api/v1/endpoints/__pycache__/images.cpython-311.pyc and b/backend/app/api/v1/endpoints/__pycache__/images.cpython-311.pyc differ
backend/app/api/v1/endpoints/chat.py
CHANGED
@@ -1,94 +1,101 @@
-import os
-import json
-
-from fastapi import APIRouter, HTTPException
-from fastapi.responses import StreamingResponse
-import openai
-from typing import List
-
-from ....schemas.chat import Message, ChatRequest
-from ....core.config import settings
-
-router = APIRouter(prefix="/chat", tags=["chat"])
-
-# Initialize OpenAI client using the same settings as other working endpoints
-client = openai.OpenAI(api_key=settings.OPENAI_API_KEY)
-
-
-async def stream_text(messages: List[Message]):
-[old lines 19-94 are not captured on this page]
+# import os
+# import json
+
+# from fastapi import APIRouter, HTTPException
+# from fastapi.responses import StreamingResponse
+# import openai
+# from typing import List
+
+# from ....schemas.chat import Message, ChatRequest
+# from ....core.config import settings
+
+# router = APIRouter(prefix="/chat", tags=["chat"])
+
+# # Initialize OpenAI client using the same settings as other working endpoints
+# client = openai.OpenAI(api_key=settings.OPENAI_API_KEY)
+
+
+# async def stream_text(messages: List[Message]):
+#     try:
+#         formatted_messages = (
+#             [
+#                 {
+#                     "role": "system",
+#                     "content": """You are an AI learning assistant for PlayGo AI,
+# an educational platform. Your goal is to help students learn and understand various
+# subjects. Provide clear, concise, and accurate explanations.""",
+#                 },
+#             ]
+#             + [{"role": msg.role, "content": msg.content} for msg in messages]
+#         )
+
+#         stream = client.chat.completions.create(
+#             model="gpt-3.5-turbo",
+#             messages=formatted_messages,
+#             temperature=0.7,
+#             stream=True,
+#         )
+
+#         for chunk in stream:
+#             for choice in chunk.choices:
+#                 if choice.finish_reason == "stop":
+#                     continue
+
+#                 else:
+#                     yield "0:{text}\n".format(text=json.dumps(choice.delta.content))
+
+#             if chunk.choices == []:
+#                 usage = chunk.usage
+#                 prompt_tokens = usage.prompt_tokens
+#                 completion_tokens = usage.completion_tokens
+#                 yield 'd:{{"finishReason":"{reason}","usage":{{"promptTokens":{prompt},"completionTokens":{completion}}}}}\n'.format(
+#                     reason="stop", prompt=prompt_tokens, completion=completion_tokens
+#                 )
+
+#     except Exception as e:
+#         print(f"Error in stream_text: {str(e)}")
+#         yield f"Error: {str(e)}".encode("utf-8")
+
+
+# @router.post("/stream")
+# async def chat_stream(request: ChatRequest):
+#     response = StreamingResponse(
+#         stream_text(request.messages),
+#     )
+#     response.headers["x-vercel-ai-data-stream"] = "v1"
+#     return response
+
+
+# @router.get("/test")
+# async def test_chat():
+#     return {"message": "Chat endpoint is working!"}
+
+
+# @router.get("/test-simple")
+# async def test_simple_chat():
+#     """Test simple chat completion without streaming - same as working repo"""
+#     try:
+#         response = client.chat.completions.create(
+#             model="gpt-3.5-turbo",
+#             messages=[
+#                 {
+#                     "role": "user",
+#                     "content": "Say 'Hello from simple chat!' in exactly those words.",
+#                 }
+#             ],
+#             max_tokens=20,
+#         )
+
+#         return {
+#             "status": "success",
+#             "message": "Simple chat works!",
+#             "response": response.choices[0].message.content,
+#             "model": response.model,
+#         }
+
+#     except Exception as e:
+#         return {
+#             "status": "error",
+#             "message": f"Simple chat failed: {str(e)}",
+#             "error_type": type(e).__name__,
+#         }
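Note: the commented-out /stream route speaks the Vercel AI SDK data-stream protocol: each text delta is framed as `0:<json-encoded string>` plus a newline, the stream closes with a `d:{...}` finish part, and the response carries the `x-vercel-ai-data-stream: v1` header. A minimal sketch of just that framing, detached from FastAPI and OpenAI (`fake_deltas` is a stand-in for the real model stream, and the token counts here are illustrative; the real code reads them from `chunk.usage`):

import json

def fake_deltas():
    # Stand-in for OpenAI streaming chunks; the real code reads choice.delta.content.
    yield from ["Hello", " from", " PlayGo AI"]

def data_stream(deltas):
    """Frame text deltas in the Vercel AI SDK data-stream format."""
    count = 0
    for text in deltas:
        count += 1  # illustrative only; real counts come from chunk.usage
        yield "0:{}\n".format(json.dumps(text))  # '0:' parts carry text deltas
    # the 'd:' part signals the finish reason plus usage totals
    yield 'd:{{"finishReason":"stop","usage":{{"promptTokens":0,"completionTokens":{}}}}}\n'.format(count)

for part in data_stream(fake_deltas()):
    print(part, end="")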
backend/app/core/config.py
CHANGED
@@ -36,7 +36,7 @@ class Settings(BaseSettings):
     ACCESS_TOKEN_EXPIRE_MINUTES: int = 30

     # OpenAI settings
-    OPENAI_API_KEY: str = os.getenv("OPENAI_API_KEY")
+    OPENAI_API_KEY: str = os.getenv("OPENAI_API_KEY", "")

     class Config:
         env_file = ".env"
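Note: this one-character change matters because os.getenv("OPENAI_API_KEY") returns None when the variable is unset, and a str-annotated settings field with a None default fails pydantic validation the moment Settings() is constructed, i.e. at import time. Defaulting to "" lets the app start and report the missing key later. A minimal sketch of the behaviour, assuming pydantic-settings v2 (only the OPENAI_API_KEY field is taken from the diff):

import os
from pydantic_settings import BaseSettings

class Settings(BaseSettings):
    # os.getenv(...) would yield None when unset; the "" default keeps this
    # str-typed field valid so startup never fails on a missing key.
    OPENAI_API_KEY: str = os.getenv("OPENAI_API_KEY", "")

    class Config:
        env_file = ".env"

settings = Settings()
print("configured" if settings.OPENAI_API_KEY else "OPENAI_API_KEY not configured")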
backend/app/main.py
CHANGED
@@ -72,67 +72,71 @@ async def health_check():


 # OpenAI connection test endpoint
-@app.get("/api/test-openai")
-async def test_openai():
-[old lines 77-99 are not captured on this page]
+# @app.get("/api/test-openai")
+# async def test_openai():
+#     """Test OpenAI connection without making actual API calls"""
+#     try:
+#         from .core.config import settings
+
+#         if not settings.OPENAI_API_KEY:
+#             return {"status": "error", "message": "OPENAI_API_KEY not configured"}
+
+#         api_key_preview = (
+#             settings.OPENAI_API_KEY[:10] + "..."
+#             if len(settings.OPENAI_API_KEY) > 10
+#             else "Too short"
+#         )
+
+#         return {
+#             "status": "ok",
+#             "message": "OpenAI API key is configured",
+#             "api_key_preview": api_key_preview,
+#             "environment": (
+#                 "production" if os.getenv("NODE_ENV") == "production" else "development"
+#             ),
+#         }
+#     except Exception as e:
+#         return {"status": "error", "message": f"OpenAI configuration error: {str(e)}"}
+

 # Test chat completions endpoint
-@app.get("/api/test-chat")
-async def test_chat():
-[old lines 104-135 are not captured on this page]
+# @app.get("/api/test-chat")
+# async def test_chat():
+#     """Test if OpenAI chat completions work on Hugging Face"""
+#     try:
+#         from .core.config import settings
+#         from openai import OpenAI
+
+#         if not settings.OPENAI_API_KEY:
+#             return {"status": "error", "message": "OPENAI_API_KEY not configured"}
+
+#         client = OpenAI(api_key=settings.OPENAI_API_KEY, timeout=30.0)
+
+#         # Test a simple chat completion
+#         response = client.chat.completions.create(
+#             model="gpt-3.5-turbo",
+#             messages=[
+#                 {
+#                     "role": "user",
+#                     "content": "Say 'Hello from Hugging Face!' in exactly those words.",
+#                 }
+#             ],
+#             max_tokens=20,
+#         )
+
+#         return {
+#             "status": "success",
+#             "message": "Chat completions work!",
+#             "response": response.choices[0].message.content,
+#             "model": response.model,
+#         }
+
+#     except Exception as e:
+#         return {
+#             "status": "error",
+#             "message": f"Chat completion failed: {str(e)}",
+#             "error_type": type(e).__name__,
+#         }


 # Include API router
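Note: if these probes are ever re-enabled, they can be exercised without a browser. A hypothetical sketch using FastAPI's TestClient (the `backend.app.main` import path is an assumption from the repo layout, not something this diff shows):

# Hypothetical usage sketch; the import path is assumed from the repo layout.
from fastapi.testclient import TestClient

from backend.app.main import app

client = TestClient(app)

# /api/test-openai only inspects configuration, so it is safe to call offline.
print(client.get("/api/test-openai").json())

# /api/test-chat performs a real OpenAI request and needs a valid OPENAI_API_KEY.
print(client.get("/api/test-chat").json())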
backend/app/schemas/chat.py
CHANGED
@@ -8,4 +8,4 @@ class Message(BaseModel):


 class ChatRequest(BaseModel):
-    messages: List[Message]
+    messages: List[Message]
backend/app/services/__pycache__/image_service.cpython-311.pyc
CHANGED
Binary files a/backend/app/services/__pycache__/image_service.cpython-311.pyc and b/backend/app/services/__pycache__/image_service.cpython-311.pyc differ
backend/app/services/image_service.py
CHANGED
@@ -14,21 +14,8 @@ class ImageGenerationService:
     """Service for handling OpenAI image generation"""

     def __init__(self):
-        # Validate API key exists
-        if not settings.OPENAI_API_KEY:
-            logger.error("OPENAI_API_KEY not found in environment variables")
-            raise ValueError("OPENAI_API_KEY is required")
-
-        # Log API key status (first few characters only for security)
-        api_key_preview = (
-            settings.OPENAI_API_KEY[:10] + "..."
-            if settings.OPENAI_API_KEY
-            else "Not set"
-        )
-        logger.info(f"OpenAI API Key status: {api_key_preview}")
-
         self.client = OpenAI(
-            api_key=
+            api_key=os.getenv("OPENAI_API_KEY"),
             timeout=60.0,  # Increase timeout for Hugging Face environment
             max_retries=2,  # Reduce retries to fail faster
         )
@@ -40,7 +27,9 @@
         if not os.path.exists(self.output_dir):
             os.makedirs(self.output_dir)

-    async def _fallback_to_dalle(
+    async def _fallback_to_dalle(
+        self, prompt: str, size: str, n: int, model: str
+    ) -> dict:
         """
         Fallback to regular DALL-E when responses API is blocked
         This sacrifices reference image capability but ensures the app works on Hugging Face
@@ -205,7 +194,9 @@

         if not generated_filenames:
             # If responses API failed due to network restrictions, try fallback to regular DALL-E
-            logger.warning(
+            logger.warning(
+                "Responses API failed, attempting fallback to regular DALL-E"
+            )
             return await self._fallback_to_dalle(prompt, size, n, model)

         logger.info(
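Note: the body of _fallback_to_dalle sits outside this diff's context lines. A hypothetical sketch of the fallback shape the signature implies, using the standard Images API from openai>=1.0 (everything beyond the signature is a guess, not the repo's code):

# Hypothetical sketch only; the real method body is not shown in this diff.
import logging

from openai import OpenAI

logger = logging.getLogger(__name__)
client = OpenAI()  # reads OPENAI_API_KEY from the environment


async def _fallback_to_dalle(prompt: str, size: str, n: int, model: str) -> dict:
    """Generate images with the plain Images API when the responses API is blocked."""
    logger.warning("Using Images API fallback (reference images unsupported)")
    response = client.images.generate(model=model, prompt=prompt, size=size, n=n)
    return {"images": [image.url for image in response.data]}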
backend/requirements.txt
CHANGED
@@ -3,4 +3,5 @@ uvicorn>=0.15.0
 pydantic>=1.8.0
 python-dotenv>=0.19.0
 openai>=1.0.0
-python-dotenv>=0.19.0
+python-dotenv>=0.19.0
+pydantic-settings>=2.0.3
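Note: pydantic-settings is where BaseSettings lives in pydantic v2, which is presumably why the pin was added alongside the config change above. A compatibility shim that tolerates either major version, for reference:

# BaseSettings moved out of pydantic core in v2; this shim works under either.
try:
    from pydantic_settings import BaseSettings  # pydantic v2 + pydantic-settings
except ImportError:
    from pydantic import BaseSettings  # pydantic v1 fallback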
test_openai.py
CHANGED
@@ -9,7 +9,7 @@ print("=" * 40)

 # Load environment
 load_dotenv()
-api_key = os.getenv(
+api_key = os.getenv("OPENAI_API_KEY")

 print(f"1. API Key configured: {'✅ Yes' if api_key else '❌ No'}")
 if api_key:
@@ -19,9 +19,12 @@ if api_key:
 print("\n2. Testing basic HTTP connectivity...")
 try:
     import requests
-[old lines 22-24 are not captured on this page]
+
+    response = requests.get(
+        "https://api.openai.com/v1/models",
+        headers={"Authorization": f"Bearer {api_key}"},
+        timeout=30,
+    )
     print(f" Status: {response.status_code}")
     if response.status_code == 200:
         print(" ✅ Direct HTTP request works!")
@@ -34,12 +37,13 @@ except Exception as e:
 print("\n3. Testing OpenAI library (simple config)...")
 try:
     from openai import OpenAI
+
     client = OpenAI(api_key=api_key)
-
+
     response = client.chat.completions.create(
         model="gpt-3.5-turbo",
         messages=[{"role": "user", "content": "Say 'test'"}],
-        max_tokens=5
+        max_tokens=5,
     )
     print(" ✅ OpenAI library works!")
     print(f" Response: {response.choices[0].message.content}")
@@ -51,16 +55,13 @@ except Exception as e:
 print("\n4. Testing OpenAI library (with timeout)...")
 try:
     from openai import OpenAI
-
-[old lines 55-56 are not captured on this page]
-        max_retries=2
-    )
-
+
+    client = OpenAI(api_key=api_key, timeout=60.0, max_retries=2)
+
     response = client.chat.completions.create(
         model="gpt-3.5-turbo",
         messages=[{"role": "user", "content": "Say 'test'"}],
-        max_tokens=5
+        max_tokens=5,
     )
     print(" ✅ OpenAI library with timeout works!")
     print(f" Response: {response.choices[0].message.content}")
@@ -68,4 +69,4 @@ except Exception as e:
     print(f" ❌ OpenAI library with timeout error: {str(e)}")
     print(f" Error type: {type(e).__name__}")

-print("\n🏁 Debug complete!")
+print("\n🏁 Debug complete!")
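Note: when reading the script's output, the step-2 status code is the quickest signal. A small helper sketch that separates key problems from network problems using the same /v1/models probe (the status-code interpretations are standard OpenAI API semantics, not output the script itself prints):

import os

import requests


def probe_openai(api_key: str) -> str:
    """Classify the /v1/models probe result: network failure vs. key failure."""
    try:
        response = requests.get(
            "https://api.openai.com/v1/models",
            headers={"Authorization": f"Bearer {api_key}"},
            timeout=30,
        )
    except requests.RequestException as exc:
        return f"network problem (egress blocked?): {exc}"
    if response.status_code == 200:
        return "ok: key valid and connectivity fine"
    if response.status_code == 401:
        return "connectivity fine, but the key was rejected"
    return f"unexpected status {response.status_code}"


print(probe_openai(os.getenv("OPENAI_API_KEY", "")))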