ChenyuRabbitLove committed
Commit 008dadb · 1 Parent(s): 05257d2

chore: update OpenAI package version, refactor chat streaming logic for improved error handling, and add test endpoints
backend/app/api/v1/endpoints/chat.py CHANGED

@@ -1,88 +1,94 @@
+import os
 import json
-import logging
-from typing import AsyncGenerator
 
-from fastapi import APIRouter, HTTPException, status
+from fastapi import APIRouter, HTTPException
 from fastapi.responses import StreamingResponse
-from openai import OpenAI, APIConnectionError
+from openai import OpenAI
+from typing import List
 
+from ....schemas.chat import Message, ChatRequest
 from ....core.config import settings
-from ....schemas.chat import ChatRequest, Message
-
-logger = logging.getLogger(__name__)
 
 router = APIRouter(prefix="/chat", tags=["chat"])
 
-# Create a singleton OpenAI client instance.
-client = OpenAI(api_key=settings.OPENAI_API_KEY, timeout=30.0)
-
-
-async def _stream_text(messages: list[Message]) -> AsyncGenerator[str, None]:
-    """Internal helper that yields text deltas as server-sent-events."""
-
-    # Add a default system message as first instruction.
-    system_msg = {
-        "role": "system",
-        "content": (
-            "You are an AI learning assistant for PlayGo AI, an educational platform. "
-            "Your goal is to help students learn and understand various subjects. "
-            "Provide clear, concise, and accurate explanations."
-        ),
-    }
-
-    formatted_messages = [system_msg] + [msg.model_dump() for msg in messages]
+# Initialize OpenAI client with same simple config as working repo
+client = OpenAI(api_key=settings.OPENAI_API_KEY)
 
+
+async def stream_text(messages: List[Message]):
     try:
+        formatted_messages = [
+            {"role": "system", "content": """You are an AI learning assistant for PlayGo AI,
+            an educational platform. Your goal is to help students learn and understand various
+            subjects. Provide clear, concise, and accurate explanations."""},
+        ] + [{"role": msg.role, "content": msg.content} for msg in messages]
+
         stream = client.chat.completions.create(
             model="gpt-3.5-turbo",
             messages=formatted_messages,
             temperature=0.7,
-            stream=True,
+            stream=True
         )
 
-        # The new Python SDK returns an iterator of events
         for chunk in stream:
             for choice in chunk.choices:
-                # Skip explicit finished signal – handled after loop
                 if choice.finish_reason == "stop":
                     continue
+                else:
+                    yield '0:{text}\n'.format(text=json.dumps(choice.delta.content))
 
-                # Emit the delta as JSON-encoded string prefixed with 0: (Vercel AI convention)
-                yield f"0:{json.dumps(choice.delta.content)}\n"
-
-        # When the stream is exhausted include usage metadata
-        usage = stream.usage if hasattr(stream, "usage") else None
-        if usage:
-            payload = {
-                "finishReason": "stop",
-                "usage": {
-                    "promptTokens": usage.prompt_tokens,
-                    "completionTokens": usage.completion_tokens,
-                },
-            }
-            yield f"d:{json.dumps(payload)}\n"
+            if chunk.choices == []:
+                usage = chunk.usage
+                prompt_tokens = usage.prompt_tokens
+                completion_tokens = usage.completion_tokens
+                yield 'd:{{"finishReason":"{reason}","usage":{{"promptTokens":{prompt},"completionTokens":{completion}}}}}\n'.format(
+                    reason="stop",
+                    prompt=prompt_tokens,
+                    completion=completion_tokens
+                )
 
-    except APIConnectionError as e:
-        logger.error("OpenAI connection error: %s", str(e))
-        yield f"Error: Connection error – {str(e)}"
     except Exception as e:
-        logger.exception("Unexpected error during streaming chat")
-        yield f"Error: {str(e)}"
+        print(f"Error in stream_text: {str(e)}")
+        yield f"Error: {str(e)}".encode('utf-8')
 
 
 @router.post("/stream")
 async def chat_stream(request: ChatRequest):
-    """Streaming chat endpoint returning SSE-style chunks compatible with Vercel AI."""
-
-    if not settings.OPENAI_API_KEY:
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail="OPENAI_API_KEY is not configured on the server.",
-        )
-
     response = StreamingResponse(
-        _stream_text(request.messages),
+        stream_text(request.messages),
     )
-    # Vercel AI convention header so front-end recognises a streamed response
-    response.headers["x-vercel-ai-data-stream"] = "v1"
-    return response
+    response.headers['x-vercel-ai-data-stream'] = 'v1'
+    return response
+
+
+@router.get("/test")
+async def test_chat():
+    return {"message": "Chat endpoint is working!"}
+
+
+@router.get("/test-simple")
+async def test_simple_chat():
+    """Test simple chat completion without streaming - same as working repo"""
+    try:
+        response = client.chat.completions.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "user", "content": "Say 'Hello from simple chat!' in exactly those words."}
+            ],
+            max_tokens=20
+        )
+
+        return {
+            "status": "success",
+            "message": "Simple chat works!",
+            "response": response.choices[0].message.content,
+            "model": response.model
+        }
+
+    except Exception as e:
+        return {
+            "status": "error",
+            "message": f"Simple chat failed: {str(e)}",
+            "error_type": type(e).__name__
+        }
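
The refactored stream_text writes the Vercel AI data stream framing by hand: each text delta goes out as a '0:' line carrying a JSON-encoded string fragment, the closing record as a 'd:' line with finishReason and usage, and failures are yielded into the stream rather than raised. A minimal client sketch for exercising the endpoint locally follows; the base URL and the /api/v1 mount prefix are assumptions, since neither appears in this diff.

# Hypothetical smoke test for the refactored /chat/stream endpoint.
# Assumes the router is mounted under /api/v1 on a local dev server;
# adjust the URL to match the actual application setup.
import json
import requests

body = {"messages": [{"role": "user", "content": "Explain photosynthesis briefly."}]}
resp = requests.post(
    "http://localhost:8000/api/v1/chat/stream", json=body, stream=True
)
assert resp.headers.get("x-vercel-ai-data-stream") == "v1"

for line in resp.iter_lines(decode_unicode=True):
    if not line:
        continue
    prefix, _, payload = line.partition(":")
    if prefix == "0":
        # Text delta: payload is a JSON-encoded string fragment.
        print(json.loads(payload), end="", flush=True)
    elif prefix == "d":
        # Final data part with finishReason and token usage.
        print("\n" + payload)
    else:
        # Failures arrive as plain "Error: ..." lines inside the stream.
        print(line)
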
backend/app/schemas/chat.py CHANGED

@@ -1,16 +1,11 @@
-from typing import List
-
 from pydantic import BaseModel
+from typing import List
 
 
 class Message(BaseModel):
-    """Represents a single message in the chat conversation"""
-
     content: str
     role: str
 
 
 class ChatRequest(BaseModel):
-    """Schema for chat completion request body"""
-
     messages: List[Message]
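
The schema change only drops the docstrings and reorders the imports, so existing request bodies validate unchanged. A self-contained sketch checking a typical payload (the two models are duplicated here so the snippet runs outside the package):

# Standalone re-declaration of the schemas for a quick validation check
# (duplicated from backend/app/schemas/chat.py so the snippet runs alone).
from typing import List
from pydantic import BaseModel


class Message(BaseModel):
    content: str
    role: str


class ChatRequest(BaseModel):
    messages: List[Message]


req = ChatRequest.model_validate(
    {"messages": [{"role": "user", "content": "What is a derivative?"}]}
)
print(req.messages[0].role)  # -> user
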
backend/requirements.txt CHANGED

@@ -2,5 +2,5 @@ fastapi==0.104.1
 uvicorn[standard]==0.24.0
 pydantic==2.5.0
 pydantic-settings==2.1.0
-openai==1.8.2
+openai==1.82.0
 python-dotenv==1.0.0
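
The pin jumps from 1.8.2 to 1.82.0, skipping many releases rather than a patch bump. A quick sanity check that a rebuilt environment actually picked up the new version:

# Confirm the installed SDK matches the new pin before hitting the endpoints.
import openai

print(openai.__version__)
assert openai.__version__ == "1.82.0", "environment still has an old openai pin"
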