ChenyuRabbitLove committed
Commit 05257d2 · 1 Parent(s): 06f464b

feat: add chat streaming endpoint and related schemas for OpenAI integration

backend/app/api/v1/endpoints/chat.py ADDED
@@ -0,0 +1,92 @@
+import json
+import logging
+from typing import AsyncGenerator
+
+from fastapi import APIRouter, HTTPException, status
+from fastapi.responses import StreamingResponse
+from openai import OpenAI, APIConnectionError
+
+from ....core.config import settings
+from ....schemas.chat import ChatRequest, Message
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(prefix="/chat", tags=["chat"])
+
+# Create a singleton OpenAI client instance.
+client = OpenAI(api_key=settings.OPENAI_API_KEY, timeout=30.0)
+
+
+async def _stream_text(messages: list[Message]) -> AsyncGenerator[str, None]:
+    """Internal helper that yields text deltas as server-sent events."""
+
+    # Prepend a default system message as the first instruction.
+    system_msg = {
+        "role": "system",
+        "content": (
+            "You are an AI learning assistant for PlayGo AI, an educational platform. "
+            "Your goal is to help students learn and understand various subjects. "
+            "Provide clear, concise, and accurate explanations."
+        ),
+    }
+
+    formatted_messages = [system_msg] + [msg.model_dump() for msg in messages]
+
+    try:
+        stream = client.chat.completions.create(
+            model="gpt-3.5-turbo",
+            messages=formatted_messages,
+            temperature=0.7,
+            stream=True,
+            # Ask the API to attach token usage to the final chunk.
+            stream_options={"include_usage": True},
+        )
+
+        # The SDK returns a synchronous iterator of events; iterating it
+        # blocks the event loop between chunks.
+        usage = None
+        for chunk in stream:
+            # The final chunk carries usage metadata and an empty choices list.
+            if getattr(chunk, "usage", None):
+                usage = chunk.usage
+            for choice in chunk.choices:
+                # Skip empty deltas (role-only first chunk, finish signal).
+                if not choice.delta.content:
+                    continue
+
+                # Emit the delta as a JSON-encoded string prefixed with 0: (Vercel AI convention).
+                yield f"0:{json.dumps(choice.delta.content)}\n"
+
+        # Once the stream is exhausted, include the usage metadata.
+        if usage:
+            payload = {
+                "finishReason": "stop",
+                "usage": {
+                    "promptTokens": usage.prompt_tokens,
+                    "completionTokens": usage.completion_tokens,
+                },
+            }
+            yield f"d:{json.dumps(payload)}\n"
+
+    except APIConnectionError as e:
+        logger.error("OpenAI connection error: %s", str(e))
+        yield f"Error: Connection error - {str(e)}"
+    except Exception as e:
+        logger.exception("Unexpected error during streaming chat")
+        yield f"Error: {str(e)}"
+
+
+@router.post("/stream")
+async def chat_stream(request: ChatRequest):
+    """Streaming chat endpoint returning SSE-style chunks compatible with Vercel AI."""
+
+    if not settings.OPENAI_API_KEY:
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail="OPENAI_API_KEY is not configured on the server.",
+        )
+
+    response = StreamingResponse(_stream_text(request.messages))
+    # Vercel AI convention header so the front-end recognises a streamed response.
+    response.headers["x-vercel-ai-data-stream"] = "v1"
+    return response
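
For a quick manual check of the new endpoint, something like the following works. This is a sketch, not part of the commit: it assumes the v1 router is mounted under /api/v1 on localhost:8000 and that the requests package is installed.

# Hypothetical smoke test for the streaming endpoint; the base URL and the
# `requests` dependency are assumptions, not part of this commit.
import json

import requests

resp = requests.post(
    "http://localhost:8000/api/v1/chat/stream",
    json={"messages": [{"role": "user", "content": "Explain photosynthesis."}]},
    stream=True,
)
resp.raise_for_status()

for line in resp.iter_lines(decode_unicode=True):
    if not line:
        continue
    prefix, _, body = line.partition(":")
    if prefix == "0":    # text delta, a JSON-encoded string
        print(json.loads(body), end="", flush=True)
    elif prefix == "d":  # final payload with finishReason and usage
        print("\n", json.loads(body))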
backend/app/api/v1/router.py CHANGED
@@ -1,8 +1,10 @@
 from fastapi import APIRouter
 
 from .endpoints.images import router as images_router
+from .endpoints.chat import router as chat_router
 
 api_router = APIRouter()
 
 # Include all endpoint routers
 api_router.include_router(images_router)
+api_router.include_router(chat_router)
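
Nothing in this diff shows where api_router itself is mounted. For context, a typical FastAPI entry point would wire it up roughly as below; the module path and the /api/v1 prefix are assumptions, not shown in this commit.

# Hypothetical app entry point; not part of this commit.
from fastapi import FastAPI

from app.api.v1.router import api_router

app = FastAPI()
app.include_router(api_router, prefix="/api/v1")  # prefix is assumed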
backend/app/schemas/chat.py ADDED
@@ -0,0 +1,16 @@
+from typing import List
+
+from pydantic import BaseModel
+
+
+class Message(BaseModel):
+    """Represents a single message in the chat conversation"""
+
+    content: str
+    role: str
+
+
+class ChatRequest(BaseModel):
+    """Schema for chat completion request body"""
+
+    messages: List[Message]
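
Since both schemas are plain Pydantic v2 models, the request body the endpoint expects can be validated in isolation; a minimal sketch:

# Validate a chat request body outside FastAPI (Pydantic v2 API).
from app.schemas.chat import ChatRequest

body = {"messages": [{"role": "user", "content": "What is recursion?"}]}
req = ChatRequest.model_validate(body)

assert req.messages[0].role == "user"
print(req.model_dump_json())  # {"messages":[{"content":"What is recursion?","role":"user"}]}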
backend/requirements.txt CHANGED
@@ -2,5 +2,5 @@ fastapi==0.104.1
 uvicorn[standard]==0.24.0
 pydantic==2.5.0
 pydantic-settings==2.1.0
-openai==1.8.2
+openai==1.82.0
 python-dotenv==1.0.0