File size: 1,533 Bytes
e9d730a
 
 
 
 
 
9700f95
 
 
 
 
e9d730a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9700f95
e9d730a
 
9700f95
 
 
 
 
 
 
 
 
 
e9d730a
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
# src/models/chat.py
from pydantic import BaseModel
from typing import Optional, List, Dict
from datetime import datetime
from .base import ChatMetadata

from pydantic import BaseModel, validator, Field
from typing import Optional
from config.config import settings


class ChatRequest(BaseModel):
    """Incoming payload for the chat endpoint.

    Only ``query`` is required; every other field carries a default so a
    minimal request is just ``{"query": "..."}``.
    """
    query: str  # the user's question / prompt text
    llm_provider: str = 'openai'  # which LLM backend serves this request
    max_context_docs: int = 3  # how many retrieved documents to supply as context
    temperature: float = 0.7  # sampling temperature forwarded to the LLM
    stream: bool = False  # stream the response incrementally when True
    conversation_id: Optional[str] = None  # continue an existing conversation when set

class ChatResponse(ChatMetadata):
    """Outgoing payload for the chat endpoint.

    Extends the shared ``ChatMetadata`` base with the generated answer and
    optional retrieval details; the optional fields default to ``None`` when
    no retrieval information accompanies the answer.
    """
    response: str  # the generated answer text
    context: Optional[List[str]] = None  # raw context snippets used, if any
    sources: Optional[List[Dict[str, str]]] = None  # per-source attribution records
    relevant_doc_scores: Optional[List[float]] = None  # relevance scores for retrieved docs

class FeedbackRequest(BaseModel):
    """User feedback on a chat response.

    ``rating`` is constrained to the inclusive range
    ``[0, settings.MAX_RATING]`` by the ``Field`` declaration itself:
    pydantic rejects out-of-range values with a ``ValidationError`` during
    standard field validation.  The previous ``@validator('rating')`` hook
    re-checked exactly the same bounds; because ``Field`` constraints run
    before custom validators, that branch was unreachable dead code and has
    been removed.  Callers still see the same ``ValidationError`` type for
    bad input.
    """
    rating: int = Field(..., ge=0, le=settings.MAX_RATING)  # required, bounded rating
    feedback: Optional[str] = None  # optional free-text comment

    def format_rating(self) -> str:
        """Return the rating as a fraction of the maximum, e.g. ``"4/5"``."""
        return f"{self.rating}/{settings.MAX_RATING}"

class SummarizeRequest(BaseModel):
    """Incoming payload for the summarize endpoint."""
    # Identifier of the conversation to summarize — required.
    conversation_id: str
    # When True (the default), metadata accompanies the summary.
    include_metadata: bool = True

class SummaryResponse(BaseModel):
    """Outgoing payload for the summarize endpoint."""
    # The generated summary text.
    summary: str
    # Structured highlights — required; presumably keyed by insight name (verify against producer).
    key_insights: Dict
    # Optional extra details; None when the caller did not request metadata.
    metadata: Optional[Dict] = None