# Source: Hugging Face file view, commit 9700f95 ("Working chat with context and history") — 1.53 kB
# src/models/chat.py
from pydantic import BaseModel
from typing import Optional, List, Dict
from datetime import datetime
from .base import ChatMetadata
from pydantic import BaseModel, validator, Field
from typing import Optional
from config.config import settings
class ChatRequest(BaseModel):
    """Request model for the chat endpoint.

    Carries the user query plus generation/retrieval knobs; an optional
    conversation_id ties the turn to an existing conversation history.
    """

    query: str
    llm_provider: str = Field(default='openai')
    max_context_docs: int = Field(default=3)
    temperature: float = Field(default=0.7)
    stream: bool = Field(default=False)
    conversation_id: Optional[str] = Field(default=None)
class ChatResponse(ChatMetadata):
    """Response model for the chat endpoint.

    Extends ChatMetadata with the generated answer and, when available,
    the retrieval details used to produce it (context snippets, source
    documents, and relevance scores).
    """

    response: str
    context: Optional[List[str]] = Field(default=None)
    sources: Optional[List[Dict[str, str]]] = Field(default=None)
    relevant_doc_scores: Optional[List[float]] = Field(default=None)
class FeedbackRequest(BaseModel):
    """Request model for submitting user feedback on a chat response.

    `rating` is an integer in [0, settings.MAX_RATING]; `feedback` is
    optional free text.
    """

    # Field(ge=..., le=...) already enforces the bounds, so the previous
    # @validator('rating') duplicate check was dead code (the field
    # constraint raises first) and has been removed.
    rating: int = Field(..., ge=0, le=settings.MAX_RATING)
    feedback: Optional[str] = None

    def format_rating(self) -> str:
        """Return the rating as a fraction of the configured maximum, e.g. "4/5"."""
        return f"{self.rating}/{settings.MAX_RATING}"
class SummarizeRequest(BaseModel):
    """Request model for the summarize endpoint.

    Identifies the conversation to summarize; set include_metadata to
    False to omit metadata from the resulting summary.
    """

    conversation_id: str
    include_metadata: bool = Field(default=True)
class SummaryResponse(BaseModel):
    """Response model for the summarize endpoint.

    Holds the summary text, a dict of key insights, and optional
    metadata about the summarized conversation.
    """

    summary: str
    key_insights: Dict
    metadata: Optional[Dict] = Field(default=None)