pvanand committed
Commit 7aaf905 · verified · Parent: 2f5381c

Delete document_generator_v3.py

Files changed (1):
  document_generator_v3.py  +0 −658
document_generator_v3.py DELETED
@@ -1,658 +0,0 @@
# File: prompts.py

DOCUMENT_OUTLINE_PROMPT_SYSTEM = """You are a document generator. Provide the outline of the document requested in <prompt></prompt> in JSON format.
Include sections and subsections if required. Use the "Content" field to provide a specific prompt or instruction for generating content for that particular section or subsection.
Make sure the Pages follow a logical flow and each prompt's content does not overlap with other pages.
OUTPUT IN THE FOLLOWING JSON FORMAT enclosed in <output> tags
<output>
{
  "Document": {
    "Title": "Document Title",
    "Author": "Author Name",
    "Date": "YYYY-MM-DD",
    "Version": "1.0",

    "Pages": [
      {
        "PageNumber": "1",
        "Title": "Section Title",
        "Content": "overview",  # Optional: short overview of the section; leave as "" (empty string) if not required
        "Subsections": [
          {
            "PageNumber": "1.1",
            "Title": "Subsection Title",
            "Content": "Specific prompt or instruction for generating content for this subsection"
          }
        ]
      }
    ]
  }
}
</output>"""

# Note: {num_pages} is either "" or "consisting of N pages " (with a trailing
# space, supplied by generate_document_outline), so there is no space before "for".
DOCUMENT_OUTLINE_PROMPT_USER = """Generate a document outline {num_pages}for the following query: <prompt>{query}</prompt>"""

DOCUMENT_SECTION_PROMPT_SYSTEM = """You are a document generator; replace the section/subsection prompts with the requested content.
OUTPUT AS A WELL-FORMATTED DOCUMENT ENCLOSED IN <response></response> tags
<overall_objective>{overall_objective}</overall_objective>
<document_layout>{document_layout}</document_layout>"""

DOCUMENT_SECTION_PROMPT_USER = """<prompt>Output the content requested formatted as markdown. Follow the instructions below the title/subtitle to replace it with appropriate content: {content_instruction}</prompt>"""

##########################################

DOCUMENT_TEMPLATE_OUTLINE_PROMPT_SYSTEM = """You are a document template generator. Provide the outline of the document requested in <prompt></prompt> in JSON format.
Include sections and subsections if required. Use the "Content" field to provide a specific prompt or instruction for generating a template with placeholder text/example content for that particular section or subsection. Specify in each prompt to output as a template and use placeholder text/tables as necessary.
Make sure the Sections follow a logical flow and each prompt's content does not overlap with other sections.
OUTPUT IN THE FOLLOWING JSON FORMAT enclosed in <output> tags
<output>
{
  "Document": {
    "Title": "Document Title",
    "Author": "Author Name",
    "Date": "YYYY-MM-DD",
    "Version": "1.0",

    "Pages": [
      {
        "PageNumber": "1",
        "Title": "Section Title",
        "Content": "Specific prompt or instruction for generating the template for this section",
        "Subsections": [
          {
            "PageNumber": "1.1",
            "Title": "Subsection Title",
            "Content": "Specific prompt or instruction for generating the template for this subsection"
          }
        ]
      }
    ]
  }
}
</output>"""

DOCUMENT_TEMPLATE_PROMPT_USER = """Generate a document template outline {num_pages}for the following query: <prompt>{query}</prompt>"""

DOCUMENT_TEMPLATE_SECTION_PROMPT_SYSTEM = """You are a document template generator; replace the section/subsection prompts with the requested content, using placeholder text/examples/tables wherever required.
FORMAT YOUR OUTPUT AS A TEMPLATE ENCLOSED IN <response></response> tags
<overall_objective>{overall_objective}</overall_objective>
<document_layout>{document_layout}</document_layout>"""

DOCUMENT_TEMPLATE_SECTION_PROMPT_USER = """<prompt>Output the content requested formatted as markdown. Follow the instructions below the title/subtitle to replace it with appropriate content: {content_instruction}</prompt>"""

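A minimal sketch (not part of the original file) of how these templates are meant to be used: format the user prompt, send it with the system prompt, and pull the outline JSON out of the <output> tags, the same regex approach DocumentGenerator.extract_between_tags takes further down. The model reply below is a stand-in, not a real completion.

import json
import re

# pages_prompt deliberately carries a trailing space (or is empty), matching
# generate_document_outline below, so "{num_pages}for" renders correctly.
pages_prompt = "consisting of 3 pages "
user_prompt = DOCUMENT_OUTLINE_PROMPT_USER.format(num_pages=pages_prompt, query="An onboarding handbook")

# Stand-in for an LLM reply; a real call would send the system + user prompts to a model.
model_reply = '<output>{"Document": {"Title": "Onboarding Handbook", "Pages": []}}</output>'
match = re.search(r"<output>(.*?)</output>", model_reply, re.DOTALL)
outline = json.loads(match.group(1).strip()) if match else None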
# File: llm_observability.py

import sqlite3
import json
from datetime import datetime
from typing import Dict, Any, List, Optional

class LLMObservabilityManager:
    def __init__(self, db_path: str = "llm_observability_v2.db"):
        self.db_path = db_path
        self.create_table()

    def create_table(self):
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS llm_observations (
                    id TEXT PRIMARY KEY,
                    conversation_id TEXT,
                    created_at DATETIME,
                    status TEXT,
                    request TEXT,
                    response TEXT,
                    model TEXT,
                    total_tokens INTEGER,
                    prompt_tokens INTEGER,
                    completion_tokens INTEGER,
                    latency FLOAT,
                    user TEXT
                )
            ''')

    def insert_observation(self, response: Dict[str, Any], conversation_id: str, status: str, request: str, latency: float, user: str):
        created_at = datetime.fromtimestamp(response['created'])

        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()
            cursor.execute('''
                INSERT INTO llm_observations
                (id, conversation_id, created_at, status, request, response, model, total_tokens, prompt_tokens, completion_tokens, latency, user)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            ''', (
                response['id'],
                conversation_id,
                created_at,
                status,
                request,
                json.dumps(response['choices'][0]['message']),
                response['model'],
                response['usage']['total_tokens'],
                response['usage']['prompt_tokens'],
                response['usage']['completion_tokens'],
                latency,
                user
            ))

    def get_observations(self, conversation_id: Optional[str] = None) -> List[Dict[str, Any]]:
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()
            if conversation_id:
                cursor.execute('SELECT * FROM llm_observations WHERE conversation_id = ? ORDER BY created_at', (conversation_id,))
            else:
                cursor.execute('SELECT * FROM llm_observations ORDER BY created_at')
            rows = cursor.fetchall()

            column_names = [description[0] for description in cursor.description]
            return [dict(zip(column_names, row)) for row in rows]

    def get_all_observations(self) -> List[Dict[str, Any]]:
        return self.get_observations()

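A usage sketch (not part of the original file): the manager expects an OpenAI-style chat-completion dict, so a hand-built stand-in is enough to exercise insert and read-back. The db file name here is an arbitrary throwaway.

import time

manager = LLMObservabilityManager(db_path="demo_observability.db")  # arbitrary throwaway file
fake_response = {
    "id": "chatcmpl-demo-001",
    "created": int(time.time()),
    "model": "openai/gpt-4o-mini",
    "choices": [{"message": {"role": "assistant", "content": "Hello"}}],
    "usage": {"total_tokens": 12, "prompt_tokens": 8, "completion_tokens": 4},
}
manager.insert_observation(fake_response, conversation_id="demo", status="success",
                           request='[{"role": "user", "content": "Hi"}]', latency=0.42, user="tester")
print(manager.get_observations("demo")[0]["model"])  # -> openai/gpt-4o-mini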
# File: app.py
import os
import json
import re
import asyncio
import time
from typing import List, Dict, Optional, Any, Callable, Union
from openai import OpenAI
import logging
import functools
from fastapi import APIRouter, HTTPException, Request, UploadFile, File, Form, Depends
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from fastapi_cache import FastAPICache
from fastapi_cache.decorator import cache
import psycopg2
from datetime import datetime
import base64

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

def log_execution(func: Callable) -> Callable:
    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        logger.info(f"Executing {func.__name__}")
        try:
            result = func(*args, **kwargs)
            logger.info(f"{func.__name__} completed successfully")
            return result
        except Exception as e:
            logger.error(f"Error in {func.__name__}: {e}")
            raise
    return wrapper

# aiclient.py

class AIClient:
    def __init__(self):
        self.client = OpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key="sk-or-v1-" + os.environ['OPENROUTER_API_KEY']
        )
        self.observability_manager = LLMObservabilityManager()

    @log_execution
    def generate_response(
        self,
        messages: List[Dict[str, str]],
        model: str = "openai/gpt-4o-mini",
        max_tokens: int = 32000,
        conversation_id: Optional[str] = None,
        user: str = "anonymous"
    ) -> Optional[str]:
        if not messages:
            return None

        start_time = time.time()
        response = self.client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            stream=False
        )
        end_time = time.time()
        latency = end_time - start_time

        # Log the observation
        self.observability_manager.insert_observation(
            response=response.dict(),
            conversation_id=conversation_id or "default",
            status="success",
            request=json.dumps(messages),
            latency=latency,
            user=user
        )

        return response.choices[0].message.content

    @log_execution
    def generate_vision_response(
        self,
        messages: List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, str]]]]]]],
        model: str = "google/gemini-flash-1.5-8b",
        max_tokens: int = 32000,
        conversation_id: Optional[str] = None,
        user: str = "anonymous"
    ) -> Optional[str]:
        if not messages:
            return None

        start_time = time.time()
        response = self.client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            stream=False
        )
        end_time = time.time()
        latency = end_time - start_time

        # Log the observation
        self.observability_manager.insert_observation(
            response=response.dict(),
            conversation_id=conversation_id or "default",
            status="success",
            request=json.dumps(messages),
            latency=latency,
            user=user
        )

        return response.choices[0].message.content

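Exercising the client looks roughly like this (sketch only: it needs OPENROUTER_API_KEY in the environment and makes a real, billed network call):

client = AIClient()
answer = client.generate_response(
    messages=[{"role": "user", "content": "Summarize what an LLM observability table stores."}],
    conversation_id="demo",
    user="tester",
)
print(answer)  # the completion text; the call is also logged to llm_observability_v2.db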
class VisionTools:
    def __init__(self, ai_client):
        self.ai_client = ai_client

    async def extract_images_info(self, images: List[UploadFile]) -> str:
        try:
            image_contents = []
            for image in images:
                image_content = await image.read()
                base64_image = base64.b64encode(image_content).decode('utf-8')
                image_contents.append({
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/jpeg;base64,{base64_image}"
                    }
                })

            messages = [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": "Extract the contents of these images in detail in a structured format, focusing on any text, tables, diagrams, or visual elements that might be relevant for document generation."
                        },
                        *image_contents
                    ]
                }
            ]

            image_context = self.ai_client.generate_vision_response(messages)
            return image_context
        except Exception as e:
            logger.error(f"Error processing images: {str(e)}")
            return ""

class DatabaseManager:
    """Manages database operations."""

    def __init__(self):
        self.db_params = {
            "dbname": "postgres",
            "user": os.environ['SUPABASE_USER'],
            "password": os.environ['SUPABASE_PASSWORD'],
            "host": "aws-0-us-west-1.pooler.supabase.com",
            "port": "5432"
        }

    @log_execution
    def update_database(self, user_id: str, user_query: str, response: str) -> None:
        with psycopg2.connect(**self.db_params) as conn:
            with conn.cursor() as cur:
                insert_query = """
                    INSERT INTO ai_document_generator (user_id, user_query, response)
                    VALUES (%s, %s, %s);
                """
                cur.execute(insert_query, (user_id, user_query, response))

class DocumentGenerator:
    def __init__(self, ai_client: AIClient):
        self.ai_client = ai_client
        self.document_outline = None
        self.content_messages = []

    @staticmethod
    def extract_between_tags(text: str, tag: str) -> str:
        pattern = f"<{tag}>(.*?)</{tag}>"
        match = re.search(pattern, text, re.DOTALL)
        return match.group(1).strip() if match else ""

    @staticmethod
    def remove_duplicate_title(content: str, title: str, section_number: str) -> str:
        patterns = [
            rf"^#+\s*{re.escape(section_number)}(?:\s+|\s*:\s*|\.\s*){re.escape(title)}",
            rf"^#+\s*{re.escape(title)}",
            rf"^{re.escape(section_number)}(?:\s+|\s*:\s*|\.\s*){re.escape(title)}",
            rf"^{re.escape(title)}",
        ]

        for pattern in patterns:
            content = re.sub(pattern, "", content, flags=re.MULTILINE | re.IGNORECASE)

        return content.lstrip()

    @log_execution
    def generate_document_outline(self, query: str, num_pages: int, template: bool = False, image_context: str = "", max_retries: int = 3) -> Optional[Dict]:
        pages_prompt = "" if num_pages == 0 else f"consisting of {num_pages} pages "
        messages = [
            {"role": "system", "content": DOCUMENT_OUTLINE_PROMPT_SYSTEM if not template else DOCUMENT_TEMPLATE_OUTLINE_PROMPT_SYSTEM},
            {"role": "user", "content": DOCUMENT_OUTLINE_PROMPT_USER.format(query=query, num_pages=pages_prompt) if not template else DOCUMENT_TEMPLATE_PROMPT_USER.format(query=query, num_pages=pages_prompt)}
        ]
        # Update user content to include image context if provided
        if image_context:
            messages[1]["content"] += f"<attached_images>\n\n{image_context}\n\n</attached_images>"

        for attempt in range(max_retries):
            outline_response = self.ai_client.generate_response(messages, model="openai/gpt-4o")
            outline_json_text = self.extract_between_tags(outline_response, "output")

            try:
                self.document_outline = json.loads(outline_json_text)
                return self.document_outline
            except json.JSONDecodeError as e:
                if attempt < max_retries - 1:
                    logger.warning(f"Failed to parse JSON (attempt {attempt + 1}): {e}")
                    logger.info("Retrying...")
                else:
                    logger.error(f"Failed to parse JSON after {max_retries} attempts: {e}")
                    return None

    @log_execution
    def generate_content(self, title: str, content_instruction: str, section_number: str, template: bool = False) -> str:
        SECTION_PROMPT_USER = DOCUMENT_SECTION_PROMPT_USER if not template else DOCUMENT_TEMPLATE_SECTION_PROMPT_USER
        self.content_messages.append({
            "role": "user",
            "content": SECTION_PROMPT_USER.format(content_instruction=content_instruction)
        })
        section_response = self.ai_client.generate_response(self.content_messages)
        content = self.extract_between_tags(section_response, "response")
        content = self.remove_duplicate_title(content, title, section_number)
        self.content_messages.append({
            "role": "assistant",
            "content": section_response
        })
        return content

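The title-deduplication step is easiest to see on a concrete string (illustrative values):

generated = "## 2. Background\nThe generated body text begins here."
cleaned = DocumentGenerator.remove_duplicate_title(generated, title="Background", section_number="2")
print(cleaned)  # "The generated body text begins here." (the repeated heading is stripped)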
class MarkdownConverter:
    @staticmethod
    def slugify(text: str) -> str:
        return re.sub(r'\W+', '-', text.lower())

    @classmethod
    def convert_to_markdown(cls, document: Dict) -> str:
        # Title page
        markdown = "<div style='text-align: center; padding-top: 33vh;'>\n\n"
        markdown += f"<h1 style='color: #2c3e50; border-bottom: 2px solid #3498db; padding-bottom: 10px; display: inline-block;'>{document['Title']}</h1>\n\n"
        markdown += f"<p style='color: #7f8c8d;'><em>By {document['Author']}</em></p>\n\n"
        markdown += f"<p style='color: #95a5a6;'>Version {document['Version']} | {document['Date']}</p>\n\n"
        markdown += "</div>\n\n"

        # Generate Table of Contents
        markdown += "<div style='page-break-before: always;'></div>\n\n"
        markdown += "<h2 style='color: #2c3e50; text-align: center;'>Table of Contents</h2>\n\n"
        markdown += "<nav style='background-color: #f8f9fa; padding: 20px; border-radius: 5px; line-height: 1.6;'>\n\n"

        for section in document['Pages']:
            section_number = section['PageNumber']
            section_title = section['Title']
            markdown += f"<p><a href='#{cls.slugify(section_title)}' style='color: #3498db; text-decoration: none;'>{section_number}. {section_title}</a></p>\n\n"

            for subsection in section.get('Subsections', []):
                subsection_number = subsection['PageNumber']
                subsection_title = subsection['Title']
                markdown += f"<p style='margin-left: 20px;'><a href='#{cls.slugify(subsection_title)}' style='color: #2980b9; text-decoration: none;'>{subsection_number} {subsection_title}</a></p>\n\n"

        markdown += "</nav>\n\n"

        # Generate Content
        markdown += "<div style='max-width: 800px; margin: 0 auto; font-family: \"Segoe UI\", Arial, sans-serif; line-height: 1.6;'>\n\n"

        for section in document['Pages']:
            markdown += "<div style='page-break-before: always;'></div>\n\n"
            section_number = section['PageNumber']
            section_title = section['Title']
            markdown += f"<h2 id='{cls.slugify(section_title)}' style='color: #2c3e50; border-bottom: 1px solid #bdc3c7; padding-bottom: 5px;'>{section_number}. {section_title}</h2>\n\n"
            markdown += f"<div style='color: #34495e; margin-bottom: 20px;'>\n\n{section['Content']}\n\n</div>\n\n"

        markdown += "</div>"
        return markdown

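A minimal round-trip through the converter (hypothetical document dict matching the outline schema above):

doc = {
    "Title": "Demo Document", "Author": "Jane Doe", "Date": "2024-01-01", "Version": "1.0",
    "Pages": [
        {"PageNumber": "1", "Title": "Introduction", "Content": "Hello world.", "Subsections": []}
    ],
}
print(MarkdownConverter.convert_to_markdown(doc))  # title page + ToC + one styled section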
router = APIRouter()

class JsonDocumentResponse(BaseModel):
    json_document: Dict

class MarkdownDocumentRequest(BaseModel):
    json_document: Dict
    query: str
    template: bool = False
    conversation_id: str = ""

MESSAGE_DELIMITER = b"\n---DELIMITER---\n"

def yield_message(message):
    message_json = json.dumps(message, ensure_ascii=False).encode('utf-8')
    return message_json + MESSAGE_DELIMITER

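The stream is framed with a raw byte delimiter rather than SSE, so a consumer just buffers bytes and splits. A sketch of the receiving side (the helper name is mine, not from the original file):

def iter_messages(chunks):
    """Reassemble delimiter-framed JSON messages from an iterable of byte chunks."""
    buffer = b""
    for chunk in chunks:
        buffer += chunk
        while MESSAGE_DELIMITER in buffer:
            frame, buffer = buffer.split(MESSAGE_DELIMITER, 1)
            if frame:
                yield json.loads(frame.decode('utf-8'))

# Round-trip a single frame through the framing helpers:
for msg in iter_messages([yield_message({"type": "ping", "content": {}})]):
    print(msg["type"])  # -> ping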
async def generate_document_stream(document_generator: DocumentGenerator, document_outline: Dict, query: str, template: bool = False, conversation_id: str = ""):
    document_generator.document_outline = document_outline
    db_manager = DatabaseManager()
    overall_objective = query
    document_layout = json.dumps(document_generator.document_outline["Document"]["Pages"], indent=2)
    cache_key = f"image_context_{conversation_id}"
    image_context = await FastAPICache.get_backend().get(cache_key)

    SECTION_PROMPT_SYSTEM = DOCUMENT_SECTION_PROMPT_SYSTEM if not template else DOCUMENT_TEMPLATE_SECTION_PROMPT_SYSTEM
    document_generator.content_messages = [
        {
            "role": "system",
            "content": SECTION_PROMPT_SYSTEM.format(
                overall_objective=overall_objective,
                document_layout=document_layout
            )
        }
    ]
    if image_context:
        document_generator.content_messages[0]["content"] += f"<attached_images>\n\n{image_context}\n\n</attached_images>"

    for section in document_generator.document_outline["Document"].get("Pages", []):
        section_title = section.get("Title", "")
        section_number = section.get("PageNumber", "")
        content_instruction = section.get("Content", "")

        section_prompt_content = f"# {section_number} {section_title}\n\n{content_instruction}\n\n"

        for subsection in section.get("Subsections", []):
            subsection_title = subsection.get("Title", "")
            subsection_number = subsection.get("PageNumber", "")
            subsection_content_instruction = subsection.get("Content", "")
            section_prompt_content += f"## {subsection_number} {subsection_title}\n\n{subsection_content_instruction}\n\n"

        content = document_generator.generate_content(section_title, section_prompt_content, section_number, template)
        section["Content"] = content
        yield yield_message({
            "type": "document_section",
            "content": {
                "section_number": section_number,
                "section_title": section_title,
                "content": content
            }
        })

    markdown_document = MarkdownConverter.convert_to_markdown(document_generator.document_outline["Document"])

    yield yield_message({
        "type": "complete_document",
        "content": {
            "markdown": markdown_document,
            "json": document_generator.document_outline
        },
    })

    db_manager.update_database("elevatics", query, markdown_document)

@router.post("/generate-document/markdown-stream")
async def generate_markdown_document_stream_endpoint(request: MarkdownDocumentRequest):
    ai_client = AIClient()
    document_generator = DocumentGenerator(ai_client)

    async def stream_generator():
        try:
            async for chunk in generate_document_stream(document_generator, request.json_document, request.query, request.template, request.conversation_id):
                yield chunk
        except Exception as e:
            yield yield_message({
                "type": "error",
                "content": str(e)
            })

    return StreamingResponse(stream_generator(), media_type="application/octet-stream")

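Consuming the endpoint from a client might look like this (sketch; assumes the router is mounted on an app served at localhost:8000 and that httpx is installed, neither of which is shown in this file):

import httpx
import json

DELIMITER = b"\n---DELIMITER---\n"  # must match MESSAGE_DELIMITER on the server

def stream_document(json_document: dict, query: str):
    payload = {"json_document": json_document, "query": query,
               "template": False, "conversation_id": "demo"}
    buffer = b""
    with httpx.stream("POST", "http://localhost:8000/generate-document/markdown-stream",
                      json=payload, timeout=None) as response:
        for chunk in response.iter_bytes():
            buffer += chunk
            while DELIMITER in buffer:
                frame, buffer = buffer.split(DELIMITER, 1)
                if frame:
                    yield json.loads(frame)  # document_section / complete_document / error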
# Note: @cache must sit below the route decorator so FastAPI registers the cached wrapper.
@router.post("/generate-document/json", response_model=JsonDocumentResponse)
@cache(expire=600*24*7)
async def generate_document_outline_endpoint(
    query: str = Form(...),
    template: bool = Form(False),
    conversation_id: str = Form(...),
    num_pages: int = Form(...),
    images: Optional[List[UploadFile]] = File(None),
    documents: Optional[List[UploadFile]] = File(None)
):
    ai_client = AIClient()
    document_generator = DocumentGenerator(ai_client)
    vision_tools = VisionTools(ai_client)
    try:
        image_context = ""
        if images:
            image_context = await vision_tools.extract_images_info(images)

        # Store the image_context in the cache
        cache_key = f"image_context_{conversation_id}"
        await FastAPICache.get_backend().set(cache_key, image_context, expire=3600)  # Cache for 1 hour

        json_document = document_generator.generate_document_outline(
            query,
            num_pages,
            template,
            image_context=image_context
        )

        if json_document is None:
            raise HTTPException(status_code=500, detail="Failed to generate a valid document outline")

        return JsonDocumentResponse(json_document=json_document)
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

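Because the outline endpoint takes multipart form data (Form + File parameters), a client call looks like this (sketch; assumes a local server and an example image on disk):

import requests

resp = requests.post(
    "http://localhost:8000/generate-document/json",  # assumed mount point
    data={"query": "Employee onboarding handbook", "template": "false",
          "conversation_id": "demo", "num_pages": "5"},
    files=[("images", ("scan.jpg", open("scan.jpg", "rb"), "image/jpeg"))],  # optional
)
outline = resp.json()["json_document"]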
## OBSERVABILITY
import csv
from io import StringIO

class ObservationResponse(BaseModel):
    observations: List[Dict]

def create_csv_response(observations: List[Dict]) -> StreamingResponse:
    def iter_csv(data):
        output = StringIO()
        writer = csv.DictWriter(output, fieldnames=data[0].keys() if data else [])
        writer.writeheader()
        for row in data:
            writer.writerow(row)
        output.seek(0)
        yield output.read()

    headers = {
        'Content-Disposition': 'attachment; filename="observations.csv"'
    }
    return StreamingResponse(iter_csv(observations), media_type="text/csv", headers=headers)

@router.get("/last-observations/{limit}")
async def get_last_observations(limit: int = 10, format: str = "json"):
    observability_manager = LLMObservabilityManager()

    try:
        # Get all observations, sorted by created_at in descending order
        all_observations = observability_manager.get_observations()
        all_observations.sort(key=lambda x: x['created_at'], reverse=True)

        # Get the last conversation_id
        if all_observations:
            last_conversation_id = all_observations[0]['conversation_id']

            # Filter observations for the last conversation
            last_conversation_observations = [
                obs for obs in all_observations
                if obs['conversation_id'] == last_conversation_id
            ][:limit]

            if format.lower() == "csv":
                return create_csv_response(last_conversation_observations)
            else:
                return ObservationResponse(observations=last_conversation_observations)
        else:
            if format.lower() == "csv":
                return create_csv_response([])
            else:
                return ObservationResponse(observations=[])
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to retrieve observations: {str(e)}")

## TEST CACHE

class CacheItem(BaseModel):
    key: str
    value: str

@router.post("/set-cache")
async def set_cache(item: CacheItem):
    try:
        # Set the cache with a default expiration of 1 hour (3600 seconds)
        await FastAPICache.get_backend().set(item.key, item.value, expire=3600)
        return {"message": f"Cache set for key: {item.key}"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to set cache: {str(e)}")

@router.get("/get-cache/{key}")
async def get_cache(key: str):
    try:
        value = await FastAPICache.get_backend().get(key)
        if value is None:
            raise HTTPException(status_code=404, detail=f"No cache found for key: {key}")
        return {"key": key, "value": value}
    except HTTPException:
        # Re-raise the 404 instead of masking it as a 500 below
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to get cache: {str(e)}")
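
The cache test endpoints can be poked the same way (sketch, assuming a local server):

import requests

requests.post("http://localhost:8000/set-cache", json={"key": "greeting", "value": "hello"})
print(requests.get("http://localhost:8000/get-cache/greeting").json())
# -> {"key": "greeting", "value": "hello"}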