File size: 3,195 Bytes
b39c0ba
 
d57efd6
b39c0ba
 
 
d57efd6
0743bb0
9002555
b39c0ba
 
 
 
9002555
b39c0ba
9002555
 
d57efd6
9002555
 
 
 
 
b39c0ba
9002555
b39c0ba
 
 
 
 
 
69beac6
 
 
0743bb0
69beac6
 
0743bb0
d57efd6
0743bb0
d57efd6
0743bb0
d57efd6
 
 
9002555
d57efd6
 
 
9002555
 
 
d57efd6
 
9002555
d57efd6
9002555
 
d57efd6
9002555
 
 
 
 
 
 
 
d57efd6
9002555
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d57efd6
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import json
import logging
from typing import Any

from dotenv import load_dotenv
from fastapi import HTTPException, UploadFile
from fastapi.responses import JSONResponse

from core.chat.engine import Engine
from core.parser import clean_text
from langfuse.llama_index import LlamaIndexCallbackHandler
from script.document_uploader import Uploader
from script.vector_db import IndexManager
from service.aws_loader import Loader
from service.dto import BotResponseStreaming


# Load environment variables (API keys, endpoints) from a local .env file
# so downstream clients (Langfuse, AWS, vector DB) can read them at import time.
load_dotenv()

# Configure logging
# NOTE(review): basicConfig at import affects the root logger globally;
# a module-level `logging.getLogger(__name__)` would be less intrusive — confirm intent.
logging.basicConfig(level=logging.INFO)


async def data_ingestion(reference, file: UploadFile) -> Any:
    """Ingest an uploaded document: trace the run, parse it, and index it.

    Args:
        reference: Metadata describing the uploaded document; passed through
            to the ``Uploader`` (presumably includes a title — TODO confirm
            the expected keys against the caller).
        file: The uploaded file to process.

    Returns:
        A JSON string with a success payload on the happy path, or a 500
        ``JSONResponse`` when any ingestion step fails.
    """
    try:
        # Register trace parameters with Langfuse so this ingestion run is
        # attributed to the admin upload flow. The handler is used only for
        # this side effect; it is not referenced again below.
        langfuse_callback_handler = LlamaIndexCallbackHandler()
        langfuse_callback_handler.set_trace_params(
            user_id="admin_book_uploaded",
        )

        # Parse the uploaded file into nodes with attached metadata.
        uploader = Uploader(reference, file)
        nodes_with_metadata = await uploader.process_documents()

        # Build vector indexes from the parsed nodes using IndexManager.
        index = IndexManager()
        index.build_indexes(nodes_with_metadata)

        return json.dumps(
            {"status": "success", "message": "Vector Index loaded successfully."}
        )

    except Exception as e:
        # Log with full traceback and return a well-formed 500 response
        # instead of raising, so the route always produces an HTTP reply.
        logging.exception("An error occurred in data ingestion: %s", e)
        return JSONResponse(
            status_code=500,
            content="An internal server error occurred in data ingestion.",
        )

async def generate_streaming_completion(user_request, session_id):
    """Stream a chat completion for ``user_request`` within ``session_id``.

    Yields:
        ``BotResponseStreaming`` items: first one per generated token
        (carrying the token and the accumulated text so far), then one per
        source node (carrying the cleaned reference text, its metadata, and
        its relevance score). On failure, a single ``{"error": ...}`` dict
        is yielded instead.
    """
    try:
        engine = Engine()
        index_manager = IndexManager()

        # Load existing indexes from the vector store.
        index = index_manager.load_existing_indexes()

        # Retrieve the chat engine bound to this session's history.
        chat_engine = engine.get_chat_engine(index, session_id)

        # Kick off streaming generation.
        response = chat_engine.stream_chat(user_request)

        completed_response = ""
        for token in response.response_gen:
            completed_response += token  # accumulate the full text so far
            yield BotResponseStreaming(
                content=token, completed_content=completed_response
            )

        # After the text stream completes, emit one message per source node
        # so the client can render citations alongside the answer.
        for node in response.source_nodes:
            yield BotResponseStreaming(
                completed_content=completed_response,
                reference=str(clean_text(node.node.get_text())),
                metadata=dict(node.node.metadata),
                score=float(node.score),
            )
    except Exception as e:
        # The original code had a second, unreachable `except Exception`
        # clause that logged and raised HTTPException; the first clause
        # already caught everything, so that code never ran. Raising from an
        # async generator mid-stream cannot produce a clean HTTP error
        # anyway, so we log here and yield a structured error payload.
        logging.error("An error occurred in generate text: %s", e)
        yield {"error": str(e)}