import logging
import os
from typing import Dict, List, Tuple, cast

from cohere import Client
from pydantic import SecretStr

from _utils.LLMs.LLM_class import LLM
from _utils.models.gerar_relatorio import RetrievalConfig
from _utils.splitters.Splitter_class import Splitter
from _utils.vector_stores.Vector_store_class import VectorStore
from setup.easy_imports import (
    BM25Okapi,
    ChatOpenAI,
    Chroma,
    HuggingFaceEmbeddings,
    PromptTemplate,
)


class GerarDocumento:
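    """Builds the final report: retrieves candidate chunks from a Chroma
    vector store and a BM25 index, merges the two rankings with reciprocal
    rank fusion, and generates the output document with a configurable LLM."""
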
openai_api_key = os.environ.get("OPENAI_API_KEY", "")
cohere_api_key = os.environ.get("COHERE_API_KEY", "")
resumo_gerado = ""

    def __init__(
self,
config: RetrievalConfig,
embedding_model,
chunk_size,
chunk_overlap,
num_k_rerank,
model_cohere_rerank,
# prompt_auxiliar,
gpt_model,
gpt_temperature,
# id_modelo_do_usuario,
prompt_gerar_documento,
reciprocal_rank_fusion,
):
self.config = config
self.logger = logging.getLogger(__name__)
# self.prompt_auxiliar = prompt_auxiliar
self.gpt_model = gpt_model
self.gpt_temperature = gpt_temperature
self.prompt_gerar_documento = prompt_gerar_documento
self.reciprocal_rank_fusion = reciprocal_rank_fusion
        # openai_api_key is already read from the environment at class level;
        # only the Cohere client needs explicit construction here.
        self.cohere_client = Client(self.cohere_api_key)
self.embeddings = HuggingFaceEmbeddings(model_name=embedding_model)
self.num_k_rerank = num_k_rerank
self.model_cohere_rerank = model_cohere_rerank
self.splitter = Splitter(chunk_size, chunk_overlap)
self.vector_store = VectorStore(embedding_model)

    def retrieve_with_rank_fusion(
self, vector_store: Chroma, bm25: BM25Okapi, chunk_ids: List[str], query: str
) -> List[Dict]:
"""Combine embedding and BM25 retrieval results"""
try:
# Get embedding results
embedding_results = vector_store.similarity_search_with_score(
query, k=self.config.num_chunks
)
            # Convert embedding results to (chunk_id, score) pairs; Chroma
            # returns distances, so 1 / (1 + distance) maps them onto a
            # descending similarity scale.
embedding_list = [
(doc.metadata["chunk_id"], 1 / (1 + score))
for doc, score in embedding_results
]
# Get BM25 results
tokenized_query = query.split()
bm25_scores = bm25.get_scores(tokenized_query)
# Convert BM25 scores to list of (chunk_id, score)
bm25_list = [
(chunk_ids[i], float(score)) for i, score in enumerate(bm25_scores)
]
# Sort bm25_list by score in descending order and limit to top N results
bm25_list = sorted(bm25_list, key=lambda x: x[1], reverse=True)[
: self.config.num_chunks
]
            # Normalize BM25 scores. Guard max() with default=0: an empty
            # candidate list would otherwise raise ValueError, and all-zero
            # scores previously caused a division-by-zero error.
            max_bm25 = max((score for _, score in bm25_list), default=0) or 1
            bm25_list = [(doc_id, score / max_bm25) for doc_id, score in bm25_list]
# Pass the lists to rank fusion
result_lists = [embedding_list, bm25_list]
weights = [self.config.embedding_weight, self.config.bm25_weight]
combined_results = self.reciprocal_rank_fusion(
result_lists, weights=weights
)
return combined_results
except Exception as e:
self.logger.error(f"Error in rank fusion retrieval: {str(e)}")
raise

    def rank_fusion_get_top_results(
self,
vector_store: Chroma,
bm25: BM25Okapi,
chunk_ids: List[str],
query: str = "Summarize the main points of this document",
):
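        """Run rank fusion for the query, fetch the full text and metadata of
        the top-ranked chunks from the vector store, and return the pair
        (sources, contexts)."""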
# Get combined results using rank fusion
ranked_results = self.retrieve_with_rank_fusion(
vector_store, bm25, chunk_ids, query
)
# Prepare context and track sources
contexts = []
sources = []
# Get full documents for top results
for chunk_id, score in ranked_results[: self.config.num_chunks]:
results = vector_store.get(
where={"chunk_id": chunk_id}, include=["documents", "metadatas"]
)
if results["documents"]:
context = results["documents"][0]
metadata = results["metadatas"][0]
contexts.append(context)
sources.append(
{
"content": context,
"page": metadata["page"],
"chunk_id": chunk_id,
"relevance_score": score,
"context": metadata.get("context", ""),
}
)
return sources, contexts

    def select_model_for_last_requests(self, llm_ultimas_requests: str):
        """Instantiate the LLM used for the final generation call, keyed by
        the requested model name."""
        llm_instance = LLM()
        if llm_ultimas_requests == "gpt-4o-mini":
            llm = ChatOpenAI(
                temperature=self.gpt_temperature,
                model=self.gpt_model,
                api_key=SecretStr(self.openai_api_key),
            )
        elif llm_ultimas_requests == "deepseek-chat":
            llm = llm_instance.deepseek()
        elif llm_ultimas_requests == "gemini-2.0-flash":
            llm = llm_instance.google_gemini("gemini-2.0-flash")
        else:
            # Fail loudly instead of returning an unbound variable when an
            # unknown model name is passed in.
            raise ValueError(
                f"Unsupported llm_ultimas_requests value: {llm_ultimas_requests!r}"
            )
        return llm

    async def gerar_documento_final(
self,
vector_store: Chroma,
bm25: BM25Okapi,
chunk_ids: List[str],
llm_ultimas_requests: str,
query: str = "Summarize the main points of this document",
) -> List[Dict]:
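        """Generate the final document from the top fused chunks and return it
        as structured paragraphs, each annotated with its source chunk."""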
try:
sources, contexts = self.rank_fusion_get_top_results(
vector_store, bm25, chunk_ids, query
)
llm = self.select_model_for_last_requests(llm_ultimas_requests)
# prompt_auxiliar = PromptTemplate(
# template=self.prompt_auxiliar, input_variables=["context"]
# )
# resumo_auxiliar_do_documento = llm.invoke(
# prompt_auxiliar.format(context="\n\n".join(contexts))
# )
# self.resumo_gerado = cast(str, resumo_auxiliar_do_documento.content)
prompt_gerar_documento = PromptTemplate(
template=self.prompt_gerar_documento,
input_variables=["context"],
)
documento_gerado = cast(
str,
llm.invoke(
prompt_gerar_documento.format(
context="\n\n".join(contexts),
# modelo_usuario=serializer.data["modelo"],
)
).content,
)
# Split the response into paragraphs
summaries = [p.strip() for p in documento_gerado.split("\n\n") if p.strip()]
# Create structured output
structured_output = []
for idx, summary in enumerate(summaries):
source_idx = min(idx, len(sources) - 1)
structured_output.append(
{
"content": summary,
"source": {
"page": sources[source_idx]["page"],
"text": sources[source_idx]["content"][:200] + "...",
"context": sources[source_idx]["context"],
"relevance_score": sources[source_idx]["relevance_score"],
"chunk_id": sources[source_idx]["chunk_id"],
},
}
)
return structured_output
except Exception as e:
self.logger.error(f"Error generating enhanced summary: {str(e)}")
raise
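

# ---------------------------------------------------------------------------
# The `reciprocal_rank_fusion` callable is injected through __init__ and is
# not defined in this module. As a reference, a minimal sketch compatible with
# how `retrieve_with_rank_fusion` calls it (a list of (chunk_id, score) lists
# plus per-list weights) could look like the function below. The name and the
# customary smoothing constant k=60 are assumptions, not part of this project.
def reciprocal_rank_fusion_sketch(
    result_lists: List[List[Tuple[str, float]]],
    weights: List[float],
    k: int = 60,
) -> List[Tuple[str, float]]:
    fused: Dict[str, float] = {}
    for result_list, weight in zip(result_lists, weights):
        # Rank items within each list by their own score, best first.
        ranked = sorted(result_list, key=lambda pair: pair[1], reverse=True)
        for rank, (chunk_id, _score) in enumerate(ranked, start=1):
            fused[chunk_id] = fused.get(chunk_id, 0.0) + weight / (k + rank)
    # Return (chunk_id, fused_score) pairs, best first, matching the shape the
    # caller slices with ranked_results[: config.num_chunks].
    return sorted(fused.items(), key=lambda pair: pair[1], reverse=True)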
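

# ---------------------------------------------------------------------------
# Hedged usage sketch. How the BM25 index and chunk_ids are actually assembled
# lives elsewhere in this project; the helper below only illustrates inputs
# that satisfy gerar_documento_final's signature, assuming each chunk dict
# carries "content" and "chunk_id" keys (an assumption, not a project fact).
def build_bm25_inputs_sketch(chunks: List[Dict]) -> Tuple[BM25Okapi, List[str]]:
    # BM25Okapi expects a tokenized corpus; plain whitespace tokenization
    # mirrors the query.split() call in retrieve_with_rank_fusion.
    tokenized_corpus = [chunk["content"].split() for chunk in chunks]
    chunk_ids = [chunk["chunk_id"] for chunk in chunks]
    return BM25Okapi(tokenized_corpus), chunk_ids


# Example call shape (requires a populated Chroma store and API keys):
#     bm25, chunk_ids = build_bm25_inputs_sketch(chunks)
#     structured = await gerador.gerar_documento_final(
#         vector_store, bm25, chunk_ids, llm_ultimas_requests="gpt-4o-mini"
#     )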