Commit 91028c0 by luanpoppe
Parent(s): 2213315
feat: changing contextuals to Gemini-2.0-flash
_utils/LLMs/LLM_class.py
CHANGED
@@ -22,7 +22,7 @@ class LLM:
             model=model,
         )
 
-    def google_gemini(self, model="gemini-
+    def google_gemini(self, model="gemini-2.0-flash"):
         return ChatGoogleGenerativeAI(
             api_key=SecretStr(google_api_key),
             model=model,
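For reference, a minimal sketch of how the updated helper presumably fits together; everything outside the changed lines (the import paths, how google_api_key is loaded, and the environment variable name) is an assumption, not taken from the repository:

# Sketch only: reconstructed around the changed default, based on the
# identifiers visible in the diff (ChatGoogleGenerativeAI, SecretStr, google_api_key).
import os

from langchain_google_genai import ChatGoogleGenerativeAI
from pydantic import SecretStr

google_api_key = os.environ.get("GOOGLE_API_KEY", "")  # assumed source of the key


class LLM:
    def google_gemini(self, model="gemini-2.0-flash"):
        # Returns a LangChain chat model bound to the requested Gemini model;
        # the default now points at gemini-2.0-flash.
        return ChatGoogleGenerativeAI(
            api_key=SecretStr(google_api_key),
            model=model,
        )

Usage sketch: LLM().google_gemini() returns a chat model whose invoke/ainvoke methods accept a list of messages.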
_utils/gerar_relatorio_modelo_usuario/contextual_retriever.py
CHANGED
@@ -1,4 +1,5 @@
 import os
+from _utils.LLMs.LLM_class import LLM
 from _utils.gerar_relatorio_modelo_usuario.utils import (
     get_response_from_auxiliar_contextual_prompt,
     validate_many_chunks_in_one_request,
@@ -19,6 +20,7 @@ from _utils.models.gerar_relatorio import (
     DocumentChunk,
     RetrievalConfig,
 )
+from langchain_core.messages import HumanMessage
 
 lista_contador = []
 
@@ -84,10 +86,12 @@ class ContextualRetriever:
                     f"\n\nTENTATIVA FORMATAÇÃO CHUNKS NÚMERO {attempt + 1}"
                 )
                 print("\nCOMEÇANDO UMA REQUISIÇÃO DO CONTEXTUAL")
-                raw_response = await agpt_answer(prompt)
+                llms = LLM()
+                # raw_response = await agpt_answer(prompt)
+                raw_response = await llms.google_gemini("gemini-2.0-flash-lite-preview-02-05").ainvoke([HumanMessage(content=prompt)])
+
                 print("\nTERMINOU UMA REQUISIÇÃO DO CONTEXTUAL")
                 response = cast(str, raw_response)
-                # llms = LLM()
                 # response = await llms.deepseek().ainvoke([HumanMessage(content=prompt)])
                 # return cast(str, response.content)
 
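One detail worth noting about the new call path: ainvoke on a LangChain chat model returns an AIMessage rather than a plain string, so the generated text lives on .content (as the commented-out deepseek lines already did), while cast(str, raw_response) only changes the static type, not the runtime object. A minimal sketch of the invocation pattern, with the model name and message construction taken from the diff and the wrapper function assumed:

# Sketch only: the helper name contextualize_chunk and its signature are
# assumptions; the model name and HumanMessage usage come from the diff.
from typing import cast

from langchain_core.messages import HumanMessage

from _utils.LLMs.LLM_class import LLM


async def contextualize_chunk(prompt: str) -> str:
    llms = LLM()
    model = llms.google_gemini("gemini-2.0-flash-lite-preview-02-05")
    raw_response = await model.ainvoke([HumanMessage(content=prompt)])
    # ainvoke resolves to an AIMessage; .content holds the generated text.
    return cast(str, raw_response.content)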
_utils/splitters/Splitter_class.py
CHANGED
@@ -108,7 +108,7 @@ class Splitter:
             }
 
         # char_count += len(text)
-
+        print("\nTERMINOU DE ORGANIZAR PDFS EM CHUNKS")
 
         return chunks, initial_chunks
 
tests/test_custom_exception_handler.py
ADDED
@@ -0,0 +1,24 @@
+import pytest
+import os
+from _utils.splitters.Splitter_class import Splitter
+from _utils.models.gerar_relatorio import (
+    DocumentChunk,
+)
+from rest_framework import status
+from rest_framework.exceptions import ValidationError
+from rest_framework.test import APIClient
+from django.core.handlers.wsgi import WSGIRequest
+
+
+# class TestCustomExceptionHandler:
+#     @pytest.mark.django_db
+#     @pytest.mark.asyncio
+#     def test_function_that_raises(self):
+#         client = APIClient()
+
+#         invalid_data = {
+#             "name": "wrong property"
+#         }
+
+#         response = client.post('/gerar-documento', invalid_data, format='json')
+#         assert response.status_code == status.HTTP_400_BAD_REQUEST