Update services/openai_service.py
services/openai_service.py
@@ -28,7 +28,8 @@ except Exception as e:
 
 def generate_rag_response(json_output, user_query):
     logging.info("Generating RAG response")
-
+    print("JSON INPUT FOR RAG RESPONSE:")
+    print(json_output)
     # Extract text from the JSON output
     context_texts = [hit['chunk_text'] for hit in json_output]
 
@@ -49,7 +50,7 @@ def generate_rag_response(json_output, user_query):
         max_tokens=2000,  # Limit the maximum number of tokens in the response
         temperature=0.5
     )
-
+    print(f"GENERATED RESPONSE FROM OPENAI: {chat_completion}")
     # Log the response from the model
     logging.info("RAG response generation completed")
     logging.info(f"RAG response: {chat_completion.choices[0].message.content}")
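For context, the two hunks above omit the prompt construction and the chat-completion call between them (original lines 35-48). Below is a minimal sketch of the whole function after this change, assuming the openai>=1.0 Python client and a module-level client object; the model name, prompt wording, context joining, and return statement are assumptions not shown in the diff:

import logging

from openai import OpenAI

client = OpenAI()  # assumed module-level client; reads OPENAI_API_KEY from the environment

def generate_rag_response(json_output, user_query):
    logging.info("Generating RAG response")
    print("JSON INPUT FOR RAG RESPONSE:")
    print(json_output)
    # Extract text from the JSON output
    context_texts = [hit['chunk_text'] for hit in json_output]

    # Assumed: join the retrieved chunks into one context block for the prompt
    context = "\n\n".join(context_texts)

    chat_completion = client.chat.completions.create(
        model="gpt-4o-mini",  # assumed model name
        messages=[
            {"role": "system", "content": "Answer the question using only the provided context."},
            {"role": "user", "content": f"Context:\n{context}\n\nQuestion: {user_query}"},
        ],
        max_tokens=2000,  # Limit the maximum number of tokens in the response
        temperature=0.5,
    )
    print(f"GENERATED RESPONSE FROM OPENAI: {chat_completion}")
    # Log the response from the model
    logging.info("RAG response generation completed")
    logging.info(f"RAG response: {chat_completion.choices[0].message.content}")
    return chat_completion.choices[0].message.content  # assumed return value

A natural follow-up cleanup would be to replace these print calls with logging.debug, so the debug output goes through the same logging configuration as the rest of the module.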