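"""Gradio demo: a RAG chatbot over car owner's manuals.

Loads embeddings, connects to a Qdrant vector store, builds a
retrieval/answering graph per query via the project's Wrappers class,
streams the answer, and logs each Q/A pair to Qdrant together with
like/dislike feedback.
"""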
import os
import uuid
from pprint import pprint

import openai
import gradio as gr
from langchain_openai import AzureChatOpenAI
from langchain_core.messages import HumanMessage, AIMessage
from langchain.memory import ConversationBufferMemory
from langchain_core.chat_history import InMemoryChatMessageHistory

# Project-local modules; DatabaseOperations is expected to come from the
# star import of wrappers.
from wrappers import *
from embedding_loader import *
from initialize_db import QdrantClientInitializer
from pdf_loader import PDFLoader
embeddings = import_embedding()

# Azure OpenAI configuration, read from environment variables.
AZURE_OPENAI_KEY = os.getenv('azure_api')
os.environ['AZURE_OPENAI_KEY'] = AZURE_OPENAI_KEY
openai.api_version = "2024-02-15-preview"  # replace with your own API version
openai.azure_endpoint = os.getenv('azure_endpoint')
model = "gpt35turbo"  # deployment name in Azure OpenAI Studio
myLLM = AzureChatOpenAI(
    azure_endpoint=openai.azure_endpoint,
    api_key=AZURE_OPENAI_KEY,
    api_version=openai.api_version,
    temperature=0,
    streaming=True,
    model=model,
)
obj_qdrant = QdrantClientInitializer()
client = obj_qdrant.initialize_db()
obj_loader = PDFLoader()
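# Render a question/answer pair as Markdown (handy for notebook-style
# inspection; not wired into the Gradio UI below).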
def print_result(question, result):
    output_text = f"""### Question:
{question}
### Answer:
{result}
"""
    return output_text
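# Rebuild LangChain memory from Gradio's (user, assistant) history tuples.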
def format_chat_prompt(chat_history):
    prompt = []
    print(chat_history)  # debug: inspect the incoming history
    for turn in chat_history:
        user_message, ai_message = turn
        prompt.append(HumanMessage(user_message))
        prompt.append(AIMessage(ai_message))
    chat_history = InMemoryChatMessageHistory(messages=prompt)
    memory = ConversationBufferMemory(chat_memory=chat_history, memory_key="history", input_key="question")
    return memory
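# Module-level State objects used as plain global containers via .value;
# note this shares feedback state across all sessions rather than per user.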
liked_state = gr.State(None)
last_interaction = gr.State(None)
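# Answer one question against the selected manual's Qdrant collection.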
def chat(question, manual, history, liked):
    history = history or []
    memory = format_chat_prompt(history)
    # Map each manual to its Qdrant payload CAR_ID and collection name.
    manual_list = {"Toyota_Corolla_2024_TR": -8580416610875007536,
                   "Renault_Clio_2024_TR": -5514489544983735006,
                   "Fiat_Egea_2024_TR": -2026113796962100812}
    collection_list = {"Toyota_Corolla_2024_TR": "TOYOTA_MANUAL_COLLECTION_EMBED3",
                       "Renault_Clio_2024_TR": "RENAULT_MANUAL_COLLECTION_EMBED3",
                       "Fiat_Egea_2024_TR": "FIAT_MANUAL_COLLECTION_EMBED3"}
    collection_name = collection_list[manual]
    db = obj_loader.load_from_database(embeddings=embeddings, collection_name=collection_name)
    CAR_ID = manual_list[manual]
    wrapper = Wrappers(collection_name, client, embeddings, myLLM, db, CAR_ID, memory)
    inputs = {"question": question, "iter_halucination": 0}  # key name as expected by the graph
    app = wrapper.lagchain_graph()  # method name as defined in Wrappers
    for output in app.stream(inputs):
        for key, value in output.items():
            pprint(f"Finished running: {key}:")
    # The last streamed node output carries the final generation.
    response = value["generation"]['text']
    history.append((question, response))
    # Store the last interaction without saving to the database yet
    last_interaction.value = {
        "question": question,
        "response": response,
        "manual": manual,
        "point_id": uuid.uuid4().hex,
    }
    return '', history
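# Persist the pending Q/A pair with its feedback label, then clear it.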
def save_last_interaction(feedback):
    if last_interaction.value:
        DatabaseOperations.save_user_history_demo(
            client,
            "USER_COLLECTION_EMBED3",
            last_interaction.value["question"],
            last_interaction.value["response"],
            embeddings,
            last_interaction.value["point_id"],
            last_interaction.value["manual"],
            feedback,
        )
        last_interaction.value = None
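# Gradio UI: manual selector, chat window, and feedback hooks.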
manual_list = ["Toyota_Corolla_2024_TR", "Renault_Clio_2024_TR", "Fiat_Egea_2024_TR"]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(height=600)
    manual = gr.Dropdown(label="Kullanım Kılavuzları",  # "User Manuals"
                         value="Toyota_Corolla_2024_TR", choices=manual_list)
    textbox = gr.Textbox()
    clear = gr.ClearButton(components=[textbox, chatbot], value='Clear console')

    def handle_like(data: gr.LikeData):
        liked_state.value = data.liked
        if liked_state.value is not None:
            feedback = "LIKE" if liked_state.value else "DISLIKE"
            save_last_interaction(feedback)

    def gradio_chat(question, manual, history):
        save_last_interaction("N/A")  # save the previous interaction before starting a new one
        return chat(question, manual, history, liked_state.value)

    textbox.submit(gradio_chat, [textbox, manual, chatbot], [textbox, chatbot])
    chatbot.like(handle_like, None, None)  # adds like/dislike icons to chatbot responses

demo.launch()