import os
import random

import pandas as pd
import gradio as gr
from dotenv import load_dotenv

from langchain_community.vectorstores import Qdrant
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_core.documents import Document
from langchain_core.prompts import PromptTemplate
from langchain_together import Together
from qdrant_client import QdrantClient  # used only by the commented Docker-based setup below

# Format the data for ingestion: each row becomes one document whose text is
# the product name plus its short description, with the full row kept as metadata.
all_prods_df = pd.read_csv("data/cleaned_CSVIndian10000.csv")
all_prods_df = all_prods_df.fillna("")

product_metadata = all_prods_df.to_dict(orient="index")

texts = [f"{v['name']}\n{v['product_desc']}" for v in product_metadata.values()]
metadatas = list(product_metadata.values())

docs = [
    Document(page_content=txt, metadata={"source": meta})
    for txt, meta in zip(texts, metadatas)
]

print("Data loaded.........")


# Load the embedding model
model_name = "BAAI/bge-large-en"
model_kwargs = {"device": "cpu"}
encode_kwargs = {"normalize_embeddings": True}
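# With normalize_embeddings=True the vectors are unit-length, so cosine
# similarity in the vector store reduces to a simple dot product.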

embeddings = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs
)

print("Embedding model loaded.........")


# Load the vector store (in-memory Qdrant instance)
# url = "http://localhost:6333"
collection_name = "shopintel100v3"

vector_store = Qdrant.from_documents(
    docs,
    embeddings,
    location=":memory:",
    collection_name=collection_name,
    prefer_grpc=False,
)

print("Vector store loaded.........")


load_dotenv()

TOGETHER_API_KEY = os.getenv("TOGETHER_API_KEY")
# Avoid printing the secret itself; just confirm it was loaded.
print("TOGETHER_API_KEY loaded:", TOGETHER_API_KEY is not None)


# Alternative: reuse an existing collection from a Qdrant server running in
# Docker instead of rebuilding the in-memory store above.
# model_name = "BAAI/bge-large-en"
# model_kwargs = {"device": "cpu"}
# encode_kwargs = {"normalize_embeddings": True}

# embeddings = HuggingFaceBgeEmbeddings(
#     model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs
# )
# print("embeddings loaded.............")

# url = "http://localhost:6333"
# collection_name = "shopintel100v3"

# client = QdrantClient(url=url, prefer_grpc=False)

# vector_store = Qdrant(
#     client=client,
#     collection_name=collection_name,
#     embeddings=embeddings,
# )
# print("qdrant embeddings from docker were loaded.............")

llm = Together(
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
    temperature=0.2,
    max_tokens=5000,
    top_k=50,
    together_api_key=TOGETHER_API_KEY
)
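
# Optional smoke test (a minimal sketch; uncomment to verify the Together API
# connection before wiring up the full chain):
# print(llm.invoke("Reply with the single word: ready"))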


# query = "ASUS VivoBook 15 (2021)"
# result = vector_store.similarity_search_with_score(query=query, k=5)

# for i in result:
#     doc, score = i
#     print({"score": score, "content": doc.page_content, "metadata": doc.metadata["source"]})
#     print("---------------------------------")

# Retrieve the top-k most similar products from Qdrant.

def retrieve_product(user_input, vector_store, k=10):
    result = vector_store.similarity_search_with_score(
        query=user_input,
        k=k,
    )
    return result
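
# Example usage (illustrative): each entry in the returned list is a
# (Document, score) pair, e.g.
#   results = retrieve_product("budget gaming laptop", vector_store, k=5)
#   top_doc, top_score = results[0]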


# Build a prompt context string from the products retrieved for the user query.

def create_context(user_input, vector_store):
    result = retrieve_product(user_input, vector_store)

    context = ""
    for index, (doc, score) in enumerate(result):
        product_metadata = doc.metadata["source"]  # the full CSV row for this product

        context += f"""
        * Product {index + 1} -
          - Product name : {product_metadata["name"]}
          - Product price: {product_metadata["discount_price"]}
          - Brief description of the product: {product_metadata["product_desc"]}
          - Detailed description of the product: {product_metadata["about_this_item"]}
          - Rating value (1.0 - 5.0): {product_metadata["ratings"]}
          - Overall review: {product_metadata["overall_review"]}
        """

    return context



# Prompt template for the Mixtral model

template = """You are a friendly, conversational AI ecommerce assistant. The context below lists several ecommerce products.
Use only the following context to answer the customer's questions.

It is very important that you follow these instructions:
 - Don't use general knowledge to answer the question.
 - If you don't find the answer in the context, or the question is unrelated to the context, just say that you don't know the answer.
 - Under no circumstances should the customer know that you are referring to a context.


Context:

{context}


Question:
{question}


Helpful Answer:"""
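
# {context} and {question} are filled in at query time via PromptTemplate
# inside respond() below.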


# Gradio chat handler: build context for the message, format the prompt,
# and append the LLM's answer to the chat history.
def respond(message, chat_history):
    try:
        context = create_context(message, vector_store)
        print("context:-------------------------\n", context)
        prompt = PromptTemplate(template=template, input_variables=["context", "question"])
        prompt_formatted_str = prompt.format(
            context=context,
            question=message
        )
        output = llm.invoke(prompt_formatted_str)
        chat_history.append((message, output))
        return "", chat_history
    except Exception as e:
        print("Error:", e)
        error_responses = [
            "Sorry, I encountered an error while processing your request.",
            "Oops, something went wrong. Please try again later.",
            "I'm having trouble understanding that. Can you please rephrase?",
            "It seems there was an issue. Let's try something else."
        ]
        error_message = random.choice(error_responses)
        output = error_message
        chat_history.append((message, output))
        return "", chat_history

# Earlier gr.Interface-based version (kept commented for reference)
# chatbot = gr.Chatbot(height=450)
# msg = gr.Textbox(label="What would you like to know?")
# gr.Interface(
#     fn=respond,
#     inputs=msg,
#     outputs=gr.Textbox(label="Response"),
#     title="Conversational AI Chatbot",
# ).launch(
#     share=True,
# )

# Define the Gradio components
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(height=450)
    msg = gr.Textbox(label="What would you like to know?")
    btn = gr.Button("Submit")
    clear = gr.ClearButton(value="Clear Console", components=[msg, chatbot])

    # Button click event to respond to the message
    btn.click(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])

    # Pressing Enter in the textbox also submits the message
    msg.submit(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])

# Close any previously running Gradio instances, then launch the app
gr.close_all()

demo.launch()