import os

# Load environment variables from .env before reading any API keys
from dotenv import load_dotenv
load_dotenv()

import openai
openai.api_key = os.getenv("OPENAI_API_KEY")

from flask import Flask, jsonify, render_template, request
import requests, json
import PyPDF2

import shutil
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage

import nltk

from datetime import datetime

from langchain.llms import OpenAI, Replicate

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings import HuggingFaceBgeEmbeddings
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.embeddings import SentenceTransformerEmbeddings

from langchain.document_loaders import SeleniumURLLoader, PyPDFLoader
from langchain.docstore.document import Document

from langchain.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter

from langchain.chains import VectorDBQA, ConversationChain, ConversationalRetrievalChain

from langchain.document_loaders import UnstructuredFileLoader, TextLoader
from langchain import PromptTemplate

from langchain.chains import RetrievalQA
#from langchain.memory import ConversationBufferWindowMemory
from langchain.memory import ConversationBufferMemory


from transformers import LlamaTokenizer, AutoTokenizer

import warnings

warnings.filterwarnings("ignore")



app = Flask(__name__, template_folder="./")

# Create a directory in a known location to save files to.
uploads_dir = os.path.join(app.root_path,'static', 'uploads')

os.makedirs(uploads_dir, exist_ok=True)

# Initialize global variables for conversation history
conversation_history = []

defaultEmbeddingModelID = 3
defaultLLMID = 0


def pretty_print_docs(docs):
    # Print each document's length, source, and content, separated by a horizontal rule
    print(f"\n{'-' * 100}\n".join(
        f"Document {i + 1}:\n\nDocument Length>>>{len(d.page_content)}"
        f"\n\nDocument Source>>> {d.metadata['source']}\n\nContent>>> {d.page_content}"
        for i, d in enumerate(docs)))


def getEmbeddingModel(embeddingId):
    # Select the embedding model by ID; anything unrecognized falls back to OpenAI
    if embeddingId == 1:
        embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
    elif embeddingId == 2:
        model_name = "hkunlp/instructor-large"
        model_kwargs = {'device': 'cpu'}
        encode_kwargs = {'normalize_embeddings': True}
        embeddings = HuggingFaceInstructEmbeddings(model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs)
    elif embeddingId == 3:
        model_name = "BAAI/bge-large-en-v1.5"
        model_kwargs = {'device': 'cuda'}
        encode_kwargs = {'normalize_embeddings': True}  # set True to compute cosine similarity
        embeddings = HuggingFaceBgeEmbeddings(model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs)
    else:
        embeddings = OpenAIEmbeddings()
    return embeddings


def getLLMModel(LLMID):
    # Select the chat LLM by ID; anything unrecognized falls back to OpenAI
    if LLMID == 1:
        llm = Replicate(
            model="meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d",
            model_kwargs={"temperature": 0.2, "max_new_tokens": 2500})
        print("LLAMA2 13B LLM Selected")
    elif LLMID == 2:
        llm = Replicate(
            model="meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3",
            model_kwargs={"temperature": 0.2, "max_new_tokens": 2500})
        print("LLAMA2 70B LLM Selected")
    elif LLMID == 3:
        llm = Replicate(
            model="meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e",
            model_kwargs={"temperature": 0.2, "max_new_tokens": 2500})
        print("LLAMA2 7B Chat LLM Selected")
    elif LLMID == 4:
        llm = Replicate(
            model="a16z-infra/mistral-7b-instruct-v0.1:83b6a56e7c828e667f21fd596c338fd4f0039b46bcfa18d973e8e70e455fda70",
            model_kwargs={"temperature": 0.2, "max_new_tokens": 2500})
        print("Mistral AI LLM Selected")
    else:
        llm = OpenAI(model_name="gpt-3.5-turbo-0125", temperature=0.0)
        print("Open AI LLM Selected")
    return llm


def clearKBUploadDirectory(uploads_dir):
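    """Delete every file, link, and subdirectory inside the uploads directory."""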
    for filename in os.listdir(uploads_dir):
        file_path = os.path.join(uploads_dir, filename)
        print("Clearing Doc Directory. Trying to delete" + file_path)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print('Failed to delete %s. Reason: %s' % (file_path, e))

def PDFChunkerWithSeparator(filepath, separator):
    # Read the full text of a PDF or TXT file, then split it into Documents on the separator
    content = ""
    if filepath.endswith(".pdf"):
        # creating a pdf reader object
        reader = PyPDF2.PdfReader(filepath)
        # print the number of pages in the pdf file
        print(len(reader.pages))
        for page in reader.pages:
            content += page.extract_text()
    elif filepath.endswith(".txt"):
        with open(filepath) as f:
            content = f.read()
    splitted_content_list = content.split(separator)

    doclist = []
    for splitted_content in splitted_content_list:
        new_doc = Document(page_content=splitted_content, metadata={"source": filepath})
        doclist.append(new_doc)
    if len(doclist) > 3:
        print(doclist[len(doclist) - 3])
    return doclist


def loadKB(fileprovided, urlProvided, uploads_dir, request):
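    """Build the knowledge-base document list: chunk uploaded files (or the
    default JTest.txt) on the </Q> separator and, if URLs were provided,
    scrape the semicolon-separated list with Selenium. Also initializes the
    global tokenizer used for token counting."""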
    documents = []
    global tokenizer
    BASE_MODEL = "LLAMA-TOKENIZER"
    savedModelPath = "./model/" + BASE_MODEL
    #tokenizer = LlamaTokenizer.from_pretrained(savedModelPath)
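    # bert-base-cased serves as a stand-in for the Llama tokenizer above; it is
    # only used for rough token counting when chunking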
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    separator = "</Q>"
    if fileprovided:
        # Delete Files
        clearKBUploadDirectory(uploads_dir)
        # Read and Embed New Files provided
        for file in request.files.getlist('files[]'):
            print("File Received>>>" + file.filename)
            file.save(os.path.join(uploads_dir, secure_filename(file.filename)))
            #loader = PyPDFLoader(os.path.join(uploads_dir, secure_filename(file.filename)))
            #documents.extend(loader.load())
            documents.extend(PDFChunkerWithSeparator(os.path.join(uploads_dir, secure_filename(file.filename)),separator))
    else:
        #loader = TextLoader('Jio.txt')
        #documents.extend(loader.load())
        documents.extend(PDFChunkerWithSeparator('JTest.txt',separator))

    if urlProvided:
        weburl = request.form.getlist('weburl')
        print(weburl)
        urlList = weburl[0].split(';')
        print(urlList)
        print("Selenium Started", datetime.now().strftime("%H:%M:%S"))
        # urlLoader=RecursiveUrlLoader(urlList[0])
        urlLoader = SeleniumURLLoader(urlList)
        print("Selenium Completed", datetime.now().strftime("%H:%M:%S"))
        documents.extend(urlLoader.load())
        print("inside selenium loader:")
        print(documents)

    return documents


def getRAGChain(customerName, customerDistrict, custDetailsPresent, vectordb,llmID):
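    """Assemble a ConversationalRetrievalChain over the vector store, with a
    customer-aware prompt and a fresh conversation memory per request."""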

    # Fresh conversation memory, shared with the request handler via the global;
    # ConversationalRetrievalChain expects the history under the "chat_history"
    # key as a list of messages
    global memory
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    
    chain = ConversationalRetrievalChain.from_llm(
        llm=getLLMModel(llmID),
        chain_type='stuff',
        retriever=getRetriever(vectordb),
        memory=memory,
        verbose=True,
        combine_docs_chain_kwargs={'prompt': createPrompt(customerName, customerDistrict, custDetailsPresent)}
    )
    return chain

def getRetriever(vectordb):
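    """Return a Maximal Marginal Relevance retriever over the vector store,
    fetching the top 2 chunks."""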
    return vectordb.as_retriever(search_type="mmr", search_kwargs={'k': 2})

def createVectorDB(documents,embeddingModelID):
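    """Token-count each document, recursively split any chunk over 1000 tokens,
    drop empty chunks, embed the rest, and build a cosine-space Chroma store."""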
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=150)
    texts = []
    for document in documents:
        tokenized_input = tokenizer.tokenize(document.page_content)
        print("Token Count::::::::::" + str(len(tokenized_input)))
        if (len(tokenized_input) > 1000):
            print("Splitting Content using RTS")
            splitted_doc = text_splitter.split_documents([document])
            texts.extend(splitted_doc)
        elif (len(tokenized_input) < 1000 and len(tokenized_input) > 1):
            texts.append(document)
    # texts = text_splitter.split_documents(documents)
    print("All chunk List START ***********************\n\n")
    pretty_print_docs(texts)
    print("All chunk List END ***********************\n\n")
    embeddings = getEmbeddingModel(embeddingModelID)
    print("Embedding Started >>>>>>>>>>>>>>>>>>", datetime.now().strftime("%H:%M:%S"))
    vectordb = Chroma.from_documents(texts, embeddings, collection_metadata={"hnsw:space": "cosine"})
    print("Vector Store Creation Completed*********************************\n\n")
    return vectordb

def createPrompt(cName, cCity, custDetailsPresent):
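    """Compose the support-agent prompt: persona instructions, the customer
    profile (delimited by <cp></cp>), retrieved context, chat history, and
    the question."""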
    #cProfile = "Customer's Name is " + cName + "\nCustomer's lives in or customer's Resident State or Customer's place is " + cCity + "\n"
    cProfile = "Customer's Name is " + cName + "\nCustomer's lives in the city of " + cCity + "\n"
    print(cProfile)

    # template1 = """You role is of a Professional Customer Support Executive and your name is Jio AIAssist.
    #     You are talking to the below customer whose information is provided in block delimited by <cp></cp>.
    #     Use the following customer related information (delimited by <cp></cp>) and context (delimited by <ctx></ctx>) to answer the question at the end by thinking step by step alongwith reaonsing steps:
    #     If you are unable to answer the question using the context provided, just say that you don't know, don't try to make up an answer.
    #     Use the customer information to replace entities in the question before answering\n
    #     \n"""

    template1 = """You role is of a Professional Customer Support Executive and your name is Jio AIAssist. 
    Use the context (delimited by <ctx></ctx>) to answer the question at the end by thinking step by step alongwith reaonsing steps.
    If you are unable to answer the question using the context provided, just say that you don't know, don't try to make up an answer. 
    Use the customer information to replace entities in the question before answering\n \n"""

    template2 = """
        <ctx>
        {context}
        </ctx>
        <hs>
        {chat_history}
        </hs>
        Question: {question}
        Answer: """

    prompt_template = template1 + "You are talking to the following customer. <cp>\n" + cProfile + "\n</cp>\n" + template2
    PROMPT = PromptTemplate(template=prompt_template, input_variables=["chat_history", "context", "question"])
    return PROMPT

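# Build the startup vector store from the default knowledge base (JTest.txt)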
vectordb = createVectorDB(loadKB(False, False, uploads_dir, None),defaultEmbeddingModelID)

@app.route('/', methods=['GET'])
def test():
    return "Docker hello"

@app.route('/KBUploader')
def KBUpload():
    return render_template("KBTrain.html")

@app.route('/aiassist')
def aiassist():
    return render_template("index.html")

@app.route('/aisearch')
def aisearch():
    return render_template("aisearch.html")

@app.route('/agent/chat/suggestion', methods=['POST'])
def process_json():
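    """Chat endpoint: for each message in the request, retrieve relevant chunks,
    run the RAG chain, and return suggested replies plus supporting passages.

    Example payload (illustrative values; field names are those read below):
    {
        "conversation_id": "abc123",
        "llmID": 0,
        "custDetails": {"cName": "Ravi", "cDistrict": "Mumbai"},
        "message": ["What plans are available?"]
    }
    """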
    print(f"\n{'*' * 100}\n")
    print("Request Received >>>>>>>>>>>>>>>>>>", datetime.now().strftime("%H:%M:%S"))

    # Retrieve conversation ID from the request (use any suitable ID)
    conversation_id = request.json.get('conversation_id', None)
    
    content_type = request.headers.get('Content-Type')
    if content_type == 'application/json':
        requestQuery = request.get_json()
        print(type(requestQuery))
        custDetailsPresent = False
        customerName = ""
        customerDistrict = ""
        if "custDetails" in requestQuery:
            custDetailsPresent = True
            customerName = requestQuery['custDetails']['cName']
            customerDistrict = requestQuery['custDetails']['cDistrict']

        selectedLLMID=defaultLLMID
        if "llmID" in requestQuery:
            selectedLLMID = int(requestQuery['llmID'])

        # Create a conversation ID-specific history list if not exists
        conversation_history_id = f"{conversation_id}_history"
        if conversation_history_id not in globals():
            globals()[conversation_history_id] = []
        conversation_history = globals()[conversation_history_id]

        
        print("chain initiation")
        chainRAG = getRAGChain(customerName, customerDistrict, custDetailsPresent, vectordb,selectedLLMID)
        print("chain created")
        suggestionArray = []
        searchResultArray = []

        for index, query in enumerate(requestQuery['message']):
            # The chain's attached memory records each exchange automatically,
            # so messages are not added to it manually here
            conversation_history.append(query)
            print("Printing Retriever Docs")
            for doc in getRetriever(vectordb).get_relevant_documents(query):
                searchResult = {}
                print(f"\n{'-' * 100}\n")
                searchResult['documentSource'] = doc.metadata['source']
                searchResult['pageContent'] = doc.page_content
                print(doc)
                print("Document Source>>>>>>  " + searchResult['documentSource'] + "\n\n")
                print("Page Content>>>>>> " + searchResult['pageContent'] + "\n\n")
                print(f"\n{'-' * 100}\n")
                searchResultArray.append(searchResult)
            print("Printing Retriever Docs Ended")

            print("Chain Run Started >>>>>>>>>>>>>>>>>>", datetime.now().strftime("%H:%M:%S"))
            message = chainRAG.run({"question": query})  # the attached memory supplies chat_history
            print("Chain Run Completed >>>>>>>>>>>>>>>>>>", datetime.now().strftime("%H:%M:%S"))
            print("query:", query)
            print("Response:", message)
            if "I don't know" in message:
                message = "Dear Sir/ Ma'am, Could you please ask questions relevant to Jio?"
            responseJSON = {"message": message, "id": index}
            suggestionArray.append(responseJSON)
        print("Response Sent >>>>>>>>>>>>>>>>>>", datetime.now().strftime("%H:%M:%S"))
        return jsonify(suggestions=suggestionArray, searchResult=searchResultArray)
    else:
        return 'Content-Type not supported!'

@app.route('/file_upload', methods=['POST'])
def file_Upload():
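    """KB update endpoint: ingest uploaded files and/or URLs, then rebuild the
    global vector store with the selected embedding model."""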
    fileprovided = request.files.getlist('files[]')[0].filename != ''
    urlProvided = request.form.getlist('weburl')[0] != ''
    embeddingModelProvided = request.form.getlist('embeddingModelID')[0] != ''
    print("*******")
    print("File Provided:" + str(fileprovided))
    print("URL Provided:" + str(urlProvided))
    print("Embedding Model Provided:" + str(embeddingModelProvided))
    print("*******")

    print(uploads_dir)
    documents = loadKB(fileprovided, urlProvided, uploads_dir, request)
    embeddingModelID = defaultEmbeddingModelID
    if embeddingModelProvided:
        embeddingModelID = int(request.form.getlist('embeddingModelID')[0])
    global vectordb
    vectordb = createVectorDB(documents, embeddingModelID)
    #vectordb=createVectorDB(documents)
    return render_template("aisearch.html")

if __name__ == '__main__':
    app.run(host='0.0.0.0',  port=int(os.environ.get('PORT', 7860)))