Redmind committed on
Commit
e541e07
·
verified ·
1 Parent(s): e1c10c6

Upload 6 files

Browse files
Input/Goldman_LOA - Gold.pdf ADDED
Binary file (79.3 kB). View file
 
Input/Inbound.pdf ADDED
Binary file (162 kB). View file
 
Input/LOA_Sample.pdf ADDED
Binary file (5.76 kB). View file
 
Input/LOA_Sample_new.pdf ADDED
Binary file (4.19 kB). View file
 
app.py ADDED
@@ -0,0 +1,1507 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from PIL import Image
2
+ import base64
3
+ from io import BytesIO
4
+ import os
5
+ import re
6
+ import tempfile
7
+ import wave
8
+ import requests
9
+ import gradio as gr
10
+ import time
11
+ import shutil
12
+ import json
13
+ import nltk
14
+ import mysql.connector
15
+ import fnmatch
16
+ # audio related code is not included based on Arun's input
17
+ # audio package
18
+ import speech_recognition as sr
19
+ from pydub import AudioSegment
20
+ from pydub.playback import play
21
+ # SMTP code is not included since HFSpaces doesn't support it
22
+ # email library
23
+ import smtplib, ssl
24
+ from email.mime.multipart import MIMEMultipart
25
+ from email.mime.text import MIMEText
26
+ from email.mime.base import MIMEBase
27
+ from email import encoders
28
+ # langchain
29
+ from langchain_core.prompts import ChatPromptTemplate
30
+ from langchain_core.output_parsers import StrOutputParser
31
+ from langchain_core.runnables import RunnableSequence, RunnableLambda
32
+ from langchain_openai import ChatOpenAI
33
+ from langchain_openai import OpenAIEmbeddings
34
+ from langchain_community.vectorstores import FAISS
35
+ from langchain_community.utilities import SQLDatabase
36
+ from langchain.agents import create_tool_calling_agent, AgentExecutor, Tool
37
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
38
+ from langchain.tools import StructuredTool
39
+ #from langchain.pydantic_v1 import BaseModel, Field
40
+ from pydantic import BaseModel, Field
41
+ from PyPDF2 import PdfReader
42
+ from nltk.tokenize import sent_tokenize
43
+ from datetime import datetime
44
+ from sqlalchemy import create_engine
45
+ from sqlalchemy.sql import text
46
+ import openai
47
+
48
+ # pandas
49
+ import pandas as pd
50
+ from pandasai.llm.openai import OpenAI
51
+ from pandasai import SmartDataframe
52
+ from dotenv import load_dotenv
53
+
54
+ # Load environment variables
55
+ load_dotenv()
56
+
57
+ # langfuse analytics
58
+ from langfuse.callback import CallbackHandler
59
+
60
+ # Inventory API data table
61
+ from tabulate import tabulate
62
+
63
+ #forcefully stop the agent execution
64
+ import concurrent.futures
65
+ import threading
66
+
67
+ # mailjet_rest to send email
68
+ from mailjet_rest import Client
69
+ import base64
70
+
71
+ #for PDF form filling
72
+ from PyPDFForm import FormWrapper
73
+
74
+ import os
75
+
76
+ import zipfile
77
# Get the current working directory
current_folder = os.getcwd()
#Variables Initialization
agent_executor = None          # tool-calling agent, built later by bind_llm()
vector_store1 = None           # FAISS index for user-uploaded documents (runtime tool)
texts1 = None
excel_dataframe = None
file_extension = None
total_rows = ""
docstatus = ""
sample_table = ""
#This is to define the summary of the runtime tool. This summary will be updated in prompt template and description of the new tool
run_time_tool_summary=""
user_name = ""

# Define global variables for managing the thread and current_event
# Single worker: only one agent query may run at a time (see handle_query).
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)

current_event = None
stop_event = threading.Event()

# LangFuse API keys and host settings
os.environ["LANGFUSE_PUBLIC_KEY"] = os.getenv("LANGFUSE_PUBLIC_KEY")
os.environ["LANGFUSE_SECRET_KEY"] = os.getenv("LANGFUSE_SECRET_KEY")
os.environ["LANGFUSE_HOST"] = os.getenv("LANGFUSE_HOST")

# SECURITY NOTE(review): database credentials are hard-coded in source and
# should be moved to environment variables / a secrets store, and the exposed
# password rotated. They appear unused by the visible code, which connects
# through POSTGRESQL_CONNECTION below — confirm before removing.
DB_USER = 'u852023448_redmindgpt'
DB_PASSWORD = 'redmindGpt@123'
DB_HOST = '217.21.88.10'
DB_NAME = 'u852023448_redmindgpt'

from huggingface_hub import create_branch, create_tag
from huggingface_hub import login
from huggingface_hub import HfApi
api= HfApi()
token = os.getenv("HF_TOKEN")
login(token=os.getenv("HF_TOKEN"))

# Langfuse tracing callback, passed to every chain/agent invocation below.
langfuse_handler = CallbackHandler()
langfuse_handler.auth_check() # Optional: Checks if the authentication is successful

# Sentence tokenizer data used by summarize_document().
nltk.download('punkt')

open_api_key_token = os.getenv("OPEN_AI_API")

os.environ['OPENAI_API_KEY'] = open_api_key_token
# Default document indexed at startup for the DocumentData tool.
pdf_path = "Input/Inbound.pdf"

db_uri = os.getenv("POSTGRESQL_CONNECTION")

# Database setup
db = SQLDatabase.from_uri(db_uri)

user_email = ""
warehouse_name = ""
warehouse_id = ""
# Today's date to be populated in inventory API
inventory_date = datetime.today().strftime('%Y-%m-%d')

# Inventory REST endpoints; the warehouseId placeholder in apis[1] is filled
# in at request time by inventory_report().
apis = [
    # fetch warehouse ID
    {
        "url": "http://193.203.162.39:81/nxt-wms/userWarehouse/fetchWarehouseForUserId?",
        "params": {"query": warehouse_name, "userId": 164}
    },

    # Stock summary based on warehouse id
    {
        "url": "http://193.203.162.39:81/nxt-wms/transactionHistory/stockSummary?",
        "params": {"branchId": 343, "onDate": inventory_date, "warehouseId": warehouse_id}
    }
]

# LLM setup
llm = ChatOpenAI(model="gpt-4o-mini", max_tokens=300, temperature=0.1)
# PandasAI LLM used for chart generation (is_safe=False allows code execution).
llm_chart = OpenAI(is_safe=False)
153
+
154
def get_schema(_):
    """Return the SQL schema description of the connected database.

    The unused positional argument keeps the signature compatible with
    LangChain runnables, which always pass a single input value.
    """
    return db.get_table_info()
157
+
158
+
159
def generate_sql_query(question):
    """Generate a SQL query answering *question* against the live DB schema.

    Builds a prompt from the current schema, runs it through the LLM once
    (traced via Langfuse), and returns the bare SQL text stripped of
    surrounding whitespace.
    """
    schema = get_schema(None)
    template_query_generation = """
    Schema: {schema}
    Question: {question}
    Provide a SQL query to answer the above question using the exact field names and table names specified in the schema.
    SQL Query (Please provide only the SQL statement without explanations or formatting):
    """
    prompt_query_generation = ChatPromptTemplate.from_template(template_query_generation)
    # Inject schema/question regardless of the (empty) chain input dict.
    schema_and_question = RunnableLambda(lambda _: {'schema': schema, 'question': question})
    sql_chain = RunnableSequence(
        schema_and_question,
        prompt_query_generation,
        llm.bind(stop=["SQL Query End"]),  # Adjust the stop sequence to your need
        StrOutputParser()
    )
    # Bug fix: the chain was previously invoked twice in a row (the first,
    # untraced result was discarded), doubling LLM cost and latency.
    # Invoke exactly once, with tracing.
    sql_query = sql_chain.invoke({}, config={"callbacks": [langfuse_handler]})
    return sql_query.strip()
178
+
179
+
180
def run_query(query):
    """Execute a SQL statement after stripping markdown code fences.

    LLM output often wraps SQL in ```sql fences; those are removed before
    execution. Returns the raw DB result, or None when execution fails
    (the error is logged to stdout).
    """
    clean_query = query.replace("```sql", "").replace("```", "").strip()
    print(f"Executing SQL Query: {clean_query}")
    try:
        return db.run(clean_query)
    except Exception as e:
        print(f"Error executing query: {e}")
        return None
190
+
191
+
192
# Define the database query tool
# The function that uses the above models
def database_tool(question):
    """Translate a natural-language *question* into SQL and run it."""
    generated_sql = generate_sql_query(question)
    return run_query(generated_sql)
198
+
199
+
200
def get_ASN_data(question):
    """Fetch ASN (Advanced Shipping Notice) header details for an ASN id.

    *question* is expected to be the bare ASN id (validated upstream by the
    tool description). Returns a human-readable string with the key header
    fields, or a user-facing error message on any failure.
    """
    base_url = os.getenv("ASN_API_URL")
    print(f"base_url{base_url}")
    complete_url = f"{base_url}branchMaster.id=343&transactionUid={question}&userId=164&transactionType=ASN"
    try:
        response = requests.get(complete_url)
        print(f"complete_url{complete_url}")
        print(f"response{response}")
        # Bug fix: check the HTTP status *before* parsing the body. The old
        # order called response.json() first, so a non-2xx HTML/error body
        # raised a JSON decode error that bypassed the HTTPError handler.
        response.raise_for_status()
        data = response.json()

        if 'result' in data and 'content' in data['result'] and data['result']['content']:
            content = data['result']['content'][0]
            trnHeaderAsn = content['trnHeaderAsn']
            party = content['party'][0]

            transactionUid = trnHeaderAsn['transactionUid']
            customerOrderNo = trnHeaderAsn.get('customerOrderNo', 'N/A')
            orderDate = trnHeaderAsn.get('orderDate', 'N/A')
            customerInvoiceNo = trnHeaderAsn.get('customerInvoiceNo', 'N/A')
            invoiceDate = trnHeaderAsn.get('invoiceDate', 'N/A')
            expectedReceivingDate = trnHeaderAsn['expectedReceivingDate']
            transactionStatus = trnHeaderAsn['transactionStatus']
            # Robustness: .get() also covers a missing 'shipper' key, which
            # the old party['shipper'] lookup turned into a KeyError.
            shipper = party.get('shipper')
            shipper_code = shipper['code'] if shipper else 'N/A'
            shipper_name = shipper['name'] if shipper else 'N/A'

            data = [
                ["Transaction UID", transactionUid],
                ["Customer Order No", customerOrderNo],
                ["Order Date", orderDate],
                ["Customer Invoice No", customerInvoiceNo],
                ["Invoice Date", invoiceDate],
                ["Expected Receiving Date", expectedReceivingDate],
                ["Transaction Status", transactionStatus],
                ["Shipper Code", shipper_code],
                ["Shipper Name", shipper_name]
            ]
            return f"The ASN details of {question} is {data}."
        else:
            return "ASN Details are not found. Please contact system administrator."

    except requests.exceptions.HTTPError as http_err:
        print(f"HTTP error occurred: {http_err}")
        return "Sorry, we encountered an error while processing your request. Please try after some time."
    except Exception as err:
        print(f"An error occurred: {err}")
        return "Sorry, we encountered an error while processing your request. Please try after some time."
247
+
248
def load_and_split_pdf(pdf_path):
    """Extract all text from *pdf_path* and split it into overlapping chunks.

    Returns a list of ~1000-character chunks (50-char overlap) suitable for
    embedding into a vector store.
    """
    reader = PdfReader(pdf_path)
    text = ''
    for page in reader.pages:
        # Bug fix: extract_text() returns None for image-only/empty pages;
        # the bare `text += page.extract_text()` raised TypeError then.
        text += page.extract_text() or ''
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
    texts = text_splitter.split_text(text)
    return texts
256
+
257
+
258
def create_vector_store(texts):
    """Embed *texts* with OpenAI embeddings and index them in a FAISS store."""
    return FAISS.from_texts(texts, OpenAIEmbeddings())
262
+
263
+
264
def query_vector_store(vector_store, query, config=None):
    """Return the top-5 documents in *vector_store* most similar to *query*.

    *config* is accepted for call-site compatibility (callers pass Langfuse
    callbacks) and is only logged — similarity_search does not consume it.
    """
    if config:
        print("Config passed:", config)
    matches = vector_store.similarity_search(query, k=5)
    print(f"Vector store return: {matches}")
    return matches
270
+
271
+
272
def summarize_document(docs):
    """Truncate each document to its first five sentences.

    *docs* may hold Document objects or nested lists of them; a nested list
    is flattened by joining its members' page contents. Documents of five
    sentences or fewer are kept whole. Returns one string with the
    per-document summaries separated by blank lines.
    """
    summaries = []
    for doc in docs:
        if isinstance(doc, list):
            doc_content = ' '.join([d.page_content for d in doc])
        else:
            doc_content = doc.page_content

        sentences = sent_tokenize(doc_content)
        if len(sentences) > 5:
            summaries.append(' '.join(sentences[:5]))
        else:
            summaries.append(doc_content)
    return '\n\n'.join(summaries)
287
+
288
+
289
# Build the default document index once at import time from the bundled PDF
# (Input/Inbound.pdf); this backs the DocumentData tool.
texts = load_and_split_pdf(pdf_path)
vector_store = create_vector_store(texts)
291
+
292
def document_data_tool_runtime(question):
    """Answer *question* from the user-uploaded document index (vector_store1)."""
    print(f"Document data runtime tool enter: {question} with {vector_store1}")
    return query_vector_store(vector_store1, question, config={"callbacks": [langfuse_handler]})
296
+
297
def document_data_tool(question):
    """Answer *question* from the default WMS document index built at startup."""
    print(f"Document data tool enter: {question}")
    return query_vector_store(vector_store, question, config={"callbacks": [langfuse_handler]})
303
+
304
# mailjet API since SMTP is not supported HF spaces
def send_email_with_attachment_mailjet(recipient_email, subject, body, attach_img_base64=None):
    """Send *body* to *recipient_email* through the Mailjet v3.1 API.

    *attach_img_base64* is a base64-encoded PNG attached as
    'inventory_report.png'. Credentials come from MAILJET_API_KEY /
    MAILJET_API_SECRET. Success/failure is only logged to stdout; nothing
    is returned or raised to the caller.
    """
    api_key = os.getenv("MAILJET_API_KEY")
    api_secret = os.getenv("MAILJET_API_SECRET")

    # Initialize the Mailjet client
    mailjet = Client(auth=(api_key, api_secret), version='v3.1')

    # Define the email details with an attachment
    data = {
        'Messages': [
            {
                "From": {
                    "Email": "[email protected]",
                    "Name": "Redmind Technologies"
                },
                "To": [
                    {
                        "Email": recipient_email,
                        "Name": ""
                    }
                ],
                "Subject": subject,
                "TextPart": body,

                "CustomID": "AppGettingStartedTest",
                "Attachments": [
                    {
                        "ContentType": "image/png",  # Replace with the correct MIME type of your image
                        "Filename": "inventory_report.png",  # Name of the image as it will appear in the email
                        "Base64Content": attach_img_base64  # Base64-encoded image content
                    }
                ]

            }
        ]
    }

    # Send the email
    result = mailjet.send.create(data=data)

    # Check if the email was sent successfully
    if result.status_code == 200:
        print("Email sent successfully with attachment!")
    else:
        print(f"Failed to send email. Status code: {result.status_code}")
        print(result.json())
351
+
352
+
353
#smtp lib
def send_email_with_attachment(recipient_email, subject, body, attachment_path):
    """Send *body* to *recipient_email* via Gmail SMTP with one file attached.

    Credentials come from EMAIL_SENDER / EMAIL_PASSWORD env vars. Any error
    is logged and swallowed (best-effort delivery, as in the Mailjet variant).
    Returns None. NOTE: unused on HF Spaces, where SMTP is blocked — the
    Mailjet function above is used instead.
    """
    try:
        sender_email = os.getenv("EMAIL_SENDER")
        sender_password = os.getenv("EMAIL_PASSWORD")
        # Create a multipart message
        msg = MIMEMultipart()
        msg['From'] = sender_email
        msg['To'] = recipient_email
        msg['Subject'] = subject
        # Attach the body with the msg instance
        msg.attach(MIMEText(body, 'plain'))

        # Fix: the attachment file was opened and never closed (handle leak);
        # a context manager guarantees it is released even on error.
        part = MIMEBase('application', 'octet-stream')
        with open(attachment_path, "rb") as attachment:
            part.set_payload(attachment.read())

        # Encode into base64
        encoders.encode_base64(part)
        part.add_header('Content-Disposition', f"attachment; filename= {attachment_path}")

        # Attach the instance 'part' to instance 'msg'
        msg.attach(part)

        server = smtplib.SMTP('smtp.gmail.com', 587)
        server.starttls()
        server.login(sender_email, sender_password)
        server.sendmail(sender_email, recipient_email, msg.as_string())
        server.quit()

    except Exception as error:
        print(f"An error occurred: {error}")
393
+
394
+
395
def make_api_request(url, params):
    """Generic function to make API GET requests and return JSON data.

    Returns the parsed JSON payload on success. On any failure the error is
    printed and the function falls through, implicitly returning None —
    callers (e.g. inventory_report) rely on that falsy result.
    """
    try:
        response = requests.get(url, params=params)
        response.raise_for_status()  # Raises an HTTPError if the response was an error
        return response.json()  # Return the parsed JSON data
    except requests.exceptions.HTTPError as http_err:
        print(f"HTTP error occurred: {http_err}")
    except Exception as err:
        print(f"An error occurred: {err}")
405
+
406
+
407
def inventory_report(question):
    """Generate an inventory chart for the warehouse named in *question*.

    Expected input format (built by the agent): 'warehouse name: user
    question[: email]'. Resolves the warehouse id via the first API, pulls
    the stock summary via the second, builds a DataFrame and hands it to the
    PandasAI charting LLM. Returns the chart response or a user-facing
    error string; returns None when the warehouse-lookup API itself fails.
    """
    # Split the question to extract warehouse name, user question, and optional email
    if question.count(":") > 0:
        parts = question.split(":", 2)
        warehouse_name = parts[0].strip()
        user_question = parts[1].strip()
        user_email = parts[2].strip() if len(parts) > 2 else None
        print(f"Warehouse: {warehouse_name}, Email: {user_email}, Question: {user_question}")
    else:
        return "warehouse name not found"

    data = make_api_request(apis[0]["url"], apis[0]["params"])
    print(data)
    if data:
        print(f"warehouse name: {warehouse_name}")
        # Extract the internal id of the warehouse whose wareHouseId matches the given name.
        warehouse_id = next((item['id'] for item in data['result'] if item['wareHouseId'] == warehouse_name), None)
        print(f"warehouse_id:{warehouse_id}")
        if warehouse_id:
            # Update the warehouseId placeholder in every API definition that has one.
            for api in apis:
                if isinstance(api, dict) and "params" in api:
                    if "warehouseId" in api["params"]:
                        api["params"]["warehouseId"] = warehouse_id

            print(apis)
            data1 = make_api_request(apis[1]["url"], apis[1]["params"])
            print(data1)
            if data1:
                headers = ["S.No", "Warehouse Code", "Warehouse Name", "Customer Code", "Customer Name", "Item Code", "Item Name",
                           "Currency", "EAN", "UOM", "Quantity", "Gross Weight", "Volume", "Total Value"]
                table_data = []

                # Check if 'content' exists and is a non-empty list
                if data1['result'].get('content', []):
                    # Bug fix: iterate the 'content' list itself. The old code
                    # enumerated data1['result'] — a dict — which yields its
                    # KEYS (strings), so item['warehouse'] could never work.
                    for index, item in enumerate(data1['result']['content'], start=1):
                        row = [
                            index,  # Serial number
                            item['warehouse']['code'],
                            item['warehouse']['name'],
                            item['customer']['code'],
                            item['customer']['name'],
                            item['skuMaster']['code'],
                            item['skuMaster']['name'],
                            item['currency']['code'],
                            item['eanUpc'],
                            item['uom']['code'],
                            item['totalQty'],
                            item['grossWeight'],
                            item['volume'],
                            item['totalValue']
                        ]
                        table_data.append(row)
                else:
                    print("No data available in 'content'.")
                    return "There are no inventory details for the warehouse you have given."

                # Convert to pandas DataFrame and let the charting LLM answer.
                df = pd.DataFrame(table_data, columns=headers)
                print(df)
                chart_link = chat_with_llm(df, question)
                return chart_link
            else:
                return "There are no inventory details for the warehouse you have given."
        else:
            return "Please provide a warehouse name available in the database."
476
+
477
def chat_with_llm(df, question):
    """Ask the PandasAI charting LLM *question* about dataframe *df*."""
    smart_df = SmartDataframe(df, config={"llm": llm_chart})
    return smart_df.chat(question)
481
+
482
def bind_llm(llm, tools, prompt_template):
    """Wire *llm* and *tools* into a verbose tool-calling AgentExecutor."""
    bound_llm = llm.bind()
    prompt = ChatPromptTemplate.from_template(prompt_template)
    agent = create_tool_calling_agent(bound_llm, tools, prompt)
    return AgentExecutor(agent=agent, tools=tools, verbose=True)
487
+
488
# Define input and output models using Pydantic
class QueryInput(BaseModel):
    """Single-argument input schema shared by all StructuredTools below."""
    question: str = Field(
        description="The question to be answered by appropriate tool. Please follow the instructions. For API tool, do not send the question as it is. Please send the ASN id.")# Invoke datavisulaization tool by processing the user question and send two inputs to the tool. One input will be the warehouse name and another input to the tool will be the entire user_question itself. Please join those two strings and send them as a single input string with ':' as delimiter")
    # config: dict = Field(default={}, description="Optional configuration for the database query.")
493
+
494
+
495
# Define the output model for database queries
class QueryOutput(BaseModel):
    """Output schema shared by all StructuredTools below."""
    result: str = Field(...,
                        description="Display the answer based on the prompts given in each tool. For dataVisualization tool, it sends a image file as output. Please give the image file path only to the gr.Image. For DocumentData tool, Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points.")
499
+
500
+
501
# Wrap the function with StructuredTool for better parameter handling
# Four tools: ASN API lookup, document QA, SQL generation, and chart generation.
tools = [

    StructuredTool(
        func=get_ASN_data,
        name="APIData",
        args_schema=QueryInput,
        output_schema=QueryOutput,
        description="Tool to get details of ASN api. ASN id will be in the input with the format of first three letters as ASN and it is followed by 11 digit numeral. Pass only the id as input. Do not send the complete user question to the tool. If there are any other queries related to ASN without ASN id, please use the document tool."
    ),
    StructuredTool(
        func=document_data_tool,
        name="DocumentData",
        args_schema=QueryInput,
        output_schema=QueryOutput,
        description="You are an AI assistant trained to help with warehouse management questions based on a detailed document about our WMS. The document covers various processes such as ASN handling, purchase orders, cross docking, appointment scheduling for shipments, and yard management. Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points. "
    ),
    StructuredTool(
        func=database_tool,
        name="DatabaseQuery",
        args_schema=QueryInput,
        output_schema=QueryOutput,
        description="Tool to query the database based on structured input."
    ),
    StructuredTool(
        func=inventory_report,
        name="dataVisualization",
        args_schema=QueryInput,
        output_schema=QueryOutput,
        description=""" Tool to generate a visual chart output for a particular warehouse based on the provided question.
        This tool processes the user question to identify the warehouse name and the specific request. If the user specifies
        an email, include the email in the input. The input format should be: 'warehouse name: user question: email (if any)'.
        The tool generates the requested chart and sends it to the provided email if specified.
        Examples:
        1. Question without email, without warehouse: "Analyze item name and quantity in a bar chart in warehouse"
        Input to tool: "I want to analyze item name and quantity in a bar chart"
        2. Question with email: "Analyze item name and quantity in a bar chart in warehouse Allcargo Logistics and send email to [email protected]"
        Input to tool: "Allcargo Logistics: I want to analyze item name and quantity in a bar chart: [email protected]"
        """
    )
]

# System prompt for the agent. Doubled braces ({{agent_scratchpad}}, {{input}})
# survive this f-string as the literal {agent_scratchpad}/{input} placeholders
# that ChatPromptTemplate fills in at invocation time.
prompt_template = f"""You are an assistant that helps with database queries, API information, and document retrieval. Your job is to provide clear, complete, and detailed responses to the following queries. Please give the output response in an user friendly way and remove "**" from the response. For example, document related queries can be answered in a clear and concise way with numbering and not as a paragraph. Database related queries should be answered with proper indentation and use numbering for the rows. ASN id related queries should be answered with proper indentation and use numbering for the rows.

For ASN id related questions, if the user specifies an ASN id, provide the information from the api tool. Pass only the id as input to the tool. Do not pass the entire question as input to the tool. If the details are not found, say it in a clear and concise way.
You are an AI assistant trained to help with warehouse management questions based on a detailed document about our WMS. The document covers various processes such as ASN handling, purchase orders, cross docking, appointment scheduling for shipments, and yard management. Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points. When answering, focus on providing actionable insights and clear explanations related to the specific query. Please remove "**" from the response.
For SQL database-related questions, only use the fields available in the warehouse schema, including tables such as customer_master, efs_company_master, efs_group_company_master, efs_region_master, party_address_detail, wms_warehouse_master.
For datavisualization, user will ask for inventory report of a particular warehouse. Your job is to return the image path to chat interface and display the image as output.
If the proper response is generated, return the response back. Do not iterate again and again.
{{agent_scratchpad}}
Here is the information you need to process:
Question: {{input}}"""
agent_executor = bind_llm(llm,tools,prompt_template)
554
+
555
def ensure_temp_chart_dir():
    """Create the chart output directory (env IMAGE_MAIN_URL) if missing."""
    temp_chart_dir = os.getenv("IMAGE_MAIN_URL")
    if not temp_chart_dir:
        # Guard: when the env var is unset, os.path.exists(None)/makedirs(None)
        # raised TypeError in the original code. Skip instead.
        print("IMAGE_MAIN_URL is not set; skipping chart directory creation.")
        return
    # exist_ok avoids the check-then-create race of the original exists() test.
    os.makedirs(temp_chart_dir, exist_ok=True)
559
+
560
def clean_gradio_tmp_dir():
    """Best-effort removal of the Gradio temp dir (env IMAGE_GRADIO_PATH)."""
    tmp_dir = os.getenv("IMAGE_GRADIO_PATH")
    if not tmp_dir:
        # Guard: os.path.exists(None) raised TypeError when the env var was unset.
        return
    if os.path.exists(tmp_dir):
        try:
            shutil.rmtree(tmp_dir)
        except Exception as e:
            # Cleanup is best-effort: log and continue rather than crash the app.
            print(f"Error cleaning up /tmp/gradio/ directory: {e}")
567
+
568
+
569
# Define the interface function
# Cap on agent retries when a response contains "invalid" (see answer_question_thread).
max_iterations = 5
iterations = 0
572
+
573
+
574
def handle_query(user_question, chatbot, audio=None):

    """
    Function to handle the processing of user input with `AgentExecutor.invoke()`.

    Submits the query to the single-worker executor and polls once a second
    until it finishes or stop_event is set. Appends (question, answer) to the
    Gradio *chatbot* history and returns a gr.update for it. *audio* is
    accepted for interface compatibility but unused here.
    """
    global current_event, stop_event

    # Clear previous stop event and current_event
    stop_event.clear()

    # Only one query may run at a time (executor has max_workers=1).
    if current_event and not current_event.done():
        chatbot.append(("","A query is already being processed. Please stop it before starting a new one."))
        return gr.update(value=chatbot)

    # Start the processing in a new thread
    current_event = executor.submit(answer_question_thread, user_question, chatbot)

    # Periodically check if current_event is done
    while not current_event.done():
        if stop_event.is_set():
            #current_event.task.cancel() # Attempt to cancel the current_event
            # NOTE(review): set_result() on a Future that the executor is
            # already running raises InvalidStateError — confirm intent.
            current_event.set_result((user_question, "Sorry, we encountered an error while processing your request. Please try after some time."))
            current_event.cancel() # Attempt to cancel the current_event
            # NOTE(review): shutting down the module-level executor means no
            # further queries can ever be submitted in this process.
            executor.shutdown(wait=False) # Shutdown the executor
            print("Current event cancelled")
            print(current_event.cancelled())

            chatbot.append((user_question, "Sorry, we encountered an error while processing your request. Please try after some time."))
            return gr.update(value=chatbot)

        time.sleep(1) # Wait for 1 second before checking again

    if current_event.cancelled():
        chatbot.append((user_question, "Sorry, we encountered an error while processing your request. Please try after some time."))
        return gr.update(value=chatbot)
    else:
        try:
            user_question1, response_text1 = current_event.result() # Get the result of the completed current_event
            print("output")
            print(user_question1)
            print(response_text1)
            chatbot.append((user_question1, response_text1))
            return gr.update(value=chatbot)
        except Exception as e:
            print(f"Error occurred: {e}")
            chatbot.append((user_question, "Sorry, we encountered an error while processing your request. Please try after some time."))
            return gr.update(value=chatbot)
621
+
622
+
623
def stop_processing(chatbot):
    """
    Stops the current processing if it's running.

    Signals stop_event (observed by handle_query's polling loop), attempts to
    cancel the pending Future, records an error row in the chat history, and
    returns a gr.update for the chatbot component.
    """
    global current_event, stop_event
    if current_event and not current_event.done():
        stop_event.set() # Signal the process to stop
        current_event.cancel() # Attempt to cancel the current_event
        chatbot.append(("Sorry, we encountered an error while processing your request. Please try after some time.",""))
    return gr.update(value=chatbot)
633
+
634
# This function is for agent executor invoke with the option of stop
def answer_question_thread(user_question, chatbot, audio=None):
    """Run the agent on *user_question* and return (question, response_text).

    Executed on the worker thread by handle_query. Retries up to
    max_iterations while the response contains "invalid". When the response
    references the chart image path (IMAGE_PATH), the image is inlined into
    the response as a base64 <img> tag and, if the question contains an
    email address, also mailed via Mailjet. *audio* is unused (speech
    handling is commented out below).
    """

    global iterations
    iterations = 0
    # Ensure the temporary chart directory exists
    # ensure_temp_chart_dir()
    # Clean the /tmp/gradio/ directory
    # clean_gradio_tmp_dir()
    # Handle audio input if provided
    # (Disabled speech-to-text path kept as a string literal, i.e. a no-op.)
    """
    if audio is not None:
        sample_rate, audio_data = audio
        audio_segment = AudioSegment(
            audio_data.tobytes(),
            frame_rate=sample_rate,
            sample_width=audio_data.dtype.itemsize,
            channels=1
        )
        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio_file:
            audio_segment.export(temp_audio_file.name, format="wav")
            temp_audio_file_path = temp_audio_file.name

        recognizer = sr.Recognizer()
        with sr.AudioFile(temp_audio_file_path) as source:
            audio_content = recognizer.record(source)
            try:
                user_question = recognizer.recognize_google(audio_content)
            except sr.UnknownValueError:
                user_question = "Sorry, I could not understand the audio."
            except sr.RequestError:
                user_question = "Could not request results from Google Speech Recognition service."
    """

    while iterations < max_iterations:

        response = agent_executor.invoke({"input": user_question}, config={"callbacks": [langfuse_handler]}, early_stopping_method="generate")
        #create_file_HF()
        if isinstance(response, dict):
            response_text = response.get("output", "")
        else:
            response_text = response
        # Retry only when the agent flagged its own output as invalid.
        if "invalid" not in response_text.lower():
            break
        iterations += 1

    if iterations == max_iterations:
        return user_question , "Sorry, I couldn't complete your request" #"The agent could not generate a valid response within the iteration limit."

    # Chart responses reference the generated image path; inline it as base64 HTML.
    if os.getenv("IMAGE_PATH") in response_text:
        # Open the image file
        img = Image.open(os.getenv("IMAGE_PATH"))

        # Convert the PIL Image to a base64 encoded string
        buffered = BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")

        # NOTE(review): `img` is rebound from a PIL Image to an HTML string here.
        img = f'<img src="data:image/png;base64,{img_str}" style="width:450px; height:400px;">'

        response_text = response.get("output", "").split(".")[0] + img

        email_pattern = r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}'
        match = re.search(email_pattern, user_question)
        if match:
            user_email = match.group() # Return the matched email

        # email send
        # NOTE(review): user_email falls back to the module-level global when no
        # email matched — presumably intended; verify.
        if len(user_email) > 0:
            # Send email with the chart image attached
            send_email_with_attachment_mailjet(
                recipient_email=user_email,
                subject="Warehouse Inventory Report",
                body=response.get("output", "").split(".")[0] + ". This is an auto-generated email containing a chart created using Generative AI.",
                # attachment_path=chart_path
                attach_img_base64=img_str)


        if "send email to" in user_question:
            try:
                os.remove(img) # Clean up the temporary image file
                # NOTE(review): at this point `img` is the HTML string, not a
                # file path — os.remove(img) will raise and be swallowed below.
            except Exception as e:
                print(f"Error cleaning up image file: {e}")
            except Exception as e:
                # NOTE(review): duplicate handler for the same try is
                # unreachable; likely meant for an enclosing image-load try.
                print(f"Error loading image file: {e}")
                response_text = "Chart generation failed. Please try again."

        return user_question, response_text
    else:
        return user_question, response_text
    # response_text = response_text.replace('\n', ' ').replace('  ', ' ').strip()
    # return response_text
726
+
727
+
728
+ # without forceful stop option
729
def answer_question(user_question, chatbot, audio=None):
    """Answer a user question via the agent executor and append the result to the chat.

    Parameters:
        user_question (str): The typed question; replaced by the speech
            transcription when ``audio`` is provided.
        chatbot (list): Gradio chat history, a list of (question, answer) pairs.
        audio (tuple | None): Optional (sample_rate, numpy_data) pair from the
            microphone component.

    Returns:
        A ``gr.update`` carrying the refreshed chat history, or a plain error
        string when the agent keeps producing "invalid" answers.
    """
    global iterations
    iterations = 0

    # Transcribe microphone input (if any) with Google Speech Recognition and
    # use the transcript as the question.
    if audio is not None:
        sample_rate, audio_data = audio
        audio_segment = AudioSegment(
            audio_data.tobytes(),
            frame_rate=sample_rate,
            sample_width=audio_data.dtype.itemsize,
            channels=1,
        )
        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio_file:
            audio_segment.export(temp_audio_file.name, format="wav")
            temp_audio_file_path = temp_audio_file.name

        recognizer = sr.Recognizer()
        with sr.AudioFile(temp_audio_file_path) as source:
            audio_content = recognizer.record(source)
            try:
                user_question = recognizer.recognize_google(audio_content)
            except sr.UnknownValueError:
                user_question = "Sorry, I could not understand the audio."
            except sr.RequestError:
                user_question = "Could not request results from Google Speech Recognition service."

    # Retry the agent until it stops answering "invalid" or the limit is hit.
    response_text = ""
    while iterations < max_iterations:
        response = agent_executor.invoke(
            {"input": user_question}, config={"callbacks": [langfuse_handler]}
        )
        if isinstance(response, dict):
            response_text = response.get("output", "")
        else:
            response_text = response
        if "invalid" not in response_text.lower():
            break
        iterations += 1

    if iterations == max_iterations:
        return "The agent could not generate a valid response within the iteration limit."

    if os.getenv("IMAGE_PATH") in response_text:
        # The agent produced a chart: embed it in the chat as base64 HTML and,
        # when the question contains an e-mail address, mail it as well.
        try:
            img = Image.open(os.getenv("IMAGE_PATH"))

            # Convert the PIL Image to a base64 encoded string
            buffered = BytesIO()
            img.save(buffered, format="PNG")
            img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")

            img_html = f'<img src="data:image/png;base64,{img_str}" style="width:450px; height:400px;">'
            chatbot.append((user_question, img_html))

            # BUG FIX: user_email was previously unbound (NameError) when the
            # question contained no e-mail address.
            user_email = ""
            email_pattern = r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}'
            match = re.search(email_pattern, user_question)
            if match:
                user_email = match.group()  # Return the matched email

            if len(user_email) > 0:
                # Send email with the chart image attached
                send_email_with_attachment_mailjet(
                    recipient_email=user_email,
                    subject="Warehouse Inventory Report",
                    body=response.get("output", "").split(".")[0],
                    attachment_path=img_str,
                )

            if "send email to" in user_question:
                try:
                    # BUG FIX: remove the chart file itself; the old code passed
                    # the HTML snippet to os.remove, which always failed.
                    os.remove(os.getenv("IMAGE_PATH"))
                except Exception as e:
                    print(f"Error cleaning up image file: {e}")
        except Exception as e:
            print(f"Error loading image file: {e}")
            chatbot.append((user_question, "Chart generation failed. Please try again."))
        return gr.update(value=chatbot)
    else:
        chatbot.append((user_question, response_text))
        return gr.update(value=chatbot)
828
+
829
def submit_feedback(feedback, chatbot, request: gr.Request):
    """Persist the user's feedback for the latest exchange and hide the feedback widgets."""
    gr.Info("Thank you for your feedback.")
    last_question, last_answer = chatbot[-1]
    # Store the feedback together with the question/answer pair it refers to.
    save_feedback(request.username, last_question, last_answer, feedback)
    updated_history = chatbot + [("User feedback: " + feedback, None)]
    return updated_history, gr.update(visible=False), gr.update(visible=False)
835
+
836
+
837
# Function to connect to MySQL database
def connect_to_db():
    """Open and return a new MySQL connection using the module-level credentials."""
    connection = mysql.connector.connect(
        host=DB_HOST,
        user=DB_USER,
        password=DB_PASSWORD,
        database=DB_NAME,
    )
    return connection
845
+
846
# Function to save feedback to the database
def save_feedback(username, user_question, user_response, feedback):
    """Insert one feedback row into the user_feedback table.

    Errors are logged rather than raised so a database problem never breaks
    the chat UI.
    """
    # BUG FIX: conn/cursor must be pre-bound, otherwise the finally block
    # raised NameError whenever connect_to_db() itself failed.
    conn = None
    cursor = None
    try:
        conn = connect_to_db()
        cursor = conn.cursor()
        query = "INSERT INTO user_feedback (username, question, response, feedback) VALUES (%s, %s, %s, %s)"
        cursor.execute(query, (username, user_question, user_response, feedback))
        conn.commit()
    except mysql.connector.Error as err:
        print(f"Error: {err}")
    finally:
        if cursor:
            cursor.close()
        if conn:
            conn.close()
861
+
862
def handle_dislike(data: gr.LikeData):
    """Show the feedback widgets on a downvote; hide them on an upvote."""
    if data.liked:
        print("upvote")
        return gr.update(visible=False), gr.update(visible=False)
    # Downvote: prompt the user for written feedback.
    print("downvote")
    gr.Info("Please enter your feedback.")
    return gr.update(visible=True), gr.update(visible=True)
870
+
871
# greet with user name on successful login
def update_message(request: gr.Request):
    """Remember the logged-in user's name and return an HTML greeting header."""
    global user_name
    user_name = request.username
    # BUG FIX: the heading was opened with <h2> but closed with </h4>.
    return f"<h2 style=' font-family: Calibri;'>Welcome, {request.username}</h2>"
876
+
877
# Function to generate a 50-word summary of the newly uploaded doc using OpenAI
def generate_summary(text):
    """Ask OpenAI for a concise title and ~50-word summary of *text*.

    Returns:
        tuple[str, str]: (title, summary) extracted from the model response.
    """
    prompt = (
        "You are an AI that helps with document analysis. Please provide a concise title and a summary of the following document. "
        "The summary should be about 50 words and include key details that can help answer questions accurately:\n\n"
        f"{text}\n\nTitle : Summary"
    )
    # Call the OpenAI API to generate a summary
    response = openai.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": prompt,
            }
        ],
        model="gpt-4o-mini",
    )
    response_content = response.choices[0].message.content
    lines = response_content.split("\n")

    # BUG FIX: the old code indexed lines[0] and lines[2] unconditionally and
    # raised IndexError whenever the model used a different line layout. Keep
    # the positional extraction as a fallback, but prefer scanning for the
    # labelled lines.
    title = lines[0].split("**Title:**")[-1].strip()
    summary = (
        lines[2].split("**Summary:**")[-1].strip()
        if len(lines) > 2
        else response_content.strip()
    )
    for line in lines:
        if "**Title:**" in line:
            title = line.split("**Title:**")[-1].strip()
        elif "**Summary:**" in line:
            summary = line.split("**Summary:**")[-1].strip()

    return title, summary
906
# function to handle file upload; decides whether an Excel or a document was
# uploaded and creates the matching runtime tool with appropriate prompts.
def upload_file(filepath):
    """Ingest an uploaded PDF or Excel file and register a runtime tool for it.

    Parameters:
        filepath (str): Path of the uploaded file.

    Returns:
        str: Status message for the UI.
    """
    global vector_store1, file_extension

    # Get the file extension
    _, file_extension = os.path.splitext(filepath)

    if file_extension == ".pdf":
        texts1 = load_and_split_pdf(filepath)
        vector_store1 = create_vector_store(texts1)
        # Generate a title and ~50-word summary to drive the tool description.
        title, summary = generate_summary(texts1)
        success_msg = add_to_redmindgpt(title, summary)
    elif file_extension == ".xlsx":
        title, prompt = process_excel(filepath)
        success_msg = add_to_redmindgpt(title, prompt)
    else:
        # BUG FIX: any other extension previously left success_msg unbound and
        # raised NameError on return.
        success_msg = f"Unsupported file type: {file_extension}. Please upload a .pdf or .xlsx file."
    return success_msg
926
+
927
def generate_example_questions(sheet_name, column_headers):
    """Build example natural-language questions for an Excel sheet.

    Args:
        sheet_name (str): The name of the Excel sheet.
        column_headers (list): List of column headers from the sheet.

    Returns:
        list: Questions matching the recognised columns, in a fixed order.
    """
    headers = set(column_headers)
    questions = []

    if headers & {'Product Name', 'Product'}:
        questions.append(f"What is the total sales for a specific product in {sheet_name}?")
    if headers & {'Sales Amount', 'Amount'}:
        questions.append(f"What is the total sales amount for a specific region in {sheet_name}?")
    if 'Region' in headers:
        questions.append(f"Which region had the highest sales in {sheet_name}?")
    if 'Date' in headers:
        questions.append(f"What were the total sales during a specific month in {sheet_name}?")
    if 'Price' in headers:
        questions.append(f"What is the price of a specific product in {sheet_name}?")
    # Any header matching the 'Employee*' glob triggers the broker question.
    if any(fnmatch.fnmatch(header, 'Employee*') for header in column_headers):
        questions.append("What are the details of the distinct broker names?")

    return questions
960
+
961
def generate_prompt_from_excel_file(df_dict):
    """
    Generates a prompt describing an Excel workbook with multiple sheets.

    Args:
        df_dict (dict): Mapping of sheet name -> pandas DataFrame, as returned
            by ``pd.read_excel(..., sheet_name=None)``.
            (DOC FIX: the old docstring documented a non-existent
            ``excel_file_path`` parameter.)

    Returns:
        tuple[str, str]: A fixed title ("Excel data") and a detailed prompt
        listing every sheet's column headers plus query/output instructions.
        (DOC FIX: the old docstring claimed only the prompt was returned.)
    """

    # Initialize prompt with basic structure
    prompt = "You have been provided with an Excel file containing data in several sheets.\n"

    # Describe every sheet by its column headers.
    # (Removed dead work: sample rows and example questions were computed here
    # but never used — their consumers were commented out.)
    for sheet_name, sheet_df in df_dict.items():
        column_headers = list(sheet_df.columns)
        prompt += f"For the sheet '{sheet_name}', the column headers are:"
        prompt += f"{', '.join(column_headers)}\n\n"

    # Instructions for plain tabular queries.
    prompt += f"- Query: A natural language question (e.g., List all the employees whose names start with 'A'). The question should be sent as 'What are the employee details with name starts with a'."
    prompt += f"""Output : {docstatus}. Here is the sample table:
    {sample_table}.
    """

    # Instructions for queries that also request LOA document generation.
    prompt += f"- Query: A natural language question with request to create LOA document (e.g., can you create LOA document for all the employees with broker name XXXX). The question should be sent as 'What are the employee details with broker name XXXX : LOA document'."
    prompt += f"""Output: {docstatus}. Here is the sample table:
    {sample_table}.
    If there is any error, please display the message returned by the function as response. """

    return "Excel data", prompt
1011
+
1012
# Function to handle "Add to RedMindGPT" button click
def add_to_redmindgpt(title, summary):
    """
    Adds a document or Excel file to the RedmindGPT system and configures the appropriate runtime tool for handling related queries.

    Parameters:
        title (str): The title of the document or Excel file.
        summary (str): A brief summary (PDF) or generated prompt (Excel).

    Returns:
        str: A message indicating whether the file has been added successfully.
        NOTE(review): implicitly returns None for any other extension —
        confirm callers tolerate that.

    Behavior:
        - If the file extension is ".pdf", it sets up a runtime tool for handling document-related queries.
        - If the file extension is ".xlsx", it sets up a runtime tool for handling Excel data-related queries.
        - Configures the prompt template for the agent executor based on the file type.
        - Adds the configured runtime tool to the list of tools used by the agent executor.
    """

    global agent_executor, file_extension

    if file_extension == ".pdf":
        # This summary line is embedded in the agent prompt so the agent knows
        # when to route questions to the runtime document tool.
        run_time_tool_summary = f"For {title} document related questions, Please refer runtimeDocumentData tool. {summary}. Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points."

        run_time_tool = StructuredTool(
            func=document_data_tool_runtime,
            name="runtimeDocumentData",
            args_schema=QueryInput,
            output_schema=QueryOutput,
            description=f"You are an AI assistant trained to help with the questions based on the uploaded document {title}. {summary}. Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points."
        )

        # Add the new tool to the beginning so it wins over the static tools.
        tools.insert(0, run_time_tool)

        prompt_template = f"""You are an assistant that helps with database queries, API information, and document retrieval. Your job is to provide clear, complete, and detailed responses to the following queries. Please give the output response in an user friendly way and remove "**" from the response. For example, document related queries can be answered in a clear and concise way with numbering and not as a paragraph. Database related queries should be answered with proper indentation and use numbering for the rows. ASN id related queries should be answered with proper indentation and use numbering for the rows.
        {run_time_tool_summary}
        For ASN id related questions, if the user specifies an ASN id, provide the information from the api tool. Pass only the id as input to the tool. Do not pass the entire question as input to the tool. If the details are not found, say it in a clear and concise way.
        You are an AI assistant trained to help with warehouse management questions based on a detailed document about our WMS. The document covers various processes such as ASN handling, purchase orders, cross docking, appointment scheduling for shipments, and yard management. Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points. When answering, focus on providing actionable insights and clear explanations related to the specific query. Please remove "**" from the response.
        For SQL database-related questions, only use the fields available in the warehouse schema, including tables such as customer_master, efs_company_master, efs_group_company_master, efs_region_master, party_address_detail, wms_warehouse_master.
        For datavisualization, user will ask for inventory report of a particular warehouse. Your job is to return the image path to chat interface and display the image as output.

        {{agent_scratchpad}}
        Here is the information you need to process:
        Question: {{input}}"""
        # Rebind the agent so the new tool and prompt take effect immediately.
        agent_executor = bind_llm(llm,tools,prompt_template)
        return f"File has been added successfully."
    elif file_extension == ".xlsx":
        run_time_excel_tool_summary = f"For {title} related questions, Please refer runtimeExcelData tool. {summary}. Display the response only in the format as mentioned in the tool description. "

        run_time_excel_tool = StructuredTool(
            func=chat_with_excel_data_dataframe,
            name="runtimeExcelData",
            args_schema=QueryInput,
            output_schema=QueryOutput,
            description=f"""You are an AI assistant trained to handle Excel data and return meaningful insights. If user query is given with an option of generating the document with the result set dataframe, pass two inputs to the tool. First input is the user query and the second input will be the phrase "create document". display the response only in the below format.
            {docstatus}. Here is the sample data:
            {sample_table}.
            If there is any error, please display the message returned by the function as response. """
        )

        # Add the new tool to the beginning so it wins over the static tools.
        tools.insert(0, run_time_excel_tool)

        prompt_template = f"""You are an assistant that helps with database queries, API information, and document retrieval. Your job is to provide clear, complete, and detailed responses to the following queries. Please give the output response in an user friendly way and remove "**" from the response. For example, document related queries can be answered in a clear and concise way with numbering and not as a paragraph. Database related queries should be answered with proper indentation and use numbering for the rows. ASN id related queries should be answered with proper indentation and use numbering for the rows.
        {run_time_excel_tool_summary}
        For ASN id related questions, if the user specifies an ASN id, provide the information from the api tool. Pass only the id as input to the tool. Do not pass the entire question as input to the tool. If the details are not found, say it in a clear and concise way.
        You are an AI assistant trained to help with warehouse management questions based on a detailed document about our WMS. The document covers various processes such as ASN handling, purchase orders, cross docking, appointment scheduling for shipments, and yard management. Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points. When answering, focus on providing actionable insights and clear explanations related to the specific query. Please remove "**" from the response.
        For SQL database-related questions, only use the fields available in the warehouse schema, including tables such as customer_master, efs_company_master, efs_group_company_master, efs_region_master, party_address_detail, wms_warehouse_master.
        For datavisualization, user will ask for inventory report of a particular warehouse. Your job is to return the image path to chat interface and display the image as output.

        {{agent_scratchpad}}
        Here is the information you need to process:
        Question: {{input}}"""
        # Rebind the agent so the new tool and prompt take effect immediately.
        agent_executor = bind_llm(llm,tools,prompt_template)
        return f"File has been added successfully."
1085
+
1086
def process_excel(file):
    """Read an uploaded Excel workbook and derive a descriptive prompt from it.

    Args:
        file: Gradio file object (``file.name`` is the path on disk), or None.

    Returns:
        tuple[str, str]: (title, prompt), or an error-message pair when no
        file was supplied.
    """
    global excel_dataframe
    # Nothing was uploaded: return an explanatory message instead of a prompt.
    if file is None:
        return "Excel file", "Your excel does not have values. Please upload a different file."
    else:
        # Read every sheet into a dict of DataFrames ('file.name' is the real path).
        excel_dataframe = pd.read_excel(file.name, sheet_name=None)

        # Build the title and tool prompt from the sheet/column structure.
        title, prompt = generate_prompt_from_excel_file(excel_dataframe)
        # NOTE(review): this second read keeps only the FIRST sheet as the
        # cached dataframe used for chatting — confirm multi-sheet workbooks
        # are really meant to be reduced to their first sheet here.
        excel_dataframe = pd.read_excel(file.name)

        return title, prompt  # Return the success message.
1100
+
1101
def chat_with_excel_data(question):
    """Run *question* against the cached Excel dataframe via the LLM helper and return the result."""
    global excel_dataframe
    result = chat_with_llm(excel_dataframe, question)
    # Echo the raw result for server-side debugging.
    print(result)
    return result
1106
+
1107
def chat_with_excel_data_dataframe(question):
    """Answer an Excel question, coercing non-DataFrame answers into a frame,
    then delegate preview/export handling to handle_large_dataset."""
    is_dataframe = True
    print(f"question for excel data frame : {question}")

    # A question mentioning "LOA" also requests LOA PDF generation.
    create_document = "LOA" in question
    print(f"create document : {create_document}")

    result = chat_with_excel_data(question)

    if not isinstance(result, pd.DataFrame):
        print("The result is not a DataFrame.")
        # Answers of the form "label: a, b, c" become a one-column frame.
        if ":" in result:
            is_dataframe = False
            listing = result.split(":", 1)[1]
            result = pd.DataFrame(listing.split(","), columns=["Result"])

    # Format the (possibly coerced) result and build download links.
    return handle_large_dataset(result, create_document, is_dataframe)
1142
+
1143
def save_file_to_hostinger(save_file_path):
    """Upload *save_file_path* to the Hostinger FTP server as /RedMindGPT/output.xlsx.

    Credentials can be overridden via the FTP_HOST / FTP_USER / FTP_PASS
    environment variables; the historical defaults are kept for backward
    compatibility.
    """
    from ftplib import FTP

    # SECURITY: these credentials were hard-coded in source control. Prefer
    # environment variables; rotate the password and drop the defaults.
    ftp_host = os.getenv("FTP_HOST", "ftp.redmindtechnologies.com")
    ftp_user = os.getenv("FTP_USER", "u852023448.redmindGpt")
    ftp_pass = os.getenv("FTP_PASS", "RedMind@505")
    remote_file_path = "/RedMindGPT/output.xlsx"

    # BUG FIX: use the FTP connection as a context manager so it is closed
    # even when the upload raises (the old code leaked the connection on error).
    with FTP(ftp_host) as ftp:
        ftp.login(ftp_user, ftp_pass)

        # Open the local file and upload it to the server
        with open(save_file_path, "rb") as file:
            ftp.storbinary(f"STOR {remote_file_path}", file)

        print(f"File {save_file_path} uploaded to {remote_file_path} on server.")
1163
+
1164
def handle_large_dataset(df, create_document, isDataFrame):
    """Render a small preview of *df*, upload the full dataset to the HF Space,
    and optionally generate + zip LOA PDFs for every row.

    Args:
        df (pd.DataFrame): Result set; may be a coerced one-column frame.
        create_document (bool): When True, one LOA PDF is created per row and
            the PDFs are zipped and uploaded alongside the Excel export.
        isDataFrame (bool): True when *df* came straight from the LLM as a
            DataFrame; controls which length the SNo column uses.

    Returns:
        tuple[str, str]: (markdown preview table, HTML download status), or a
        plain refusal string when the dataset has 4000+ rows.
    """

    total_rows = len(df)
    print(f"Total rows: {total_rows}")

    if total_rows < 4000:

        # 1. Build a 3-row preview, dropping the original first column.
        if len(df.columns) > 1:
            # NOTE(review): assumes these exact columns exist in df — a result
            # set without them raises KeyError; confirm upstream guarantees.
            required_columns = ['BROKER', 'ACCOUNT NUMBER', 'EMPLOYEE NAME', 'ACCOUNT NAME', 'ACCOUNT ID']
            limited_data11 = df.head(3)
            limited_data = limited_data11[required_columns]
            limited_data_without_first_column = limited_data.iloc[:, 1:]
        else:
            limited_data = df.head(20)
            limited_data_without_first_column = limited_data

        # 2. Add SNo (serial number) as the first column, starting from 1.
        if isDataFrame:
            limited_data_without_first_column.insert(0, 'SNo', range(1, len(limited_data_without_first_column) + 1))
        else:
            limited_data_without_first_column.insert(0, 'SNo', range(1, len(limited_data) + 1))

        # 3. Save the FULL dataset to a downloadable Excel file.
        file_path = "output_data.xlsx"
        df.to_excel(file_path, index=False)

        global user_name
        # Upload under "<user>/<YYYY-MM-DD HH:MM>" so each run gets its own folder.
        now = datetime.now()
        formatted_date_time = now.strftime("%Y-%m-%d %H:%M")
        print(formatted_date_time)
        directory = user_name + "/" + formatted_date_time
        create_file_HF(file_path, directory, False)
        dataset_link = get_download_link(directory, file_path)
        docstatus = f"""These are the sample data. Download the complete dataset <a href="{dataset_link}" download> here.</a>."""
        print("File created in the home path")

        # 4. Markdown preview of the trimmed sample for the chat window.
        sample_table = limited_data_without_first_column.to_markdown()
        if create_document:
            # Generate one LOA PDF per row, then zip and upload the batch.
            for index, row in df.iterrows():
                create_pdf(row['COMPANY'], row['EMPLOYEE NAME'], row['ACCOUNT NUMBER'], directory)
            create_document = False
            # NOTE(review): pdf_dir is assigned but unused; zip_files_in_folder
            # always collects PDFs from the current working directory.
            pdf_dir = "/home/user/app"
            zip_file_name = "pdf_files.zip"
            print(zip_file_name)
            zip_files_in_folder(directory, zip_file_name)
            create_file_HF(zip_file_name, directory, False)
            link = get_download_link(directory, zip_file_name)
            print(f"downloadable link: {link}")
            docstatus = f"""Please download <a href="{dataset_link}" download>excel</a> and <a href="{link}" download>PDFs</a>."""
            print(docstatus)
        print(sample_table)
        # 5. Return the preview and the downloadable-link status line.
        return sample_table, docstatus

    else:
        return "Your query returns a large dataset which is not supported in the current version. Please try a different query."
1256
+
1257
def create_file_HF(file_path, directory, document_created=False):
    """Upload *file_path* into the Hugging Face Space repo under *directory*.

    Args:
        file_path (str): Local path of the file to upload.
        directory (str): Target folder inside the repo ("<user>/<timestamp>").
        document_created (bool): When True, the file goes into a "document"
            subfolder of *directory*.
    """

    repo_id = "Redmind/NewageNXTGPT_Repo_trial"

    if (document_created):
        directory = directory + "/" + "document"
        document_created = False
    # Check whether the directory already exists in the repo.
    # NOTE(review): the flag is effectively unused — both branches below do the
    # same thing; this looks like leftover logic to confirm/clean up.
    dir_exists_flag = directory_exists(repo_id, directory, token)
    if not dir_exists_flag:
        directory = directory + "/" + file_path
    else:
        directory = directory + "/" + file_path

    #create_branch("Redmind/NewageNXTGPT_Repo_trial", repo_type="space", branch="test-branch")
    #create_tag("bigcode/the-stack", repo_type="dataset", revision="v0.1-release", tag="v0.1.1", tag_message="Bump release version.")

    # Upload to the "test-branch" revision of the Space repository.
    api.upload_file(path_or_fileobj=file_path, repo_id="Redmind/NewageNXTGPT_Repo_trial",revision = "test-branch", repo_type= "space", path_in_repo=directory)
1284
def create_pdf(cname, ename, account_number, directory):
    """Fill the LOA PDF template with one employee's details and write it to disk.

    The output is named "<employee>_<company>.pdf" in the current working
    directory; *directory* is currently unused here (kept for interface
    stability with callers).
    """
    field_values = {
        'company name': cname,
        'employee name': ename,
        'account number': account_number
    }
    filled = FormWrapper("Input/LOA_Sample_new.pdf").fill(field_values)

    output_file_name = f"{ename}_{cname}.pdf"
    document_created = True
    with open(output_file_name, "wb+") as output:
        output.write(filled.read())

    # Log the absolute location of the generated PDF.
    print(f"The file was created at: {os.path.abspath(output_file_name)}")
    #create_file_HF(output_file_name, directory, document_created)

    return f"{output_file_name} is created successfully."
1306
+
1307
def zip_files_in_folder(directory_output, output_zip):
    """Archive every PDF in the current working directory into *output_zip*.

    NOTE: files are always collected from "./"; *directory_output* is only
    echoed in the log message.
    """
    source_dir = "./"
    with zipfile.ZipFile(output_zip, 'w', zipfile.ZIP_DEFLATED) as archive:
        for entry in os.listdir(source_dir):
            entry_path = os.path.join(source_dir, entry)
            # Only regular *.pdf files are archived, stored under their bare names.
            if entry.endswith(".pdf") and os.path.isfile(entry_path):
                archive.write(entry_path, entry)
    print(f"ZIP file created: {output_zip}, {output_zip}, {directory_output}")
1319
+
1320
def directory_exists(repo_id, directory, token):
    """Return True if any file in the Space repo lives under *directory*.

    Falls back to False (and logs) on any API error so callers never crash on
    a connectivity problem.
    """
    try:
        # The HF API has no direct "directory exists" call; list every file in
        # the repository and test for the path prefix instead.
        # (Removed a stray, unused `from datetime import datetime` here.)
        files = api.list_repo_files(repo_id=repo_id, repo_type="space", token=token)
        return any(file.startswith(directory) for file in files)
    except Exception as e:
        print(f"Error checking directory existence: {e}")
        return False
1333
+
1334
def get_download_link(file_path, file_name):
    """Build the public Hugging Face Space URL for a file on the test branch."""
    # Spaces serve repository files from this fixed resolve/<branch> prefix.
    base_url = "https://huggingface.co/spaces/Redmind/NewageNXTGPT_Repo_trial/resolve/test-branch"
    return "/".join((base_url, file_path, file_name))
1339
+
1340
# Custom stylesheet injected into the Gradio Blocks app below: button colours,
# logout-link placement, chatbot height, tab-header styling and video sizing.
css = """

/* Example of custom button styling */
.gr-button {
    background-color: #6366f1; /* Change to your desired button color */
    color: white;
    border-radius: 8px; /* Make the corners rounded */
    border: none;
    padding: 10px 20px;
    font-size: 12px;
    cursor: pointer;
}

.gr-button:hover {
    background-color: #8a92f7; /* Darker shade on hover */
}

.gr-buttonbig {
    background-color: #6366f1; /* Change to your desired button color */
    color: white;
    border-radius: 8px; /* Make the corners rounded */
    border: none;
    padding: 10px 20px;
    font-size: 14px;
    cursor: pointer;
}

.gr-buttonbig:hover {
    background-color: #8a92f7; /* Darker shade on hover */
}

/* Customizing the Logout link to be on the right */
.logout-link {
    text-align: right;
    display: inline-block;
    width: 100%;
}

.logout-link a {
    color: #4A90E2; /* Link color */
    text-decoration: none;
    font-size: 16px;
}

.chatbot_gpt {
    height: 600px !important; /* Adjust height as needed */
}

.logout-link a:hover {
    text-decoration: underline; /* Underline on hover */
}

.message-buttons-right{
    display: none !important;
}

body, .gradio-container {
    margin: 0;
    padding: 0;
}

/* Styling the tab header with a blue background */
.gr-tab-header {
    background-color: #4A90E2; /* Blue background for the tab header */
    padding: 10px;
    border-radius: 8px;
    color: white;
    font-size: 16px;
}

/* Styling the selected tab text color to be green */
.gr-tab-header .gr-tab-active {
    color: green; /* Change selected tab text to green */
}

/* Keep non-selected tab text color white */
.gr-tab-header .gr-tab {
    color: white;
}

/* Custom CSS for reducing the size of the video element */
.video-player {
    width: 500px; /* Set a custom width for the video */
    height: 350px; /* Set a custom height for the video */
    margin: 0 auto; /* Center the video horizontally */
}
"""
1427
# Build and launch the RedMindGPT Gradio UI.
# Handler functions referenced here (update_message, handle_query, stop_processing,
# handle_dislike, submit_feedback, upload_file) are defined elsewhere in this file.
with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
    gr.HTML("<CENTER><B><h1 style='font-size:30px; font-family: Calibri;'>RedMindGPT</h1></B></CENTER>")
    # Logout link styled as text link in the right corner
    gr.Markdown("<div class='logout-link'><a href='/logout'><b>Logout</b></a></div>")

    # Unified RedMindGPT Interface with sample questions; the greeting Markdown
    # is populated on page load via demo.load.
    with gr.Row():
        m = gr.Markdown()
    demo.load(update_message, None, m)

    # Buttons for sample queries. Each Button is also wired as its own input
    # below, so its label text is passed to handle_query as the question.
    with gr.Row():
        sample_button = gr.Button("What are the details of ASN24091600002", elem_classes="gr-buttonbig")
        sample_button1 = gr.Button("What are the active warehouses available", elem_classes="gr-buttonbig")
        sample_button2 = gr.Button("Explain Pre-Receiving Yard Management", elem_classes="gr-buttonbig")
        sample_button3 = gr.Button("can you generate a doughnut chart with item name and quantities for warehouse WH1000001", elem_classes="gr-buttonbig")
        sample_button4 = gr.Button("Analyze item name & quantity for different customers in a stacked bar chart for the warehouse WH1000001 & send email to [email protected]", elem_classes="gr-button")

    # Chatbot component
    with gr.Row():
        chatbot = gr.Chatbot(label="Select any of the questions listed above to experience RedMindGPT in action.", elem_classes="chatbot_gpt")

    # Textbox for user questions
    with gr.Row():
        with gr.Column(scale=1):
            message = gr.Textbox(show_label=False, container=False, placeholder="Please enter your question")

        with gr.Row():
            # Feedback widgets start hidden; revealed by the chatbot.like handler.
            feedback_textbox = gr.Textbox(visible=False, show_label=False, container=False, placeholder="Please enter your feedback.")
            submit_feedback_button = gr.Button("Submit Feedback", visible=False, elem_classes="gr-buttonbig")
        with gr.Column(scale=1):
            with gr.Row():
                button = gr.Button("Submit", elem_id="submit", elem_classes="gr-buttonbig")
                stop_button = gr.Button("Stop", elem_classes="gr-buttonbig")

    # Upload section: Upload Doc and Upload Excel share one row.
    with gr.Row():
        with gr.Column(scale=1):
            # File Upload Section
            gr.Markdown("**Add a document or Excel for natural language interaction.**")
        with gr.Column(scale=1):
            u = gr.UploadButton("Upload a doc/excel", file_count="single", elem_classes="gr-buttonbig")
            #excel_file = gr.UploadButton("Upload an excel", file_count="single", elem_classes="gr-buttonbig", file_types=[".xlsx", ".xls"])
        with gr.Column(scale=1):
            add_button = gr.Button("Add to RedMindGPT", elem_classes="gr-buttonbig", visible=False)
    with gr.Row():
        title_textbox = gr.Textbox(label="Title", visible=False)
        summary_textarea = gr.Textbox(label="Summary", lines=5, visible=False)

    output_message = gr.Markdown()   # Markdown to display output message
    success_message = gr.Markdown()  # Placeholder for messages

    # --- Event wiring (kept at the end so every component exists) ---
    stop_button.click(stop_processing, [chatbot], [chatbot])

    button.click(handle_query, [message, chatbot], [chatbot])
    message.submit(handle_query, [message, chatbot], [chatbot])
    # BUGFIX: with inputs=None/[], Gradio invokes the callback with ZERO
    # arguments, so the original one-parameter lambdas (`lambda x: ...`)
    # raised TypeError at event time. Use zero-argument lambdas instead.
    message.submit(lambda: gr.update(value=""), None, [message], queue=False)
    button.click(lambda: gr.update(value=''), [], [message])

    chatbot.like(handle_dislike, None, outputs=[feedback_textbox, submit_feedback_button])
    submit_feedback_button.click(submit_feedback, [feedback_textbox, chatbot], [chatbot, feedback_textbox, submit_feedback_button])
    submit_feedback_button.click(lambda: gr.update(value=''), [], [feedback_textbox])

    # Button components as inputs: their label text is the query string.
    sample_button.click(handle_query, [sample_button, chatbot], [chatbot])
    sample_button1.click(handle_query, [sample_button1, chatbot], [chatbot])
    sample_button2.click(handle_query, [sample_button2, chatbot], [chatbot])
    sample_button3.click(handle_query, [sample_button3, chatbot], [chatbot])
    sample_button4.click(handle_query, [sample_button4, chatbot], [chatbot])

    # Earlier title/summary review flow, kept for reference:
    # u.upload(upload_file, u, [title_textbox, summary_textarea])
    # u.upload(lambda: (gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)), None, [title_textbox, summary_textarea, add_button])
    # add_button.click(add_to_redmindgpt, [title_textbox, summary_textarea], output_message)
    # add_button.click(lambda: (gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)), None, [title_textbox, summary_textarea, add_button])
    u.upload(upload_file, u, output_message)
    u.upload(lambda: (gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)), None, [title_textbox, summary_textarea, add_button])

# SECURITY: credentials are hard-coded in source; move them to environment
# variables or a secret store before deploying publicly.
demo.launch(auth=[("lakshmi", "redmind"), ("admin", "redmind"), ("arun", "redmind"), ("NewageGlobal", "Newage123$")], auth_message="RedMindGPT", inline=False)
requirements.txt ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ huggingface_hub==0.22.2
2
+ langchain
3
+ mysql-connector-python
4
+ langchain-community
5
+ langchain-openai
6
+ requests
7
+ gradio
8
+ PyPDF2
9
+ faiss-cpu
10
+ psycopg2
11
+ nltk
12
+ tabulate
13
+ pandas
14
+ numpy
15
+ pandasai
16
+ pydub
17
+ speechrecognition
18
+ langfuse
19
+ fastapi-mail
20
+ mailjet_rest
21
+ PyPDFForm
22
+ seaborn
23
+ openpyxl