# -*- coding: utf-8 -*-
"""Chat_with_agent_v4.ipynb
Automatically generated by Colab.
Original file is located at
    https://colab.research.google.com/drive/1T5Buj_yHaAnfoO__2-gCFDSvBVheiHrF
"""

from PIL import Image
import base64
from io import BytesIO
import os
import re
import tempfile
import wave
import requests
import gradio as gr
import time
import shutil
import json
import nltk
#audio package
import speech_recognition as sr
from pydub import AudioSegment
from pydub.playback import play
#email library
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
#langchain
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableSequence, RunnableLambda
from langchain_openai import ChatOpenAI
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.utilities import SQLDatabase
from langchain.agents import create_tool_calling_agent, AgentExecutor, Tool
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.tools import StructuredTool
from langchain.pydantic_v1 import BaseModel, Field
from PyPDF2 import PdfReader
from nltk.tokenize import sent_tokenize
from sqlalchemy import create_engine
from sqlalchemy.sql import text

#pandas
import pandas as pd
from pandasai.llm.openai import OpenAI
from pandasai import SmartDataframe




nltk.download('punkt')

open_api_key_token = os.environ['OPEN_AI_API']

os.environ['OPENAI_API_KEY'] = open_api_key_token
pdf_path="Inbound.pdf"

db_uri = 'postgresql+psycopg2://postgres:[email protected]:5432/warehouseAi'
# Database setup

db = SQLDatabase.from_uri(db_uri)

# LLM setup
llm = ChatOpenAI(model="gpt-4o-mini",max_tokens=300,temperature=0.1)
llm_chart = OpenAI()

def get_schema(_):
    schema_info = db.get_table_info()  # This should be a string of your SQL schema
    return schema_info

def generate_sql_query(question):
    schema = get_schema(None)
    template_query_generation = """
    Schema: {schema}
    Question: {question}
    Provide a SQL query to answer the above question using the exact field names and table names specified in the schema.
    SQL Query (Please provide only the SQL statement without explanations or formatting):
    """
    prompt_query_generation = ChatPromptTemplate.from_template(template_query_generation)
    schema_and_question = RunnableLambda(lambda _: {'schema': schema, 'question': question})
    sql_chain = RunnableSequence(
        schema_and_question,
        prompt_query_generation,
        llm.bind(stop=["SQL Query End"]),  # Adjust the stop sequence to your need
        StrOutputParser()
    )
    sql_query = sql_chain.invoke({})
    return sql_query.strip()

def run_query(query):
    # Clean the query by removing markdown symbols and trimming whitespace
    clean_query = query.replace("```sql", "").replace("```", "").strip()
    print(f"Executing SQL Query: {clean_query}")
    try:
        result = db.run(clean_query)
        return result
    except Exception as e:
        print(f"Error executing query: {e}")
        return None

# Define the database query tool
# The function that uses the above models
# Define the function that will handle the database query
def database_tool(question):
    # print(question)
    sql_query = generate_sql_query(question)
    print(sql_query)
    return run_query(sql_query)
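
# Usage sketch (hypothetical question; assumes the warehouseAi database above is reachable):
#   database_tool("How many warehouses are listed in wms_warehouse_master?")
# The LLM first drafts a SQL statement from the schema (e.g. SELECT COUNT(*) FROM wms_warehouse_master;)
# and run_query() then executes it and returns the raw result string.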

def get_ASN_data(question):
  #print(question)
  base_url = "http://193.203.162.39:9090/nxt-wms/trnHeader?"
  complete_url = f"{base_url}branchMaster.id=343&transactionUid={question}&userId=164&transactionType=ASN"
  #print("complete url")
  #print(complete_url)
  try:
      response = requests.get(complete_url)
      response.raise_for_status()
      data = response.json()

      if 'result' in data and 'content' in data['result'] and data['result']['content']:
          content = data['result']['content'][0]
          trnHeaderAsn = content['trnHeaderAsn']
          party = content['party'][0]

          transactionUid = trnHeaderAsn['transactionUid']
          customerOrderNo = trnHeaderAsn.get('customerOrderNo', 'N/A')
          orderDate = trnHeaderAsn.get('orderDate', 'N/A')
          customerInvoiceNo = trnHeaderAsn.get('customerInvoiceNo', 'N/A')
          invoiceDate = trnHeaderAsn.get('invoiceDate', 'N/A')
          expectedReceivingDate = trnHeaderAsn['expectedReceivingDate']
          transactionStatus = trnHeaderAsn['transactionStatus']
          shipper_code = party['shipper']['code'] if party['shipper'] else 'N/A'
          shipper_name = party['shipper']['name'] if party['shipper'] else 'N/A'

          data = [
              ["Transaction UID", transactionUid],
              ["Customer Order No", customerOrderNo],
              ["Order Date", orderDate],
              ["Customer Invoice No", customerInvoiceNo],
              ["Invoice Date", invoiceDate],
              ["Expected Receiving Date", expectedReceivingDate],
              ["Transaction Status", transactionStatus],
              ["Shipper Code", shipper_code],
              ["Shipper Name", shipper_name]
          ]
          return f"The ASN details of {question} is {data}."
      else:
          return "ASN Details are not found. Please contact system administrator."

  except requests.exceptions.HTTPError as http_err:
      print(f"HTTP error occurred: {http_err}")
      return f"HTTP error occurred while fetching ASN details: {http_err}"
  except Exception as err:
      print(f"An error occurred: {err}")
      return f"An error occurred while fetching ASN details: {err}"

get_ASN_data("ASN24072400001")

def load_and_split_pdf(pdf_path):
    reader = PdfReader(pdf_path)
    text = ''
    for page in reader.pages:
        text += page.extract_text()
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
    texts = text_splitter.split_text(text)
    return texts
def create_vector_store(texts):
  embeddings = OpenAIEmbeddings()
  vector_store = FAISS.from_texts(texts, embeddings)
  return vector_store

def query_vector_store(vector_store, query):
    docs = vector_store.similarity_search(query, k=5)
    print(f"Vector store return: {docs}")
    return docs

def summarize_document(docs):
    summarized_docs = []
    for doc in docs:
        if isinstance(doc, list):
            doc_content = ' '.join([d.page_content for d in doc])
        else:
            doc_content = doc.page_content

        sentences = sent_tokenize(doc_content)
        if len(sentences) > 5:
            summarized_content = ' '.join(sentences[:5])
        else:
            summarized_content = doc_content
        summarized_docs.append(summarized_content)
    return '\n\n'.join(summarized_docs)

texts = load_and_split_pdf(pdf_path)
vector_store = create_vector_store(texts)

def document_data_tool(question):
    print(f"Document data tool enter: {question}")
    # query_string = question['tags'][0] if 'tags' in question and question['tags'] else ""
    query_response = query_vector_store(vector_store, question)
    print("query****")
    print(query_response)
    #summarized_response = summarize_document(query_response)
    #print("summary***")
    #print(summarized_response)
    return query_response
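
# Usage sketch (hypothetical question; assumes Inbound.pdf was indexed into the FAISS store above):
#   document_data_tool("How is an ASN processed in the WMS?")
# returns the top-5 most similar chunks from the vector store; summarize_document()
# can optionally condense them to ~5 sentences per chunk before replying.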
    
def send_email_with_attachment(recipient_email, subject, body, attachment_path):
    sender_email = "[email protected]"
    sender_password = "jymz apyc raih eubg"

    # Create a multipart message
    msg = MIMEMultipart()
    msg['From'] = sender_email
    msg['To'] = recipient_email
    msg['Subject'] = subject

    # Attach the body with the msg instance
    msg.attach(MIMEText(body, 'plain'))

    # Open the file to be sent and read its contents into a MIMEBase part
    with open(attachment_path, "rb") as attachment:
        part = MIMEBase('application', 'octet-stream')
        part.set_payload(attachment.read())

    # Encode into base64
    encoders.encode_base64(part)

    part.add_header('Content-Disposition', f"attachment; filename={os.path.basename(attachment_path)}")

    # Attach the instance 'part' to instance 'msg'
    msg.attach(part)

    # Create SMTP session for sending the mail
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.starttls()
    server.login(sender_email, sender_password)
    text = msg.as_string()
    server.sendmail(sender_email, recipient_email, text)
    server.quit()
    #return 1
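
# Usage sketch (hypothetical values; requires the Gmail app-password credentials above):
#   send_email_with_attachment(
#       recipient_email="[email protected]",
#       subject="Warehouse Inventory Report",
#       body="Please find the attached chart.",
#       attachment_path="/home/user/app/exports/charts/temp_chart.png",
#   )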

def make_api_request(url, params):
  """Generic function to make API GET requests and return JSON data."""
  try:
      response = requests.get(url, params=params)
      response.raise_for_status()  # Raises an HTTPError if the response was an error
      return response.json()  # Return the parsed JSON data
  except requests.exceptions.HTTPError as http_err:
      print(f"HTTP error occurred: {http_err}")
  except Exception as err:
      print(f"An error occurred: {err}")

name=""
warehouse_id = ""
apis = [
    #fetch warehouse ID
    {
        "url": "http://193.203.162.39:9090/nxt-wms/userWarehouse/fetchWarehouseForUserId?",
        "params": {"query": name, "userId": "164"}
    },
    # Stock summary based on warehouse id
    {
        "url": "http://193.203.162.39:9090/nxt-wms/transactionHistory/stockSummary?",
        "params": {"branchId": "343", "onDate": "2024-08-09", "warehouseId" : warehouse_id }
    }
]

def inventory_report(question):

  # Split the question to extract warehouse name, user question, and optional email
  parts = question.split(":", 2)
  name = parts[0].strip()
  user_question = parts[1].strip()
  user_email = parts[2].strip() if len(parts) > 2 else None
  print(f"Warehouse: {name}, Email: {user_email}, Question: {user_question}")


  data = make_api_request(apis[0]["url"], apis[0]["params"])
  if data:
      #print(data)
      # Extracting the id for the warehouse with the name "WH"
      warehouse_id = next((item['id'] for item in data['result'] if item['name'] == name), None)

      #print(f"The id for the warehouse named {name} is: {warehouse_id}")
      #Step 3: Update the placeholder with the actual warehouse_id
      for api in apis:
        if "warehouseId" in api["params"]:
          api["params"]["warehouseId"] = warehouse_id


  data1 = make_api_request(apis[1]["url"], apis[1]["params"])

  headers = ["S.No","Warehouse Code", "Warehouse Name", "Customer Code", "Customer Name", "Item Code", "Item Name", "Currency", "EAN", "UOM", "Quantity", "Gross Weight", "Volume", "Total Value"]
  table_data = []

  for index, item in enumerate(data1['result'], start=1):
    row = [
              index,  # Serial number
              item['warehouse']['code'],
              item['warehouse']['name'],
              item['customer']['code'],
              item['customer']['name'],
              item['skuMaster']['code'],
              item['skuMaster']['name'],
              item['currency']['code'],
              item['eanUpc'],
              item['uom']['code'],
              item['totalQty'],
              item['grossWeight'],
              item['volume'],
              item['totalValue']
          ]
    table_data.append(row)


 # Convert to pandas DataFrame
  df = pd.DataFrame(table_data, columns=headers)

  sdf = SmartDataframe(df, config={"llm": llm_chart})

  #chart = sdf.chat("Can you draw a bar chart with all avaialble item name and quantity.")
  chart = sdf.chat(question)

  #email send
  if user_email:
      # Send email with the chart image attached
    send_email_with_attachment(
        recipient_email=user_email,
        subject="Warehouse Inventory Report",
        body="Please find the attached bar chart report for the warehouse inventory analysis.",
        #attachment_path=chart_path
        attachment_path="/home/user/app/exports/charts/temp_chart.png"
    )

  return chart
#inventory_report("WH:can you give me a bar chart with item name and quantity for the warehouse WH")

# Define input and output models using Pydantic
class QueryInput(BaseModel):
    question: str = Field(description="The question to be answered by the appropriate tool. Please follow the instructions. For the API tool, do not send the question as it is; send only the ASN id. Invoke the dataVisualization tool by processing the user question and sending two inputs to the tool: one is the warehouse name and the other is the entire user question itself. Join those two strings into a single input string with ':' as the delimiter.")
    # config: dict = Field(default={}, description="Optional configuration for the database query.")


# Define the output model for database queries
class QueryOutput(BaseModel):
    result: str = Field(..., description="Display the answer based on the prompts given in each tool. The dataVisualization tool returns an image file as output; pass only the image file path to gr.Image. For the DocumentData tool, provide a complete and concise response within 200 words and ensure that the response is not truncated and covers the essential points.")

# Wrap the function with StructuredTool for better parameter handling
tools = [
    StructuredTool(
        func=get_ASN_data,
        name="APIData",
        args_schema=QueryInput,
        output_schema=QueryOutput,
        description="Tool to get details of ASN api. ASN id will be in the input with the format of first three letters as ASN and it is followed by 11 digit numeral. Pass only the id as input.  Do not send the complete user question to the tool. If there are any other queries related to ASN without ASN id, please use the document tool."
    ),
    StructuredTool(
            func=document_data_tool,
            name="DocumentData",
            args_schema=QueryInput,
            output_schema=QueryOutput,
            description="You are an AI assistant trained to help with warehouse management questions based on a detailed document about our WMS. The document covers various processes such as ASN handling, purchase orders, cross docking, appointment scheduling for shipments, and yard management. Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points. "
    ),
    StructuredTool(
        func=database_tool,
        name="DatabaseQuery",
        args_schema=QueryInput,
        output_schema=QueryOutput,
        description="Tool to query the database based on structured input."
    ),
    StructuredTool(
        func=inventory_report,
        name="dataVisualization",
        args_schema=QueryInput,
        output_schema=QueryOutput,
        description = """
        Tool to generate a visual output (such as a bar chart) for a particular warehouse based on the provided question.
        This tool processes the user question to identify the warehouse name and the specific request. If the user specifies
        an email, include the email in the input. The input format should be: 'warehouse name: user question: email (if any)'.
        The tool generates the requested chart and sends it to the provided email if specified.

        Examples:
        1. Question without email: "Analyze item name and quantity in a bar chart in warehouse Allcargo Logistics"
          Input to tool: "Allcargo Logistics: I want to analyze item name and quantity in a bar chart"

        2. Question with email: "Analyze item name and quantity in a bar chart in warehouse Allcargo Logistics report to send email to [email protected]"
          Input to tool: "Allcargo Logistics: I want to analyze item name and quantity in a bar chart: [email protected]"
        """
    )
]

prompt_template = f"""You are an assistant that helps with database queries, API information, and document retrieval.  Your job is to provide clear, complete, and detailed responses to the following queries. Please give the output response in an user friendly way and remove "**" from the response. For example, document related queries can be answered in a clear and concise way with numbering and not as a paragraph. Database related queries should be answered with proper indentation and use numbering for the rows. ASN id related queries should be answered with proper indentation and use numbering for the rows.
For ASN id related questions, if the user specifies an ASN id, provide the information from the api tool. Pass only the id as input to the tool. Do not pass the entire question as input to the tool. If the details are not found, say it in a clear and concise way.
You are an AI assistant trained to help with warehouse management questions based on a detailed document about our WMS. The document covers various processes such as ASN handling, purchase orders, cross docking, appointment scheduling for shipments, and yard management. Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points. When answering, focus on providing actionable insights and clear explanations related to the specific query.  Please remove "**" from the response.
For SQL database-related questions, only use the fields available in the warehouse schema, including tables such as customer_master, efs_company_master, efs_group_company_master, efs_region_master, party_address_detail, wms_warehouse_master.
For datavisualization, user will ask for inventory report of a particular warehouse. Your job is to return the image path to chat interface and display the image as output.
{{agent_scratchpad}}
Here is the information you need to process:
Question: {{input}}"""

llm = llm.bind()
agent = create_tool_calling_agent(llm, tools, ChatPromptTemplate.from_template(prompt_template))
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
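
# Usage sketch (hypothetical input; requires the OpenAI key and the tools configured above):
#   result = agent_executor.invoke({"input": "What is the status of ASN24072400001?"})
#   print(result["output"])
# The executor lets the LLM pick one of the four StructuredTools and returns the final
# answer under the "output" key, which answer_question() below relies on.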

def ensure_temp_chart_dir():
    temp_chart_dir = "/home/user/app/exports/charts/"
    if not os.path.exists(temp_chart_dir):
        os.makedirs(temp_chart_dir)

def clean_gradio_tmp_dir():
    tmp_dir = "/tmp/gradio/"
    if os.path.exists(tmp_dir):
        try:
            shutil.rmtree(tmp_dir)
        except Exception as e:
            print(f"Error cleaning up /tmp/gradio/ directory: {e}")

# Define the interface function
max_iterations = 5
iterations = 0

def answer_question(user_question, chatbot, audio=None):
    global iterations
    iterations = 0
    # Ensure the temporary chart directory exists
    #ensure_temp_chart_dir()
    # Clean the /tmp/gradio/ directory
    #clean_gradio_tmp_dir()
    # Handle audio input if provided
    if audio is not None:
        sample_rate, audio_data = audio
        audio_segment = AudioSegment(
            audio_data.tobytes(),
            frame_rate=sample_rate,
            sample_width=audio_data.dtype.itemsize,
            channels=1
        )
        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio_file:
            audio_segment.export(temp_audio_file.name, format="wav")
            temp_audio_file_path = temp_audio_file.name

        recognizer = sr.Recognizer()
        with sr.AudioFile(temp_audio_file_path) as source:
            audio_content = recognizer.record(source)
        try:
            user_question = recognizer.recognize_google(audio_content)
        except sr.UnknownValueError:
            user_question = "Sorry, I could not understand the audio."
        except sr.RequestError:
            user_question = "Could not request results from Google Speech Recognition service."

    while iterations < max_iterations:
        print(user_question)
        if "send email to" in user_question:
          email_match = re.search(r"send email to ([\w\.-]+@[\w\.-]+)", user_question)
          if email_match:
            user_email = email_match.group(1).strip()
            user_question = user_question.replace(f"send email to {user_email}", "").strip()
            user_question = f"{user_question}:{user_email}"

        response = agent_executor.invoke({"input": user_question})

        if isinstance(response, dict):
            response_text = response.get("output", "")
        else:
            response_text = response
        if "invalid" not in response_text.lower():
            break
        iterations += 1

    if iterations == max_iterations:
        return "The agent could not generate a valid response within the iteration limit."
    if "chart" in user_question:
        
        # Open the image file
        img = Image.open('/home/user/app/exports/charts/temp_chart.png')
        
        # Convert the PIL Image to a base64 encoded string
        buffered = BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
        #print(img_str)
        img =  f'<img src="data:image/png;base64,{img_str}">'
        #image = gr.Image(value=img_str)
        chatbot.append((user_question,img))
        #print(chatbot)
		if "send email to" in user_question:
                    try:
                        os.remove(image_path)  # Clean up the temporary image file
                    except Exception as e:
                        print(f"Error cleaning up image file: {e}")
            except Exception as e:
                print(f"Error loading image file: {e}")
                chatbot.append((user_question, "Chart generation failed. Please try again."))
        else:
            chatbot.append((user_question, "Chart generation failed. Please try again."))
        return gr.update(value=chatbot)
        

      #return [(user_question,gr.Image("/home/user/app/exports/charts/temp_chart.png"))]
      #  return "/home/user/app/exports/charts/temp_chart.png"
    else:
        chatbot.append((user_question, response_text))
        #print(chatbot)
        return gr.update(value=chatbot)
    #response_text = response_text.replace('\n', ' ').replace('  ', ' ').strip()
    #return response_text


css = """

.gr-chatbot {
  /* Custom styles for the Chatbot component */
  border: 1px solid #ccc;
  border-radius: 10px;
  padding: 10px;
  background-color: #f9f9f9;
  height: 300px;  /* Adjust the height as needed */
      /* Adjust the width as needed */
    overflow-y: auto;  /* Add scroll if the content exceeds the height */
}


.gr-button {
    height: 40px;  /* Adjust the height as needed */
}
"""

def submit_feedback(feedback, chatbot):
    feedback_response = "User feedback: " + feedback
    return chatbot + [(feedback_response,None)], gr.update(visible=False), gr.update(visible=False)

def handle_dislike(data: gr.LikeData):
    if not data.liked:
        print("downvote")
        return gr.update(visible=True), gr.update(visible=True)
    else:
        print("upvote")
        return gr.update(visible=False), gr.update(visible=False)


    

with gr.Blocks(css=css) as demo:
    gr.Markdown("<CENTER><h2 style='font-size: 20px; font-family: Calibri;'>NewageNXT GPT</h2></CENTER>")
    chatbot = gr.Chatbot(elem_classes="gr-chatbot", label="Ask a question about the API, Database, a Document or Warehouse inventory analysis.")#.style(color_map=["blue","grey","red"])
    
    with gr.Row():
        with gr.Column(scale=1):
            message = gr.Textbox(show_label=False)
            audio_input = gr.Audio(label="Record your question")
        with gr.Column(scale=1):
            with gr.Row():
                button = gr.Button("Submit", elem_classes="gr-button")
                gr.ClearButton(message, elem_classes="gr-button")
    with gr.Row():
        with gr.Column(scale=1):
            feedback_textbox = gr.Textbox(visible=False, show_label=False)
        with gr.Column(scale=1):
            submit_feedback_button = gr.Button("Submit Feedback", visible=False, elem_classes="gr-button")

               
    button.click(answer_question, [message, chatbot, audio_input], [chatbot])
    message.submit(answer_question, [message, chatbot, audio_input], [chatbot])
    message.submit(lambda: gr.update(value=""), None, [message], queue=False)
    button.click(lambda: gr.update(value=''), [], [message])
    
    

    chatbot.like(handle_dislike,None, outputs=[feedback_textbox, submit_feedback_button])
    submit_feedback_button.click(submit_feedback, [feedback_textbox, chatbot], [chatbot, feedback_textbox,submit_feedback_button])
    submit_feedback_button.click(lambda: gr.update(value=''), [], [feedback_textbox])
    

   
    
demo.launch()