"""Gradio app: pull business reports from the HisabKarlay API into CSV files,
embed them with Together AI, and answer user questions with Llama 3.1 via a
simple retrieval-augmented-generation (RAG) chain."""

import os

import gradio as gr
import pandas as pd
import requests
from langchain import LLMChain, PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import CSVLoader
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_together import Together, TogetherEmbeddings

# SECURITY NOTE(review): the Together API key and the HisabKarlay login
# credentials below are hard-coded in source. They should be rotated and
# loaded from the environment or a secrets manager instead of being committed.
os.environ['TOGETHER_API_KEY'] = "c2f52626b97118b71c0c36f66eda4f5957c8fc475e760c3d72f98ba07d3ed3b5"

BASE_URL = "https://livesystem.hisabkarlay.com"

# Vector store shared between the UI handlers: populated by
# initialize_embedding(), read by qa_chain()/generate_response().
vectorstore = None
embeddings = TogetherEmbeddings(model="togethercomputer/m2-bert-80M-8k-retrieval")
llama3 = Together(model="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", max_tokens=1024)


def update_csv_files():
    """Log in to the HisabKarlay API and refresh the three report CSV files.

    Writes profit_loss.csv, purchase_sell_report.csv and trending_product.csv
    into the working directory.

    Returns:
        A status string for display in the UI.

    Raises:
        requests.HTTPError: if any report endpoint returns an error status.
    """
    login_url = f"{BASE_URL}/auth/login"
    payload = {
        "username": "user@123",
        "password": "user@123",
        "client_secret": "kNqJjlPkxyHdIKt3szCt4PYFWtFOdUheb8QVN8vQ",
        "client_id": "5",
        "grant_type": "password",
    }
    response = requests.post(login_url, data=payload)
    if response.status_code == 200:
        print("Login successful!")
        access_token = response.json()['access_token']
    else:
        return f"Failed to log in: {response.status_code}"

    headers = {"Authorization": f"Bearer {access_token}"}

    # --- Profit/loss report ------------------------------------------------
    response = requests.get(f"{BASE_URL}/connector/api/profit-loss-report", headers=headers)
    response.raise_for_status()  # fail loudly instead of a KeyError below
    profit_loss_data = response.json()['data']
    keys = list(profit_loss_data.keys())
    # Drop unwanted fields by position, highest index first so earlier
    # deletions don't shift the later ones. Adjust to the API's field order.
    del keys[23]
    del keys[20]
    del keys[19]
    data_dict = {key: profit_loss_data.get(key) for key in keys}
    pd.DataFrame(data_dict, index=[0]).to_csv('profit_loss.csv', index=False)

    # --- Purchase/sell report ---------------------------------------------
    response = requests.get(f"{BASE_URL}/connector/api/purchase-sell", headers=headers)
    response.raise_for_status()
    # The first two top-level keys are skipped (presumably metadata — TODO
    # confirm against the API response shape).
    sell_purchase_data = dict(list(response.json().items())[2:])
    pd.json_normalize(sell_purchase_data).to_csv('purchase_sell_report.csv', index=False)

    # --- Trending products report -----------------------------------------
    response = requests.get(f"{BASE_URL}/connector/api/trending-products", headers=headers)
    response.raise_for_status()
    df = pd.DataFrame(response.json()['data'])
    df.columns = ['Product Units Sold', 'Product Name', 'Unit Type', 'SKU (Stock Keeping Unit)']
    df.to_csv('trending_product.csv', index=False)

    return "CSV files updated successfully!"


def initialize_embedding():
    """Load the three report CSVs and (re)build the in-memory vector store.

    Returns:
        A status string for display in the UI.
    """
    global vectorstore
    file_paths = [
        "profit_loss.csv",
        "purchase_sell_report.csv",
        "trending_product.csv",
    ]
    # Combine the documents from all three files into one corpus.
    documents = []
    for path in file_paths:
        loader = CSVLoader(path, encoding="windows-1252")
        documents.extend(loader.load())
    vectorstore = InMemoryVectorStore.from_texts(
        # InMemoryVectorStore.from_texts takes raw strings, not Documents.
        [doc.page_content for doc in documents],
        embedding=embeddings,
    )
    return "Embeddings initialized successfully!"


def qa_chain(query):
    """Retrieve the documents most relevant to *query* from the vector store.

    Not shown directly in the UI; used internally by generate_response().
    """
    if vectorstore is None:
        return "Please initialize the embeddings first."
    retriever = vectorstore.as_retriever()
    return retriever.invoke(query)


def generate_response(query):
    """Answer *query* using retrieved context and the Llama 3.1 model.

    Returns:
        The model's answer text, or an instruction string when the vector
        store has not been initialized yet.
    """
    if vectorstore is None:
        return "Please initialize the embeddings first."
    retrieved_documents = qa_chain(query)
    chat_template = """
    You are a highly intelligent and professional AI assistant. Your role is to assist users by providing clear, concise, and accurate responses to their questions.
    Context:
    {retrieved_documents}
    Question:
    {query}
    Please provide a professional, human-like answer that directly addresses the user's question. Ensure that the response is well-structured and easy to understand. Avoid using jargon that may be confusing.
    Note: If the question involves historical places or historical heroes, do not provide a response.
    """
    prompt = PromptTemplate(
        input_variables=['retrieved_documents', 'query'],
        template=chat_template,
    )
    generated_chat = LLMChain(llm=llama3, prompt=prompt)
    response = generated_chat.invoke({
        'retrieved_documents': retrieved_documents,
        'query': query,
    })
    return response['text']


def gradio_app():
    """Build and launch the Gradio Blocks UI wiring the handlers above."""
    with gr.Blocks() as app:
        gr.Markdown("# Embedding and QA Interface")
        update_btn = gr.Button("Update CSV Files")
        update_output = gr.Textbox(label="Update Output")
        initialize_btn = gr.Button("Initialize Embedding")
        initialize_output = gr.Textbox(label="Output")
        query_input = gr.Textbox(label="Enter your query")
        generate_response_btn = gr.Button("Generate Response")
        response_output = gr.Textbox(label="Generated Response")

        # Button actions
        update_btn.click(update_csv_files, outputs=update_output)
        initialize_btn.click(initialize_embedding, outputs=initialize_output)
        generate_response_btn.click(generate_response, inputs=query_input, outputs=response_output)
    app.launch()


# Guard the entry point so importing this module does not launch the server.
if __name__ == "__main__":
    gradio_app()