import os

import gradio as gr
import pandas as pd
import requests
from langchain.chains import LLMChain
from langchain_community.document_loaders import CSVLoader
from langchain_core.prompts import PromptTemplate
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_together import Together, TogetherEmbeddings

os.environ['TOGETHER_API_KEY'] = "your_api_key"  # replace with your Together API key
# Initialize global variable for vectorstore
vectorstore = None
embeddings = TogetherEmbeddings(model="togethercomputer/m2-bert-80M-8k-retrieval")
llama3 = Together(model="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", max_tokens=1024)
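# Overview: update_csv_files() pulls three reports from the HisabKarlay API
# (profit/loss, purchase/sell, trending products) and saves them as CSVs;
# initialize_embedding() embeds those CSVs with Together embeddings into an
# in-memory vector store; generate_response() retrieves the matching rows and
# asks Llama 3.1 to answer the user's query inside a Gradio chat UI.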
def update_csv_files():
    # Authenticate against the HisabKarlay API to obtain an access token
    login_url = "https://livesystem.hisabkarlay.com/auth/login"
    payload = {
        "username": "user@123",
        "password": "user@123",
        "client_secret": "kNqJjlPkxyHdIKt3szCt4PYFWtFOdUheb8QVN8vQ",
        "client_id": "5",
        "grant_type": "password"
    }
    response = requests.post(login_url, data=payload)
    if response.status_code == 200:
        access_token = response.json()['access_token']
    else:
        return f"Failed to log in: {response.status_code}"
    headers = {"Authorization": f"Bearer {access_token}"}

    # Profit & loss report
    report_url = "https://livesystem.hisabkarlay.com/connector/api/profit-loss-report"
    response = requests.get(report_url, headers=headers)
    profit_loss_data = response.json()['data']
    keys = list(profit_loss_data.keys())
    # Drop unwanted fields by position (highest index first so earlier positions don't shift)
    del keys[23]
    del keys[20]
    del keys[19]
    data_dict = {}
    for key in keys:
        data_dict[key] = profit_loss_data.get(key)
    df = pd.DataFrame(data_dict, index=[0])
    df.to_csv('profit_loss.csv', index=False)

    # Purchase & sell report
    report_url = "https://livesystem.hisabkarlay.com/connector/api/purchase-sell"
    response = requests.get(report_url, headers=headers)
    sell_purchase_data = response.json()
    sell_purchase_data = dict(list(sell_purchase_data.items())[2:])  # skip the first two metadata entries
    df = pd.json_normalize(sell_purchase_data)
    df.to_csv('purchase_sell_report.csv', index=False)

    # Trending products report
    report_url = "https://livesystem.hisabkarlay.com/connector/api/trending-products"
    response = requests.get(report_url, headers=headers)
    trending_product_data = response.json()['data']
    df = pd.DataFrame(trending_product_data)
    df.columns = ['Product Units Sold', 'Product Name', 'Unit Type', 'SKU (Stock Keeping Unit)']
    df.to_csv('trending_product.csv', index=False)

    return "CSV files updated successfully!"
def initialize_embedding():
    global vectorstore
    file_paths = ["profit_loss.csv", "purchase_sell_report.csv", "trending_product.csv"]
    documents = []
    for path in file_paths:
        loader = CSVLoader(path, encoding="windows-1252")
        documents.extend(loader.load())
    # Build an in-memory vector store over the CSV rows using Together embeddings
    vectorstore = InMemoryVectorStore.from_texts(
        [doc.page_content for doc in documents],
        embedding=embeddings,
    )
    return "Embeddings initialized successfully!"
def qa_chain(query):
    if vectorstore is None:
        return "Please initialize the embeddings first."
    retriever = vectorstore.as_retriever()
    retrieved_documents = retriever.invoke(query)
    return retrieved_documents
def generate_response(query, history):
    if vectorstore is None:
        # Keep the output shape consistent: the Chatbot expects a list of [user, bot] pairs
        history.append([query, "Please initialize the embeddings first."])
        return history, history
    retrieved_documents = qa_chain(query)
    chat_template = """
    You are a highly intelligent and professional AI assistant.
    Generate the response according to the user's query:
    Context: {retrieved_documents}
    Question: {query}
    """
    prompt = PromptTemplate(
        input_variables=['retrieved_documents', 'query'],
        template=chat_template
    )
    generated_chat = LLMChain(llm=llama3, prompt=prompt)
    response = generated_chat.invoke({'retrieved_documents': retrieved_documents, 'query': query})
    # Ensure history is always a list of two-element lists [query, response]
    history.append([query, response['text']])
    # Return the updated history for both the chatbot display and the state
    return history, history
def gradio_app():
    with gr.Blocks() as app:
        gr.Markdown("# Embedding and QA Interface")
        # Chatbox elements
        chatbot = gr.Chatbot(label="Chat")
        query_input = gr.Textbox(label="Enter your query")
        generate_response_btn = gr.Button("Generate Response")
        # Status output textboxes for CSV update and embedding initialization
        update_csv_status = gr.Textbox(label="CSV Update Status", interactive=False)
        initialize_status = gr.Textbox(label="Embedding Initialization Status", interactive=False)
        # Buttons for CSV update and embedding initialization
        update_csv_button = gr.Button("Update CSV Files")
        initialize_button = gr.Button("Initialize Embedding")
        # Button click actions
        update_csv_button.click(update_csv_files, outputs=update_csv_status)
        initialize_button.click(initialize_embedding, outputs=initialize_status)
        # Chatbot functionality with history
        history = gr.State([])  # Chat history state
        generate_response_btn.click(generate_response, inputs=[query_input, history], outputs=[chatbot, history])
    app.launch()
# Run the Gradio app
gradio_app()
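# Optional: a minimal sketch of exercising the pipeline without the Gradio UI,
# assuming the hard-coded API credentials and TOGETHER_API_KEY above are valid.
# Left commented out so it does not run alongside app.launch().
# print(update_csv_files())
# print(initialize_embedding())
# chat_history, _ = generate_response("Which products are trending?", [])
# print(chat_history[-1][1])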