import os

import gradio as gr
import pandas as pd
import requests
from langchain.chains import LLMChain
from langchain.document_loaders import CSVLoader
from langchain.prompts import PromptTemplate
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_together import Together, TogetherEmbeddings
os.environ['TOGETHER_API_KEY'] = "your_api_key"
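# Note: in a deployed Space the key would typically come from a repository
# secret; reading it with os.environ.get("TOGETHER_API_KEY") avoids
# hard-coding credentials in the source file.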
# Initialize global variable for vectorstore
vectorstore = None
embeddings = TogetherEmbeddings(model="togethercomputer/m2-bert-80M-8k-retrieval")
llama3 = Together(model="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", max_tokens=1024)
def update_csv_files():
    """Log in to the HisabKarLay API and refresh the three report CSVs."""
    login_url = "https://livesystem.hisabkarlay.com/auth/login"
    payload = {
        "username": "user@123",
        "password": "user@123",
        "client_secret": "kNqJjlPkxyHdIKt3szCt4PYFWtFOdUheb8QVN8vQ",
        "client_id": "5",
        "grant_type": "password"
    }
    response = requests.post(login_url, data=payload)
    if response.status_code == 200:
        access_token = response.json()['access_token']
    else:
        return f"Failed to log in: {response.status_code}"

    # Profit/loss report
    report_url = "https://livesystem.hisabkarlay.com/connector/api/profit-loss-report"
    headers = {"Authorization": f"Bearer {access_token}"}
    response = requests.get(report_url, headers=headers)
    profit_loss_data = response.json()['data']
    keys = list(profit_loss_data.keys())
    # Drop unwanted fields; delete from the highest index first so the
    # earlier positions are not shifted by the removals.
    del keys[23]
    del keys[20]
    del keys[19]
    data_dict = {key: profit_loss_data.get(key) for key in keys}
    df = pd.DataFrame(data_dict, index=[0])
    df.to_csv('profit_loss.csv', index=False)

    # Purchase/sell report (skip the first two metadata entries)
    report_url = "https://livesystem.hisabkarlay.com/connector/api/purchase-sell"
    response = requests.get(report_url, headers=headers)
    sell_purchase_data = response.json()
    sell_purchase_data = dict(list(sell_purchase_data.items())[2:])
    df = pd.json_normalize(sell_purchase_data)
    df.to_csv('purchase_sell_report.csv', index=False)

    # Trending products report
    report_url = "https://livesystem.hisabkarlay.com/connector/api/trending-products"
    response = requests.get(report_url, headers=headers)
    trending_product_data = response.json()['data']
    df = pd.DataFrame(trending_product_data)
    df.columns = ['Product Units Sold', 'Product Name', 'Unit Type', 'SKU (Stock Keeping Unit)']
    df.to_csv('trending_product.csv', index=False)
    return "CSV files updated successfully!"
def initialize_embedding():
    """Load the three CSVs and build an in-memory vector index over their rows."""
    global vectorstore
    file_paths = ["profit_loss.csv", "purchase_sell_report.csv", "trending_product.csv"]
    documents = []
    for path in file_paths:
        loader = CSVLoader(path, encoding="windows-1252")
        documents.extend(loader.load())
    # Embed every CSV row and keep the vectors in memory; the index is lost
    # when the process restarts.
    vectorstore = InMemoryVectorStore.from_texts(
        [doc.page_content for doc in documents],
        embedding=embeddings,
    )
    return "Embeddings initialized successfully!"
def qa_chain(query):
    """Return the stored CSV rows most similar to the query."""
    if vectorstore is None:
        return "Please initialize the embeddings first."
    retriever = vectorstore.as_retriever()
    retrieved_documents = retriever.invoke(query)
    return retrieved_documents
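# The retriever above uses the vector store defaults; the number of rows pulled
# into the prompt can be tuned explicitly, e.g.:
#     retriever = vectorstore.as_retriever(search_kwargs={"k": 4})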
def generate_response(query, history):
    """Retrieve context for the query, ask the LLM, and append to chat history."""
    if vectorstore is None:
        # Keep the output shapes consistent with the success path: the Chatbot
        # component expects a list of [user, bot] pairs.
        history.append([query, "Please initialize the embeddings first."])
        return history, history
    retrieved_documents = qa_chain(query)
    chat_template = """
    You are a highly intelligent and professional AI assistant.
    Generate the response according to the user's query:
    Context: {retrieved_documents}
    Question: {query}
    """
    prompt = PromptTemplate(
        input_variables=['retrieved_documents', 'query'],
        template=chat_template
    )
    generated_chat = LLMChain(llm=llama3, prompt=prompt)
    response = generated_chat.invoke({'retrieved_documents': retrieved_documents, 'query': query})
    # History is a list of two-element [query, response] pairs; the same list
    # feeds both the Chatbot display and the State component.
    history.append([query, response['text']])
    return history, history
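# LLMChain is deprecated in recent LangChain releases; the same step can be
# written with the runnable composition syntax (sketch, same behavior assumed):
#     chain = prompt | llama3
#     text = chain.invoke({'retrieved_documents': retrieved_documents, 'query': query})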
def gradio_app():
    with gr.Blocks() as app:
        gr.Markdown("# Embedding and QA Interface")
        # Chat elements
        chatbot = gr.Chatbot(label="Chat")
        query_input = gr.Textbox(label="Enter your query")
        generate_response_btn = gr.Button("Generate Response")
        # Status outputs for the CSV update and embedding initialization
        update_csv_status = gr.Textbox(label="CSV Update Status", interactive=False)
        initialize_status = gr.Textbox(label="Embedding Initialization Status", interactive=False)
        # Buttons for the CSV update and embedding initialization
        update_csv_button = gr.Button("Update CSV Files")
        initialize_button = gr.Button("Initialize Embedding")
        # Button click actions
        update_csv_button.click(update_csv_files, outputs=update_csv_status)
        initialize_button.click(initialize_embedding, outputs=initialize_status)
        # Chat history lives in a State component so it persists between clicks
        history = gr.State([])
        generate_response_btn.click(generate_response, inputs=[query_input, history], outputs=[chatbot, history])
    app.launch()


# Run the Gradio app
gradio_app()
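# Typical flow once the interface is up: click "Update CSV Files" to pull fresh
# reports, then "Initialize Embedding", then ask questions in the chat box.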