Update main.py
main.py (CHANGED)
@@ -1,154 +1,100 @@
-import os
-import google.generativeai as genai
-from llama_index.llms import OpenAI
-from llama_index import VectorStoreIndex, SimpleDirectoryReader
-from llama_index.llms import OpenAI
-from llama_index import StorageContext, load_index_from_storage
-
-secret = os.environ["key"]
-
-genai.configure(api_key=secret)
-model = genai.GenerativeModel('gemini-1.5-flash')
-
-import user_guide_sync
-
-global index, query_engine
-query_engine = index = None
-#query_engine = (user_guide_sync.update_user_guide).as_query_engine()

 app = FastAPI()

 ]
-        if index is None:  # Check if the index is already loaded
-            from llama_index import StorageContext, load_index_from_storage  # Import necessary modules
-            storage_context = StorageContext.from_defaults(persist_dir="llama_index")
-            index = load_index_from_storage(storage_context=storage_context)
-            print("Index loaded")
-        else:
-            print("Index already loaded")
-        # Set up a retriever to fetch similar documents directly without full query processing
-        retriever = index.as_retriever()
-        # Retrieve the top similar documents based on the user query
-        similar_docs = retriever.retrieve(user_query)  # Adjust `top_k` as needed
-        # Prepare the context for the LLM by concatenating the content of similar documents
-        context = "\n\n".join([doc.node.text for doc in similar_docs])
-
-        prompt = f"""
-        context : {context}
-
-        user query : {user_query}
-
-        Instructions:
-        - First, understand the user question carefully.
-        - If you find the correct answer in the provided data, respond with detailed steps (1, 2, ...) and always include a more details link.
-        - If the correct answer is not found in the provided data, or the data cannot provide the correct solution to the user, then output only this: "contact our help desk". Don't add anything extra.
-        """
-        messages = [
-            {"role": "user", "content": prompt}
 ]
-        print("help desk option")
-
-        prompt = f"""
-        system:
-        You are the Parallax Technologies chatbot, designed to answer the user's question like a real human.
-        contact details support team link : https://projects.storemate.lk/customer Email : [email protected] Youtube : https://www.youtube.com/channel/UCFkX9Fa-Qe6Qi4V5f0RcfSA Facebook : https://www.facebook.com/storemateinventory web link : https://storemate.lk
-
-        Only give a single answer and don't answer general questions (this is a CRM system for POS-system clients only).
-        note : don't give any steps for solving issues, but give steps for system slowness and performance-related questions
-        user: {user_query}
-        """
-        messages = [
-            {"role": "system", "content": "You are the Parallax Technologies chatbot, designed to answer the user's question like a real human"},
-            {"role": "user", "content": prompt}
-        ]
-
-        #gen_response = model.generate_content(prompt)
-
-        gpt_response = openai.chat.completions.create(
-            model="gpt-4o-mini",
-            messages=messages,
-            temperature=0,
-        )
-
-        response.message(str(gpt_response.choices[0].message.content))
-        #response.message(gen_response.text)
-        #response.message(gen_response.text + "\n\n" + default)
-        return PlainTextResponse(str(response), media_type="application/xml")
-
-        response.message(str(gpt_response))
-        #response.message("https://storemate.lk")
-        return PlainTextResponse(str(response), media_type="application/xml")
-    except Exception as e:
-        print(str(e))
-        response.message("please ask again...!")
-        return PlainTextResponse(str(response), media_type="application/xml")
-
-# Run the application (Make sure you have the necessary setup to run FastAPI)
+import os
+import aiohttp
+import asyncio
+import requests
+from fastapi import FastAPI, HTTPException
+import openai

+# Initialize FastAPI app
 app = FastAPI()

+# Set your OpenAI API key here
+openai.api_key = os.environ["OPENAI_API_KEY"]
+
+# Authorization headers
+AUTH_HEADERS = {
+    'X-Tenant': 'royalexpress',
+    'Accept': 'application/json',
+    'Authorization': 'Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJhdWQiOiIxIiwianRpIjoiYjczMDAxYzMyMTE1ZGIyNTY4ODUzMTg2OWVjMzUwNTdjZmE3YTJmN2I3ZWZjMjQzNjdmZTA2ZTk4ZjY4ZjMwMTE0ZTIzOGUwY2I0ZmQ0YjIiLCJpYXQiOjE3MzA3ODIxNjEuMjkyNDM0LCJuYmYiOjE3MzA3ODIxNjEuMjkyNDM3LCJleHAiOjQ4ODY0NTU3NjEuMDc2ODExLCJzdWIiOiIxIiwic2NvcGVzIjpbXX0.I4wGFzoepmAC2RaADetE95BdbY4AYPUfUouFepVthZq_KewQLoYEiYMmxErgAOvYDL9IdhTg8pHm3KtCjtfF79Toigvzl-4RIYE9qwavVYabUaIMtxdkvLmzC2uSSxNkQ-Jx4ZsEVt34NpMMZ6ZsMsgszkreed_s7i5I6ek6T2-p9cZYPpFfGhlRIrgAhOL1yZe0t5HQMM7P1cULB7IMb3s0fvwLNBimPC4Iznick5o2lWO6KcubsKSAMyPwBaCQhjGTKd0eJCde1IvL8mEaMvhu8v853AIDSiBsC83hjK41hPAaiBHeev1JjdDhEd6_qO9dpucKaGCqYiVfBFH_pgnynErmhKlPEIz7sZlBWz8zxISDW5PRo9d-jXRP-A31W76Q3H-ZKfnam0D8yYFY0EIZHhvgvZUl3r0dR4PRh7PYlNZgnyfAcAYmK9Bektjbbx5RuzH6gtT9hLQrxYiQNg0irCNwgTYnuQ4AjPA3BpZuOfWtygeDZKgv1gnveTzMJG7T6s95k8yNSNT1_OfRQONPX8LBasRwZWCGkWj7fopO6K8gcrEU5FIpql0UviwGJOTZeFmqwWJ1AIcOM0MHWNp--Y8evHrvuNGk3SDcjBcvhF58I2Hd5F4MefN_ZB9N7oxUUDBYbxnTH6SN7Wx-VsluEOlf9ShfBNvHZaUi61E'
+}
+
+# Helper function to get the order ID from the waybill number
+def get_order_id(waybill_no):
+    url = f"https://dev3.api.curfox.parallaxtec.com/api/ml/order/{waybill_no}"
+    response = requests.get(url, headers=AUTH_HEADERS)
+    if response.status_code == 200:
+        return response.json().get("data", {}).get("id")
+    else:
+        raise HTTPException(status_code=404, detail="Order ID not found")
+
+# Asynchronous function to fetch data concurrently
+async def fetch_data(session, url):
+    async with session.get(url, headers=AUTH_HEADERS) as response:
+        return await response.text()
+
+# Asynchronous function to get all order data
+async def get_all_order_data(order_id):
+    urls = [
+        f"https://dev3.api.curfox.parallaxtec.com/api/ml/order/{order_id}/time-line",
+        f"https://dev3.api.curfox.parallaxtec.com/api/ml/order/{order_id}/finance-time-line",
+        f"https://dev3.api.curfox.parallaxtec.com/api/ml/order/{order_id}/invoice-time-line"
+    ]
+
+    async with aiohttp.ClientSession() as session:
+        tasks = [fetch_data(session, url) for url in urls]
+        responses = await asyncio.gather(*tasks)
+
+    full_data = {
+        "Timeline Details": responses[0],
+        "Finance Time Line": responses[1],
+        "Invoice Time Line": responses[2]
+    }
+    return full_data
+
+# Function to interact with OpenAI API (uses the pre-1.0 openai SDK interface)
+def ask_openai(messages):
+    response = openai.ChatCompletion.create(model="gpt-4o-mini", messages=messages)
+    return response.choices[0].message['content']
+
+# Main endpoint to handle user queries
+@app.post("/process_query/")
+async def process_query(query: str):
+    # Initial message to check for waybill number in the query
+    messages = [
+        {"role": "system", "content": "You are a helpful assistant for Curfox delivery system."},
+        {"role": "user", "content": f"""Check if an order number is present in the user query (e.g., CA000001).
+            Respond with exactly 'done' if present, otherwise prompt the user to provide the number. User query: {query}"""}
+    ]
+    result = ask_openai(messages)
+
+    if result == "done":
+        # Extract the waybill number
+        extract_message = [
+            {"role": "system", "content": "You are a helpful assistant for Curfox delivery system."},
+            {"role": "user", "content": f"Extract the waybill number from the query. User query: {query}"}
         ]
+        waybill_number = ask_openai(extract_message)
+
+        # Fetch order ID and order details
+        try:
+            order_id = get_order_id(waybill_number)
+            full_data = await get_all_order_data(order_id)
+        except HTTPException as e:
+            return {"error": str(e.detail)}
+
+        # Generate final response based on collected data
+        response_message = [
+            {"role": "system", "content": "You are a helpful assistant for Curfox delivery system."},
+            {"role": "user", "content": f"Answer based on the provided data only. Data: {full_data}. User query: {query}"}
         ]
+        final_answer = ask_openai(response_message)
+        return {"answer": final_answer}
+    else:
+        # If no order number is found, prompt the user
+        return {"message": result}
+
+# Start the FastAPI app
+# Run the app on Colab using Uvicorn
+# import uvicorn
+# uvicorn.run(app, host="0.0.0.0", port=8000)
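For reference, a minimal sketch of how the new /process_query/ endpoint could be exercised once the app is running (assuming a local Uvicorn server on port 8000; the waybill number and question below are made-up examples). Because query is declared as a plain str parameter on a POST route, FastAPI reads it from the query string rather than from a JSON body:

# client_example.py - hypothetical client for the /process_query/ endpoint
import requests

resp = requests.post(
    "http://localhost:8000/process_query/",
    params={"query": "What is the delivery status of waybill CA000001?"},
)
print(resp.json())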
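Note that openai.ChatCompletion.create only exists in the pre-1.0 openai package; on openai>=1.0 the helper would need the client-based interface instead. A minimal sketch of that variant of ask_openai, assuming the same model name and environment variable (not part of the committed code):

# Sketch: ask_openai rewritten for the openai>=1.0 SDK
from openai import OpenAI

client = OpenAI()  # picks up OPENAI_API_KEY from the environment

def ask_openai(messages):
    response = client.chat.completions.create(model="gpt-4o-mini", messages=messages)
    return response.choices[0].message.content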