# NOTE(review): the lines below are scrape residue from a Hugging Face Spaces
# file-viewer page (status "Runtime error", file size 13,553 bytes, commit
# 360b354, plus the viewer's line-number gutter). Converted to a comment so
# the module remains valid Python.
import json
import requests
from llama_index.core.agent import ReActAgent
from llama_index.core.indices.struct_store import JSONQueryEngine
from llama_index.core.tools import BaseTool, FunctionTool
from llama_index.core.llms import ChatMessage
from llama_index.core import PromptTemplate
# JSON Schema (draft-07) describing the analytics payload returned by the
# /fetch_data endpoint. It is embedded verbatim (via json.dumps) into the
# LLM prompt inside get_data() so the model knows which keys to read.
# NOTE(review): the descriptions for "number_of_order_canceled" and
# "number_of_order_completed" reuse the daywise-orders wording — they are
# prompt text, so changing them would change runtime behavior; confirm
# intended phrasing with the author before editing.
json_schema = {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "title": "RestaurantOrderData",
    "type": "object",
    "properties": {
        "restaurant_name": {
            "type": "string",
            "description": "Name of the restaurant"
        },
        # item name -> order count
        "number_of_times_item_ordered": {
            "type": "object",
            "description": "Number of times each item has been ordered. Each key is item name and value is number of times it get ordered",
            "additionalProperties": {
                "type": "integer",
                "description": "Count of times the item was ordered"
            }
        },
        # date -> order count
        "number_of_order_daywise": {
            "type": "object",
            "description": "Number of orders received each day. Each key is date and value is number of orders",
            "additionalProperties": {
                "type": "integer",
                "description": "Count of orders for the specific day"
            }
        },
        # date -> canceled-order count
        "number_of_order_canceled": {
            "type": "object",
            "description": "Number of orders received each day. Each key is date and value is number of orders get canceled",
            "additionalProperties": {
                "type": "integer",
                "description": "Count of canceled orders for the specific day"
            }
        },
        # date -> completed-order count
        "number_of_order_completed": {
            "type": "object",
            "description": "Number of orders received each day. Each key is date and value is number of orders get completed",
            "additionalProperties": {
                "type": "integer",
                "description": "Count of completed orders for the specific day"
            }
        },
        # aggregator name -> order count
        "number_of_order_aggregator_wise": {
            "type": "object",
            "description": "Number of orders received from each aggregator. Each key is aggregator name and value is number of orders get on that specific aggregator",
            "additionalProperties": {
                "type": "integer",
                "description": "Count of orders for the specific aggregator"
            }
        },
        "total_revenue": {
            "type": "number",
            "description": "Total revenue generated"
        },
        "total_orders": {
            "type": "integer",
            "description": "Total number of orders"
        },
        # date -> revenue
        "revenue_daywise": {
            "type": "object",
            "description": "Revenue generated each day. Each key is date and value is total revenue generated on that specific date",
            "additionalProperties": {
                "type": "number",
                "description": "Revenue for the specific day"
            }
        }
    },
    "required": [
        "restaurant_name",
        "number_of_times_item_ordered",
        "number_of_order_daywise",
        "number_of_order_canceled",
        "number_of_order_completed",
        "number_of_order_aggregator_wise",
        "total_revenue",
        "total_orders",
        "revenue_daywise"
    ]
}
def get_data(query: str, start_date: str, end_date: str) -> str:
    """Fetch restaurant order analytics and answer `query` against them.

    Calls the analytics backend for a fixed store/brand over the window
    [start_date, end_date) (the surrounding prompts instruct the model to
    pass end_date one day past the target date), then asks the LLM to
    answer the natural-language `query` over the returned JSON, using
    `json_schema` as a reading guide.

    Args:
        query: Natural-language question about the order data.
        start_date: Window start, "YYYY-MM-DD".
        end_date: Window end, "YYYY-MM-DD".

    Returns:
        The LLM's answer as a string.

    Raises:
        requests.HTTPError: if the backend returns a non-2xx status.
        requests.Timeout: if the backend does not respond within 30s.
    """
    print(f"QUERY :: {query}")
    print(f"START DATE :: {start_date}")
    print(f"END DATE :: {end_date}")
    # Store/brand ids are currently fixed; parameterize if more tenants
    # are ever needed. Letting requests build the query string keeps the
    # dates properly URL-encoded.
    base_url = "https://a03a-106-201-234-104.ngrok-free.app/fetch_data"
    params = {
        "store_id": "634fdb58ad4c218c52bfaf4f",
        "brand_id": "6347b5f0851f703b75b39ad0",
        "start_date": start_date,
        "end_date": end_date,
    }
    # GET with an explicit timeout; fail fast on HTTP errors instead of
    # silently feeding an error payload to the LLM.
    http_response = requests.get(
        base_url,
        params=params,
        headers={'accept': 'application/json'},
        timeout=30,
    )
    http_response.raise_for_status()
    order_data = http_response.json()
    print(order_data)

    def create_dynamic_prompt(user_query: str) -> str:
        """Build the analysis system prompt and ask the LLM to answer."""
        prompt = f"""
The following is a task for an intelligent assistant:
Here is the JSON with order details of a restaurant named "Wrap and Dip Cafe":
{json.dumps(order_data, indent=2)}
Given the JSON schema for reference:
{json.dumps(json_schema, indent=2)}
You are a JSON analysis engine designed to answer questions based on the given restaurant order data. The data includes various aspects such as the number of times each item has been ordered, the number of orders per day, cancellations, completions, orders by aggregator, total revenue, total orders, and daily revenue.
When asked a question, follow these steps:
1. Understand the question and identify the relevant parts of the JSON data.
2. Extract the necessary information from the JSON data.
3. Perform any required calculations or comparisons.
4. Provide a concise and accurate answer without including unnecessary details.
5. If you encounter more than one answer, provide them in a list.
6. Provide accurate, concise, and clear answers based on the JSON data provided.
7. I only want the response to be printed after 'So,the answer is' or 'Therefore,the answer is' and not the based on json data line.
Special attention should be given to queries about items ordered the most. These queries require looking into "number_of_times_item_ordered" and identifying the item(s) with the highest count.
Here are a few examples of questions you should be able to answer:
- "Which item is ordered the most?"
- Look into "number_of_times_item_ordered" and find the item with the highest count.
- "On which date was the highest revenue collected?"
- Look into "revenue_daywise" and find the date with the highest revenue.
- "How many orders were completed on 2024-04-22?"
- Look into "number_of_order_completed" for the value corresponding to "2024-04-22".
- "What is the total revenue generated?"
- Return the value from "total_revenue".
- "How many orders were canceled on 2024-03-13?"
- Look into "number_of_order_canceled" for the value corresponding to "2024-03-13".
- "Find the item with exactly 3 orders."
- Look into "number_of_times_item_ordered" and find the item(s) with a count of 3.
Use these examples to guide your responses to similar questions. If you encounter a new type of question, use the structure and examples to determine how to extract and compute the answer.
Remember, your goal is to provide accurate, concise, and clear answers based on the JSON data provided. Do not generate lengthy responses or include detailed breakdowns unless explicitly asked. Return only the direct answer to the question.
The user's query is as follows: "{user_query}"
"""
        messages = [
            ChatMessage(role="system", content=prompt),
            ChatMessage(role="user", content=user_query),
        ]
        # NOTE(review): `llm` is not defined in this file — assumed to be a
        # module-level llama_index LLM configured elsewhere; confirm.
        resp = llm.chat(messages)
        return resp.message.content

    answer = create_dynamic_prompt(query)
    print(answer)
    return answer
from datetime import datetime

# --- Step 1: contextualization --------------------------------------------
# Rewrite the raw user query into a standalone query with all date references
# resolved to YYYY-MM-DD, using the current timestamp as the reference point.
# (Removed a duplicate `from datetime import datetime` and a dead
# commented-out conversation-history formatter; added the missing "\n" after
# the first prompt sentence, which previously fused with the next one.)
prompt = (
    "You are a Contextualization engine. Assume queries are related to Restaurant Order Analytics.\n"
    "Rewrite the current user natural language query, to be a standalone natural language query that resolves all ambiguities.\n"
    "Do this using common sense and your best judgement, resolving ambiguities by incorporating all relevant information from the conversation history.\n"
    "Make minimal changes, and if no rewriting is required, then return the same final user query.\n"
    "It is essential that you directly focus on the user's final query.\n"
    "If query asked for any date related things, just convert into YYYY-MM-DD format.\n"
    "If nothing mentions about whether the user wants today's, yesterday's, or month's, then just attach today's context.\n"
    "If the keyword is mentioned then there is already a context so make it directly simplified.\n"
    "IMPORTANT: ONLY GIVE CONTEXTUALIZATION QUERY. NOTHING ELSE. REMOVE ALL UNNECESSARY THINGS FROM RESPONSE.\n"
    f'Note that the current date and time is: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n'
    "IMPORTANT : Ensure the date is in the format yyyy-mm-dd"
)
system_msg = {"role": "system", "content": prompt}
# The user's question is read from stdin.
user_msg = {"role": "user", "content": str(input())}
# NOTE(review): `client` is not defined in this file — presumably an
# ollama.Client (or compatible) created elsewhere; confirm before running.
response = client.chat(
    model="llama3.1",
    messages=[system_msg, user_msg],
)
context_query = response['message']['content']
print(context_query)
# --- Step 2: tool selection ------------------------------------------------
# Let the model choose start/end dates and call get_data(). The prompt
# insists end_date is one day past the target date because the backend
# treats end_date as exclusive.
# NOTE(review): the prompt wording below (including "perticular" and other
# typos) is kept verbatim — it is runtime LLM input; only the missing "\n"
# separators between the concatenated fragments were added, since adjacent
# string literals previously fused into one run-on sentence.
prompt = (
    "You are a Tool Calling engine. Assume queries are related to Restaurant Order Analytics.\n"
    f'Note that the current date and time is: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n'
    "Take start date and end date according how the question asked if week than take week day same goes for month and so on take it in your understanding how i gave it to you\n"
    "IMPORTANT : Example for today take start date of a day before and end date of today , same for if yesterday than the day before yesterday as start date and yesterday as end and so on\n"
    "If asked for a perticular date than need to take start date as that date and end date as the next date so it will give the the data of that perticular date.\n"
    "IMPORTANT: If the range is given then start date will be the first date and the end date will be the date after the last date.must need to take the end date plus 1 to get the data.\n"
    "STRICTLY:Always take end date after the day for which we are finding the data, means if the date is 2024-07-23 than end date must be plus one to the original date to find the data of that perticular date like end_date:2024-07-24 , and the start date is the date same date for which we are finding the data. \n"
    "IMPORTANT:must take the end date Plus one to the given date no matter what\n"
    "IMPORTANT : make sure the date is in the yyyy-mm-dd format"
)
messages = [{'role': 'system', 'content': prompt}]
messages.append({'role': 'user', 'content': context_query})
response = client.chat(
    model="llama3.1",
    messages=messages,
    tools=[
        {
            'type': 'function',
            'function': {
                'name': 'get_data',
                'description': "Get Today's Restaurant Order Information with detailed natural language query",
                'parameters': {
                    'type': 'object',
                    'properties': {
                        'query': {
                            'type': 'string',
                            'description': 'natural language query string to be proceed',
                        },
                        "start_date": {
                            "type": "string",
                            "description": "Start date in YYYY-MM-DD format",
                        },
                        "end_date": {
                            "type": "string",
                            "description": "End date in YYYY-MM-DD format",
                        },
                    },
                    # get_data() has no defaults for its date parameters, so
                    # all three must be required or the **kwargs call below
                    # raises TypeError when the model omits a date.
                    'required': ['query', 'start_date', 'end_date'],
                },
            },
        },
    ],
)
messages.append(response['message'])
if response['message'].get('tool_calls'):
    available_functions = {
        'get_data': get_data,
    }
    for tool in response['message']['tool_calls']:
        tool_name = tool['function']['name']
        function_to_call = available_functions.get(tool_name)
        if function_to_call is None:
            # The model can hallucinate tool names; skip the call instead
            # of crashing with a KeyError.
            print(f"Unknown tool requested: {tool_name}")
            continue
        function_args = tool['function']['arguments']
        function_response = function_to_call(**function_args)
        print(f"Func Res : {function_to_call} {function_response}")
        # Feed the tool output back into the conversation for Step 3.
        messages.append(
            {
                'role': 'tool',
                'content': function_response,
            }
        )
else:
    # No tool call: the model answered directly.
    print(response['message']['content'])
print(messages)
# --- Step 3: final summarization -------------------------------------------
# Collect every tool answer gathered above and have the model merge them into
# one cleaned-up response. (Fixed the misspelled `Combnined_Responce` name,
# replaced the manual append loop with a comprehension, and added the missing
# "\n" separators between the fused prompt fragments.)
tool_responses = [message['content'] for message in messages if message['role'] == 'tool']
combined_response = ' , '.join(tool_responses)
prompt = (
    "Merge the sentence in simplified way , dont over cook anything just a basic merger\n"
    "always show the numbers if provided.\n"
    "Never Compare.\n"
    "Just Give the relevent information , remove everything which is junk like Based on the JSON schema... and so on\n"
    "if the answer is too long without junk than provide the full answer in simplified way , and provide in the list also"
)
system_msg = {"role": "system", "content": prompt}
user_msg = {"role": "user", "content": combined_response}
response = client.chat(
    model="llama3.1",
    messages=[system_msg, user_msg],
)
context_query = response['message']['content']
print(context_query)