Spaces:
Sleeping
Sleeping
Neurolingua
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -4,36 +4,313 @@ from twilio.rest import Client
|
|
4 |
import os
|
5 |
import requests
|
6 |
from PIL import Image
|
7 |
-
import io
|
8 |
-
import uuid
|
9 |
import shutil
|
10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
app = Flask(__name__)
|
12 |
UPLOAD_FOLDER = '/code/uploads'
|
|
|
|
|
|
|
13 |
if not os.path.exists(UPLOAD_FOLDER):
|
14 |
os.makedirs(UPLOAD_FOLDER)
|
15 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
conversation_memory = ConversationBufferMemory(max_size=2)
|
17 |
|
18 |
account_sid = os.environ.get('TWILIO_ACCOUNT_SID')
|
19 |
auth_token = os.environ.get('TWILIO_AUTH_TOKEN')
|
20 |
client = Client(account_sid, auth_token)
|
21 |
-
# WhatsApp number to send messages from (your Twilio number)
|
22 |
from_whatsapp_number = 'whatsapp:+14155238886'
|
23 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
24 |
@app.route('/whatsapp', methods=['POST'])
|
25 |
def whatsapp_webhook():
|
26 |
incoming_msg = request.values.get('Body', '').lower()
|
27 |
sender = request.values.get('From')
|
28 |
-
# Check if an image is attached
|
29 |
num_media = int(request.values.get('NumMedia', 0))
|
30 |
-
|
31 |
-
# Get the chat history
|
32 |
chat_history = conversation_memory.get_memory()
|
33 |
-
|
34 |
if num_media > 0:
|
35 |
media_url = request.values.get('MediaUrl0')
|
36 |
-
response_text=media_url
|
37 |
content_type = request.values.get('MediaContentType0')
|
38 |
if content_type.startswith('image/'):
|
39 |
filepath = convert_img(media_url, account_sid, auth_token)
|
@@ -45,54 +322,43 @@ def whatsapp_webhook():
|
|
45 |
pest = predict_pest(filepath)
|
46 |
except:
|
47 |
pest = None
|
48 |
-
|
49 |
if disease:
|
50 |
response_text = f"Detected disease: {disease}"
|
51 |
-
# Generate additional insights about the disease
|
52 |
disease_info = generate_response(f"Provide brief information about {disease} in plants", chat_history)
|
53 |
response_text += f"\n\nAdditional information: {disease_info}"
|
54 |
elif pest:
|
55 |
response_text = f"Detected pest: {pest}"
|
56 |
-
# Generate additional insights about the pest
|
57 |
pest_info = generate_response(f"Provide brief information about {pest} in agriculture", chat_history)
|
58 |
response_text += f"\n\nAdditional information: {pest_info}"
|
59 |
else:
|
60 |
response_text = "Please upload another image with good quality."
|
61 |
-
|
|
|
|
|
|
|
62 |
else:
|
63 |
filepath = download_and_save_as_txt(media_url, account_sid, auth_token)
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
elif ('weather' in incoming_msg.lower()) or ('climate' in incoming_msg.lower()) or ('temperature' in incoming_msg.lower()):
|
75 |
-
response_text=get_weather(incoming_msg.lower())
|
76 |
elif 'bookkeeping' in incoming_msg:
|
77 |
response_text = "Please provide the details you'd like to record."
|
78 |
-
elif ('rates' in incoming_msg.lower()) or ('price' in incoming_msg.lower()) or (
|
79 |
-
|
80 |
-
|
|
|
81 |
elif ('news' in incoming_msg.lower()) or ('information' in incoming_msg.lower()):
|
82 |
-
news=get_news()
|
83 |
-
response_text = generate_response(incoming_msg+'data is '+str(news), chat_history)
|
84 |
-
|
85 |
else:
|
86 |
-
# Generate response considering the chat history
|
87 |
response_text = generate_response(incoming_msg, chat_history)
|
88 |
-
|
89 |
-
# Add the interaction to memory
|
90 |
conversation_memory.add_to_memory({"user": incoming_msg, "assistant": response_text})
|
91 |
-
|
92 |
send_message(sender, response_text)
|
93 |
return '', 204
|
94 |
-
|
95 |
-
return generate_response(query)
|
96 |
|
97 |
def send_message(to, body):
|
98 |
try:
|
@@ -105,15 +371,15 @@ def send_message(to, body):
|
|
105 |
except Exception as e:
|
106 |
print(f"Error sending message: {e}")
|
107 |
|
108 |
-
|
109 |
def send_initial_message(to_number):
|
110 |
send_message(
|
111 |
f'whatsapp:{to_number}',
|
112 |
'Welcome to the Agri AI Chatbot! How can I assist you today?'
|
113 |
)
|
114 |
|
|
|
115 |
if __name__ == '__main__':
|
116 |
-
send_initial_message('916382792828')
|
117 |
send_initial_message('919080522395')
|
118 |
-
|
119 |
-
app.run(host='0.0.0.0', port=7860)
|
|
|
4 |
import os
|
5 |
import requests
|
6 |
from PIL import Image
|
|
|
|
|
7 |
import shutil
|
8 |
+
|
9 |
+
from langchain.vectorstores.chroma import Chroma
|
10 |
+
from langchain.prompts import ChatPromptTemplate
|
11 |
+
from langchain_community.llms.ollama import Ollama
|
12 |
+
from get_embedding_function import get_embedding_function
|
13 |
+
from langchain.document_loaders.pdf import PyPDFDirectoryLoader
|
14 |
+
from langchain_text_splitters import RecursiveCharacterTextSplitter
|
15 |
+
from langchain.schema.document import Document
|
16 |
+
|
17 |
app = Flask(__name__)

# Directory where incoming WhatsApp media (images / PDFs) is stored.
UPLOAD_FOLDER = '/code/uploads'
# exist_ok avoids the check-then-create race; the original performed the
# exists()/makedirs() dance twice in a row.
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
|
25 |
+
class ConversationBufferMemory:
    """Fixed-capacity rolling buffer of chat interactions.

    Holds at most ``max_size`` entries; when the buffer overflows, the
    oldest entry is discarded.
    """

    def __init__(self, max_size=6):
        self.max_size = max_size
        self.memory = []

    def add_to_memory(self, interaction):
        """Append *interaction*, evicting the oldest entry on overflow."""
        self.memory.append(interaction)
        if len(self.memory) > self.max_size:
            # Drop the oldest interaction to stay within capacity.
            del self.memory[0]

    def get_memory(self):
        """Return the buffered interactions, oldest first."""
        return self.memory
|
37 |
# Shared rolling chat history; only the 2 most recent turns are retained.
conversation_memory = ConversationBufferMemory(max_size=2)

# Twilio credentials are read from the environment; if either env var is
# unset, Client(None, None) will fail on first API call at runtime.
account_sid = os.environ.get('TWILIO_ACCOUNT_SID')
auth_token = os.environ.get('TWILIO_AUTH_TOKEN')
client = Client(account_sid, auth_token)
# BUG(review): the name `client` is rebound to a ZenRowsClient later in
# this file, clobbering this Twilio client — verify that send_message
# still uses the Twilio client it expects.
# WhatsApp number to send messages from (Twilio sandbox number).
from_whatsapp_number = 'whatsapp:+14155238886'
|
43 |
|
44 |
+
# Location of the persisted Chroma vector store used by query_rag().
CHROMA_PATH = "chroma"
# Directory where uploaded PDFs are saved before being indexed.
DATA_PATH = "data"
# Prompt used by query_rag(): retrieved chunks fill {context} and the
# user's query fills {question}.
PROMPT_TEMPLATE = """
Answer the question based only on the following context:

{context}

---

Answer the question based on the above context: {question}
"""
|
55 |
+
|
56 |
+
import os
|
57 |
+
from bs4 import BeautifulSoup
|
58 |
+
import requests
|
59 |
+
from requests.auth import HTTPBasicAuth
|
60 |
+
from PIL import Image
|
61 |
+
from io import BytesIO
|
62 |
+
import pandas as pd
|
63 |
+
from urllib.parse import urlparse
|
64 |
+
import os
|
65 |
+
from pypdf import PdfReader
|
66 |
+
from ai71 import AI71
|
67 |
+
import os
|
68 |
+
|
69 |
+
import pandas as pd
|
70 |
+
|
71 |
+
from inference_sdk import InferenceHTTPClient
|
72 |
+
import base64
|
73 |
+
|
74 |
+
|
75 |
+
|
76 |
+
AI71_API_KEY = os.environ.get('AI71_API_KEY')


def generate_response(query, chat_history):
    """Generate a short answer for *query* via the AI71 Falcon-180B chat API.

    The prior conversation (*chat_history*) is folded into the user prompt
    so the model can handle follow-up questions. Streams the completion
    and returns the accumulated text with "###" markers and trailing
    "\\nUser:" echoes stripped.
    """
    system_prompt = (
        "You are a best agricultural assistant."
        "Remember to give response not more than 2 sentence."
        "Greet the user if user greets you."
    )
    stream = AI71(AI71_API_KEY).chat.completions.create(
        model="tiiuae/falcon-180b-chat",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user",
             "content": f"Answer the query based on history {chat_history}:{query}"},
        ],
        stream=True,
    )
    # Accumulate streamed deltas; join once at the end.
    pieces = []
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            pieces.append(delta)
    return "".join(pieces).replace("###", '').replace('\nUser:', '')
|
91 |
+
|
92 |
+
def predict_pest(filepath):
    """Run the Roboflow pest-detection model on the image at *filepath*.

    Returns the first prediction entry; raises (IndexError/KeyError) when
    the model returns no predictions — the caller wraps this in try/except.
    """
    # SECURITY: the API key was hard-coded; prefer ROBOFLOW_API_KEY from
    # the environment, keeping the original literal as a backward-compatible
    # fallback.
    inference_client = InferenceHTTPClient(
        api_url="https://detect.roboflow.com",
        api_key=os.environ.get("ROBOFLOW_API_KEY", "oF1aC4b1FBCDtK8CoKx7"),
    )
    result = inference_client.infer(filepath, model_id="pest-detection-ueoco/1")
    return result['predictions'][0]
|
99 |
+
|
100 |
+
|
101 |
+
def predict_disease(filepath):
    """Run the Roboflow plant-disease classifier on the image at *filepath*.

    Returns the top predicted class name; raises (IndexError/KeyError) when
    nothing is classified — the caller wraps this in try/except.
    """
    # SECURITY: the API key was hard-coded; prefer ROBOFLOW_API_KEY from
    # the environment, keeping the original literal as a backward-compatible
    # fallback.
    inference_client = InferenceHTTPClient(
        api_url="https://classify.roboflow.com",
        api_key=os.environ.get("ROBOFLOW_API_KEY", "oF1aC4b1FBCDtK8CoKx7"),
    )
    result = inference_client.infer(filepath, model_id="plant-disease-detection-iefbi/1")
    return result['predicted_classes'][0]
|
108 |
+
|
109 |
+
def convert_img(url, account_sid, auth_token):
    """Download a Twilio media image and save it as UPLOAD_FOLDER/image.jpg.

    Returns the converted JPEG's path, or None when the download or
    conversion fails (errors are printed, matching the original behavior).
    """
    try:
        # Twilio media URLs require basic auth with the account credentials.
        response = requests.get(url, auth=HTTPBasicAuth(account_sid, auth_token))
        response.raise_for_status()  # Raise an error for bad responses

        # Name the raw download after the media id (last URL path segment).
        media_id = urlparse(url).path.split('/')[-1]
        media_filepath = os.path.join(UPLOAD_FOLDER, f"downloaded_media_{media_id}")
        with open(media_filepath, 'wb') as file:
            file.write(response.content)
        print(f"Media downloaded successfully and saved as {media_filepath}")

        # BUG FIX: PIL decodes lazily, so the pixels must be loaded while
        # the source file is still open — the original called .convert()
        # after the `with` block had already closed the file handle.
        with open(media_filepath, 'rb') as img_file:
            image = Image.open(img_file)
            image.load()

        # Normalize to RGB JPEG at a fixed name (overwritten per upload).
        converted_filepath = os.path.join(UPLOAD_FOLDER, "image.jpg")
        image.convert('RGB').save(converted_filepath, 'JPEG')
        return converted_filepath

    except requests.exceptions.HTTPError as err:
        print(f"HTTP error occurred: {err}")
    except Exception as err:
        print(f"An error occurred: {err}")
|
141 |
+
def get_weather(city):
    """Scrape Google search for the current weather text for *city*.

    Returns the temperature string, or a fallback message when the
    expected element is missing (the original raised AttributeError in
    that case, which crashed the webhook).
    """
    city = city.strip().replace(' ', "+")
    r = requests.get(f'https://www.google.com/search?q=weather+in+{city}')

    soup = BeautifulSoup(r.text, 'html.parser')
    # NOTE(review): this class name is Google-internal markup and brittle.
    node = soup.find('div', attrs={'class': 'BNeawe iBp4i AP7Wnd'})
    if node is None:
        return "Sorry, I couldn't fetch the weather right now."
    return node.text
|
150 |
+
|
151 |
+
|
152 |
+
from zenrows import ZenRowsClient
from bs4 import BeautifulSoup
# ZenRows API key; str() guards against None, but then "None" (the
# literal string) is sent as the key when the env var is unset.
Zenrow_api=os.environ.get('Zenrow_api')
# Initialize ZenRows client with your API key
# BUG(review): this rebinds the module-level name `client`, clobbering the
# Twilio Client created earlier — send_message would then operate on a
# ZenRowsClient. Rename to e.g. `zenrows_client` and update get_rates()/
# get_news() accordingly.
client = ZenRowsClient(str(Zenrow_api))
|
157 |
+
|
158 |
+
def get_rates():
    """Scrape kisandeals.com for Tamil Nadu mandi (market) prices.

    Returns a stringified {commodity: price} mapping plus a per-kg note,
    or a fallback message when the page cannot be fetched (the original
    returned None on a non-200 response, which the webhook then sent).
    """
    # URL to scrape
    url = "https://www.kisandeals.com/mandiprices/ALL/TAMIL-NADU/ALL"

    # Fetch the webpage content using ZenRows
    response = client.get(url)
    if response.status_code != 200:
        return "Sorry, market rates are unavailable right now."

    # Parse the raw HTML content with BeautifulSoup
    soup = BeautifulSoup(response.content, 'html.parser')

    data = {}
    for row in soup.select('table tbody tr'):
        columns = row.find_all('td')
        if len(columns) >= 2:
            commodity = columns[0].get_text(strip=True)
            price = columns[1].get_text(strip=True)
            # Keep only rows that actually carry a rupee price.
            if '₹' in price:
                data[commodity] = price
    return str(data)+" This are the prices for 1 kg"
|
181 |
+
|
182 |
+
|
183 |
+
|
184 |
+
|
185 |
+
def get_news():
    """Scrape Economic Times agriculture headlines.

    Returns a list of headline strings; empty when the fetch fails (the
    original returned None on a non-200 response, breaking the caller's
    str(news) formatting expectations only cosmetically, but a list is
    consistent).
    """
    news = []
    url = "https://economictimes.indiatimes.com/news/economy/agriculture?from=mdr"

    # Fetch the webpage content using ZenRows
    response = client.get(url)

    if response.status_code == 200:
        # Parse the raw HTML content with BeautifulSoup
        soup = BeautifulSoup(response.content, 'html.parser')

        for story in soup.find_all("div", class_="eachStory"):
            headline_tag = story.find('h3')
            # Skip stories without an <h3> headline instead of crashing.
            if headline_tag is not None:
                news.append(headline_tag.text.strip())
    # Always return the list (possibly empty) rather than None.
    return news
|
204 |
+
|
205 |
+
|
206 |
+
|
207 |
+
def download_and_save_as_txt(url, account_sid, auth_token):
    """Download Twilio media verbatim to UPLOAD_FOLDER/pdf_file.pdf.

    Despite the historical name, the payload is written as-is under a
    fixed PDF filename (each upload overwrites the last). Returns the
    saved path, or None when the download fails (errors are printed).
    """
    try:
        # Make the request to the media URL with authentication
        response = requests.get(url, auth=HTTPBasicAuth(account_sid, auth_token))
        response.raise_for_status()  # Raise an error for bad responses

        # Fixed destination name (the original computed an unused media id
        # from the URL; that dead code is removed).
        txt_filepath = os.path.join(UPLOAD_FOLDER, "pdf_file.pdf")
        with open(txt_filepath, 'wb') as file:
            file.write(response.content)

        print(f"Media downloaded successfully and saved as {txt_filepath}")
        return txt_filepath

    except requests.exceptions.HTTPError as err:
        print(f"HTTP error occurred: {err}")
    except Exception as err:
        print(f"An error occurred: {err}")
|
230 |
+
def query_rag(query_text: str):
    """Answer *query_text* using the Chroma store and a local llama2 model.

    Retrieves the 5 most similar chunks, formats them into
    PROMPT_TEMPLATE, and returns the model's response text.
    """
    db = Chroma(
        persist_directory=CHROMA_PATH,
        embedding_function=get_embedding_function(),
    )
    # Top-5 most similar chunks become the prompt context.
    scored_docs = db.similarity_search_with_score(query_text, k=5)
    context_text = "\n\n---\n\n".join(doc.page_content for doc, _score in scored_docs)

    prompt = ChatPromptTemplate.from_template(PROMPT_TEMPLATE).format(
        context=context_text, question=query_text
    )
    return Ollama(model="llama2").invoke(prompt)
|
240 |
+
|
241 |
+
|
242 |
+
def save_pdf_and_update_database(media_url):
    """Download the PDF at *media_url* into DATA_PATH and index it in Chroma.

    Every PDF in DATA_PATH is (re)loaded and chunked, then only the
    not-yet-indexed chunks are added via add_to_chroma().
    """
    # BUG FIX: `import uuid` was removed from the module imports in this
    # revision, so uuid.uuid4() below raised NameError at runtime.
    import uuid

    # NOTE(review): unlike convert_img/download_and_save_as_txt, this fetch
    # carries no Twilio basic-auth — confirm the media URL is publicly
    # readable, otherwise the auth credentials must be passed here too.
    response = requests.get(media_url)
    # Ensure the target directory exists before writing.
    os.makedirs(DATA_PATH, exist_ok=True)
    pdf_filename = os.path.join(DATA_PATH, f"{uuid.uuid4()}.pdf")
    with open(pdf_filename, 'wb') as f:
        f.write(response.content)

    # Use PyPDFDirectoryLoader if you want to process multiple PDFs in a directory
    documents = PyPDFDirectoryLoader(DATA_PATH).load()

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=800,
        chunk_overlap=80,
        length_function=len,
        is_separator_regex=False,
    )
    chunks = text_splitter.split_documents(documents)

    add_to_chroma(chunks)
|
263 |
+
|
264 |
+
|
265 |
+
def add_to_chroma(chunks: list[Document]):
    """Insert only the not-yet-indexed *chunks* into the Chroma store.

    Chunk ids come from calculate_chunk_ids(); any chunk whose id is
    already present in the store is skipped.
    """
    db = Chroma(
        persist_directory=CHROMA_PATH, embedding_function=get_embedding_function()
    )
    chunks_with_ids = calculate_chunk_ids(chunks)

    # include=[] fetches only the ids — cheapest way to learn what exists.
    existing_ids = set(db.get(include=[])["ids"])
    fresh = [c for c in chunks_with_ids if c.metadata["id"] not in existing_ids]

    if fresh:
        db.add_documents(fresh, ids=[c.metadata["id"] for c in fresh])
        db.persist()
|
279 |
+
|
280 |
+
|
281 |
+
def calculate_chunk_ids(chunks):
    """Assign each chunk a stable id of the form "source:page:index".

    The index is 0 for the first chunk of a (source, page) pair and
    increments for each consecutive chunk on the same page. Mutates each
    chunk's metadata in place and returns the same list.
    """
    previous_page_id = None
    index_on_page = 0

    for chunk in chunks:
        meta = chunk.metadata
        page_id = f"{meta.get('source')}:{meta.get('page')}"

        # Consecutive chunks from the same page get increasing indices;
        # a new page resets the counter.
        index_on_page = index_on_page + 1 if page_id == previous_page_id else 0

        meta["id"] = f"{page_id}:{index_on_page}"
        previous_page_id = page_id

    return chunks
|
301 |
+
|
302 |
+
|
303 |
@app.route('/whatsapp', methods=['POST'])
|
304 |
def whatsapp_webhook():
|
305 |
incoming_msg = request.values.get('Body', '').lower()
|
306 |
sender = request.values.get('From')
|
|
|
307 |
num_media = int(request.values.get('NumMedia', 0))
|
308 |
+
|
|
|
309 |
chat_history = conversation_memory.get_memory()
|
310 |
+
|
311 |
if num_media > 0:
|
312 |
media_url = request.values.get('MediaUrl0')
|
313 |
+
response_text = media_url
|
314 |
content_type = request.values.get('MediaContentType0')
|
315 |
if content_type.startswith('image/'):
|
316 |
filepath = convert_img(media_url, account_sid, auth_token)
|
|
|
322 |
pest = predict_pest(filepath)
|
323 |
except:
|
324 |
pest = None
|
325 |
+
|
326 |
if disease:
|
327 |
response_text = f"Detected disease: {disease}"
|
|
|
328 |
disease_info = generate_response(f"Provide brief information about {disease} in plants", chat_history)
|
329 |
response_text += f"\n\nAdditional information: {disease_info}"
|
330 |
elif pest:
|
331 |
response_text = f"Detected pest: {pest}"
|
|
|
332 |
pest_info = generate_response(f"Provide brief information about {pest} in agriculture", chat_history)
|
333 |
response_text += f"\n\nAdditional information: {pest_info}"
|
334 |
else:
|
335 |
response_text = "Please upload another image with good quality."
|
336 |
+
elif content_type == "application/pdf":
|
337 |
+
# Process the PDF and update the database
|
338 |
+
save_pdf_and_update_database(media_url)
|
339 |
+
response_text = "Your PDF has been saved and processed."
|
340 |
else:
|
341 |
filepath = download_and_save_as_txt(media_url, account_sid, auth_token)
|
342 |
+
response_text = query_rag(filepath)
|
343 |
+
elif ('weather' in incoming_msg.lower()) or ('climate' in incoming_msg.lower()) or (
|
344 |
+
'temperature' in incoming_msg.lower()):
|
345 |
+
response_text = get_weather(incoming_msg.lower())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
346 |
elif 'bookkeeping' in incoming_msg:
|
347 |
response_text = "Please provide the details you'd like to record."
|
348 |
+
elif ('rates' in incoming_msg.lower()) or ('price' in incoming_msg.lower()) or (
|
349 |
+
'market' in incoming_msg.lower()) or ('rate' in incoming_msg.lower()) or ('prices' in incoming_msg.lower()):
|
350 |
+
rates = get_rates()
|
351 |
+
response_text = generate_response(incoming_msg + ' data is ' + rates, chat_history)
|
352 |
elif ('news' in incoming_msg.lower()) or ('information' in incoming_msg.lower()):
|
353 |
+
news = get_news()
|
354 |
+
response_text = generate_response(incoming_msg + ' data is ' + str(news), chat_history)
|
|
|
355 |
else:
|
|
|
356 |
response_text = generate_response(incoming_msg, chat_history)
|
357 |
+
|
|
|
358 |
conversation_memory.add_to_memory({"user": incoming_msg, "assistant": response_text})
|
|
|
359 |
send_message(sender, response_text)
|
360 |
return '', 204
|
361 |
+
|
|
|
362 |
|
363 |
def send_message(to, body):
|
364 |
try:
|
|
|
371 |
except Exception as e:
|
372 |
print(f"Error sending message: {e}")
|
373 |
|
374 |
+
|
375 |
def send_initial_message(to_number):
    """Send the welcome greeting to *to_number* (bare digits, no prefix)."""
    greeting = 'Welcome to the Agri AI Chatbot! How can I assist you today?'
    send_message(f'whatsapp:{to_number}', greeting)
|
380 |
|
381 |
+
|
382 |
if __name__ == '__main__':
    #send_initial_message('916382792828')
    # NOTE(review): hard-coded operator phone number — consider reading it
    # from an environment variable instead.
    send_initial_message('919080522395')
    # Port 7860 is the Hugging Face Spaces default; 0.0.0.0 exposes the
    # app inside the container.
    app.run(host='0.0.0.0', port=7860)
|
|