Update app.py
app.py
CHANGED
@@ -3,7 +3,7 @@ from twilio.twiml.messaging_response import MessagingResponse
 from twilio.rest import Client
 import os
 import shutil
-
+from other_function import ConversationBufferMemory, generate_response, get_weather, get_rates, get_news, convert_img, predict_disease, predict_pest, download_and_save_as_txt, download_file
 from bs4 import BeautifulSoup
 import requests
 from requests.auth import HTTPBasicAuth
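The new import assumes an other_function.py module next to app.py; the rest of this diff deletes the matching definitions from app.py, so the commit is essentially a refactor that moves them out. A minimal sketch of the module surface this import expects, listing only the signatures of the removed definitions (their bodies are presumed to move over unchanged):

# other_function.py -- sketch of the assumed module layout; the bodies are the
# definitions deleted from app.py in the hunks below, moved over unchanged.
class ConversationBufferMemory:
    def __init__(self, max_size): ...
    def add_to_memory(self, interaction): ...
    def get_memory(self): ...

def generate_response(query, chat_history): ...              # Falcon-180B chat via AI71
def predict_pest(filepath): ...                              # Roboflow detection model
def predict_disease(filepath): ...                           # Roboflow classification model
def convert_img(url, account_sid, auth_token): ...           # fetch Twilio media, convert to JPEG
def get_weather(city): ...                                   # scrape Google weather snippet
def get_rates(): ...                                         # mandi prices via ZenRows
def get_news(): ...                                          # agriculture headlines via ZenRows
def download_and_save_as_txt(url, account_sid, auth_token): ...  # save incoming PDF media
def download_file(url, extension): ...                       # generic download helper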
@@ -25,19 +25,6 @@ if not os.path.exists(UPLOAD_FOLDER):
 
 app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
 
-
-class ConversationBufferMemory:
-    def __init__(self, max_size):
-        self.memory = []
-        self.max_size = max_size
-
-    def add_to_memory(self, interaction):
-        self.memory.append(interaction)
-        if len(self.memory) > self.max_size:
-            self.memory.pop(0)
-
-    def get_memory(self):
-        return self.memory
 conversation_memory = ConversationBufferMemory(max_size=2)
 
 account_sid = os.environ.get('TWILIO_ACCOUNT_SID')
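conversation_memory keeps only the last two exchanges for prompting; the class itself now comes from the other_function import. How the buffer pairs with generate_response is outside this diff, so the wiring and the variable names below are only an assumed usage sketch built from the imported helpers' signatures:

# Usage sketch (assumed wiring; the actual call site is not shown in this diff).
history = conversation_memory.get_memory()            # up to max_size past turns
reply = generate_response(incoming_msg, history)      # Falcon-180B completion via AI71
conversation_memory.add_to_memory({"user": incoming_msg, "bot": reply})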
@@ -52,147 +39,7 @@ Answer the question based only on the following context:
 Answer the question based on the above context: {question}
 """
 
-AI71_API_KEY = os.environ.get('AI71_API_KEY')
-
-def generate_response(query, chat_history):
-    response = ''
-    for chunk in AI71(AI71_API_KEY).chat.completions.create(
-        model="tiiuae/falcon-180b-chat",
-        messages=[
-            {"role": "system", "content": "You are the best agricultural assistant. Remember to give a response in not more than 2 sentences. Greet the user if the user greets you."},
-            {"role": "user", "content": f'''Answer the query based on history {chat_history}: {query}'''},
-        ],
-        stream=True,
-    ):
-        if chunk.choices[0].delta.content:
-            response += chunk.choices[0].delta.content
-    return response.replace("###", '').replace('\nUser:', '')
-
-def predict_pest(filepath):
-    CLIENT = InferenceHTTPClient(
-        api_url="https://detect.roboflow.com",
-        api_key="oF1aC4b1FBCDtK8CoKx7"
-    )
-    result = CLIENT.infer(filepath, model_id="pest-detection-ueoco/1")
-    return result['predictions'][0]
-
-
-def predict_disease(filepath):
-    CLIENT = InferenceHTTPClient(
-        api_url="https://classify.roboflow.com",
-        api_key="oF1aC4b1FBCDtK8CoKx7"
-    )
-    result = CLIENT.infer(filepath, model_id="plant-disease-detection-iefbi/1")
-    return result['predicted_classes'][0]
-
-def convert_img(url, account_sid, auth_token):
-    try:
-        response = requests.get(url, auth=HTTPBasicAuth(account_sid, auth_token))
-        response.raise_for_status()
-
-        parsed_url = urlparse(url)
-        media_id = parsed_url.path.split('/')[-1]
-        filename = f"downloaded_media_{media_id}"
-
-        media_filepath = os.path.join(UPLOAD_FOLDER, filename)
-        with open(media_filepath, 'wb') as file:
-            file.write(response.content)
-
-        print(f"Media downloaded successfully and saved as {media_filepath}")
-
-        with open(media_filepath, 'rb') as img_file:
-            image = Image.open(img_file)
-
-            converted_filename = f"image.jpg"
-            converted_filepath = os.path.join(UPLOAD_FOLDER, converted_filename)
-            image.convert('RGB').save(converted_filepath, 'JPEG')
-        return converted_filepath
-
-    except requests.exceptions.HTTPError as err:
-        print(f"HTTP error occurred: {err}")
-    except Exception as err:
-        print(f"An error occurred: {err}")
 
-def get_weather(city):
-    city = city.strip().replace(' ', '+')
-    r = requests.get(f'https://www.google.com/search?q=weather+in+{city}')
-    soup = BeautifulSoup(r.text, 'html.parser')
-    temperature = soup.find('div', attrs={'class': 'BNeawe iBp4i AP7Wnd'}).text
-    return temperature
-
-from zenrows import ZenRowsClient
-Zenrow_api = os.environ.get('Zenrow_api')
-zenrows_client = ZenRowsClient(Zenrow_api)
-
-def get_rates():
-    url = "https://www.kisandeals.com/mandiprices/ALL/TAMIL-NADU/ALL"
-    response = zenrows_client.get(url)
-
-    if response.status_code == 200:
-        soup = BeautifulSoup(response.content, 'html.parser')
-        rows = soup.select('table tbody tr')
-        data = {}
-        for row in rows:
-            columns = row.find_all('td')
-            if len(columns) >= 2:
-                commodity = columns[0].get_text(strip=True)
-                price = columns[1].get_text(strip=True)
-                if '₹' in price:
-                    data[commodity] = price
-        return str(data) + " These are the prices for 1 kg"
-
-def get_news():
-    news = []
-    url = "https://economictimes.indiatimes.com/news/economy/agriculture?from=mdr"
-    response = zenrows_client.get(url)
-
-    if response.status_code == 200:
-        soup = BeautifulSoup(response.content, 'html.parser')
-        headlines = soup.find_all("div", class_="eachStory")
-        for story in headlines:
-            headline = story.find('h3').text.strip()
-            news.append(headline)
-    return news
-
-def download_and_save_as_txt(url, account_sid, auth_token):
-    try:
-        response = requests.get(url, auth=HTTPBasicAuth(account_sid, auth_token))
-        response.raise_for_status()
-
-        parsed_url = urlparse(url)
-        media_id = parsed_url.path.split('/')[-1]
-        filename = f"pdf_file.pdf"
-
-        txt_filepath = os.path.join(UPLOAD_FOLDER, filename)
-        with open(txt_filepath, 'wb') as file:
-            file.write(response.content)
-
-        print(f"Media downloaded successfully and saved as {txt_filepath}")
-        return txt_filepath
-
-    except requests.exceptions.HTTPError as err:
-        print(f"HTTP error occurred: {err}")
-    except Exception as err:
-        print(f"An error occurred: {err}")
-
-def download_file(url, extension):
-    try:
-        response = requests.get(url)
-        response.raise_for_status()
-        filename = f"{uuid.uuid4()}{extension}"
-        file_path = os.path.join(UPLOAD_FOLDER, filename)
-
-        with open(file_path, 'wb') as file:
-            file.write(response.content)
-
-        print(f"File downloaded and saved as {file_path}")
-        return file_path
-
-    except requests.exceptions.HTTPError as err:
-        print(f"HTTP error occurred: {err}")
-    except Exception as err:
-        print(f"An error occurred: {err}")
-        return None
 
 @app.route('/whatsapp', methods=['POST'])
 def whatsapp_webhook():
@@ -209,7 +56,14 @@ def whatsapp_webhook():
         if content_type.startswith('image/'):
             # Handle image processing (disease/pest detection)
             filepath = convert_img(media_url, account_sid, auth_token)
-
+            if predict_disease(filepath):
+
+                response_text = predict_disease(filepath)
+            elif predict_pest(filepath):
+                response_text = predict_pest(filepath)
+            else:
+                response_text = "Please upload another image with better quality."
+
         else:
             # Handle PDF processing
             filepath = download_and_save_as_txt(media_url, account_sid, auth_token)
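The new inline branch calls predict_disease and predict_pest directly, but both helpers raise when Roboflow returns nothing (they index into the first prediction), which is why the removed handle_image helper in the next hunk wrapped them in try/except. A hedged sketch of the same guard applied to the new logic; detect_from_image is a hypothetical name, and each model is only called once:

# Sketch only -- guards the Roboflow calls the way the removed handle_image did,
# reusing predict_disease/predict_pest from other_function.
def detect_from_image(filepath):
    try:
        return f"Detected disease: {predict_disease(filepath)}"
    except Exception:
        pass
    try:
        return f"Detected pest: {predict_pest(filepath)}"
    except Exception:
        pass
    return "Please upload another image with better quality."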
@@ -233,28 +87,7 @@ def whatsapp_webhook():
     send_message(sender, response_text)
     return '', 204
 
-def handle_image(filepath):
-    try:
-        disease = predict_disease(filepath)
-    except:
-        disease = None
-    try:
-        pest = predict_pest(filepath)
-    except:
-        pest = None
 
-    if disease:
-        response_text = f"Detected disease: {disease}"
-        disease_info = generate_response(f"Provide brief information about {disease} in agriculture", "")
-        response_text += "\n" + disease_info
-    elif pest:
-        response_text = f"Detected pest: {pest}"
-        pest_info = generate_response(f"Provide brief information about {pest} in agriculture", "")
-        response_text += "\n" + pest_info
-    else:
-        response_text = "Sorry, I couldn't detect any disease or pest. Please try another image."
-
-    return response_text
 
 def process_and_query_pdf(filepath):
     # Read and process the PDF