# AgriChatbot / other_function.py
import base64
import os
import uuid  # used by download_file below
from io import BytesIO
from urllib.parse import urlparse

import pandas as pd
import requests
from ai71 import AI71
from bs4 import BeautifulSoup
from inference_sdk import InferenceHTTPClient
from PIL import Image
from pypdf import PdfReader
from requests.auth import HTTPBasicAuth
UPLOAD_FOLDER = '/code/uploads'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

AI71_API_KEY = os.environ.get('AI71_API_KEY')
def generate_response(query, chat_history):
    response = ''
    for chunk in AI71(AI71_API_KEY).chat.completions.create(
        model="tiiuae/falcon-180b-chat",
        messages=[
            {"role": "system",
             "content": "You are an expert agricultural assistant. "
                        "Keep each response to at most two sentences. "
                        "Greet the user if the user greets you."},
            {"role": "user",
             "content": f"Answer the query based on the history {chat_history}: {query}"},
        ],
        stream=True,
    ):
        if chunk.choices[0].delta.content:
            response += chunk.choices[0].delta.content
    return response.replace("###", '').replace('\nUser:', '')
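
# Example usage (a minimal sketch; assumes AI71_API_KEY is set in the
# environment and that chat_history is whatever the caller accumulates):
#   reply = generate_response("When should I sow paddy?", chat_history=[])
#   print(reply)
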
class ConversationBufferMemory:
    def __init__(self, max_size):
        self.memory = []
        self.max_size = max_size

    def add_to_memory(self, interaction):
        self.memory.append(interaction)
        if len(self.memory) > self.max_size:
            self.memory.pop(0)  # Remove the oldest interaction

    def get_memory(self):
        return self.memory
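
# Example usage (sketch): pair the buffer with generate_response so each turn
# sees recent context. The max_size of 5 is an arbitrary choice here.
#   memory = ConversationBufferMemory(max_size=5)
#   memory.add_to_memory("User: hi | Bot: hello")
#   reply = generate_response("Which fertilizer suits tomatoes?", memory.get_memory())
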
def predict_pest(filepath):
    """Return the top pest detection for the image at `filepath` (Roboflow model)."""
    CLIENT = InferenceHTTPClient(
        api_url="https://detect.roboflow.com",
        api_key="oF1aC4b1FBCDtK8CoKx7"
    )
    result = CLIENT.infer(filepath, model_id="pest-detection-ueoco/1")
    return result['predictions'][0]


def predict_disease(filepath):
    """Return the top plant-disease class for the image at `filepath` (Roboflow model)."""
    CLIENT = InferenceHTTPClient(
        api_url="https://classify.roboflow.com",
        api_key="oF1aC4b1FBCDtK8CoKx7"
    )
    result = CLIENT.infer(filepath, model_id="plant-disease-detection-iefbi/1")
    return result['predicted_classes'][0]
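
# Example usage (sketch; the image path below is hypothetical and must exist
# locally before calling):
#   pest = predict_pest('/code/uploads/leaf.jpg')        # top detection dict
#   disease = predict_disease('/code/uploads/leaf.jpg')  # top class label
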
def convert_img(url, account_sid, auth_token):
    # Make the request to the media URL with authentication
    response = requests.get(url, auth=HTTPBasicAuth(account_sid, auth_token))
    response.raise_for_status()  # Raise an error for bad responses

    # Save the media content as a JPEG in the upload folder
    img_filepath = os.path.join(UPLOAD_FOLDER, "image.jpg")
    with open(img_filepath, 'wb') as file:
        file.write(response.content)

    print(f"Media downloaded successfully and saved as {img_filepath}")
    return img_filepath
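
# Example usage (sketch; the media URL, SID, and auth token below are
# placeholders for Twilio credentials, not real values):
#   path = convert_img(media_url, account_sid='ACxxxx', auth_token='token')
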
def get_weather(city):
    city = city.strip().replace(' ', '+')
    r = requests.get(f'https://www.google.com/search?q=weather+in+{city}')
    soup = BeautifulSoup(r.text, 'html.parser')
    temperature = soup.find('div', attrs={'class': 'BNeawe iBp4i AP7Wnd'}).text
    return temperature
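
# Example usage (sketch; scraping Google's weather card is fragile, and the
# CSS class above may change without notice):
#   print(get_weather('Chennai'))  # prints something like "31°C"
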
from zenrows import ZenRowsClient

# Initialize the ZenRows client with the API key from the environment
Zenrow_api = os.environ.get('Zenrow_api')
client = ZenRowsClient(str(Zenrow_api))
def get_rates():
    # URL to scrape: Tamil Nadu mandi prices
    url = "https://www.kisandeals.com/mandiprices/ALL/TAMIL-NADU/ALL"

    # Fetch the webpage content using ZenRows
    response = client.get(url)

    # Check if the request was successful
    if response.status_code == 200:
        # Parse the raw HTML content with BeautifulSoup
        soup = BeautifulSoup(response.content, 'html.parser')

        # Find the table rows containing the data
        rows = soup.select('table tbody tr')

        data = {}
        for row in rows:
            # Extract commodity and price from the first two columns
            columns = row.find_all('td')
            if len(columns) >= 2:
                commodity = columns[0].get_text(strip=True)
                price = columns[1].get_text(strip=True)
                if '₹' in price:
                    data[commodity] = price
        return str(data) + " These are the prices per 1 kg"
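
# Example usage (sketch; assumes the Zenrow_api environment variable is set;
# returns None implicitly if the request fails):
#   print(get_rates())  # stringified {commodity: price} mapping
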
def get_news():
    news = []
    # URL to scrape
    url = "https://economictimes.indiatimes.com/news/economy/agriculture?from=mdr"

    # Fetch the webpage content using ZenRows
    response = client.get(url)

    # Check if the request was successful
    if response.status_code == 200:
        # Parse the raw HTML content with BeautifulSoup
        soup = BeautifulSoup(response.content, 'html.parser')

        # Find the story blocks that contain the headlines
        headlines = soup.find_all("div", class_="eachStory")
        for story in headlines:
            # Extract the headline text
            headline = story.find('h3').text.strip()
            news.append(headline)
    return news
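
# Example usage (sketch; returns an empty list if the request fails):
#   for headline in get_news():
#       print(headline)
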
def download_and_save_as_txt(url, account_sid, auth_token):
    try:
        # Make the request to the media URL with authentication
        response = requests.get(url, auth=HTTPBasicAuth(account_sid, auth_token))
        response.raise_for_status()  # Raise an error for bad responses

        # Save the media content as a PDF (despite the function's name)
        pdf_filepath = os.path.join(UPLOAD_FOLDER, "pdf_file.pdf")
        with open(pdf_filepath, 'wb') as file:
            file.write(response.content)

        print(f"Media downloaded successfully and saved as {pdf_filepath}")
        return pdf_filepath
    except requests.exceptions.HTTPError as err:
        print(f"HTTP error occurred: {err}")
    except Exception as err:
        print(f"An error occurred: {err}")
def download_file(url, extension):
    try:
        response = requests.get(url)
        response.raise_for_status()
        filename = f"{uuid.uuid4()}{extension}"  # uses the `uuid` import added above
        file_path = os.path.join(UPLOAD_FOLDER, filename)
        with open(file_path, 'wb') as file:
            file.write(response.content)
        print(f"File downloaded and saved as {file_path}")
        return file_path
    except requests.exceptions.HTTPError as err:
        print(f"HTTP error occurred: {err}")
    except Exception as err:
        print(f"An error occurred: {err}")
    return None
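
# Example usage (sketch; the URL below is a placeholder):
#   path = download_file('https://example.com/report.pdf', '.pdf')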