import os
import base64
from io import BytesIO
from urllib.parse import urlparse

import requests
from requests.auth import HTTPBasicAuth
from bs4 import BeautifulSoup
from PIL import Image
import pandas as pd
from pypdf import PdfReader
from ai71 import AI71
from inference_sdk import InferenceHTTPClient
UPLOAD_FOLDER = '/code/uploads'
if not os.path.exists(UPLOAD_FOLDER):
    os.makedirs(UPLOAD_FOLDER)

AI71_API_KEY = os.environ.get('AI71_API_KEY')
def generate_response(query, chat_history):
    """Stream a reply from the Falcon-180B chat model and return it as a single string."""
    response = ''
    for chunk in AI71(AI71_API_KEY).chat.completions.create(
        model="tiiuae/falcon-180b-chat",
        messages=[
            {"role": "system",
             "content": "You are a helpful agricultural assistant. Keep every response to no more than two sentences. Greet the user if the user greets you."},
            {"role": "user",
             "content": f"Answer the query based on history {chat_history}: {query}"},
        ],
        stream=True,
    ):
        if chunk.choices[0].delta.content:
            response += chunk.choices[0].delta.content
    # Strip markdown headers and any echoed "User:" turns from the streamed text.
    return response.replace("###", '').replace('\nUser:', '')
class ConversationBufferMemory:
    """Fixed-size buffer that keeps only the most recent chat interactions."""
    def __init__(self, max_size=6):
        self.memory = []
        self.max_size = max_size

    def add_to_memory(self, interaction):
        self.memory.append(interaction)
        if len(self.memory) > self.max_size:
            self.memory.pop(0)  # Remove the oldest interaction

    def get_memory(self):
        return self.memory
def predict_pest(filepath):
    # Run the image through a Roboflow object-detection model and return the top prediction.
    CLIENT = InferenceHTTPClient(
        api_url="https://detect.roboflow.com",
        api_key="oF1aC4b1FBCDtK8CoKx7"
    )
    result = CLIENT.infer(filepath, model_id="pest-detection-ueoco/1")
    return result['predictions'][0]

def predict_disease(filepath):
    # Run the image through a Roboflow classification model and return the top class label.
    CLIENT = InferenceHTTPClient(
        api_url="https://classify.roboflow.com",
        api_key="oF1aC4b1FBCDtK8CoKx7"
    )
    result = CLIENT.infer(filepath, model_id="plant-disease-detection-iefbi/1")
    return result['predicted_classes'][0]
def convert_img(url, account_sid, auth_token):
    try:
        # Make the request to the media URL with authentication
        response = requests.get(url, auth=HTTPBasicAuth(account_sid, auth_token))
        response.raise_for_status()  # Raise an error for bad responses

        # Determine a filename from the URL
        parsed_url = urlparse(url)
        media_id = parsed_url.path.split('/')[-1]  # Last part of the URL path
        filename = f"downloaded_media_{media_id}"

        # Save the media content to a file
        media_filepath = os.path.join(UPLOAD_FOLDER, filename)
        with open(media_filepath, 'wb') as file:
            file.write(response.content)
        print(f"Media downloaded successfully and saved as {media_filepath}")

        # Convert the saved media file to a JPEG image in UPLOAD_FOLDER
        with open(media_filepath, 'rb') as img_file:
            image = Image.open(img_file)
            converted_filename = "image.jpg"
            converted_filepath = os.path.join(UPLOAD_FOLDER, converted_filename)
            image.convert('RGB').save(converted_filepath, 'JPEG')
        return converted_filepath
    except requests.exceptions.HTTPError as err:
        print(f"HTTP error occurred: {err}")
    except Exception as err:
        print(f"An error occurred: {err}")
def get_weather(city):
    # Scrape the temperature shown on Google's weather card for the given city.
    city = city.strip().replace(' ', "+")
    r = requests.get(f'https://www.google.com/search?q=weather+in+{city}')
    soup = BeautifulSoup(r.text, 'html.parser')
    temperature = soup.find('div', attrs={'class': 'BNeawe iBp4i AP7Wnd'}).text  # e.g. "75°F"
    degree = temperature[:-2]  # Drop the trailing degree symbol and unit
    # The scraped value is in Fahrenheit, so convert it to Celsius.
    celsius = str(round((int(degree) - 32) * 5 / 9, 1)) + temperature[-2] + 'C'
    return celsius
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}

def get_rates():
    # URL for fetching mandi prices for all states in India
    url = 'https://www.kisandeals.com/mandiprices/ALL/ALL/ALL'
    # Send the GET request with the browser-like User-Agent header defined above
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.text, 'html.parser')
    # Find the price table in the HTML
    table = soup.find('table')
    if table:
        # Convert the HTML table to a DataFrame
        df = pd.read_html(str(table))[0]
        # Drop the 'Quintal Price' column if it exists
        if 'Quintal Price' in df.columns:
            df.drop(columns=['Quintal Price'], inplace=True)
        # Build a commodity -> price dictionary from the first two columns
        d = {}
        for i in range(len(df)):
            d[df.iloc[i, 0]] = df.iloc[i, 1]
        return str(d) + ' These prices are for 1 kg'
    else:
        return "No table found on the page"