from langchain.tools import tool
import requests
from pydantic import BaseModel, Field
import datetime
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
import pandas as pd

# `data` is assumed to be a pandas DataFrame of fund records (with 'Fund Name',
# 'Region', and 'Profit Margin' columns) loaded elsewhere in this project.

@tool
def filter_funds(text: str) -> str:
    """
    Filter the fund dataframe to find funds located in the specified region.

    Parameters:
    text (str): The region to filter on.

    Returns:
    str: The rows for funds in the {text} region, rendered as text.
    """
    # Render the filtered frame as a string so the tool honors its str contract
    return data[data['Region'] == text].to_string(index=False)

@tool
def max_min_profit_margin_funds() -> str:
    """
    Return the fund names with the highest and lowest profit margins.
    This function handles potential errors and ensures data consistency.
    """
    try:
        # Convert the Profit Margin column to numeric on a copy, handling errors
        # gracefully (and avoiding mutation of the shared `data` frame, which
        # would make a second call fail on the already-numeric column)
        clean_data = data.copy()
        clean_data['Profit Margin'] = pd.to_numeric(
            clean_data['Profit Margin'].astype(str).str.replace('%', ''),
            errors='coerce'
        )

        # Drop rows with NaN values in the Profit Margin column
        clean_data = clean_data.dropna(subset=['Profit Margin'])

        if clean_data.empty:
            return "No valid profit margin data available."

        # Find the funds with the highest and lowest profit margins
        max_profit_margin_fund = clean_data.loc[clean_data['Profit Margin'].idxmax(), 'Fund Name']
        min_profit_margin_fund = clean_data.loc[clean_data['Profit Margin'].idxmin(), 'Fund Name']

        return (f"Highest Profit Margin Fund: {max_profit_margin_fund}, "
                f"Lowest Profit Margin Fund: {min_profit_margin_fund}")
    except Exception as e:
        return f"An error occurred: {str(e)}"

from bs4 import BeautifulSoup
import random

# List of different headers to mimic various browser requests
user_agents = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Safari/605.1.15",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1"
]

@tool
def gresb(query: str) -> str:
    """Search for the query on the GRESB website and extract article content if available."""
    base_url = "https://www.gresb.com/nl-en?s="
    search_url = f"{base_url}{query.replace(' ', '+')}"

    # Select a random User-Agent header
    headers = {
        "User-Agent": random.choice(user_agents)
    }

    # Make a request to the search URL with headers
    response = requests.get(search_url, headers=headers)

    # Check if the request was successful
    if response.status_code == 200:
        # Parse the HTML content
        soup = BeautifulSoup(response.content, 'html.parser')

        # Extract search results (adjust the selector based on the website structure)
        results = soup.find_all('a', class_='overlay-link z-index-1')

        # Check if there are any results
        if results:
            # Get the first result's link
            article_url = results[0]['href']

            # Fetch the HTML content of the article
            article_response = requests.get(article_url, headers=headers)
            if article_response.status_code == 200:
                # Extract and return the article text
                return extract_article_text(article_response.content)
            else:
                return f"Failed to retrieve the article page. Status code: {article_response.status_code}"
        else:
            return "No search results found."
    else:
        return f"Failed to retrieve search results. Status code: {response.status_code}"
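# --- Hypothetical smoke test (not part of the original): the fund tools above
# expect a module-level `data` DataFrame. If one has not been loaded yet, a tiny
# sample frame makes them runnable in isolation; LangChain @tool objects are
# called via .invoke() rather than directly.
if "data" not in globals():
    data = pd.DataFrame({
        "Fund Name": ["Alpha Fund", "Beta Fund", "Gamma Fund"],
        "Region": ["Europe", "Asia", "Europe"],
        "Profit Margin": ["12.5%", "8.1%", "15.3%"],
    })

print(filter_funds.invoke({"text": "Europe"}))
print(max_min_profit_margin_funds.invoke({}))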
def extract_article_text(html_content):
    soup = BeautifulSoup(html_content, 'html.parser')

    # Look for common article structures on GRESB's website
    article = soup.find('div', class_='wysiwyg')
    if article:
        # Includes paragraphs, lists, blockquotes, and subheadings
        paragraphs = article.find_all(['p', 'ul', 'blockquote', 'h2', 'h4'])
        return "\n".join(p.get_text(strip=True) for p in paragraphs)
    return "No article content found."
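# --- A minimal sketch of wiring the three tools into a tool-calling agent.
# Assumptions not present in the original: the langchain-openai dependency, the
# ChatOpenAI model name, and the system prompt wording are all illustrative.
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_openai import ChatOpenAI  # assumed LLM provider

llm = ChatOpenAI(model="gpt-4o", temperature=0)  # hypothetical model choice

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a fund-analysis assistant. Use the tools when helpful."),
    ("human", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),  # required by tool-calling agents
])

tools = [filter_funds, max_min_profit_margin_funds, gresb]
agent = create_tool_calling_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

# Example run: the agent decides which tool(s) to call for the question.
result = executor.invoke({"input": "Which fund has the highest profit margin?"})
print(result["output"])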